Index: stable/11/sys/dev/ocs_fc/ocs.h =================================================================== --- stable/11/sys/dev/ocs_fc/ocs.h (revision 344013) +++ stable/11/sys/dev/ocs_fc/ocs.h (revision 344014) @@ -1,261 +1,284 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * OCS bsd driver common include file */ #if !defined(__OCS_H__) #define __OCS_H__ #include "ocs_os.h" #include "ocs_utils.h" #include "ocs_hw.h" #include "ocs_scsi.h" #include "ocs_io.h" #include "version.h" #define DRV_NAME "ocs_fc" #define DRV_VERSION \ STR_BE_MAJOR "." STR_BE_MINOR "." STR_BE_BUILD "." 
STR_BE_BRANCH /** * @brief Interrupt context */ typedef struct ocs_intr_ctx_s { uint32_t vec; /** Zero based interrupt vector */ void *softc; /** software context for interrupt */ char name[64]; /** label for this context */ } ocs_intr_ctx_t; +typedef struct ocs_fc_rport_db_s { + uint32_t node_id; + uint32_t state; + uint8_t is_target; + uint8_t is_initiator; + + uint32_t port_id; + uint64_t wwnn; + uint64_t wwpn; + uint32_t gone_timer; + +} ocs_fc_target_t; + +#define OCS_TGT_STATE_NONE 0 /* Empty DB slot */ +#define OCS_TGT_STATE_VALID 1 /* Valid*/ +#define OCS_TGT_STATE_LOST 2 /* LOST*/ + typedef struct ocs_fcport_s { - struct cam_sim *sim; - struct cam_path *path; - uint32_t role; + ocs_t *ocs; + struct cam_sim *sim; + struct cam_path *path; + uint32_t role; - ocs_tgt_resource_t targ_rsrc_wildcard; - ocs_tgt_resource_t targ_rsrc[OCS_MAX_LUN]; + ocs_fc_target_t tgt[OCS_MAX_TARGETS]; + int lost_device_time; + struct callout ldt; /* device lost timer */ + struct task ltask; + + ocs_tgt_resource_t targ_rsrc_wildcard; + ocs_tgt_resource_t targ_rsrc[OCS_MAX_LUN]; ocs_vport_spec_t *vport; } ocs_fcport; #define FCPORT(ocs, chan) (&((ocs_fcport *)(ocs)->fcports)[(chan)]) /** * @brief Driver's context */ struct ocs_softc { device_t dev; struct cdev *cdev; ocs_pci_reg_t reg[PCI_MAX_BAR]; uint32_t instance_index; const char *desc; uint32_t irqid; struct resource *irq; void *tag; ocs_intr_ctx_t intr_ctx; uint32_t n_vec; bus_dma_tag_t dmat; /** Parent DMA tag */ bus_dma_tag_t buf_dmat;/** IO buffer DMA tag */ char display_name[OCS_DISPLAY_NAME_LENGTH]; uint16_t pci_vendor; uint16_t pci_device; uint16_t pci_subsystem_vendor; uint16_t pci_subsystem_device; char businfo[16]; const char *driver_version; const char *fw_version; const char *model; ocs_hw_t hw; ocs_rlock_t lock; /**< device wide lock */ ocs_xport_e ocs_xport; ocs_xport_t *xport; /**< pointer to transport object */ ocs_domain_t *domain; ocs_list_t domain_list; uint32_t domain_instance_count; void (*domain_list_empty_cb)(ocs_t *ocs, void *arg); void *domain_list_empty_cb_arg; uint8_t enable_ini; uint8_t enable_tgt; uint8_t fc_type; int ctrlmask; uint8_t explicit_buffer_list; uint8_t external_loopback; uint8_t skip_hw_teardown; int speed; int topology; int ethernet_license; int num_scsi_ios; uint8_t enable_hlm; uint32_t hlm_group_size; uint32_t max_isr_time_msec; /*>> Maximum ISR time */ uint32_t auto_xfer_rdy_size; /*>> Max sized write to use auto xfer rdy*/ uint8_t esoc; int logmask; char *hw_war_version; uint32_t num_vports; uint32_t target_io_timer_sec; uint32_t hw_bounce; uint8_t rq_threads; uint8_t rq_selection_policy; uint8_t rr_quanta; char *filter_def; uint32_t max_remote_nodes; /* * tgt_rscn_delay - delay in kicking off RSCN processing * (nameserver queries) after receiving an RSCN on the target. * This prevents thrashing of nameserver requests due to a huge burst of * RSCNs received in a short period of time. * Note: this is only valid when target RSCN handling is enabled -- see * ctrlmask. */ time_t tgt_rscn_delay_msec; /*>> minimum target RSCN delay */ /* * tgt_rscn_period - determines maximum frequency when processing * back-to-back RSCNs; e.g. if this value is 30, there will never be * any more than 1 RSCN handling per 30s window. This prevents * initiators on a faulty link generating many RSCN from causing the * target to continually query the nameserver. 
* Note: This is only valid when target RSCN handling is enabled */ time_t tgt_rscn_period_msec; /*>> minimum target RSCN period */ uint32_t enable_task_set_full; uint32_t io_in_use; uint32_t io_high_watermark; /**< used to send task set full */ - struct mtx sim_lock; + struct mtx sim_lock; uint32_t config_tgt:1, /**< Configured to support target mode */ config_ini:1; /**< Configured to support initiator mode */ uint32_t nodedb_mask; /**< Node debugging mask */ char modeldesc[64]; char serialnum[64]; char fwrev[64]; char sli_intf[9]; ocs_ramlog_t *ramlog; ocs_textbuf_t ddump_saved; ocs_mgmt_functions_t *mgmt_functions; ocs_mgmt_functions_t *tgt_mgmt_functions; ocs_mgmt_functions_t *ini_mgmt_functions; ocs_err_injection_e err_injection; uint32_t cmd_err_inject; time_t delay_value_msec; bool attached; struct mtx dbg_lock; struct cam_devq *devq; ocs_fcport *fcports; void* tgt_ocs; }; static inline void ocs_device_lock_init(ocs_t *ocs) { ocs_rlock_init(ocs, &ocs->lock, "ocsdevicelock"); } static inline int32_t ocs_device_lock_try(ocs_t *ocs) { return ocs_rlock_try(&ocs->lock); } static inline void ocs_device_lock(ocs_t *ocs) { ocs_rlock_acquire(&ocs->lock); } static inline void ocs_device_unlock(ocs_t *ocs) { ocs_rlock_release(&ocs->lock); } static inline void ocs_device_lock_free(ocs_t *ocs) { ocs_rlock_free(&ocs->lock); } extern int32_t ocs_device_detach(ocs_t *ocs); extern int32_t ocs_device_attach(ocs_t *ocs); #define ocs_is_initiator_enabled() (ocs->enable_ini) #define ocs_is_target_enabled() (ocs->enable_tgt) #include "ocs_xport.h" #include "ocs_domain.h" #include "ocs_sport.h" #include "ocs_node.h" #include "ocs_unsol.h" #include "ocs_scsi.h" #include "ocs_ioctl.h" static inline ocs_io_t * ocs_io_alloc(ocs_t *ocs) { return ocs_io_pool_io_alloc(ocs->xport->io_pool); } static inline void ocs_io_free(ocs_t *ocs, ocs_io_t *io) { ocs_io_pool_io_free(ocs->xport->io_pool, io); } #endif /* __OCS_H__ */ Index: stable/11/sys/dev/ocs_fc/ocs_cam.c =================================================================== --- stable/11/sys/dev/ocs_fc/ocs_cam.c (revision 344013) +++ stable/11/sys/dev/ocs_fc/ocs_cam.c (revision 344014) @@ -1,2640 +1,2826 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @defgroup scsi_api_target SCSI Target API * @defgroup scsi_api_initiator SCSI Initiator API * @defgroup cam_api Common Access Method (CAM) API * @defgroup cam_io CAM IO */ /** * @file * Provides CAM functionality. */ #include "ocs.h" #include "ocs_scsi.h" #include "ocs_device.h" /* Default IO timeout value for initiators is 30 seconds */ #define OCS_CAM_IO_TIMEOUT 30 typedef struct { ocs_scsi_sgl_t *sgl; uint32_t sgl_max; uint32_t sgl_count; int32_t rc; } ocs_dmamap_load_arg_t; static void ocs_action(struct cam_sim *, union ccb *); static void ocs_poll(struct cam_sim *); static ocs_tgt_resource_t *ocs_tgt_resource_get(ocs_fcport *, struct ccb_hdr *, uint32_t *); static int32_t ocs_tgt_resource_abort(struct ocs_softc *, ocs_tgt_resource_t *); static uint32_t ocs_abort_initiator_io(struct ocs_softc *ocs, union ccb *accb); static void ocs_abort_inot(struct ocs_softc *ocs, union ccb *ccb); static void ocs_abort_atio(struct ocs_softc *ocs, union ccb *ccb); static int32_t ocs_target_tmf_cb(ocs_io_t *, ocs_scsi_io_status_e, uint32_t, void *); static int32_t ocs_io_abort_cb(ocs_io_t *, ocs_scsi_io_status_e, uint32_t, void *); static int32_t ocs_task_set_full_or_busy(ocs_io_t *io); static int32_t ocs_initiator_tmf_cb(ocs_io_t *, ocs_scsi_io_status_e, ocs_scsi_cmd_resp_t *, uint32_t, void *); static uint32_t ocs_fcp_change_role(struct ocs_softc *ocs, ocs_fcport *fcp, uint32_t new_role); +static void ocs_ldt(void *arg); +static void ocs_ldt_task(void *arg, int pending); +static void ocs_delete_target(ocs_t *ocs, ocs_fcport *fcp, int tgt); +uint32_t ocs_add_new_tgt(ocs_node_t *node, ocs_fcport *fcp); +uint32_t ocs_update_tgt(ocs_node_t *node, ocs_fcport *fcp, uint32_t tgt_id); + +int32_t ocs_tgt_find(ocs_fcport *fcp, ocs_node_t *node); + static inline ocs_io_t *ocs_scsi_find_io(struct ocs_softc *ocs, uint32_t tag) { return ocs_io_get_instance(ocs, tag); } static inline void ocs_target_io_free(ocs_io_t *io) { io->tgt_io.state = OCS_CAM_IO_FREE; io->tgt_io.flags = 0; io->tgt_io.app = NULL; ocs_scsi_io_complete(io); if(io->ocs->io_in_use != 0) atomic_subtract_acq_32(&io->ocs->io_in_use, 1); } static int32_t ocs_attach_port(ocs_t *ocs, int chan) { struct cam_sim *sim = NULL; struct cam_path *path = NULL; uint32_t max_io = ocs_scsi_get_property(ocs, OCS_SCSI_MAX_IOS); ocs_fcport *fcp = FCPORT(ocs, chan); if (NULL == (sim = cam_sim_alloc(ocs_action, ocs_poll, device_get_name(ocs->dev), ocs, device_get_unit(ocs->dev), &ocs->sim_lock, max_io, max_io, ocs->devq))) { device_printf(ocs->dev, "Can't allocate SIM\n"); return 1; } mtx_lock(&ocs->sim_lock); if (CAM_SUCCESS != xpt_bus_register(sim, ocs->dev, chan)) { device_printf(ocs->dev, "Can't register bus %d\n", 0); mtx_unlock(&ocs->sim_lock); cam_sim_free(sim, FALSE); return 1; } mtx_unlock(&ocs->sim_lock); if (CAM_REQ_CMP != xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)) { device_printf(ocs->dev, "Can't create path\n"); xpt_bus_deregister(cam_sim_path(sim)); 
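		/* Unwind: the bus has been deregistered; drop sim_lock and
		 * free the SIM before failing the attach. */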
		mtx_unlock(&ocs->sim_lock);
		cam_sim_free(sim, FALSE);
		return 1;
	}
-
+
+	fcp->ocs = ocs;
	fcp->sim  = sim;
	fcp->path = path;

-	return 0;
+	callout_init_mtx(&fcp->ldt, &ocs->sim_lock, 0);
+	TASK_INIT(&fcp->ltask, 1, ocs_ldt_task, fcp);

+	return 0;
}

static int32_t
ocs_detach_port(ocs_t *ocs, int32_t chan)
{
	ocs_fcport *fcp = NULL;
	struct cam_sim	*sim = NULL;
	struct cam_path	*path = NULL;

	fcp = FCPORT(ocs, chan);

	sim = fcp->sim;
	path = fcp->path;

+	callout_drain(&fcp->ldt);
+	ocs_ldt_task(fcp, 0);
+
	if (fcp->sim) {
		mtx_lock(&ocs->sim_lock);
			ocs_tgt_resource_abort(ocs, &fcp->targ_rsrc_wildcard);

			if (path) {
				xpt_async(AC_LOST_DEVICE, path, NULL);
				xpt_free_path(path);
				fcp->path = NULL;
			}

			xpt_bus_deregister(cam_sim_path(sim));

			cam_sim_free(sim, FALSE);
			fcp->sim = NULL;
		mtx_unlock(&ocs->sim_lock);
	}

	return 0;
}

int32_t
ocs_cam_attach(ocs_t *ocs)
{
	struct cam_devq	*devq = NULL;
	int	i = 0;
	uint32_t max_io = ocs_scsi_get_property(ocs, OCS_SCSI_MAX_IOS);

	if (NULL == (devq = cam_simq_alloc(max_io))) {
		device_printf(ocs->dev, "Can't allocate SIMQ\n");
		return -1;
	}

	ocs->devq = devq;

	if (mtx_initialized(&ocs->sim_lock) == 0) {
		mtx_init(&ocs->sim_lock, "ocs_sim_lock", NULL, MTX_DEF);
	}

	for (i = 0; i < (ocs->num_vports + 1); i++) {
		if (ocs_attach_port(ocs, i)) {
			ocs_log_err(ocs, "Attach port failed for chan: %d\n", i);
			goto detach_port;
		}
	}

	ocs->io_high_watermark = max_io;
	ocs->io_in_use = 0;
	return 0;

detach_port:
	while (--i >= 0) {
		ocs_detach_port(ocs, i);
	}

	cam_simq_free(ocs->devq);
	if (mtx_initialized(&ocs->sim_lock))
		mtx_destroy(&ocs->sim_lock);

	return 1;
}

int32_t
ocs_cam_detach(ocs_t *ocs)
{
	int	i = 0;

	for (i = (ocs->num_vports); i >= 0; i--) {
		ocs_detach_port(ocs, i);
	}

	cam_simq_free(ocs->devq);

	if (mtx_initialized(&ocs->sim_lock))
		mtx_destroy(&ocs->sim_lock);

	return 0;
}

/***************************************************************************
 * Functions required by SCSI base driver API
 */

/**
 * @ingroup scsi_api_target
 * @brief Attach driver to the BSD SCSI layer (a.k.a. CAM)
 *
 * Allocates + initializes CAM related resources and attaches to the CAM
 *
 * @param ocs the driver instance's software context
 *
 * @return 0 on success, non-zero otherwise
 */
int32_t
ocs_scsi_tgt_new_device(ocs_t *ocs)
{
	ocs->enable_task_set_full = ocs_scsi_get_property(ocs,
					OCS_SCSI_ENABLE_TASK_SET_FULL);
	ocs_log_debug(ocs, "task set full processing is %s\n",
		ocs->enable_task_set_full ? "enabled" : "disabled");

	return 0;
}

/**
 * @ingroup scsi_api_target
 * @brief Tears down target members of ocs structure.
 *
 * Called by OS code when device is removed.
 *
 * @param ocs pointer to ocs
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_tgt_del_device(ocs_t *ocs)
{
	return 0;
}

/**
 * @ingroup scsi_api_target
 * @brief accept new domain notification
 *
 * Called by the base driver when a new domain is discovered. A target-server
 * will use this call to prepare for new remote node notifications
 * arising from ocs_scsi_new_initiator().
 *
 * The domain context has an element ocs_scsi_tgt_domain_t tgt_domain
 * which is declared by the target-server code and is used for target-server
 * private data.
 *
 * This function will only be called if the base-driver has been enabled for
 * target capability.
 *
 * Note that this call is made to target-server backends,
 * the ocs_scsi_ini_new_domain() function is called to initiator-client backends.
 *
 * @param domain pointer to domain
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_tgt_new_domain(ocs_domain_t *domain)
{
	return 0;
}

/**
 * @ingroup scsi_api_target
 * @brief accept domain lost notification
 *
 * Called by base-driver when a domain goes away. A target-server will
 * use this call to clean up all domain scoped resources.
 *
 * Note that this call is made to target-server backends,
 * the ocs_scsi_ini_del_domain() function is called to initiator-client backends.
 *
 * @param domain pointer to domain
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
void
ocs_scsi_tgt_del_domain(ocs_domain_t *domain)
{
}

/**
 * @ingroup scsi_api_target
 * @brief accept new sli port (sport) notification
 *
 * Called by the base driver when a new sport is discovered. A target-server
 * will use this call to prepare for new remote node notifications
 * arising from ocs_scsi_new_initiator().
 *
 * The domain context has an element ocs_scsi_tgt_sport_t tgt_sport
 * which is declared by the target-server code and is used for
 * target-server private data.
 *
 * This function will only be called if the base-driver has been enabled for
 * target capability.
 *
 * Note that this call is made to target-server backends,
 * the ocs_scsi_ini_new_sport() function is called to initiator-client backends.
 *
 * @param sport pointer to SLI port
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_tgt_new_sport(ocs_sport_t *sport)
{
	ocs_t *ocs = sport->ocs;

	if(!sport->is_vport) {
		sport->tgt_data = FCPORT(ocs, 0);
	}

	return 0;
}

/**
 * @ingroup scsi_api_target
 * @brief accept SLI port gone notification
 *
 * Called by base-driver when a sport goes away. A target-server will
 * use this call to clean up all sport scoped resources.
 *
 * Note that this call is made to target-server backends,
 * the ocs_scsi_ini_del_sport() is called to initiator-client backends.
 *
 * @param sport pointer to SLI port
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
void
ocs_scsi_tgt_del_sport(ocs_sport_t *sport)
{
	return;
}

/**
 * @ingroup scsi_api_target
 * @brief receive notification of a new SCSI initiator node
 *
 * Sent by base driver to notify a target-server of the presence of a new
 * remote initiator. The target-server may use this call to prepare for
 * inbound IO from this node.
 *
 * The ocs_node_t structure has an element of type ocs_scsi_tgt_node_t named
 * tgt_node that is declared and used by a target-server for private
 * information.
 *
 * This function is only called if the target capability is enabled in the
 * driver.
 *
 * @param node pointer to new remote initiator node
 *
 * @return returns 0 for success, a negative error code value for failure.
 *
 * @note
 */
int32_t
ocs_scsi_new_initiator(ocs_node_t *node)
{
	ocs_t	*ocs = node->ocs;
	struct ac_contract ac;
	struct ac_device_changed *adc;

	ocs_fcport *fcp = NULL;

	fcp = node->sport->tgt_data;
	if (fcp == NULL) {
		ocs_log_err(ocs, "FCP is NULL \n");
		return 1;
	}

	/*
	 * Update the IO watermark by decrementing it by the
	 * number of IOs reserved for each initiator.
	 */
	atomic_subtract_acq_32(&ocs->io_high_watermark, OCS_RSVD_INI_IO);

	ac.contract_number = AC_CONTRACT_DEV_CHG;
	adc = (struct ac_device_changed *) ac.contract_data;
	adc->wwpn = ocs_node_get_wwpn(node);
	adc->port = node->rnode.fc_id;
	adc->target = node->instance_index;
	adc->arrived = 1;
	xpt_async(AC_CONTRACT, fcp->path, &ac);

	return 0;
}

/**
 * @ingroup scsi_api_target
 * @brief validate new initiator
 *
 * Sent by base driver to validate a remote initiator. The target-server
 * returns TRUE if this initiator should be accepted.
 *
 * This function is only called if the target capability is enabled in the
 * driver.
 *
 * @param node pointer to remote initiator node to validate
 *
 * @return TRUE if initiator should be accepted, FALSE if it should be rejected
 *
 * @note
 */
int32_t
ocs_scsi_validate_initiator(ocs_node_t *node)
{
	return 1;
}

/**
 * @ingroup scsi_api_target
 * @brief Delete a SCSI initiator node
 *
 * Sent by base driver to notify a target-server that a remote initiator
 * is now gone. The base driver will have terminated all outstanding IOs
 * and the target-server will receive appropriate completions.
 *
 * This function is only called if the base driver is enabled for
 * target capability.
 *
 * @param node pointer to node being deleted
 * @param reason Reason why initiator is gone.
 *
 * @return OCS_SCSI_CALL_COMPLETE to indicate that all work was completed
 *
 * @note
 */
int32_t
ocs_scsi_del_initiator(ocs_node_t *node, ocs_scsi_del_initiator_reason_e reason)
{
	ocs_t	*ocs = node->ocs;
	struct ac_contract ac;
	struct ac_device_changed *adc;

	ocs_fcport *fcp = NULL;

	fcp = node->sport->tgt_data;
	if (fcp == NULL) {
		ocs_log_err(ocs, "FCP is NULL \n");
		return 1;
	}

	ac.contract_number = AC_CONTRACT_DEV_CHG;
	adc = (struct ac_device_changed *) ac.contract_data;
	adc->wwpn = ocs_node_get_wwpn(node);
	adc->port = node->rnode.fc_id;
	adc->target = node->instance_index;
	adc->arrived = 0;
	xpt_async(AC_CONTRACT, fcp->path, &ac);

	if (reason == OCS_SCSI_INITIATOR_MISSING) {
		return OCS_SCSI_CALL_COMPLETE;
	}

	/*
	 * Update the IO watermark by incrementing it by the
	 * number of IOs reserved for each initiator.
	 */
	atomic_add_acq_32(&ocs->io_high_watermark, OCS_RSVD_INI_IO);

	return OCS_SCSI_CALL_COMPLETE;
}

/**
 * @ingroup scsi_api_target
 * @brief receive FCP SCSI Command
 *
 * Called by the base driver when a new SCSI command has been received. The
 * target-server will process the command, and issue data and/or response phase
 * requests to the base driver.
 *
 * The IO context (ocs_io_t) structure has an element of type
 * ocs_scsi_tgt_io_t named tgt_io that is declared and used by
 * a target-server for private information.
 *
 * @param io pointer to IO context
 * @param lun LUN for this IO
 * @param cdb pointer to SCSI CDB
 * @param cdb_len length of CDB in bytes
 * @param flags command flags
 *
 * @return returns 0 for success, a negative error code value for failure.
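 *
 * Example (an illustrative sketch only, not part of this driver: how a CAM
 * target server that queued the ATIO might continue the command it receives
 * here; names follow the stock CAM API):
 * @code
 * // The ATIO completed by this routine carries the CDB, tag_id, and
 * // initiator id; the target server answers with a CTIO that reuses the
 * // same tag so the driver can find the ocs_io_t again.
 * union ccb *ctio = xpt_alloc_ccb();
 * xpt_setup_ccb(&ctio->ccb_h, atio->ccb_h.path, CAM_PRIORITY_NORMAL);
 * ctio->ccb_h.func_code = XPT_CONT_TARGET_IO;
 * ctio->csio.tag_id = atio->tag_id;        // must match the ATIO
 * ctio->ccb_h.flags |= CAM_SEND_STATUS;    // final CTIO carries status
 * xpt_action(ctio);
 * @endcode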
 */
int32_t
ocs_scsi_recv_cmd(ocs_io_t *io, uint64_t lun, uint8_t *cdb, uint32_t cdb_len,
		  uint32_t flags)
{
	ocs_t *ocs = io->ocs;
	struct ccb_accept_tio *atio = NULL;
	ocs_node_t	*node = io->node;
	ocs_tgt_resource_t *trsrc = NULL;
	int32_t rc = -1;
	ocs_fcport	*fcp = NULL;

	fcp = node->sport->tgt_data;
	if (fcp == NULL) {
		ocs_log_err(ocs, "FCP is NULL \n");
		return 1;
	}

	atomic_add_acq_32(&ocs->io_in_use, 1);

	/* set target io timeout */
	io->timeout = ocs->target_io_timer_sec;

	if (ocs->enable_task_set_full &&
		(ocs->io_in_use >= ocs->io_high_watermark)) {
		return ocs_task_set_full_or_busy(io);
	} else {
		atomic_store_rel_32(&io->node->tgt_node.busy_sent, FALSE);
	}

	if ((lun < OCS_MAX_LUN) && fcp->targ_rsrc[lun].enabled) {
		trsrc = &fcp->targ_rsrc[lun];
	} else if (fcp->targ_rsrc_wildcard.enabled) {
		trsrc = &fcp->targ_rsrc_wildcard;
	}

	if (trsrc) {
		atio = (struct ccb_accept_tio *)STAILQ_FIRST(&trsrc->atio);
	}

	if (atio) {
		STAILQ_REMOVE_HEAD(&trsrc->atio, sim_links.stqe);

		atio->ccb_h.status = CAM_CDB_RECVD;
		atio->ccb_h.target_lun = lun;

		atio->sense_len = 0;
		atio->init_id = node->instance_index;
		atio->tag_id = io->tag;
		atio->ccb_h.ccb_io_ptr = io;

		if (flags & OCS_SCSI_CMD_SIMPLE)
			atio->tag_action = MSG_SIMPLE_Q_TAG;
		else if (flags & FCP_TASK_ATTR_HEAD_OF_QUEUE)
			atio->tag_action = MSG_HEAD_OF_Q_TAG;
		else if (flags & FCP_TASK_ATTR_ORDERED)
			atio->tag_action = MSG_ORDERED_Q_TAG;
		else
			atio->tag_action = 0;

		atio->cdb_len = cdb_len;
		ocs_memcpy(atio->cdb_io.cdb_bytes, cdb, cdb_len);

		io->tgt_io.flags = 0;
		io->tgt_io.state = OCS_CAM_IO_COMMAND;
		io->tgt_io.lun = lun;

		xpt_done((union ccb *)atio);
		rc = 0;
	} else {
		device_printf(
			ocs->dev, "%s: no ATIO for LUN %lx (en=%s) OX_ID %#x\n",
			__func__, (unsigned long)lun,
			trsrc ? (trsrc->enabled ? "T" : "F") : "X",
			be16toh(io->init_task_tag));

		io->tgt_io.state = OCS_CAM_IO_MAX;
		ocs_target_io_free(io);
	}

	return rc;
}

/**
 * @ingroup scsi_api_target
 * @brief receive FCP SCSI Command with first burst data.
 *
 * Receive a new FCP SCSI command from the base driver with first burst data.
 *
 * @param io pointer to IO context
 * @param lun LUN for this IO
 * @param cdb pointer to SCSI CDB
 * @param cdb_len length of CDB in bytes
 * @param flags command flags
 * @param first_burst_buffers first burst buffers
 * @param first_burst_buffer_count The number of bytes received in the first burst
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_recv_cmd_first_burst(ocs_io_t *io, uint64_t lun, uint8_t *cdb,
			      uint32_t cdb_len, uint32_t flags,
			      ocs_dma_t first_burst_buffers[],
			      uint32_t first_burst_buffer_count)
{
	return -1;
}

/**
 * @ingroup scsi_api_target
 * @brief receive a TMF command IO
 *
 * Called by the base driver when a SCSI TMF command has been received. The
 * target-server will process the command, aborting commands as needed, and post
 * a response using ocs_scsi_send_resp()
 *
 * The IO context (ocs_io_t) structure has an element of type ocs_scsi_tgt_io_t
 * named tgt_io that is declared and used by a target-server for private
 * information.
 *
 * If the target-server walks the node's active_ios linked list, and starts IO
 * abort processing, the code must be sure not to abort the IO passed into the
 * ocs_scsi_recv_tmf() command.
 *
 * @param tmfio pointer to IO context
 * @param lun logical unit value
 * @param cmd command request
 * @param abortio pointer to IO object to abort for TASK_ABORT (NULL for all other TMF)
 * @param flags flags
 *
 * @return returns 0 for success, a negative error code value for failure.
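 *
 * Example (an illustrative sketch of the caveat above; ocs_list_foreach and
 * ocs_scsi_tgt_abort_io() are existing driver primitives, the loop itself and
 * abort_done_cb are hypothetical backend code):
 * @code
 * ocs_io_t *item;
 * ocs_list_foreach(&node->active_ios, item) {
 *         if (item == tmfio)
 *                 continue;   // never abort the TMF IO itself
 *         ocs_scsi_tgt_abort_io(item, abort_done_cb, tmfio);
 * }
 * @endcode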
*/ int32_t ocs_scsi_recv_tmf(ocs_io_t *tmfio, uint64_t lun, ocs_scsi_tmf_cmd_e cmd, ocs_io_t *abortio, uint32_t flags) { ocs_t *ocs = tmfio->ocs; ocs_node_t *node = tmfio->node; ocs_tgt_resource_t *trsrc = NULL; struct ccb_immediate_notify *inot = NULL; int32_t rc = -1; ocs_fcport *fcp = NULL; fcp = node->sport->tgt_data; if (fcp == NULL) { ocs_log_err(ocs, "FCP is NULL \n"); return 1; } if ((lun < OCS_MAX_LUN) && fcp->targ_rsrc[lun].enabled) { trsrc = &fcp->targ_rsrc[lun]; } else if (fcp->targ_rsrc_wildcard.enabled) { trsrc = &fcp->targ_rsrc_wildcard; } device_printf(tmfio->ocs->dev, "%s: io=%p cmd=%#x LU=%lx en=%s\n", __func__, tmfio, cmd, (unsigned long)lun, trsrc ? (trsrc->enabled ? "T" : "F") : "X"); if (trsrc) { inot = (struct ccb_immediate_notify *)STAILQ_FIRST(&trsrc->inot); } - if (!inot) { + if (!inot) { device_printf( ocs->dev, "%s: no INOT for LUN %llx (en=%s) OX_ID %#x\n", __func__, (unsigned long long)lun, trsrc ? (trsrc->enabled ? "T" : "F") : "X", be16toh(tmfio->init_task_tag)); if (abortio) { ocs_scsi_io_complete(abortio); } ocs_scsi_io_complete(tmfio); goto ocs_scsi_recv_tmf_out; } tmfio->tgt_io.app = abortio; STAILQ_REMOVE_HEAD(&trsrc->inot, sim_links.stqe); inot->tag_id = tmfio->tag; inot->seq_id = tmfio->tag; if ((lun < OCS_MAX_LUN) && fcp->targ_rsrc[lun].enabled) { inot->initiator_id = node->instance_index; } else { inot->initiator_id = CAM_TARGET_WILDCARD; } inot->ccb_h.status = CAM_MESSAGE_RECV; inot->ccb_h.target_lun = lun; switch (cmd) { case OCS_SCSI_TMF_ABORT_TASK: inot->arg = MSG_ABORT_TASK; inot->seq_id = abortio->tag; device_printf(ocs->dev, "%s: ABTS IO.%#x st=%#x\n", __func__, abortio->tag, abortio->tgt_io.state); abortio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_RECV; abortio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_NOTIFY; break; case OCS_SCSI_TMF_QUERY_TASK_SET: device_printf(ocs->dev, "%s: OCS_SCSI_TMF_QUERY_TASK_SET not supported\n", __func__); STAILQ_INSERT_TAIL(&trsrc->inot, &inot->ccb_h, sim_links.stqe); ocs_scsi_io_complete(tmfio); goto ocs_scsi_recv_tmf_out; break; case OCS_SCSI_TMF_ABORT_TASK_SET: inot->arg = MSG_ABORT_TASK_SET; break; case OCS_SCSI_TMF_CLEAR_TASK_SET: inot->arg = MSG_CLEAR_TASK_SET; break; case OCS_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT: inot->arg = MSG_QUERY_ASYNC_EVENT; break; case OCS_SCSI_TMF_LOGICAL_UNIT_RESET: inot->arg = MSG_LOGICAL_UNIT_RESET; break; case OCS_SCSI_TMF_CLEAR_ACA: inot->arg = MSG_CLEAR_ACA; break; case OCS_SCSI_TMF_TARGET_RESET: inot->arg = MSG_TARGET_RESET; break; default: device_printf(ocs->dev, "%s: unsupported TMF %#x\n", __func__, cmd); STAILQ_INSERT_TAIL(&trsrc->inot, &inot->ccb_h, sim_links.stqe); goto ocs_scsi_recv_tmf_out; } rc = 0; xpt_print(inot->ccb_h.path, "%s: func=%#x stat=%#x id=%#x lun=%#x" " flags=%#x tag=%#x seq=%#x ini=%#x arg=%#x\n", __func__, inot->ccb_h.func_code, inot->ccb_h.status, inot->ccb_h.target_id, (unsigned int)inot->ccb_h.target_lun, inot->ccb_h.flags, inot->tag_id, inot->seq_id, inot->initiator_id, inot->arg); xpt_done((union ccb *)inot); if (abortio) { abortio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_DEV; rc = ocs_scsi_tgt_abort_io(abortio, ocs_io_abort_cb, tmfio); } ocs_scsi_recv_tmf_out: return rc; } /** * @ingroup scsi_api_initiator * @brief Initializes any initiator fields on the ocs structure. * * Called by OS initialization code when a new device is discovered. * * @param ocs pointer to ocs * * @return returns 0 for success, a negative error code value for failure. 
 */
int32_t
ocs_scsi_ini_new_device(ocs_t *ocs)
{
	return 0;
}

/**
 * @ingroup scsi_api_initiator
 * @brief Tears down initiator members of ocs structure.
 *
 * Called by OS code when device is removed.
 *
 * @param ocs pointer to ocs
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_ini_del_device(ocs_t *ocs)
{
	return 0;
}

/**
 * @ingroup scsi_api_initiator
 * @brief accept new domain notification
 *
 * Called by the base driver when a new domain is discovered. An
 * initiator-client will use this call to prepare for new remote node
 * notifications arising from ocs_scsi_new_target().
 *
 * The domain context has the element ocs_scsi_ini_domain_t ini_domain
 * which is declared by the initiator-client code and is used for
 * initiator-client private data.
 *
 * This function will only be called if the base-driver has been enabled for
 * initiator capability.
 *
 * Note that this call is made to initiator-client backends,
 * the ocs_scsi_tgt_new_domain() function is called to target-server backends.
 *
 * @param domain pointer to domain
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_ini_new_domain(ocs_domain_t *domain)
{
	return 0;
}

/**
 * @ingroup scsi_api_initiator
 * @brief accept domain lost notification
 *
 * Called by base-driver when a domain goes away. An initiator-client will
 * use this call to clean up all domain scoped resources.
 *
 * This function will only be called if the base-driver has been enabled for
 * initiator capability.
 *
 * Note that this call is made to initiator-client backends,
 * the ocs_scsi_tgt_del_domain() function is called to target-server backends.
 *
 * @param domain pointer to domain
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
void
ocs_scsi_ini_del_domain(ocs_domain_t *domain)
{
}

/**
 * @ingroup scsi_api_initiator
 * @brief accept new sli port notification
 *
 * Called by the base driver when a new sli port (sport) is discovered.
 * An initiator-client will use this call to prepare for new remote node
 * notifications arising from ocs_scsi_new_target().
 *
 * This function will only be called if the base-driver has been enabled for
 * initiator capability.
 *
 * Note that this call is made to initiator-client backends,
 * the ocs_scsi_tgt_new_sport() function is called to target-server backends.
 *
 * @param sport pointer to sport
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_ini_new_sport(ocs_sport_t *sport)
{
	ocs_t *ocs = sport->ocs;

	if(!sport->is_vport) {
		sport->tgt_data = FCPORT(ocs, 0);
	}

	return 0;
}

/**
 * @ingroup scsi_api_initiator
 * @brief accept sli port gone notification
 *
 * Called by base-driver when a sport goes away. An initiator-client will
 * use this call to clean up all sport scoped resources.
 *
 * Note that this call is made to initiator-client backends,
 * the ocs_scsi_tgt_del_sport() function is called to target-server backends.
 *
 * @param sport pointer to SLI port
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
void
ocs_scsi_ini_del_sport(ocs_sport_t *sport)
{
}

void
ocs_scsi_sport_deleted(ocs_sport_t *sport)
{
	ocs_t *ocs = sport->ocs;
	ocs_fcport *fcp = NULL;

	ocs_xport_stats_t value;

	if (!sport->is_vport) {
		return;
	}

	fcp = sport->tgt_data;

	ocs_xport_status(ocs->xport, OCS_XPORT_PORT_STATUS, &value);

	if (value.value == 0) {
		ocs_log_debug(ocs, "PORT offline,.. skipping\n");
skipping\n"); return; } if ((fcp->role != KNOB_ROLE_NONE)) { if(fcp->vport->sport != NULL) { ocs_log_debug(ocs,"sport is not NULL, skipping\n"); return; } ocs_sport_vport_alloc(ocs->domain, fcp->vport); return; } } +int32_t +ocs_tgt_find(ocs_fcport *fcp, ocs_node_t *node) +{ + ocs_fc_target_t *tgt = NULL; + uint32_t i; + + for (i = 0; i < OCS_MAX_TARGETS; i++) { + tgt = &fcp->tgt[i]; + + if (tgt->state == OCS_TGT_STATE_NONE) + continue; + + if (ocs_node_get_wwpn(node) == tgt->wwpn) { + return i; + } + } + + return -1; +} + /** * @ingroup scsi_api_initiator * @brief receive notification of a new SCSI target node * * Sent by base driver to notify an initiator-client of the presense of a new * remote target. The initiator-server may use this call to prepare for * inbound IO from this node. * * This function is only called if the base driver is enabled for * initiator capability. * * @param node pointer to new remote initiator node * * @return none * * @note */ -int32_t -ocs_scsi_new_target(ocs_node_t *node) + +uint32_t +ocs_update_tgt(ocs_node_t *node, ocs_fcport *fcp, uint32_t tgt_id) { + ocs_fc_target_t *tgt = NULL; + + tgt = &fcp->tgt[tgt_id]; + + tgt->node_id = node->instance_index; + tgt->state = OCS_TGT_STATE_VALID; + + tgt->port_id = node->rnode.fc_id; + tgt->wwpn = ocs_node_get_wwpn(node); + tgt->wwnn = ocs_node_get_wwnn(node); + return 0; +} + +uint32_t +ocs_add_new_tgt(ocs_node_t *node, ocs_fcport *fcp) +{ + uint32_t i; + struct ocs_softc *ocs = node->ocs; union ccb *ccb = NULL; - ocs_fcport *fcp = NULL; - - fcp = node->sport->tgt_data; - if (fcp == NULL) { - printf("%s:FCP is NULL \n", __func__); - return 0; + for (i = 0; i < OCS_MAX_TARGETS; i++) { + if (fcp->tgt[i].state == OCS_TGT_STATE_NONE) + break; } if (NULL == (ccb = xpt_alloc_ccb_nowait())) { device_printf(ocs->dev, "%s: ccb allocation failed\n", __func__); return -1; } - + if (CAM_REQ_CMP != xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(fcp->sim), - node->instance_index, CAM_LUN_WILDCARD)) { + i, CAM_LUN_WILDCARD)) { device_printf( ocs->dev, "%s: target path creation failed\n", __func__); xpt_free_ccb(ccb); return -1; } + ocs_update_tgt(node, fcp, i); xpt_rescan(ccb); + return 0; +} +int32_t +ocs_scsi_new_target(ocs_node_t *node) +{ + ocs_fcport *fcp = NULL; + int32_t i; + + fcp = node->sport->tgt_data; + if (fcp == NULL) { + printf("%s:FCP is NULL \n", __func__); + return 0; + } + + i = ocs_tgt_find(fcp, node); + + if (i < 0) { + ocs_add_new_tgt(node, fcp); + return 0; + } + + ocs_update_tgt(node, fcp, i); return 0; } +static void +ocs_delete_target(ocs_t *ocs, ocs_fcport *fcp, int tgt) +{ + struct cam_path *cpath = NULL; + + if (!fcp->sim) { + device_printf(ocs->dev, "%s: calling with NULL sim\n", __func__); + return; + } + + if (CAM_REQ_CMP == xpt_create_path(&cpath, NULL, cam_sim_path(fcp->sim), + tgt, CAM_LUN_WILDCARD)) { + xpt_async(AC_LOST_DEVICE, cpath, NULL); + + xpt_free_path(cpath); + } +} + +/* + * Device Lost Timer Function- when we have decided that a device was lost, + * we wait a specific period of time prior to telling the OS about lost device. + * + * This timer function gets activated when the device was lost. + * This function fires once a second and then scans the port database + * for devices that are marked dead but still have a virtual target assigned. + * We decrement a counter for that port database entry, and when it hits zero, + * we tell the OS the device was lost. Timer will be stopped when the device + * comes back active or removed from the OS. 
+ */
+static void
+ocs_ldt(void *arg)
+{
+	ocs_fcport *fcp = arg;
+
+	taskqueue_enqueue(taskqueue_thread, &fcp->ltask);
+}
+
+static void
+ocs_ldt_task(void *arg, int pending)
+{
+	ocs_fcport *fcp = arg;
+	ocs_t	*ocs = fcp->ocs;
+	int i, more_to_do = 0;
+	ocs_fc_target_t *tgt = NULL;
+
+	for (i = 0; i < OCS_MAX_TARGETS; i++) {
+		tgt = &fcp->tgt[i];
+
+		if (tgt->state != OCS_TGT_STATE_LOST) {
+			continue;
+		}
+
+		if ((tgt->gone_timer != 0) && (ocs->attached)) {
+			tgt->gone_timer -= 1;
+			more_to_do++;
+			continue;
+		}
+
+		if (tgt->is_target) {
+			tgt->is_target = 0;
+			ocs_delete_target(ocs, fcp, i);
+		}
+
+		tgt->state = OCS_TGT_STATE_NONE;
+	}
+
+	if (more_to_do) {
+		callout_reset(&fcp->ldt, hz, ocs_ldt, fcp);
+	} else {
+		callout_deactivate(&fcp->ldt);
+	}
+
+}
+
/**
 * @ingroup scsi_api_initiator
 * @brief Delete a SCSI target node
 *
 * Sent by base driver to notify an initiator-client that a remote target
 * is now gone. The base driver will have terminated all outstanding IOs
 * and the initiator-client will receive appropriate completions.
 *
 * The ocs_node_t structure has an element of type ocs_scsi_ini_node_t named
 * ini_node that is declared and used by an initiator-client for private
 * information.
 *
 * This function is only called if the base driver is enabled for
 * initiator capability.
 *
 * @param node pointer to node being deleted
 * @param reason reason for deleting the target
 *
 * @return Returns OCS_SCSI_CALL_ASYNC if target delete is queued for async
 * completion and OCS_SCSI_CALL_COMPLETE if call completed or error.
 *
 * @note
 */
int32_t
ocs_scsi_del_target(ocs_node_t *node, ocs_scsi_del_target_reason_e reason)
{
	struct ocs_softc *ocs = node->ocs;
-	struct cam_path *cpath = NULL;
	ocs_fcport *fcp = NULL;
+	ocs_fc_target_t *tgt = NULL;
+	uint32_t tgt_id;

	fcp = node->sport->tgt_data;
	if (fcp == NULL) {
		ocs_log_err(ocs,"FCP is NULL \n");
		return 0;
	}

-	if (!fcp->sim) {
-		device_printf(ocs->dev, "%s: calling with NULL sim\n", __func__);
-		return OCS_SCSI_CALL_COMPLETE;
-	}
+	tgt_id = ocs_tgt_find(fcp, node);
+
+	tgt = &fcp->tgt[tgt_id];
+
+	/* If in shutdown, delete the target right away. */
+	if(!ocs->attached) {
+		ocs_delete_target(ocs, fcp, tgt_id);
+	} else {

-	if (CAM_REQ_CMP == xpt_create_path(&cpath, NULL, cam_sim_path(fcp->sim),
-			node->instance_index, CAM_LUN_WILDCARD)) {
-		xpt_async(AC_LOST_DEVICE, cpath, NULL);
-
-		xpt_free_path(cpath);
+		tgt->state = OCS_TGT_STATE_LOST;
+		tgt->gone_timer = 30;
+		if (!callout_active(&fcp->ldt)) {
+			callout_reset(&fcp->ldt, hz, ocs_ldt, fcp);
+		}
	}
-	return OCS_SCSI_CALL_COMPLETE;
+
+	return 0;
}

/**
 * @brief Initialize SCSI IO
 *
 * Initialize SCSI IO, this function is called once per IO during IO pool
 * allocation so that the target server may initialize any of its own private
 * data.
 *
 * @param io pointer to SCSI IO object
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_tgt_io_init(ocs_io_t *io)
{
	return 0;
}

/**
 * @brief Uninitialize SCSI IO
 *
 * Uninitialize target server private data in a SCSI io object
 *
 * @param io pointer to SCSI IO object
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
int32_t
ocs_scsi_tgt_io_exit(ocs_io_t *io)
{
	return 0;
}

/**
 * @brief Initialize SCSI IO
 *
 * Initialize SCSI IO, this function is called once per IO during IO pool
 * allocation so that the initiator client may initialize any of its own private
 * data.
 *
 * @param io pointer to SCSI IO object
 *
 * @return returns 0 for success, a negative error code value for failure.
*/ int32_t ocs_scsi_ini_io_init(ocs_io_t *io) { return 0; } /** * @brief Uninitialize SCSI IO * * Uninitialize initiator client private data in a SCSI io object * * @param io pointer to SCSI IO object * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_ini_io_exit(ocs_io_t *io) { return 0; } /* * End of functions required by SCSI base driver API ***************************************************************************/ static __inline void ocs_set_ccb_status(union ccb *ccb, cam_status status) { ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= status; } static int32_t ocs_task_set_full_or_busy_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg) { ocs_target_io_free(io); return 0; } /** * @brief send SCSI task set full or busy status * * A SCSI task set full or busy response is sent depending on whether * another IO is already active on the LUN. * * @param io pointer to IO context * * @return returns 0 for success, a negative error code value for failure. */ static int32_t ocs_task_set_full_or_busy(ocs_io_t *io) { ocs_scsi_cmd_resp_t rsp = { 0 }; ocs_t *ocs = io->ocs; /* * If there is another command for the LUN, then send task set full, * if this is the first one, then send the busy status. * * if 'busy sent' is FALSE, set it to TRUE and send BUSY * otherwise send FULL */ if (atomic_cmpset_acq_32(&io->node->tgt_node.busy_sent, FALSE, TRUE)) { rsp.scsi_status = SCSI_STATUS_BUSY; /* Busy */ printf("%s: busy [%s] tag=%x iiu=%d ihw=%d\n", __func__, io->node->display_name, io->tag, io->ocs->io_in_use, io->ocs->io_high_watermark); } else { rsp.scsi_status = SCSI_STATUS_TASK_SET_FULL; /* Task set full */ printf("%s: full tag=%x iiu=%d\n", __func__, io->tag, io->ocs->io_in_use); } /* Log a message here indicating a busy or task set full state */ if (OCS_LOG_ENABLE_Q_FULL_BUSY_MSG(ocs)) { /* Log Task Set Full */ if (rsp.scsi_status == SCSI_STATUS_TASK_SET_FULL) { /* Task Set Full Message */ ocs_log_info(ocs, "OCS CAM TASK SET FULL. 
Tasks >= %d\n", ocs->io_high_watermark); } else if (rsp.scsi_status == SCSI_STATUS_BUSY) { /* Log Busy Message */ ocs_log_info(ocs, "OCS CAM SCSI BUSY\n"); } } /* Send the response */ return ocs_scsi_send_resp(io, 0, &rsp, ocs_task_set_full_or_busy_cb, NULL); } /** * @ingroup cam_io * @brief Process target IO completions * * @param io * @param scsi_status did the IO complete successfully * @param flags * @param arg application specific pointer provided in the call to ocs_target_io() * * @todo */ static int32_t ocs_scsi_target_io_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg) { union ccb *ccb = arg; struct ccb_scsiio *csio = &ccb->csio; struct ocs_softc *ocs = csio->ccb_h.ccb_ocs_ptr; uint32_t cam_dir = ccb->ccb_h.flags & CAM_DIR_MASK; uint32_t io_is_done = (ccb->ccb_h.flags & CAM_SEND_STATUS) == CAM_SEND_STATUS; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; if (CAM_DIR_NONE != cam_dir) { bus_dmasync_op_t op; if (CAM_DIR_IN == cam_dir) { op = BUS_DMASYNC_POSTREAD; } else { op = BUS_DMASYNC_POSTWRITE; } /* Synchronize the DMA memory with the CPU and free the mapping */ bus_dmamap_sync(ocs->buf_dmat, io->tgt_io.dmap, op); if (io->tgt_io.flags & OCS_CAM_IO_F_DMAPPED) { bus_dmamap_unload(ocs->buf_dmat, io->tgt_io.dmap); } } if (io->tgt_io.sendresp) { io->tgt_io.sendresp = 0; ocs_scsi_cmd_resp_t resp = { 0 }; io->tgt_io.state = OCS_CAM_IO_RESP; resp.scsi_status = scsi_status; if (ccb->ccb_h.flags & CAM_SEND_SENSE) { resp.sense_data = (uint8_t *)&csio->sense_data; resp.sense_data_length = csio->sense_len; } resp.residual = io->exp_xfer_len - io->transferred; return ocs_scsi_send_resp(io, 0, &resp, ocs_scsi_target_io_cb, ccb); } switch (scsi_status) { case OCS_SCSI_STATUS_GOOD: ocs_set_ccb_status(ccb, CAM_REQ_CMP); break; case OCS_SCSI_STATUS_ABORTED: ocs_set_ccb_status(ccb, CAM_REQ_ABORTED); break; default: ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } if (io_is_done) { if ((io->tgt_io.flags & OCS_CAM_IO_F_ABORT_NOTIFY) == 0) { ocs_target_io_free(io); } } else { io->tgt_io.state = OCS_CAM_IO_DATA_DONE; /*device_printf(ocs->dev, "%s: CTIO state=%d tag=%#x\n", __func__, io->tgt_io.state, io->tag);*/ } xpt_done(ccb); return 0; } /** * @note 1. Since the CCB is assigned to the ocs_io_t on an XPT_CONT_TARGET_IO * action, if an initiator aborts a command prior to the SIM receiving * a CTIO, the IO's CCB will be NULL. */ static int32_t ocs_io_abort_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg) { struct ocs_softc *ocs = NULL; ocs_io_t *tmfio = arg; ocs_scsi_tmf_resp_e tmf_resp = OCS_SCSI_TMF_FUNCTION_COMPLETE; int32_t rc = 0; ocs = io->ocs; io->tgt_io.flags &= ~OCS_CAM_IO_F_ABORT_DEV; /* A good status indicates the IO was aborted and will be completed in * the IO's completion handler. Handle the other cases here. 
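	 * (OCS_SCSI_STATUS_NO_IO is also treated as success below: the IO was
	 * no longer active, so there is nothing left to abort.)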
*/ switch (scsi_status) { case OCS_SCSI_STATUS_GOOD: break; case OCS_SCSI_STATUS_NO_IO: break; default: device_printf(ocs->dev, "%s: unhandled status %d\n", __func__, scsi_status); tmf_resp = OCS_SCSI_TMF_FUNCTION_REJECTED; rc = -1; } ocs_scsi_send_tmf_resp(tmfio, tmf_resp, NULL, ocs_target_tmf_cb, NULL); return rc; } /** * @ingroup cam_io * @brief Process initiator IO completions * * @param io * @param scsi_status did the IO complete successfully * @param rsp pointer to response buffer * @param flags * @param arg application specific pointer provided in the call to ocs_target_io() * * @todo */ static int32_t ocs_scsi_initiator_io_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, ocs_scsi_cmd_resp_t *rsp, uint32_t flags, void *arg) { union ccb *ccb = arg; struct ccb_scsiio *csio = &ccb->csio; struct ocs_softc *ocs = csio->ccb_h.ccb_ocs_ptr; uint32_t cam_dir = ccb->ccb_h.flags & CAM_DIR_MASK; cam_status ccb_status= CAM_REQ_CMP_ERR; if (CAM_DIR_NONE != cam_dir) { bus_dmasync_op_t op; if (CAM_DIR_IN == cam_dir) { op = BUS_DMASYNC_POSTREAD; } else { op = BUS_DMASYNC_POSTWRITE; } /* Synchronize the DMA memory with the CPU and free the mapping */ bus_dmamap_sync(ocs->buf_dmat, io->tgt_io.dmap, op); if (io->tgt_io.flags & OCS_CAM_IO_F_DMAPPED) { bus_dmamap_unload(ocs->buf_dmat, io->tgt_io.dmap); } } if (scsi_status == OCS_SCSI_STATUS_CHECK_RESPONSE) { csio->scsi_status = rsp->scsi_status; if (SCSI_STATUS_OK != rsp->scsi_status) { ccb_status = CAM_SCSI_STATUS_ERROR; } csio->resid = rsp->residual; if (rsp->residual > 0) { uint32_t length = rsp->response_wire_length; /* underflow */ if (csio->dxfer_len == (length + csio->resid)) { ccb_status = CAM_REQ_CMP; } } else if (rsp->residual < 0) { ccb_status = CAM_DATA_RUN_ERR; } if ((rsp->sense_data_length) && !(ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR))) { uint32_t sense_len = 0; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; if (rsp->sense_data_length < csio->sense_len) { csio->sense_resid = csio->sense_len - rsp->sense_data_length; sense_len = rsp->sense_data_length; } else { csio->sense_resid = 0; sense_len = csio->sense_len; } ocs_memcpy(&csio->sense_data, rsp->sense_data, sense_len); } } else if (scsi_status != OCS_SCSI_STATUS_GOOD) { ccb_status = CAM_REQ_CMP_ERR; + ocs_set_ccb_status(ccb, ccb_status); + csio->ccb_h.status |= CAM_DEV_QFRZN; + xpt_freeze_devq(csio->ccb_h.path, 1); + } else { ccb_status = CAM_REQ_CMP; } ocs_set_ccb_status(ccb, ccb_status); ocs_scsi_io_free(io); csio->ccb_h.ccb_io_ptr = NULL; csio->ccb_h.ccb_ocs_ptr = NULL; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; xpt_done(ccb); return 0; } /** * @brief Load scatter-gather list entries into an IO * * This routine relies on the driver instance's software context pointer and * the IO object pointer having been already assigned to hooks in the CCB. * Although the routine does not return success/fail, callers can look at the * n_sge member to determine if the mapping failed (0 on failure). 
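 * (In this driver the status actually lives in the ocs_dmamap_load_arg_t
 * cookie: the callback sets rc non-zero on any failure and sgl_count to the
 * number of entries filled in.)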
* * @param arg pointer to the CAM ccb for this IO * @param seg DMA address/length pairs * @param nseg number of DMA address/length pairs * @param error any errors while mapping the IO */ static void ocs_scsi_dmamap_load(void *arg, bus_dma_segment_t *seg, int nseg, int error) { ocs_dmamap_load_arg_t *sglarg = (ocs_dmamap_load_arg_t*) arg; if (error) { printf("%s: seg=%p nseg=%d error=%d\n", __func__, seg, nseg, error); sglarg->rc = -1; } else { uint32_t i = 0; uint32_t c = 0; if ((sglarg->sgl_count + nseg) > sglarg->sgl_max) { printf("%s: sgl_count=%d nseg=%d max=%d\n", __func__, sglarg->sgl_count, nseg, sglarg->sgl_max); sglarg->rc = -2; return; } for (i = 0, c = sglarg->sgl_count; i < nseg; i++, c++) { sglarg->sgl[c].addr = seg[i].ds_addr; sglarg->sgl[c].len = seg[i].ds_len; } sglarg->sgl_count = c; sglarg->rc = 0; } } /** * @brief Build a scatter-gather list from a CAM CCB * * @param ocs the driver instance's software context * @param ccb pointer to the CCB * @param io pointer to the previously allocated IO object * @param sgl pointer to SGL * @param sgl_max number of entries in sgl * * @return 0 on success, non-zero otherwise */ static int32_t ocs_build_scsi_sgl(struct ocs_softc *ocs, union ccb *ccb, ocs_io_t *io, ocs_scsi_sgl_t *sgl, uint32_t sgl_max) { ocs_dmamap_load_arg_t dmaarg; int32_t err = 0; if (!ocs || !ccb || !io || !sgl) { printf("%s: bad param o=%p c=%p i=%p s=%p\n", __func__, ocs, ccb, io, sgl); return -1; } io->tgt_io.flags &= ~OCS_CAM_IO_F_DMAPPED; dmaarg.sgl = sgl; dmaarg.sgl_count = 0; dmaarg.sgl_max = sgl_max; dmaarg.rc = 0; err = bus_dmamap_load_ccb(ocs->buf_dmat, io->tgt_io.dmap, ccb, ocs_scsi_dmamap_load, &dmaarg, 0); if (err || dmaarg.rc) { device_printf( ocs->dev, "%s: bus_dmamap_load_ccb error (%d %d)\n", __func__, err, dmaarg.rc); return -1; } io->tgt_io.flags |= OCS_CAM_IO_F_DMAPPED; return dmaarg.sgl_count; } /** * @ingroup cam_io * @brief Send a target IO * * @param ocs the driver instance's software context * @param ccb pointer to the CCB * * @return 0 on success, non-zero otherwise */ static int32_t ocs_target_io(struct ocs_softc *ocs, union ccb *ccb) { struct ccb_scsiio *csio = &ccb->csio; ocs_io_t *io = NULL; uint32_t cam_dir = ccb->ccb_h.flags & CAM_DIR_MASK; bool sendstatus = ccb->ccb_h.flags & CAM_SEND_STATUS; uint32_t xferlen = csio->dxfer_len; int32_t rc = 0; io = ocs_scsi_find_io(ocs, csio->tag_id); if (io == NULL) { ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); panic("bad tag value"); return 1; } /* Received an ABORT TASK for this IO */ if (io->tgt_io.flags & OCS_CAM_IO_F_ABORT_RECV) { /*device_printf(ocs->dev, "%s: XPT_CONT_TARGET_IO state=%d tag=%#x xid=%#x flags=%#x\n", __func__, io->tgt_io.state, io->tag, io->init_task_tag, io->tgt_io.flags);*/ io->tgt_io.flags |= OCS_CAM_IO_F_ABORT_CAM; if (ccb->ccb_h.flags & CAM_SEND_STATUS) { ocs_set_ccb_status(ccb, CAM_REQ_CMP); ocs_target_io_free(io); return 1; } ocs_set_ccb_status(ccb, CAM_REQ_ABORTED); return 1; } io->tgt_io.app = ccb; ocs_set_ccb_status(ccb, CAM_REQ_INPROG); ccb->ccb_h.status |= CAM_SIM_QUEUED; csio->ccb_h.ccb_ocs_ptr = ocs; csio->ccb_h.ccb_io_ptr = io; if ((sendstatus && (xferlen == 0))) { ocs_scsi_cmd_resp_t resp = { 0 }; ocs_assert(ccb->ccb_h.flags & CAM_SEND_STATUS, -1); io->tgt_io.state = OCS_CAM_IO_RESP; resp.scsi_status = csio->scsi_status; if (ccb->ccb_h.flags & CAM_SEND_SENSE) { resp.sense_data = (uint8_t *)&csio->sense_data; resp.sense_data_length = csio->sense_len; } resp.residual = io->exp_xfer_len - io->transferred; rc = ocs_scsi_send_resp(io, 0, &resp, ocs_scsi_target_io_cb, 
							ccb);
	} else if (xferlen != 0) {
		ocs_scsi_sgl_t sgl[OCS_FC_MAX_SGL];
		int32_t sgl_count = 0;

		io->tgt_io.state = OCS_CAM_IO_DATA;

		if (sendstatus)
			io->tgt_io.sendresp = 1;

		sgl_count = ocs_build_scsi_sgl(ocs, ccb, io, sgl, ARRAY_SIZE(sgl));
		if (sgl_count > 0) {
			if (cam_dir == CAM_DIR_IN) {
				rc = ocs_scsi_send_rd_data(io, 0, NULL, sgl,
						sgl_count, csio->dxfer_len,
						ocs_scsi_target_io_cb, ccb);
			} else if (cam_dir == CAM_DIR_OUT) {
				rc = ocs_scsi_recv_wr_data(io, 0, NULL, sgl,
						sgl_count, csio->dxfer_len,
						ocs_scsi_target_io_cb, ccb);
			} else {
				device_printf(ocs->dev, "%s:"
						" unknown CAM direction %#x\n",
						__func__, cam_dir);
				ocs_set_ccb_status(ccb, CAM_REQ_INVALID);
				rc = 1;
			}
		} else {
			device_printf(ocs->dev, "%s: building SGL failed\n",
						__func__);
			ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
			rc = 1;
		}
	} else {
		device_printf(ocs->dev, "%s: Wrong value xfer and sendstatus"
					" are 0 \n", __func__);
		ocs_set_ccb_status(ccb, CAM_REQ_INVALID);
		rc = 1;
	}

	if (rc) {
		ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		io->tgt_io.state = OCS_CAM_IO_DATA_DONE;
		device_printf(ocs->dev, "%s: CTIO state=%d tag=%#x\n",
				__func__, io->tgt_io.state, io->tag);

		if ((sendstatus && (xferlen == 0))) {
			ocs_target_io_free(io);
		}
	}

	return rc;
}

static int32_t
ocs_target_tmf_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status,
					uint32_t flags, void *arg)
{

	/*device_printf(io->ocs->dev, "%s: tag=%x io=%p s=%#x\n",
				__func__, io->tag, io, scsi_status);*/
	ocs_scsi_io_complete(io);

	return 0;
}

/**
 * @ingroup cam_io
 * @brief Send an initiator IO
 *
 * @param ocs the driver instance's software context
 * @param ccb pointer to the CCB
 *
 * @return 0 on success; a CAM status value (for example CAM_REQUEUE_REQ or
 * CAM_SEL_TIMEOUT) to be set on the CCB; a negative value on other errors
 */
static int32_t
ocs_initiator_io(struct ocs_softc *ocs, union ccb *ccb)
{
	int32_t rc;
	struct ccb_scsiio *csio = &ccb->csio;
	struct ccb_hdr *ccb_h = &csio->ccb_h;
	ocs_node_t *node = NULL;
	ocs_io_t *io = NULL;
	ocs_scsi_sgl_t sgl[OCS_FC_MAX_SGL];
	int32_t sgl_count;

-	node = ocs_node_get_instance(ocs, ccb_h->target_id);
+	ocs_fcport	*fcp = NULL;
+	fcp = FCPORT(ocs, cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path)));
+	if (fcp == NULL) {
+		device_printf(ocs->dev, "%s: fcp is NULL\n", __func__);
+		return -1;
+	}
+
+	if (fcp->tgt[ccb_h->target_id].state == OCS_TGT_STATE_LOST) {
+		device_printf(ocs->dev, "%s: device LOST %d\n", __func__,
+				ccb_h->target_id);
+		return CAM_REQUEUE_REQ;
+	}
+
+	if (fcp->tgt[ccb_h->target_id].state == OCS_TGT_STATE_NONE) {
+		device_printf(ocs->dev, "%s: device not ready %d\n", __func__,
+				ccb_h->target_id);
+		return CAM_SEL_TIMEOUT;
+	}
+
+	node = ocs_node_get_instance(ocs, fcp->tgt[ccb_h->target_id].node_id);
	if (node == NULL) {
		device_printf(ocs->dev, "%s: no device %d\n", __func__,
				ccb_h->target_id);
-		return CAM_DEV_NOT_THERE;
+		return CAM_SEL_TIMEOUT;
	}

	if (!node->targ) {
-		device_printf(ocs->dev, "%s: not target device %d\n", __func__,
+		device_printf(ocs->dev, "%s: not target device %d\n", __func__,
				ccb_h->target_id);
-		return CAM_DEV_NOT_THERE;
-	}
+		return CAM_SEL_TIMEOUT;
+	}

	io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_ORIGINATOR);
	if (io == NULL) {
		device_printf(ocs->dev, "%s: unable to alloc IO\n", __func__);
		return -1;
	}

	/* even though this is the initiator path, use the target structure,
	 * as ocs_build_scsi_sgl() only references the tgt_io part of an
	 * ocs_io_t */
	io->tgt_io.app = ccb;

	csio->ccb_h.ccb_ocs_ptr = ocs;
	csio->ccb_h.ccb_io_ptr = io;

	sgl_count = ocs_build_scsi_sgl(ocs, ccb, io, sgl, ARRAY_SIZE(sgl));
	if (sgl_count < 0) {
		ocs_scsi_io_free(io);
		device_printf(ocs->dev, "%s: building SGL failed\n", __func__);
		return -1;
	}

	if
(ccb->ccb_h.timeout == CAM_TIME_INFINITY) { io->timeout = 0; } else if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) { io->timeout = OCS_CAM_IO_TIMEOUT; } else { io->timeout = ccb->ccb_h.timeout; } switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_NONE: rc = ocs_scsi_send_nodata_io(node, io, ccb_h->target_lun, ccb->ccb_h.flags & CAM_CDB_POINTER ? csio->cdb_io.cdb_ptr: csio->cdb_io.cdb_bytes, csio->cdb_len, ocs_scsi_initiator_io_cb, ccb); - break; + break; case CAM_DIR_IN: rc = ocs_scsi_send_rd_io(node, io, ccb_h->target_lun, ccb->ccb_h.flags & CAM_CDB_POINTER ? csio->cdb_io.cdb_ptr: csio->cdb_io.cdb_bytes, csio->cdb_len, NULL, sgl, sgl_count, csio->dxfer_len, ocs_scsi_initiator_io_cb, ccb); break; case CAM_DIR_OUT: rc = ocs_scsi_send_wr_io(node, io, ccb_h->target_lun, ccb->ccb_h.flags & CAM_CDB_POINTER ? csio->cdb_io.cdb_ptr: csio->cdb_io.cdb_bytes, csio->cdb_len, NULL, sgl, sgl_count, csio->dxfer_len, ocs_scsi_initiator_io_cb, ccb); break; default: panic("%s invalid data direction %08x\n", __func__, ccb->ccb_h.flags); break; } return rc; } static uint32_t ocs_fcp_change_role(struct ocs_softc *ocs, ocs_fcport *fcp, uint32_t new_role) { uint32_t rc = 0, was = 0, i = 0; ocs_vport_spec_t *vport = fcp->vport; for (was = 0, i = 0; i < (ocs->num_vports + 1); i++) { - if (FCPORT(ocs, i)->role != KNOB_ROLE_NONE) - was++; - } + if (FCPORT(ocs, i)->role != KNOB_ROLE_NONE) + was++; + } // Physical port if ((was == 0) || (vport == NULL)) { fcp->role = new_role; if (vport == NULL) { ocs->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0; ocs->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0; } else { vport->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0; vport->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0; } rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE); if (rc) { ocs_log_debug(ocs, "port offline failed : %d\n", rc); } rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); if (rc) { ocs_log_debug(ocs, "port online failed : %d\n", rc); } return 0; } if ((fcp->role != KNOB_ROLE_NONE)){ fcp->role = new_role; vport->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0; vport->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0; /* New Sport will be created in sport deleted cb */ return ocs_sport_vport_del(ocs, ocs->domain, vport->wwpn, vport->wwnn); } fcp->role = new_role; vport->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0; vport->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 
1:0; - if (fcp->role != KNOB_ROLE_NONE) { + if (fcp->role != KNOB_ROLE_NONE) { return ocs_sport_vport_alloc(ocs->domain, vport); } return (0); } /** * @ingroup cam_api * @brief Process CAM actions * * The driver supplies this routine to the CAM during intialization and * is the main entry point for processing CAM Control Blocks (CCB) * * @param sim pointer to the SCSI Interface Module * @param ccb CAM control block * * @todo * - populate path inquiry data via info retrieved from SLI port */ static void ocs_action(struct cam_sim *sim, union ccb *ccb) { struct ocs_softc *ocs = (struct ocs_softc *)cam_sim_softc(sim); struct ccb_hdr *ccb_h = &ccb->ccb_h; int32_t rc, bus; bus = cam_sim_bus(sim); switch (ccb_h->func_code) { case XPT_SCSI_IO: - + if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { - if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { - ccb->ccb_h.status = CAM_REQ_INVALID; + if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { + ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); - break; - } - } + break; + } + } rc = ocs_initiator_io(ocs, ccb); if (0 == rc) { ocs_set_ccb_status(ccb, CAM_REQ_INPROG | CAM_SIM_QUEUED); break; } else { + if (rc == CAM_REQUEUE_REQ) { + cam_freeze_devq(ccb->ccb_h.path); + cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 100, 0); + ccb->ccb_h.status = CAM_REQUEUE_REQ; + xpt_done(ccb); + break; + } + ccb->ccb_h.status &= ~CAM_SIM_QUEUED; if (rc > 0) { ocs_set_ccb_status(ccb, rc); } else { ocs_set_ccb_status(ccb, CAM_SEL_TIMEOUT); } } xpt_done(ccb); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; struct ccb_pathinq_settings_fc *fc = &cpi->xport_specific.fc; uint64_t wwn = 0; ocs_xport_stats_t value; cpi->version_num = 1; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC; if (ocs->ocs_xport == OCS_XPORT_FC) { cpi->transport = XPORT_FC; } else { cpi->transport = XPORT_UNKNOWN; } cpi->transport_version = 0; /* Set the transport parameters of the SIM */ ocs_xport_status(ocs->xport, OCS_XPORT_LINK_SPEED, &value); fc->bitrate = value.value * 1000; /* speed in Mbps */ wwn = *((uint64_t *)ocs_scsi_get_property_ptr(ocs, OCS_SCSI_WWPN)); fc->wwpn = be64toh(wwn); wwn = *((uint64_t *)ocs_scsi_get_property_ptr(ocs, OCS_SCSI_WWNN)); fc->wwnn = be64toh(wwn); if (ocs->domain && ocs->domain->attached) { fc->port = ocs->domain->sport->fc_id; } if (ocs->config_tgt) { cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; } cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; - cpi->hba_misc |= PIM_EXTLUNS | PIM_NOSCAN; + cpi->hba_misc |= PIM_EXTLUNS | PIM_NOSCAN; cpi->hba_inquiry = PI_TAG_ABLE; cpi->max_target = OCS_MAX_TARGETS; cpi->initiator_id = ocs->max_remote_nodes + 1; if (!ocs->enable_ini) { cpi->hba_misc |= PIM_NOINITIATOR; } cpi->max_lun = OCS_MAX_LUN; cpi->bus_id = cam_sim_bus(sim); /* Need to supply a base transfer speed prior to linking up * Worst case, this would be FC 1Gbps */ cpi->base_transfer_speed = 1 * 1000 * 1000; /* Calculate the max IO supported * Worst case would be an OS page per SGL entry */ cpi->maxio = PAGE_SIZE * (ocs_scsi_get_property(ocs, OCS_SCSI_MAX_SGL) - 1); strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Emulex", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; 
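		/* The CAM target id indexes the per-port tgt[] table, which
		 * records the node instance used to look up the remote node
		 * below. */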
ocs_xport_stats_t value; + ocs_fcport *fcp = FCPORT(ocs, bus); ocs_node_t *fnode = NULL; if (ocs->ocs_xport != OCS_XPORT_FC) { ocs_set_ccb_status(ccb, CAM_REQ_INVALID); xpt_done(ccb); break; } - fnode = ocs_node_get_instance(ocs, cts->ccb_h.target_id); + fnode = ocs_node_get_instance(ocs, fcp->tgt[cts->ccb_h.target_id].node_id); if (fnode == NULL) { ocs_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); break; } cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_FC; cts->transport_version = 2; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; /* speed in Mbps */ ocs_xport_status(ocs->xport, OCS_XPORT_LINK_SPEED, &value); fc->bitrate = value.value * 100; fc->wwpn = ocs_node_get_wwpn(fnode); fc->wwnn = ocs_node_get_wwnn(fnode); fc->port = fnode->rnode.fc_id; fc->valid = CTS_FC_VALID_SPEED | CTS_FC_VALID_WWPN | CTS_FC_VALID_WWNN | CTS_FC_VALID_PORT; ocs_set_ccb_status(ccb, CAM_REQ_CMP); xpt_done(ccb); break; } case XPT_SET_TRAN_SETTINGS: ocs_set_ccb_status(ccb, CAM_REQ_CMP); xpt_done(ccb); break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, TRUE); xpt_done(ccb); break; case XPT_GET_SIM_KNOB: { struct ccb_sim_knob *knob = &ccb->knob; uint64_t wwn = 0; ocs_fcport *fcp = FCPORT(ocs, bus); if (ocs->ocs_xport != OCS_XPORT_FC) { ocs_set_ccb_status(ccb, CAM_REQ_INVALID); xpt_done(ccb); break; } if (bus == 0) { wwn = *((uint64_t *)ocs_scsi_get_property_ptr(ocs, OCS_SCSI_WWNN)); knob->xport_specific.fc.wwnn = be64toh(wwn); wwn = *((uint64_t *)ocs_scsi_get_property_ptr(ocs, OCS_SCSI_WWPN)); knob->xport_specific.fc.wwpn = be64toh(wwn); } else { knob->xport_specific.fc.wwnn = fcp->vport->wwnn; knob->xport_specific.fc.wwpn = fcp->vport->wwpn; } knob->xport_specific.fc.role = fcp->role; knob->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE; ocs_set_ccb_status(ccb, CAM_REQ_CMP); xpt_done(ccb); break; } case XPT_SET_SIM_KNOB: { struct ccb_sim_knob *knob = &ccb->knob; bool role_changed = FALSE; ocs_fcport *fcp = FCPORT(ocs, bus); if (ocs->ocs_xport != OCS_XPORT_FC) { ocs_set_ccb_status(ccb, CAM_REQ_INVALID); xpt_done(ccb); break; } if (knob->xport_specific.fc.valid & KNOB_VALID_ADDRESS) { device_printf(ocs->dev, "%s: XPT_SET_SIM_KNOB wwnn=%llx wwpn=%llx\n", __func__, (unsigned long long)knob->xport_specific.fc.wwnn, (unsigned long long)knob->xport_specific.fc.wwpn); } if (knob->xport_specific.fc.valid & KNOB_VALID_ROLE) { switch (knob->xport_specific.fc.role) { case KNOB_ROLE_NONE: if (fcp->role != KNOB_ROLE_NONE) { role_changed = TRUE; } break; case KNOB_ROLE_TARGET: if (fcp->role != KNOB_ROLE_TARGET) { role_changed = TRUE; } break; case KNOB_ROLE_INITIATOR: if (fcp->role != KNOB_ROLE_INITIATOR) { role_changed = TRUE; } break; case KNOB_ROLE_BOTH: if (fcp->role != KNOB_ROLE_BOTH) { role_changed = TRUE; } break; default: device_printf(ocs->dev, "%s: XPT_SET_SIM_KNOB unsupported role: %d\n", __func__, knob->xport_specific.fc.role); } if (role_changed) { device_printf(ocs->dev, "BUS:%d XPT_SET_SIM_KNOB old_role: %d new_role: %d\n", bus, fcp->role, knob->xport_specific.fc.role); ocs_fcp_change_role(ocs, fcp, knob->xport_specific.fc.role); } } ocs_set_ccb_status(ccb, CAM_REQ_CMP); xpt_done(ccb); break; } case XPT_ABORT: { union ccb *accb = ccb->cab.abort_ccb; switch (accb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: ocs_abort_atio(ocs, ccb); break; case XPT_IMMEDIATE_NOTIFY: ocs_abort_inot(ocs, ccb); break; case XPT_SCSI_IO: rc = ocs_abort_initiator_io(ocs, accb); if (rc) { ccb->ccb_h.status = CAM_UA_ABORT; } else { 
ccb->ccb_h.status = CAM_REQ_CMP; } break; default: printf("abort of unknown func %#x\n", accb->ccb_h.func_code); ccb->ccb_h.status = CAM_REQ_INVALID; break; } break; } case XPT_RESET_BUS: if (ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE) == 0) { ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); ocs_set_ccb_status(ccb, CAM_REQ_CMP); } else { ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } xpt_done(ccb); break; case XPT_RESET_DEV: { ocs_node_t *node = NULL; ocs_io_t *io = NULL; int32_t rc = 0; + ocs_fcport *fcp = FCPORT(ocs, bus); - node = ocs_node_get_instance(ocs, ccb_h->target_id); + node = ocs_node_get_instance(ocs, fcp->tgt[ccb_h->target_id].node_id); if (node == NULL) { device_printf(ocs->dev, "%s: no device %d\n", __func__, ccb_h->target_id); ocs_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); break; } io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_ORIGINATOR); if (io == NULL) { device_printf(ocs->dev, "%s: unable to alloc IO\n", __func__); ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); xpt_done(ccb); break; } rc = ocs_scsi_send_tmf(node, io, NULL, ccb_h->target_lun, OCS_SCSI_TMF_LOGICAL_UNIT_RESET, NULL, 0, 0, /* sgl, sgl_count, length */ ocs_initiator_tmf_cb, NULL/*arg*/); if (rc) { ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } else { ocs_set_ccb_status(ccb, CAM_REQ_CMP); } - if (node->fcp2device) { + if (node->fcp2device) { ocs_reset_crn(node, ccb_h->target_lun); } xpt_done(ccb); break; } case XPT_EN_LUN: /* target support */ { ocs_tgt_resource_t *trsrc = NULL; uint32_t status = 0; ocs_fcport *fcp = FCPORT(ocs, bus); device_printf(ocs->dev, "XPT_EN_LUN %sable %d:%d\n", ccb->cel.enable ? "en" : "dis", ccb->ccb_h.target_id, (unsigned int)ccb->ccb_h.target_lun); trsrc = ocs_tgt_resource_get(fcp, &ccb->ccb_h, &status); if (trsrc) { trsrc->enabled = ccb->cel.enable; /* Abort all ATIO/INOT on LUN disable */ if (trsrc->enabled == FALSE) { ocs_tgt_resource_abort(ocs, trsrc); } else { STAILQ_INIT(&trsrc->atio); STAILQ_INIT(&trsrc->inot); } status = CAM_REQ_CMP; } ocs_set_ccb_status(ccb, status); xpt_done(ccb); break; } /* * The flow of target IOs in CAM is: * - CAM supplies a number of CCBs to the driver used for received * commands. * - when the driver receives a command, it copies the relevant * information to the CCB and returns it to the CAM using xpt_done() * - after the target server processes the request, it creates * a new CCB containing information on how to continue the IO and * passes that to the driver * - the driver processes the "continue IO" (a.k.a CTIO) CCB * - once the IO completes, the driver returns the CTIO to the CAM * using xpt_done() */ case XPT_ACCEPT_TARGET_IO: /* used to inform upper layer of received CDB (a.k.a. ATIO) */ case XPT_IMMEDIATE_NOTIFY: /* used to inform upper layer of other event (a.k.a. INOT) */ { ocs_tgt_resource_t *trsrc = NULL; uint32_t status = 0; ocs_fcport *fcp = FCPORT(ocs, bus); /*printf("XPT_%s %p\n", ccb_h->func_code == XPT_ACCEPT_TARGET_IO ? 
"ACCEPT_TARGET_IO" : "IMMEDIATE_NOTIFY", ccb);*/ trsrc = ocs_tgt_resource_get(fcp, &ccb->ccb_h, &status); if (trsrc == NULL) { ocs_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); break; } if (XPT_ACCEPT_TARGET_IO == ccb->ccb_h.func_code) { struct ccb_accept_tio *atio = NULL; atio = (struct ccb_accept_tio *)ccb; atio->init_id = 0x0badbeef; atio->tag_id = 0xdeadc0de; STAILQ_INSERT_TAIL(&trsrc->atio, &ccb->ccb_h, sim_links.stqe); } else { STAILQ_INSERT_TAIL(&trsrc->inot, &ccb->ccb_h, sim_links.stqe); } ccb->ccb_h.ccb_io_ptr = NULL; ccb->ccb_h.ccb_ocs_ptr = ocs; ocs_set_ccb_status(ccb, CAM_REQ_INPROG); /* * These actions give resources to the target driver. * If we didn't return here, this function would call * xpt_done(), signaling to the upper layers that an * IO or other event had arrived. */ break; } case XPT_NOTIFY_ACKNOWLEDGE: { ocs_io_t *io = NULL; ocs_io_t *abortio = NULL; /* Get the IO reference for this tag */ io = ocs_scsi_find_io(ocs, ccb->cna2.tag_id); if (io == NULL) { device_printf(ocs->dev, "%s: XPT_NOTIFY_ACKNOWLEDGE no IO with tag %#x\n", __func__, ccb->cna2.tag_id); ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); xpt_done(ccb); break; } abortio = io->tgt_io.app; if (abortio) { abortio->tgt_io.flags &= ~OCS_CAM_IO_F_ABORT_NOTIFY; device_printf(ocs->dev, "%s: XPT_NOTIFY_ACK state=%d tag=%#x xid=%#x" " flags=%#x\n", __func__, abortio->tgt_io.state, abortio->tag, abortio->init_task_tag, abortio->tgt_io.flags); /* TMF response was sent in abort callback */ } else { ocs_scsi_send_tmf_resp(io, OCS_SCSI_TMF_FUNCTION_COMPLETE, NULL, ocs_target_tmf_cb, NULL); } ocs_set_ccb_status(ccb, CAM_REQ_CMP); xpt_done(ccb); break; } case XPT_CONT_TARGET_IO: /* continue target IO, sending data/response (a.k.a. CTIO) */ if (ocs_target_io(ocs, ccb)) { device_printf(ocs->dev, "XPT_CONT_TARGET_IO failed flags=%x tag=%#x\n", ccb->ccb_h.flags, ccb->csio.tag_id); xpt_done(ccb); } break; default: device_printf(ocs->dev, "unhandled func_code = %#x\n", ccb_h->func_code); ccb_h->status = CAM_REQ_INVALID; xpt_done(ccb); break; } } /** * @ingroup cam_api * @brief Process events * * @param sim pointer to the SCSI Interface Module * */ static void ocs_poll(struct cam_sim *sim) { printf("%s\n", __func__); } static int32_t ocs_initiator_tmf_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, ocs_scsi_cmd_resp_t *rsp, uint32_t flags, void *arg) { int32_t rc = 0; switch (scsi_status) { case OCS_SCSI_STATUS_GOOD: case OCS_SCSI_STATUS_NO_IO: break; case OCS_SCSI_STATUS_CHECK_RESPONSE: if (rsp->response_data_length == 0) { ocs_log_test(io->ocs, "check response without data?!?\n"); rc = -1; break; } if (rsp->response_data[3] != 0) { ocs_log_test(io->ocs, "TMF status %08x\n", be32toh(*((uint32_t *)rsp->response_data))); rc = -1; break; } break; default: ocs_log_test(io->ocs, "status=%#x\n", scsi_status); rc = -1; } ocs_scsi_io_free(io); return rc; } /** * @brief lookup target resource structure * * Arbitrarily support * - wildcard target ID + LU * - 0 target ID + non-wildcard LU * * @param ocs the driver instance's software context * @param ccb_h pointer to the CCB header * @param status returned status value * * @return pointer to the target resource, NULL if none available (e.g. 
if LU * is not enabled) */ static ocs_tgt_resource_t *ocs_tgt_resource_get(ocs_fcport *fcp, struct ccb_hdr *ccb_h, uint32_t *status) { target_id_t tid = ccb_h->target_id; lun_id_t lun = ccb_h->target_lun; if (CAM_TARGET_WILDCARD == tid) { if (CAM_LUN_WILDCARD != lun) { *status = CAM_LUN_INVALID; return NULL; } return &fcp->targ_rsrc_wildcard; } else { if (lun < OCS_MAX_LUN) { return &fcp->targ_rsrc[lun]; } else { *status = CAM_LUN_INVALID; return NULL; } } } static int32_t ocs_tgt_resource_abort(struct ocs_softc *ocs, ocs_tgt_resource_t *trsrc) { union ccb *ccb = NULL; uint32_t count; count = 0; do { ccb = (union ccb *)STAILQ_FIRST(&trsrc->atio); if (ccb) { STAILQ_REMOVE_HEAD(&trsrc->atio, sim_links.stqe); ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); count++; } } while (ccb); count = 0; do { ccb = (union ccb *)STAILQ_FIRST(&trsrc->inot); if (ccb) { STAILQ_REMOVE_HEAD(&trsrc->inot, sim_links.stqe); ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); count++; } } while (ccb); return 0; } static void ocs_abort_atio(struct ocs_softc *ocs, union ccb *ccb) { - ocs_io_t *aio = NULL; + ocs_io_t *aio = NULL; ocs_tgt_resource_t *trsrc = NULL; uint32_t status = CAM_REQ_INVALID; struct ccb_hdr *cur = NULL; union ccb *accb = ccb->cab.abort_ccb; int bus = cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path)); ocs_fcport *fcp = FCPORT(ocs, bus); trsrc = ocs_tgt_resource_get(fcp, &accb->ccb_h, &status); if (trsrc != NULL) { STAILQ_FOREACH(cur, &trsrc->atio, sim_links.stqe) { if (cur != &accb->ccb_h) continue; STAILQ_REMOVE(&trsrc->atio, cur, ccb_hdr, sim_links.stqe); accb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(accb); ocs_set_ccb_status(ccb, CAM_REQ_CMP); return; } } /* if the ATIO has a valid IO pointer, CAM is telling * the driver that the ATIO (which represents the entire * exchange) has been aborted. 
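* More concretely (a summary of the two paths coded here): an ATIO still queued on trsrc->atio was unlinked and completed as CAM_REQ_ABORTED by the loop above, while an ATIO whose ccb_h.ccb_io_ptr is set already represents a live exchange, so the underlying IO must be released below once the abort flags have been checked.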
*/ aio = accb->ccb_h.ccb_io_ptr; if (aio == NULL) { ccb->ccb_h.status = CAM_UA_ABORT; return; } device_printf(ocs->dev, "%s: XPT_ABORT ATIO state=%d tag=%#x" " xid=%#x flags=%#x\n", __func__, aio->tgt_io.state, aio->tag, aio->init_task_tag, aio->tgt_io.flags); /* Expectations are: * - abort task was received * - already aborted IO in the DEVICE * - already received NOTIFY ACKNOWLEDGE */ if ((aio->tgt_io.flags & OCS_CAM_IO_F_ABORT_RECV) == 0) { device_printf(ocs->dev, "%s: abort not received or io completed \n", __func__); ocs_set_ccb_status(ccb, CAM_REQ_CMP); return; } aio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_CAM; ocs_target_io_free(aio); ocs_set_ccb_status(ccb, CAM_REQ_CMP); return; } static void ocs_abort_inot(struct ocs_softc *ocs, union ccb *ccb) { ocs_tgt_resource_t *trsrc = NULL; uint32_t status = CAM_REQ_INVALID; struct ccb_hdr *cur = NULL; union ccb *accb = ccb->cab.abort_ccb; int bus = cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path)); ocs_fcport *fcp = FCPORT(ocs, bus); trsrc = ocs_tgt_resource_get(fcp, &accb->ccb_h, &status); if (trsrc) { STAILQ_FOREACH(cur, &trsrc->inot, sim_links.stqe) { if (cur != &accb->ccb_h) continue; STAILQ_REMOVE(&trsrc->inot, cur, ccb_hdr, sim_links.stqe); accb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(accb); ocs_set_ccb_status(ccb, CAM_REQ_CMP); return; } } ocs_set_ccb_status(ccb, CAM_UA_ABORT); return; } static uint32_t ocs_abort_initiator_io(struct ocs_softc *ocs, union ccb *accb) { - ocs_node_t *node = NULL; - ocs_io_t *io = NULL; + ocs_node_t *node = NULL; + ocs_io_t *io = NULL; int32_t rc = 0; struct ccb_scsiio *csio = &accb->csio; - node = ocs_node_get_instance(ocs, accb->ccb_h.target_id); + ocs_fcport *fcp = FCPORT(ocs, cam_sim_bus(xpt_path_sim((accb)->ccb_h.path))); + node = ocs_node_get_instance(ocs, fcp->tgt[accb->ccb_h.target_id].node_id); if (node == NULL) { device_printf(ocs->dev, "%s: no device %d\n", __func__, accb->ccb_h.target_id); ocs_set_ccb_status(accb, CAM_DEV_NOT_THERE); xpt_done(accb); return (-1); } io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_ORIGINATOR); if (io == NULL) { device_printf(ocs->dev, "%s: unable to alloc IO\n", __func__); ocs_set_ccb_status(accb, CAM_REQ_CMP_ERR); xpt_done(accb); return (-1); } rc = ocs_scsi_send_tmf(node, io, (ocs_io_t *)csio->ccb_h.ccb_io_ptr, accb->ccb_h.target_lun, OCS_SCSI_TMF_ABORT_TASK, NULL, 0, 0, ocs_initiator_tmf_cb, NULL/*arg*/); return rc; } void ocs_scsi_ini_ddump(ocs_textbuf_t *textbuf, ocs_scsi_ddump_type_e type, void *obj) { switch(type) { case OCS_SCSI_DDUMP_DEVICE: { //ocs_t *ocs = obj; break; } case OCS_SCSI_DDUMP_DOMAIN: { //ocs_domain_t *domain = obj; break; } case OCS_SCSI_DDUMP_SPORT: { //ocs_sport_t *sport = obj; break; } case OCS_SCSI_DDUMP_NODE: { //ocs_node_t *node = obj; break; } case OCS_SCSI_DDUMP_IO: { //ocs_io_t *io = obj; break; } default: { break; } } } void ocs_scsi_tgt_ddump(ocs_textbuf_t *textbuf, ocs_scsi_ddump_type_e type, void *obj) { switch(type) { case OCS_SCSI_DDUMP_DEVICE: { //ocs_t *ocs = obj; break; } case OCS_SCSI_DDUMP_DOMAIN: { //ocs_domain_t *domain = obj; break; } case OCS_SCSI_DDUMP_SPORT: { //ocs_sport_t *sport = obj; break; } case OCS_SCSI_DDUMP_NODE: { //ocs_node_t *node = obj; break; } case OCS_SCSI_DDUMP_IO: { ocs_io_t *io = obj; char *state_str = NULL; switch (io->tgt_io.state) { case OCS_CAM_IO_FREE: state_str = "FREE"; break; case OCS_CAM_IO_COMMAND: state_str = "COMMAND"; break; case OCS_CAM_IO_DATA: state_str = "DATA"; break; case OCS_CAM_IO_DATA_DONE: state_str = "DATA_DONE"; break; case OCS_CAM_IO_RESP: state_str = "RESP"; break; default: 
state_str = "xxx BAD xxx"; } ocs_ddump_value(textbuf, "cam_st", "%s", state_str); if (io->tgt_io.app) { ocs_ddump_value(textbuf, "cam_flags", "%#x", ((union ccb *)(io->tgt_io.app))->ccb_h.flags); ocs_ddump_value(textbuf, "cam_status", "%#x", ((union ccb *)(io->tgt_io.app))->ccb_h.status); } break; } default: { break; } } } int32_t ocs_scsi_get_block_vaddr(ocs_io_t *io, uint64_t blocknumber, ocs_scsi_vaddr_len_t addrlen[], uint32_t max_addrlen, void **dif_vaddr) { return -1; } uint32_t ocs_get_crn(ocs_node_t *node, uint8_t *crn, uint64_t lun) { uint32_t idx; struct ocs_lun_crn *lcrn = NULL; idx = lun % OCS_MAX_LUN; lcrn = node->ini_node.lun_crn[idx]; if (lcrn == NULL) { lcrn = ocs_malloc(node->ocs, sizeof(struct ocs_lun_crn), M_ZERO|M_NOWAIT); if (lcrn == NULL) { return (1); } lcrn->lun = lun; node->ini_node.lun_crn[idx] = lcrn; } if (lcrn->lun != lun) { return (1); } if (lcrn->crnseed == 0) lcrn->crnseed = 1; *crn = lcrn->crnseed++; return (0); } void ocs_del_crn(ocs_node_t *node) { uint32_t i; struct ocs_lun_crn *lcrn = NULL; for(i = 0; i < OCS_MAX_LUN; i++) { lcrn = node->ini_node.lun_crn[i]; if (lcrn) { ocs_free(node->ocs, lcrn, sizeof(*lcrn)); } } return; } void ocs_reset_crn(ocs_node_t *node, uint64_t lun) { uint32_t idx; struct ocs_lun_crn *lcrn = NULL; idx = lun % OCS_MAX_LUN; lcrn = node->ini_node.lun_crn[idx]; if (lcrn) lcrn->crnseed = 0; return; } Index: stable/11/sys/dev/ocs_fc/ocs_pci.c =================================================================== --- stable/11/sys/dev/ocs_fc/ocs_pci.c (revision 344013) +++ stable/11/sys/dev/ocs_fc/ocs_pci.c (revision 344014) @@ -1,963 +1,963 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #define OCS_COPYRIGHT "Copyright (C) 2017 Broadcom. All rights reserved." 
/** * @file * Implementation of required FreeBSD PCI interface functions */ #include "ocs.h" #include "version.h" #include #include static MALLOC_DEFINE(M_OCS, "OCS", "OneCore Storage data"); #include #include #include /** * Tunable parameters for transport */ int logmask = 0; int ctrlmask = 2; int logdest = 1; int loglevel = LOG_INFO; int ramlog_size = 1*1024*1024; int ddump_saved_size = 0; static const char *queue_topology = "eq cq rq cq mq $nulp($nwq(cq wq:ulp=$rpt1)) cq wq:len=256:class=1"; static void ocs_release_bus(struct ocs_softc *); static int32_t ocs_intr_alloc(struct ocs_softc *); static int32_t ocs_intr_setup(struct ocs_softc *); static int32_t ocs_intr_teardown(struct ocs_softc *); static int ocs_pci_intx_filter(void *); static void ocs_pci_intr(void *); static int32_t ocs_init_dma_tag(struct ocs_softc *ocs); static int32_t ocs_setup_fcports(ocs_t *ocs); ocs_t *ocs_devices[MAX_OCS_DEVICES]; /** * @brief Check support for the given device * * Determine support for a given device by examining the PCI vendor and * device IDs * * @param dev device abstraction * * @return 0 if device is supported, ENXIO otherwise */ static int ocs_pci_probe(device_t dev) { char *desc = NULL; if (pci_get_vendor(dev) != PCI_VENDOR_EMULEX) { return ENXIO; } switch (pci_get_device(dev)) { case PCI_PRODUCT_EMULEX_OCE16001: desc = "Emulex LightPulse FC Adapter"; break; case PCI_PRODUCT_EMULEX_LPE31004: desc = "Emulex LightPulse FC Adapter"; break; case PCI_PRODUCT_EMULEX_OCE50102: desc = "Emulex LightPulse 10GbE FCoE/NIC Adapter"; break; default: return ENXIO; } device_set_desc(dev, desc); return BUS_PROBE_DEFAULT; } static int ocs_map_bars(device_t dev, struct ocs_softc *ocs) { /* * Map PCI BAR0 register into the CPU's space. */ ocs->reg[0].rid = PCIR_BAR(PCI_64BIT_BAR0); ocs->reg[0].res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ocs->reg[0].rid, RF_ACTIVE); if (ocs->reg[0].res == NULL) { device_printf(dev, "bus_alloc_resource failed rid=%#x\n", ocs->reg[0].rid); return ENXIO; } ocs->reg[0].btag = rman_get_bustag(ocs->reg[0].res); ocs->reg[0].bhandle = rman_get_bushandle(ocs->reg[0].res); return 0; } static int ocs_setup_params(struct ocs_softc *ocs) { int32_t i = 0; const char *hw_war_version; /* Setup tunable parameters */ ocs->ctrlmask = ctrlmask; ocs->speed = 0; ocs->topology = 0; ocs->ethernet_license = 0; ocs->num_scsi_ios = 8192; ocs->enable_hlm = 0; ocs->hlm_group_size = 8; ocs->logmask = logmask; ocs->config_tgt = FALSE; if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "target", &i)) { if (1 == i) { ocs->config_tgt = TRUE; device_printf(ocs->dev, "Enabling target\n"); } } ocs->config_ini = TRUE; if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "initiator", &i)) { if (0 == i) { ocs->config_ini = FALSE; device_printf(ocs->dev, "Disabling initiator\n"); } } ocs->enable_ini = ocs->config_ini; if (!ocs->config_ini && !ocs->config_tgt) { device_printf(ocs->dev, "Unsupported, both initiator and target mode disabled.\n"); return 1; } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "logmask", &logmask)) { device_printf(ocs->dev, "logmask = %#x\n", logmask); } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "logdest", &logdest)) { device_printf(ocs->dev, "logdest = %#x\n", logdest); } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "loglevel", &loglevel)) { device_printf(ocs->dev, "loglevel = %#x\n", loglevel); } if (0 == 
resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "ramlog_size", &ramlog_size)) { device_printf(ocs->dev, "ramlog_size = %#x\n", ramlog_size); } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "ddump_saved_size", &ddump_saved_size)) { device_printf(ocs->dev, "ddump_saved_size = %#x\n", ddump_saved_size); } /* If enabled, initialize a RAM logging buffer */ if (logdest & 2) { ocs->ramlog = ocs_ramlog_init(ocs, ramlog_size/OCS_RAMLOG_DEFAULT_BUFFERS, OCS_RAMLOG_DEFAULT_BUFFERS); /* If NULL was returned, then we'll simply skip using the ramlog but */ /* set logdest to 1 to ensure that we at least get default logging. */ if (ocs->ramlog == NULL) { logdest = 1; } } /* initialize a saved ddump */ if (ddump_saved_size) { if (ocs_textbuf_alloc(ocs, &ocs->ddump_saved, ddump_saved_size)) { ocs_log_err(ocs, "failed to allocate memory for saved ddump\n"); } } if (0 == resource_string_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "hw_war_version", &hw_war_version)) { device_printf(ocs->dev, "hw_war_version = %s\n", hw_war_version); ocs->hw_war_version = strdup(hw_war_version, M_OCS); } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "explicit_buffer_list", &i)) { ocs->explicit_buffer_list = i; } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "ethernet_license", &i)) { ocs->ethernet_license = i; } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "speed", &i)) { device_printf(ocs->dev, "speed = %d Mbps\n", i); ocs->speed = i; } ocs->desc = device_get_desc(ocs->dev); ocs_device_lock_init(ocs); ocs->driver_version = STR_BE_MAJOR "." STR_BE_MINOR "." STR_BE_BUILD "." STR_BE_BRANCH; ocs->model = ocs_pci_model(ocs->pci_vendor, ocs->pci_device); if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "enable_hlm", &i)) { device_printf(ocs->dev, "enable_hlm = %d\n", i); ocs->enable_hlm = i; if (ocs->enable_hlm) { ocs->hlm_group_size = 8; if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "hlm_group_size", &i)) { ocs->hlm_group_size = i; } device_printf(ocs->dev, "hlm_group_size = %d\n", ocs->hlm_group_size); } } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "num_scsi_ios", &i)) { ocs->num_scsi_ios = i; device_printf(ocs->dev, "num_scsi_ios = %d\n", ocs->num_scsi_ios); } else { ocs->num_scsi_ios = 8192; } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "topology", &i)) { ocs->topology = i; device_printf(ocs->dev, "Setting topology=%#x\n", i); } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "num_vports", &i)) { if (i >= 0 && i <= 254) { device_printf(ocs->dev, "num_vports = %d\n", i); ocs->num_vports = i; } else { device_printf(ocs->dev, "num_vports: %d not supported \n", i); } } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "external_loopback", &i)) { device_printf(ocs->dev, "external_loopback = %d\n", i); ocs->external_loopback = i; } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "tgt_rscn_delay", &i)) { device_printf(ocs->dev, "tgt_rscn_delay = %d\n", i); ocs->tgt_rscn_delay_msec = i * 1000; } if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "tgt_rscn_period", &i)) { device_printf(ocs->dev, "tgt_rscn_period = %d\n", i); ocs->tgt_rscn_period_msec = i * 1000; } if (0 ==
resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev), "target_io_timer", &i)) { device_printf(ocs->dev, "target_io_timer = %d\n", i); ocs->target_io_timer_sec = i; } hw_global.queue_topology_string = queue_topology; ocs->rq_selection_policy = 0; ocs->rr_quanta = 1; ocs->filter_def = "0,0,0,0"; return 0; } static int32_t ocs_setup_fcports(ocs_t *ocs) { uint32_t i = 0, role = 0; uint64_t sli_wwpn, sli_wwnn; size_t size; ocs_xport_t *xport = ocs->xport; ocs_vport_spec_t *vport; ocs_fcport *fcp = NULL; size = sizeof(ocs_fcport) * (ocs->num_vports + 1); ocs->fcports = ocs_malloc(ocs, size, M_ZERO|M_NOWAIT); if (ocs->fcports == NULL) { device_printf(ocs->dev, "Can't allocate fcport \n"); return 1; } role = ((ocs->enable_ini) ? KNOB_ROLE_INITIATOR : 0) | ((ocs->enable_tgt) ? KNOB_ROLE_TARGET : 0); fcp = FCPORT(ocs, i); fcp->role = role; i++; ocs_list_foreach(&xport->vport_list, vport) { fcp = FCPORT(ocs, i); vport->tgt_data = fcp; fcp->vport = vport; fcp->role = role; if (ocs_hw_get_def_wwn(ocs, i, &sli_wwpn, &sli_wwnn)) { ocs_log_err(ocs, "Get default wwn failed \n"); i++; continue; } vport->wwpn = ocs_be64toh(sli_wwpn); vport->wwnn = ocs_be64toh(sli_wwnn); i++; ocs_log_debug(ocs, "VPort wwpn: %lx wwnn: %lx \n", vport->wwpn, vport->wwnn); } return 0; } int32_t ocs_device_attach(ocs_t *ocs) { int32_t i; ocs_io_t *io = NULL; if (ocs->attached) { ocs_log_warn(ocs, "%s: Device is already attached\n", __func__); return -1; } /* Allocate transport object and bring online */ ocs->xport = ocs_xport_alloc(ocs); if (ocs->xport == NULL) { device_printf(ocs->dev, "failed to allocate transport object\n"); return ENOMEM; } else if (ocs_xport_attach(ocs->xport) != 0) { device_printf(ocs->dev, "%s: failed to attach transport object\n", __func__); goto fail_xport_attach; } else if (ocs_xport_initialize(ocs->xport) != 0) { device_printf(ocs->dev, "%s: failed to initialize transport object\n", __func__); goto fail_xport_init; } if (ocs_init_dma_tag(ocs)) { goto fail_intr_setup; } for (i = 0; (io = ocs_io_get_instance(ocs, i)); i++) { if (bus_dmamap_create(ocs->buf_dmat, 0, &io->tgt_io.dmap)) { device_printf(ocs->dev, "%s: bad dma map create\n", __func__); } io->tgt_io.state = OCS_CAM_IO_FREE; } if (ocs_setup_fcports(ocs)) { device_printf(ocs->dev, "FCports creation failed\n"); goto fail_intr_setup; } if (ocs_cam_attach(ocs)) { device_printf(ocs->dev, "cam attach failed \n"); goto fail_intr_setup; } if (ocs_intr_setup(ocs)) { device_printf(ocs->dev, "Interrupt setup failed\n"); goto fail_intr_setup; } if (ocs->enable_ini || ocs->enable_tgt) { if (ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE)) { device_printf(ocs->dev, "Can't init port\n"); goto fail_xport_online; } } ocs->attached = true; return 0; fail_xport_online: if (ocs_xport_control(ocs->xport, OCS_XPORT_SHUTDOWN)) { device_printf(ocs->dev, "Transport Shutdown timed out\n"); } ocs_intr_teardown(ocs); fail_intr_setup: fail_xport_init: ocs_xport_detach(ocs->xport); if (ocs->config_tgt) ocs_scsi_tgt_del_device(ocs); ocs_xport_free(ocs->xport); ocs->xport = NULL; fail_xport_attach: if (ocs->xport) ocs_free(ocs, ocs->xport, sizeof(*(ocs->xport))); ocs->xport = NULL; return ENXIO; } /** * @brief Connect the driver to the given device * * If the probe routine is successful, the OS will give the driver * the opportunity to connect itself to the device. This routine * maps PCI resources (memory BARs and interrupts) and initializes a * hardware object.
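* * In outline (a sketch of the body below): map BAR0, create the parent * DMA tag, allocate MSI-X (falling back to MSI and then INTx), verify the * function is a Fibre Channel serial-bus device, read the tunables via * ocs_setup_params(), and finally call ocs_device_attach().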
* * @param dev device abstraction * * @return 0 if the driver attaches to the device, ENXIO otherwise */ static int ocs_pci_attach(device_t dev) { struct ocs_softc *ocs; int instance; instance = device_get_unit(dev); ocs = (struct ocs_softc *)device_get_softc(dev); if (NULL == ocs) { device_printf(dev, "cannot allocate softc\n"); return ENOMEM; } memset(ocs, 0, sizeof(struct ocs_softc)); if (instance < ARRAY_SIZE(ocs_devices)) { ocs_devices[instance] = ocs; } else { device_printf(dev, "got unexpected ocs instance number %d\n", instance); } ocs->instance_index = instance; ocs->dev = dev; pci_enable_io(dev, SYS_RES_MEMORY); pci_enable_busmaster(dev); ocs->pci_vendor = pci_get_vendor(dev); ocs->pci_device = pci_get_device(dev); snprintf(ocs->businfo, sizeof(ocs->businfo), "%02X:%02X:%02X", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)); /* Map all memory BARs */ if (ocs_map_bars(dev, ocs)) { device_printf(dev, "Failed to map pci bars\n"); goto release_bus; } /* create a root DMA tag for the device */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, /* byte alignment */ 0, /* no boundary restrictions */ BUS_SPACE_MAXADDR, /* no minimum low address */ BUS_SPACE_MAXADDR, /* no maximum high address */ NULL, /* no filter function */ NULL, /* or arguments */ BUS_SPACE_MAXSIZE, /* max size covered by tag */ BUS_SPACE_UNRESTRICTED, /* no segment count restrictions */ BUS_SPACE_MAXSIZE, /* no segment length restrictions */ 0, /* flags */ NULL, /* no lock manipulation function */ NULL, /* or arguments */ &ocs->dmat)) { device_printf(dev, "parent DMA tag allocation failed\n"); goto release_bus; } if (ocs_intr_alloc(ocs)) { device_printf(dev, "Interrupt allocation failed\n"); goto release_bus; } if (PCIC_SERIALBUS == pci_get_class(dev) && PCIS_SERIALBUS_FC == pci_get_subclass(dev)) ocs->ocs_xport = OCS_XPORT_FC; else { device_printf(dev, "unsupported class (%#x : %#x)\n", pci_get_class(dev), pci_get_subclass(dev)); goto release_bus; } /* Setup tunable parameters */ if (ocs_setup_params(ocs)) { device_printf(ocs->dev, "failed to setup params\n"); goto release_bus; } if (ocs_device_attach(ocs)) { device_printf(ocs->dev, "failed to attach device\n"); goto release_params; } ocs->fc_type = FC_TYPE_FCP; ocs_debug_attach(ocs); return 0; release_params: ocs_ramlog_free(ocs, ocs->ramlog); ocs_device_lock_free(ocs); free(ocs->hw_war_version, M_OCS); release_bus: ocs_release_bus(ocs); return ENXIO; } /** * @brief Free resources when the PCI device detaches * * @param ocs pointer to ocs structure * * @return 0 for success, a negative error code value for failure.
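* * @note Teardown mirrors attach in reverse: the function below marks * the device detached and shuts down the transport first, then tears * down interrupts, detaches the transport and CAM, destroys the per-IO * DMA maps, and frees the transport object.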
*/ int32_t ocs_device_detach(ocs_t *ocs) { int32_t rc = 0, i; ocs_io_t *io = NULL; if (ocs != NULL) { if (!ocs->attached) { ocs_log_warn(ocs, "%s: Device is not attached\n", __func__); return -1; } + ocs->attached = FALSE; + rc = ocs_xport_control(ocs->xport, OCS_XPORT_SHUTDOWN); if (rc) { ocs_log_err(ocs, "%s: Transport Shutdown timed out\n", __func__); } ocs_intr_teardown(ocs); if (ocs_xport_detach(ocs->xport) != 0) { ocs_log_err(ocs, "%s: Transport detach failed\n", __func__); } ocs_cam_detach(ocs); ocs_free(ocs, ocs->fcports, sizeof(ocs->fcports)); for (i = 0; (io = ocs_io_get_instance(ocs, i)); i++) { if (bus_dmamap_destroy(ocs->buf_dmat, io->tgt_io.dmap)) { device_printf(ocs->dev, "%s: bad dma map destroy\n", __func__); } } bus_dma_tag_destroy(ocs->dmat); ocs_xport_free(ocs->xport); ocs->xport = NULL; - - ocs->attached = FALSE; } return 0; } /** * @brief Detach the driver from the given device * * If the driver is a loadable module, this routine gets called at unload * time. This routine will stop the device and free any allocated resources. * * @param dev device abstraction * * @return 0 if the driver detaches from the device, ENXIO otherwise */ static int ocs_pci_detach(device_t dev) { struct ocs_softc *ocs; ocs = (struct ocs_softc *)device_get_softc(dev); if (!ocs) { device_printf(dev, "no driver context?!?\n"); return -1; } if (ocs->config_tgt && ocs->enable_tgt) { device_printf(dev, "can't detach with target mode enabled\n"); return EBUSY; } ocs_device_detach(ocs); /* * Workaround for OCS SCSI Transport quirk. * * CTL requires that target mode is disabled prior to unloading the * driver (ie ocs->enable_tgt = FALSE), but once the target is disabled, * the transport will not call ocs_scsi_tgt_del_device() which deallocates * CAM resources. The workaround is to explicitly make the call here. 
*/ if (ocs->config_tgt) ocs_scsi_tgt_del_device(ocs); /* free strdup created buffer.*/ free(ocs->hw_war_version, M_OCS); ocs_device_lock_free(ocs); ocs_debug_detach(ocs); ocs_ramlog_free(ocs, ocs->ramlog); ocs_release_bus(ocs); return 0; } /** * @brief Notify driver of system shutdown * * @param dev device abstraction * * @return 0 if the driver attaches to the device, ENXIO otherwise */ static int ocs_pci_shutdown(device_t dev) { device_printf(dev, "%s\n", __func__); return 0; } /** * @brief Release bus resources allocated within the soft context * * @param ocs Pointer to the driver's context * * @return none */ static void ocs_release_bus(struct ocs_softc *ocs) { if (NULL != ocs) { uint32_t i; ocs_intr_teardown(ocs); if (ocs->irq) { bus_release_resource(ocs->dev, SYS_RES_IRQ, rman_get_rid(ocs->irq), ocs->irq); if (ocs->n_vec) { pci_release_msi(ocs->dev); ocs->n_vec = 0; } ocs->irq = NULL; } bus_dma_tag_destroy(ocs->dmat); for (i = 0; i < PCI_MAX_BAR; i++) { if (ocs->reg[i].res) { bus_release_resource(ocs->dev, SYS_RES_MEMORY, ocs->reg[i].rid, ocs->reg[i].res); } } } } /** * @brief Allocate and initialize interrupts * * @param ocs Pointer to the driver's context * * @return none */ static int32_t ocs_intr_alloc(struct ocs_softc *ocs) { ocs->n_vec = 1; if (pci_alloc_msix(ocs->dev, &ocs->n_vec)) { device_printf(ocs->dev, "MSI-X allocation failed\n"); if (pci_alloc_msi(ocs->dev, &ocs->n_vec)) { device_printf(ocs->dev, "MSI allocation failed \n"); ocs->irqid = 0; ocs->n_vec = 0; } else ocs->irqid = 1; } else { ocs->irqid = 1; } ocs->irq = bus_alloc_resource_any(ocs->dev, SYS_RES_IRQ, &ocs->irqid, RF_ACTIVE | RF_SHAREABLE); if (NULL == ocs->irq) { device_printf(ocs->dev, "could not allocate interrupt\n"); return -1; } ocs->intr_ctx.vec = 0; ocs->intr_ctx.softc = ocs; snprintf(ocs->intr_ctx.name, sizeof(ocs->intr_ctx.name), "%s_intr_%d", device_get_nameunit(ocs->dev), ocs->intr_ctx.vec); return 0; } /** * @brief Create and attach an interrupt handler * * @param ocs Pointer to the driver's context * * @return 0 on success, non-zero otherwise */ static int32_t ocs_intr_setup(struct ocs_softc *ocs) { driver_filter_t *filter = NULL; if (0 == ocs->n_vec) { filter = ocs_pci_intx_filter; } if (bus_setup_intr(ocs->dev, ocs->irq, INTR_MPSAFE | INTR_TYPE_CAM, filter, ocs_pci_intr, &ocs->intr_ctx, &ocs->tag)) { device_printf(ocs->dev, "could not initialize interrupt\n"); return -1; } return 0; } /** * @brief Detach an interrupt handler * * @param ocs Pointer to the driver's context * * @return 0 on success, non-zero otherwise */ static int32_t ocs_intr_teardown(struct ocs_softc *ocs) { if (!ocs) { printf("%s: bad driver context?!?\n", __func__); return -1; } if (ocs->tag) { bus_teardown_intr(ocs->dev, ocs->irq, ocs->tag); ocs->tag = NULL; } return 0; } /** * @brief PCI interrupt handler * * @param arg pointer to the driver's software context * * @return FILTER_HANDLED if interrupt is processed, FILTER_STRAY otherwise */ static int ocs_pci_intx_filter(void *arg) { ocs_intr_ctx_t *intr = arg; struct ocs_softc *ocs = NULL; uint16_t val = 0; if (NULL == intr) { return FILTER_STRAY; } ocs = intr->softc; #ifndef PCIM_STATUS_INTR #define PCIM_STATUS_INTR 0x0008 #endif val = pci_read_config(ocs->dev, PCIR_STATUS, 2); if (0xffff == val) { device_printf(ocs->dev, "%s: pci_read_config(PCIR_STATUS) failed\n", __func__); return FILTER_STRAY; } if (0 == (val & PCIM_STATUS_INTR)) { return FILTER_STRAY; } val = pci_read_config(ocs->dev, PCIR_COMMAND, 2); val |= PCIM_CMD_INTxDIS; pci_write_config(ocs->dev, PCIR_COMMAND, 
val, 2); return FILTER_SCHEDULE_THREAD; } /** * @brief interrupt handler * * @param context pointer to the interrupt context */ static void ocs_pci_intr(void *context) { ocs_intr_ctx_t *intr = context; struct ocs_softc *ocs = intr->softc; mtx_lock(&ocs->sim_lock); ocs_hw_process(&ocs->hw, intr->vec, OCS_OS_MAX_ISR_TIME_MSEC); mtx_unlock(&ocs->sim_lock); } /** * @brief Initialize DMA tag * * @param ocs the driver instance's software context * * @return 0 on success, non-zero otherwise */ static int32_t ocs_init_dma_tag(struct ocs_softc *ocs) { uint32_t max_sgl = 0; uint32_t max_sge = 0; /* * IOs can't use the parent DMA tag and must create their * own, based primarily on a restricted number of DMA segments. * This is more of a BSD requirement than a SLI Port requirement */ ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &max_sgl); ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &max_sge); if (bus_dma_tag_create(ocs->dmat, 1, /* byte alignment */ 0, /* no boundary restrictions */ BUS_SPACE_MAXADDR, /* no minimum low address */ BUS_SPACE_MAXADDR, /* no maximum high address */ NULL, /* no filter function */ NULL, /* or arguments */ BUS_SPACE_MAXSIZE, /* max size covered by tag */ max_sgl, /* segment count restrictions */ max_sge, /* segment length restrictions */ 0, /* flags */ NULL, /* no lock manipulation function */ NULL, /* or arguments */ &ocs->buf_dmat)) { device_printf(ocs->dev, "%s: bad bus_dma_tag_create(buf_dmat)\n", __func__); return -1; } return 0; } int32_t ocs_get_property(const char *prop_name, char *buffer, uint32_t buffer_len) { return -1; } /** * @brief return pointer to ocs structure given instance index * * A pointer to an ocs structure is returned given an instance index. * * @param index index to ocs_devices array * * @return ocs pointer */ ocs_t *ocs_get_instance(uint32_t index) { if (index < ARRAY_SIZE(ocs_devices)) { return ocs_devices[index]; } return NULL; } /** * @brief Return instance index of an opaque ocs structure * * Returns the ocs instance index * * @param os pointer to ocs instance * * @return pointer to ocs instance index */ uint32_t ocs_instance(void *os) { ocs_t *ocs = os; return ocs->instance_index; } static device_method_t ocs_methods[] = { DEVMETHOD(device_probe, ocs_pci_probe), DEVMETHOD(device_attach, ocs_pci_attach), DEVMETHOD(device_detach, ocs_pci_detach), DEVMETHOD(device_shutdown, ocs_pci_shutdown), {0, 0} }; static driver_t ocs_driver = { "ocs_fc", ocs_methods, sizeof(struct ocs_softc) }; static devclass_t ocs_devclass; DRIVER_MODULE(ocs_fc, pci, ocs_driver, ocs_devclass, 0, 0); MODULE_VERSION(ocs_fc, 1); Index: stable/11/sys/dev/ocs_fc/ocs_xport.c =================================================================== --- stable/11/sys/dev/ocs_fc/ocs_xport.c (revision 344013) +++ stable/11/sys/dev/ocs_fc/ocs_xport.c (revision 344014) @@ -1,1105 +1,1107 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. 
Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * FC transport API * */ #include "ocs.h" #include "ocs_device.h" static void ocs_xport_link_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_link_stat_counts_t *counters, void *arg); static void ocs_xport_host_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_host_stat_counts_t *counters, void *arg); /** * @brief Post node event callback argument. */ typedef struct { ocs_sem_t sem; ocs_node_t *node; ocs_sm_event_t evt; void *context; } ocs_xport_post_node_event_t; /** * @brief Allocate a transport object. * * @par Description * A transport object is allocated, and associated with a device instance. * * @param ocs Pointer to device instance. * * @return Returns the pointer to the allocated transport object, or NULL on failure. */ ocs_xport_t * ocs_xport_alloc(ocs_t *ocs) { ocs_xport_t *xport; ocs_assert(ocs, NULL); xport = ocs_malloc(ocs, sizeof(*xport), OCS_M_ZERO); if (xport != NULL) { xport->ocs = ocs; } return xport; } /** * @brief Tear down the RQ threads and the circular buffers used to pass sequences. * * @par Description * Terminates the RQ processing threads and frees their circular buffers. * * @param xport Pointer to transport object * * @return None. */ static void ocs_xport_rq_threads_teardown(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; uint32_t i; if (xport->num_rq_threads == 0 || xport->rq_thread_info == NULL) { return; } /* Abort any threads */ for (i = 0; i < xport->num_rq_threads; i++) { if (xport->rq_thread_info[i].thread_started) { ocs_thread_terminate(&xport->rq_thread_info[i].thread); /* wait for the thread to exit */ ocs_log_debug(ocs, "wait for thread %d to exit\n", i); while (xport->rq_thread_info[i].thread_started) { ocs_udelay(10000); } ocs_log_debug(ocs, "thread %d exited\n", i); } if (xport->rq_thread_info[i].seq_cbuf != NULL) { ocs_cbuf_free(xport->rq_thread_info[i].seq_cbuf); xport->rq_thread_info[i].seq_cbuf = NULL; } } } /** * @brief Create the RQ threads and the circular buffers used to pass sequences. * * @par Description * Creates the circular buffers and the servicing threads for RQ processing. * * @param xport Pointer to transport object. * @param num_rq_threads Number of RQ processing threads that the * driver creates. * * @return Returns 0 on success, or a non-zero value on failure.
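* * Illustrative call, as issued from ocs_xport_attach() below: * * if (ocs_xport_rq_threads_create(xport, ocs->rq_threads) != 0) { * ocs_log_err(ocs, "failure creating RQ threads\n"); * goto ocs_xport_attach_cleanup; * }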
*/ static int32_t ocs_xport_rq_threads_create(ocs_xport_t *xport, uint32_t num_rq_threads) { ocs_t *ocs = xport->ocs; int32_t rc = 0; uint32_t i; xport->num_rq_threads = num_rq_threads; ocs_log_debug(ocs, "number of RQ threads %d\n", num_rq_threads); if (num_rq_threads == 0) { return 0; } /* Allocate the space for the thread objects */ xport->rq_thread_info = ocs_malloc(ocs, sizeof(ocs_xport_rq_thread_info_t) * num_rq_threads, OCS_M_ZERO); if (xport->rq_thread_info == NULL) { ocs_log_err(ocs, "memory allocation failure\n"); return -1; } /* Create the circular buffers and threads. */ for (i = 0; i < num_rq_threads; i++) { xport->rq_thread_info[i].ocs = ocs; xport->rq_thread_info[i].seq_cbuf = ocs_cbuf_alloc(ocs, OCS_HW_RQ_NUM_HDR); if (xport->rq_thread_info[i].seq_cbuf == NULL) { goto ocs_xport_rq_threads_create_error; } ocs_snprintf(xport->rq_thread_info[i].thread_name, sizeof(xport->rq_thread_info[i].thread_name), "ocs_unsol_rq:%d:%d", ocs->instance_index, i); rc = ocs_thread_create(ocs, &xport->rq_thread_info[i].thread, ocs_unsol_rq_thread, xport->rq_thread_info[i].thread_name, &xport->rq_thread_info[i], OCS_THREAD_RUN); if (rc) { ocs_log_err(ocs, "ocs_thread_create failed: %d\n", rc); goto ocs_xport_rq_threads_create_error; } xport->rq_thread_info[i].thread_started = TRUE; } return 0; ocs_xport_rq_threads_create_error: ocs_xport_rq_threads_teardown(xport); return -1; } /** * @brief Do as much allocation as possible, but do not initialize the device. * * @par Description * Performs the functions required to get a device ready to run. * * @param xport Pointer to transport object. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t ocs_xport_attach(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; int32_t rc; uint32_t max_sgl; uint32_t n_sgl; uint32_t i; uint32_t value; uint32_t max_remote_nodes; /* booleans used for cleanup if initialization fails */ uint8_t io_pool_created = FALSE; uint8_t node_pool_created = FALSE; uint8_t rq_threads_created = FALSE; ocs_list_init(&ocs->domain_list, ocs_domain_t, link); for (i = 0; i < SLI4_MAX_FCFI; i++) { xport->fcfi[i].hold_frames = 1; ocs_lock_init(ocs, &xport->fcfi[i].pend_frames_lock, "xport pend_frames[%d]", i); ocs_list_init(&xport->fcfi[i].pend_frames, ocs_hw_sequence_t, link); } rc = ocs_hw_set_ptr(&ocs->hw, OCS_HW_WAR_VERSION, ocs->hw_war_version); if (rc) { ocs_log_test(ocs, "can't set OCS_HW_WAR_VERSION\n"); return -1; } rc = ocs_hw_setup(&ocs->hw, ocs, SLI4_PORT_TYPE_FC); if (rc) { ocs_log_err(ocs, "%s: Can't setup hardware\n", ocs->desc); return -1; } else if (ocs->ctrlmask & OCS_CTRLMASK_CRASH_RESET) { ocs_log_debug(ocs, "stopping after ocs_hw_setup\n"); return -1; } ocs_hw_set(&ocs->hw, OCS_HW_BOUNCE, ocs->hw_bounce); ocs_log_debug(ocs, "HW bounce: %d\n", ocs->hw_bounce); ocs_hw_set(&ocs->hw, OCS_HW_RQ_SELECTION_POLICY, ocs->rq_selection_policy); ocs_hw_set(&ocs->hw, OCS_HW_RR_QUANTA, ocs->rr_quanta); ocs_hw_get(&ocs->hw, OCS_HW_RQ_SELECTION_POLICY, &value); ocs_log_debug(ocs, "RQ Selection Policy: %d\n", value); ocs_hw_set_ptr(&ocs->hw, OCS_HW_FILTER_DEF, (void*) ocs->filter_def); ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGL, &max_sgl); max_sgl -= SLI4_SGE_MAX_RESERVED; n_sgl = MIN(OCS_FC_MAX_SGL, max_sgl); /* EVT: For chained SGL testing */ if (ocs->ctrlmask & OCS_CTRLMASK_TEST_CHAINED_SGLS) { n_sgl = 4; } /* Note: number of SGLs must be set for ocs_node_create_pool */ if (ocs_hw_set(&ocs->hw, OCS_HW_N_SGL, n_sgl) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set number of SGLs\n", ocs->desc); return -1; } else
{ ocs_log_debug(ocs, "%s: Configured for %d SGLs\n", ocs->desc, n_sgl); } ocs_hw_get(&ocs->hw, OCS_HW_MAX_NODES, &max_remote_nodes); - rc = ocs_node_create_pool(ocs, (ocs->max_remote_nodes) ? - ocs->max_remote_nodes : max_remote_nodes); + if (!ocs->max_remote_nodes) + ocs->max_remote_nodes = max_remote_nodes; + + rc = ocs_node_create_pool(ocs, ocs->max_remote_nodes); if (rc) { ocs_log_err(ocs, "Can't allocate node pool\n"); goto ocs_xport_attach_cleanup; } else { node_pool_created = TRUE; } /* EVT: if testing chained SGLs allocate OCS_FC_MAX_SGL SGE's in the IO */ xport->io_pool = ocs_io_pool_create(ocs, ocs->num_scsi_ios, (ocs->ctrlmask & OCS_CTRLMASK_TEST_CHAINED_SGLS) ? OCS_FC_MAX_SGL : n_sgl); if (xport->io_pool == NULL) { ocs_log_err(ocs, "Can't allocate IO pool\n"); goto ocs_xport_attach_cleanup; } else { io_pool_created = TRUE; } /* * setup the RQ processing threads */ if (ocs_xport_rq_threads_create(xport, ocs->rq_threads) != 0) { ocs_log_err(ocs, "failure creating RQ threads\n"); goto ocs_xport_attach_cleanup; } rq_threads_created = TRUE; return 0; ocs_xport_attach_cleanup: if (io_pool_created) { ocs_io_pool_free(xport->io_pool); } if (node_pool_created) { ocs_node_free_pool(ocs); } if (rq_threads_created) { ocs_xport_rq_threads_teardown(xport); } return -1; } /** * @brief Determines how to setup auto Xfer ready. * * @par Description * @param xport Pointer to transport object. * * @return Returns 0 on success or a non-zero value on failure. */ static int32_t ocs_xport_initialize_auto_xfer_ready(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; uint32_t auto_xfer_rdy; char prop_buf[32]; uint32_t ramdisc_blocksize = 512; uint8_t p_type = 0; ocs_hw_get(&ocs->hw, OCS_HW_AUTO_XFER_RDY_CAPABLE, &auto_xfer_rdy); if (!auto_xfer_rdy) { ocs->auto_xfer_rdy_size = 0; ocs_log_test(ocs, "Cannot enable auto xfer rdy for this port\n"); return 0; } if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_SIZE, ocs->auto_xfer_rdy_size)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc); return -1; } /* * Determine if we are doing protection in the backend. We are looking * at the modules parameters here. The backend cannot allow a format * command to change the protection mode when using this feature, * otherwise the firmware will not do the proper thing. 
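* For example (hypothetical property values): with p_type=1 and ramdisc_blocksize=4096, the code below enables T10 DIF for auto xfer ready, programs a 4096-byte block size, and marks the reference tag as the LBA; with p_type=0 and no external_dif set, protection stays disabled.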
*/ if (ocs_get_property("p_type", prop_buf, sizeof(prop_buf)) == 0) { p_type = ocs_strtoul(prop_buf, 0, 0); } if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) { ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0); } if (ocs_get_property("external_dif", prop_buf, sizeof(prop_buf)) == 0) { if(ocs_strlen(prop_buf)) { if (p_type == 0) { p_type = 1; } } } if (p_type != 0) { if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_T10_ENABLE, TRUE)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc); return -1; } if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_BLK_SIZE, ramdisc_blocksize)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy blk size\n", ocs->desc); return -1; } if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_P_TYPE, p_type)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc); return -1; } if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA, TRUE)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy ref tag\n", ocs->desc); return -1; } if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID, FALSE)) { ocs_log_test(ocs, "%s: Can't set auto xfer rdy app tag valid\n", ocs->desc); return -1; } } ocs_log_debug(ocs, "Auto xfer rdy is enabled, p_type=%d, blksize=%d\n", p_type, ramdisc_blocksize); return 0; } /** * @brief Initializes the device. * * @par Description * Performs the functions required to make a device functional. * * @param xport Pointer to transport object. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t ocs_xport_initialize(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; int32_t rc; uint32_t i; uint32_t max_hw_io; uint32_t max_sgl; uint32_t hlm; uint32_t rq_limit; uint32_t dif_capable; uint8_t dif_separate = 0; char prop_buf[32]; /* booleans used for cleanup if initialization fails */ uint8_t ini_device_set = FALSE; uint8_t tgt_device_set = FALSE; uint8_t hw_initialized = FALSE; ocs_hw_get(&ocs->hw, OCS_HW_MAX_IO, &max_hw_io); if (ocs_hw_set(&ocs->hw, OCS_HW_N_IO, max_hw_io) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set number of IOs\n", ocs->desc); return -1; } ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGL, &max_sgl); max_sgl -= SLI4_SGE_MAX_RESERVED; if (ocs->enable_hlm) { ocs_hw_get(&ocs->hw, OCS_HW_HIGH_LOGIN_MODE, &hlm); if (!hlm) { ocs->enable_hlm = FALSE; ocs_log_err(ocs, "Cannot enable high login mode for this port\n"); } else { ocs_log_debug(ocs, "High login mode is enabled\n"); if (ocs_hw_set(&ocs->hw, OCS_HW_HIGH_LOGIN_MODE, TRUE)) { ocs_log_err(ocs, "%s: Can't set high login mode\n", ocs->desc); return -1; } } } /* validate the auto xfer_rdy size */ if (ocs->auto_xfer_rdy_size > 0 && (ocs->auto_xfer_rdy_size < 2048 || ocs->auto_xfer_rdy_size > 65536)) { ocs_log_err(ocs, "Auto XFER_RDY size is out of range (2K-64K)\n"); return -1; } ocs_hw_get(&ocs->hw, OCS_HW_MAX_IO, &max_hw_io); if (ocs->auto_xfer_rdy_size > 0) { if (ocs_xport_initialize_auto_xfer_ready(xport)) { ocs_log_err(ocs, "%s: Failed auto xfer ready setup\n", ocs->desc); return -1; } if (ocs->esoc){ ocs_hw_set(&ocs->hw, OCS_ESOC, TRUE); } } if (ocs->explicit_buffer_list) { /* Are pre-registered SGL's required? 
*/ ocs_hw_get(&ocs->hw, OCS_HW_PREREGISTER_SGL, &i); if (i == TRUE) { ocs_log_err(ocs, "Explicit Buffer List not supported on this device, not enabled\n"); } else { ocs_hw_set(&ocs->hw, OCS_HW_PREREGISTER_SGL, FALSE); } } if (ocs_hw_set(&ocs->hw, OCS_HW_TOPOLOGY, ocs->topology) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set the topology\n", ocs->desc); return -1; } ocs_hw_set(&ocs->hw, OCS_HW_RQ_DEFAULT_BUFFER_SIZE, OCS_FC_RQ_SIZE_DEFAULT); if (ocs_hw_set(&ocs->hw, OCS_HW_LINK_SPEED, ocs->speed) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set the link speed\n", ocs->desc); return -1; } if (ocs_hw_set(&ocs->hw, OCS_HW_ETH_LICENSE, ocs->ethernet_license) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set the ethernet license\n", ocs->desc); return -1; } /* currently only lancer supports setting the CRC seed value */ if (ocs->hw.sli.asic_type == SLI4_ASIC_TYPE_LANCER) { if (ocs_hw_set(&ocs->hw, OCS_HW_DIF_SEED, OCS_FC_DIF_SEED) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set the DIF seed\n", ocs->desc); return -1; } } /* Set the DIF mode */ if (0 == ocs_hw_get(&ocs->hw, OCS_HW_DIF_CAPABLE, &dif_capable)) { if (dif_capable) { if (ocs_get_property("dif_separate", prop_buf, sizeof(prop_buf)) == 0) { dif_separate = ocs_strtoul(prop_buf, 0, 0); } if ((rc = ocs_hw_set(&ocs->hw, OCS_HW_DIF_MODE, (dif_separate == 0 ? OCS_HW_DIF_MODE_INLINE : OCS_HW_DIF_MODE_SEPARATE)))) { ocs_log_err(ocs, "Requested DIF MODE not supported\n"); } } } if (ocs->target_io_timer_sec) { ocs_log_debug(ocs, "setting target io timer=%d\n", ocs->target_io_timer_sec); ocs_hw_set(&ocs->hw, OCS_HW_EMULATE_TARGET_WQE_TIMEOUT, TRUE); } ocs_hw_callback(&ocs->hw, OCS_HW_CB_DOMAIN, ocs_domain_cb, ocs); ocs_hw_callback(&ocs->hw, OCS_HW_CB_REMOTE_NODE, ocs_remote_node_cb, ocs); ocs_hw_callback(&ocs->hw, OCS_HW_CB_UNSOLICITED, ocs_unsolicited_cb, ocs); ocs_hw_callback(&ocs->hw, OCS_HW_CB_PORT, ocs_port_cb, ocs); ocs->fw_version = (const char*) ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV); /* Initialize vport list */ ocs_list_init(&xport->vport_list, ocs_vport_spec_t, link); ocs_lock_init(ocs, &xport->io_pending_lock, "io_pending_lock[%d]", ocs->instance_index); ocs_list_init(&xport->io_pending_list, ocs_io_t, io_pending_link); ocs_atomic_init(&xport->io_active_count, 0); ocs_atomic_init(&xport->io_pending_count, 0); ocs_atomic_init(&xport->io_total_free, 0); ocs_atomic_init(&xport->io_total_pending, 0); ocs_atomic_init(&xport->io_alloc_failed_count, 0); ocs_atomic_init(&xport->io_pending_recursing, 0); ocs_lock_init(ocs, &ocs->hw.watchdog_lock, " Watchdog Lock[%d]", ocs_instance(ocs)); rc = ocs_hw_init(&ocs->hw); if (rc) { ocs_log_err(ocs, "ocs_hw_init failure\n"); goto ocs_xport_init_cleanup; } else { hw_initialized = TRUE; } rq_limit = max_hw_io/2; if (ocs_hw_set(&ocs->hw, OCS_HW_RQ_PROCESS_LIMIT, rq_limit) != OCS_HW_RTN_SUCCESS) { ocs_log_err(ocs, "%s: Can't set the RQ process limit\n", ocs->desc); } if (ocs->config_tgt) { rc = ocs_scsi_tgt_new_device(ocs); if (rc) { ocs_log_err(ocs, "failed to initialize target\n"); goto ocs_xport_init_cleanup; } else { tgt_device_set = TRUE; } } if (ocs->enable_ini) { rc = ocs_scsi_ini_new_device(ocs); if (rc) { ocs_log_err(ocs, "failed to initialize initiator\n"); goto ocs_xport_init_cleanup; } else { ini_device_set = TRUE; } } /* Add vports */ if (ocs->num_vports != 0) { uint32_t max_vports; ocs_hw_get(&ocs->hw, OCS_HW_MAX_VPORTS, &max_vports); if (ocs->num_vports < max_vports) { ocs_log_debug(ocs, "Provisioning %d vports\n", ocs->num_vports); for (i = 0; i <
ocs->num_vports; i++) { ocs_vport_create_spec(ocs, 0, 0, UINT32_MAX, ocs->enable_ini, ocs->enable_tgt, NULL, NULL); } } else { ocs_log_err(ocs, "failed to create vports. num_vports range should be (1-%d) \n", max_vports-1); goto ocs_xport_init_cleanup; } } return 0; ocs_xport_init_cleanup: if (ini_device_set) { ocs_scsi_ini_del_device(ocs); } if (tgt_device_set) { ocs_scsi_tgt_del_device(ocs); } if (hw_initialized) { /* ocs_hw_teardown can only execute after ocs_hw_init */ ocs_hw_teardown(&ocs->hw); } return -1; } /** * @brief Detaches the transport from the device. * * @par Description * Performs the functions required to shut down a device. * * @param xport Pointer to transport object. * * @return Returns 0 on success or a non-zero value on failure. */ int32_t ocs_xport_detach(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; /* free resources associated with target-server and initiator-client */ if (ocs->config_tgt) ocs_scsi_tgt_del_device(ocs); if (ocs->enable_ini) { ocs_scsi_ini_del_device(ocs); /*Shutdown FC Statistics timer*/ if (ocs_timer_pending(&ocs->xport->stats_timer)) ocs_del_timer(&ocs->xport->stats_timer); } ocs_hw_teardown(&ocs->hw); return 0; } /** * @brief domain list empty callback * * @par Description * Function is invoked when the device domain list goes empty. By convention * @c arg points to an ocs_sem_t instance, that is incremented. * * @param ocs Pointer to device object. * @param arg Pointer to semaphore instance. * * @return None. */ static void ocs_xport_domain_list_empty_cb(ocs_t *ocs, void *arg) { ocs_sem_t *sem = arg; ocs_assert(ocs); ocs_assert(sem); ocs_sem_v(sem); } /** * @brief post node event callback * * @par Description * This function is called from the mailbox completion interrupt context to post an * event to a node object. By doing this in the interrupt context, it has * the benefit of only posting events in the interrupt context, deferring the need to * create a per event node lock. * * @param hw Pointer to HW structure. * @param status Completion status for mailbox command. * @param mqe Mailbox queue completion entry. * @param arg Callback argument. * * @return Returns 0 on success, a negative error code value on failure. */ static int32_t ocs_xport_post_node_event_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_xport_post_node_event_t *payload = arg; if (payload != NULL) { ocs_node_post_event(payload->node, payload->evt, payload->context); ocs_sem_v(&payload->sem); } return 0; } /** * @brief Initiate force free. * * @par Description * Perform force free of OCS. * * @param xport Pointer to transport object. * * @return None. */ static void ocs_xport_force_free(ocs_xport_t *xport) { ocs_t *ocs = xport->ocs; ocs_domain_t *domain; ocs_domain_t *next; ocs_log_debug(ocs, "reset required, do force shutdown\n"); ocs_device_lock(ocs); ocs_list_foreach_safe(&ocs->domain_list, domain, next) { ocs_domain_force_free(domain); } ocs_device_unlock(ocs); } /** * @brief Perform transport attach function. * * @par Description * Perform the attach function, which for the FC transport makes a HW call * to bring up the link. * * @param xport pointer to transport object. * @param cmd command to execute. 
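* * A hypothetical variadic call (the WWNN value is illustrative only; * OCS_XPORT_WWNN_SET consumes its argument with va_arg as a uint64_t): * * ocs_xport_control(xport, OCS_XPORT_WWNN_SET, (uint64_t)0x20000090fa942779ULL);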
/**
 * @brief Perform a transport control function.
 *
 * @par Description
 * Performs the requested control function; for the FC transport this makes
 * HW calls, for example to bring the link up or down.
 *
 * @param xport Pointer to transport object.
 * @param cmd Command to execute.
 *
 * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_PORT_ONLINE)
 * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_PORT_OFFLINE)
 * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_PORT_SHUTDOWN)
 * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_POST_NODE_EVENT, ocs_node_t *node, ocs_sm_event_t, void *context)
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_xport_control(ocs_xport_t *xport, ocs_xport_ctrl_e cmd, ...)
{
    uint32_t rc = 0;
    ocs_t *ocs = NULL;
    va_list argp;

    ocs_assert(xport, -1);
    ocs_assert(xport->ocs, -1);
    ocs = xport->ocs;

    switch (cmd) {
    case OCS_XPORT_PORT_ONLINE: {
        /* Bring the port online. */
        rc = ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_INIT, 0, NULL, NULL);
        if (rc) {
            ocs_log_err(ocs, "%s: Can't init port\n", ocs->desc);
        } else {
            xport->configured_link_state = cmd;
        }
        break;
    }
    case OCS_XPORT_PORT_OFFLINE: {
        if (ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SHUTDOWN, 0, NULL, NULL)) {
            ocs_log_err(ocs, "port shutdown failed\n");
        } else {
            xport->configured_link_state = cmd;
        }
        break;
    }
    case OCS_XPORT_SHUTDOWN: {
        ocs_sem_t sem;
        uint32_t reset_required;

        /*
         * If a PHYSDEV reset was performed (e.g. hw dump), it affects all
         * PCI functions; an orderly shutdown won't work, so force free.
         */
        /* TODO: need to poll this regularly... */
        if (ocs_hw_get(&ocs->hw, OCS_HW_RESET_REQUIRED, &reset_required) != OCS_HW_RTN_SUCCESS) {
            reset_required = 0;
        }

        if (reset_required) {
            ocs_log_debug(ocs, "reset required, do force shutdown\n");
            ocs_xport_force_free(xport);
            break;
        }

        ocs_sem_init(&sem, 0, "domain_list_sem");
        ocs_register_domain_list_empty_cb(ocs, ocs_xport_domain_list_empty_cb, &sem);

        if (ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SHUTDOWN, 0, NULL, NULL)) {
            ocs_log_debug(ocs, "port shutdown failed, do force shutdown\n");
            ocs_xport_force_free(xport);
        } else {
            ocs_log_debug(ocs, "Waiting %d seconds for domain shutdown\n",
                (OCS_FC_DOMAIN_SHUTDOWN_TIMEOUT_USEC / 1000000));

            rc = ocs_sem_p(&sem, OCS_FC_DOMAIN_SHUTDOWN_TIMEOUT_USEC);
            if (rc) {
                ocs_log_debug(ocs, "Note: Domain shutdown timed out\n");
                ocs_xport_force_free(xport);
            }
        }

        ocs_register_domain_list_empty_cb(ocs, NULL, NULL);

        /* Free up any saved virtual ports. */
        ocs_vport_del_all(ocs);
        break;
    }

    /*
     * POST_NODE_EVENT: post an event to a node object.
     *
     * This is done by submitting a NOP mailbox command to defer execution
     * to the interrupt context, which enforces serialized execution of
     * event posting to the node state machine instances.
     *
     * A counting semaphore makes the call synchronous: we wait until the
     * callback increments the semaphore before returning (or time out).
     */
    case OCS_XPORT_POST_NODE_EVENT: {
        ocs_node_t *node;
        ocs_sm_event_t evt;
        void *context;
        ocs_xport_post_node_event_t payload;
        ocs_t *ocs;
        ocs_hw_t *hw;

        /* Retrieve the arguments. */
        va_start(argp, cmd);
        node = va_arg(argp, ocs_node_t *);
        evt = va_arg(argp, ocs_sm_event_t);
        context = va_arg(argp, void *);
        va_end(argp);

        ocs_assert(node, -1);
        ocs_assert(node->ocs, -1);

        ocs = node->ocs;
        hw = &ocs->hw;

        /* If the node's state machine is disabled, don't bother continuing. */
        if (!node->sm.current_state) {
            ocs_log_test(ocs, "node %p state machine disabled\n", node);
            return -1;
        }

        /* Set up the payload. */
        ocs_memset(&payload, 0, sizeof(payload));
        ocs_sem_init(&payload.sem, 0, "xport_post_node_event");
        payload.node = node;
        payload.evt = evt;
        payload.context = context;

        if (ocs_hw_async_call(hw, ocs_xport_post_node_event_cb, &payload)) {
            ocs_log_test(ocs, "ocs_hw_async_call failed\n");
            rc = -1;
            break;
        }

        /* Wait for completion. */
        if (ocs_sem_p(&payload.sem, OCS_SEM_FOREVER)) {
            ocs_log_test(ocs, "POST_NODE_EVENT: sem wait failed\n");
            rc = -1;
        }
        break;
    }

    /*
     * Set the WWNN for the port; this is used instead of the default
     * provided by the firmware.
     */
    case OCS_XPORT_WWNN_SET: {
        uint64_t wwnn;

        /* Retrieve the argument. */
        va_start(argp, cmd);
        wwnn = va_arg(argp, uint64_t);
        va_end(argp);

        ocs_log_debug(ocs, "WWNN %016" PRIx64 "\n", wwnn);
        xport->req_wwnn = wwnn;
        break;
    }

    /*
     * Set the WWPN for the port; this is used instead of the default
     * provided by the firmware.
     */
    case OCS_XPORT_WWPN_SET: {
        uint64_t wwpn;

        /* Retrieve the argument. */
        va_start(argp, cmd);
        wwpn = va_arg(argp, uint64_t);
        va_end(argp);

        ocs_log_debug(ocs, "WWPN %016" PRIx64 "\n", wwpn);
        xport->req_wwpn = wwpn;
        break;
    }

    default:
        break;
    }

    return rc;
}
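/*
 * Illustrative sketch (not part of this driver, hence the #if 0 guard):
 * typical calls into ocs_xport_control(). The surrounding function and the
 * WWN values are hypothetical; the commands and variadic calling
 * conventions are the ones documented above. OCS_EVT_SHUTDOWN is assumed
 * from the driver's state-machine headers.
 */
#if 0
static void
example_xport_control_usage(ocs_xport_t *xport, ocs_node_t *node)
{
    /* Override the firmware-provided WWNN/WWPN before bringing the link up. */
    ocs_xport_control(xport, OCS_XPORT_WWNN_SET, (uint64_t)0x20000090fa000001ull);
    ocs_xport_control(xport, OCS_XPORT_WWPN_SET, (uint64_t)0x10000090fa000001ull);

    /* Bring the port online. */
    if (ocs_xport_control(xport, OCS_XPORT_PORT_ONLINE) != 0) {
        /* handle the error */
    }

    /* Synchronously post an event to a node's state machine. */
    ocs_xport_control(xport, OCS_XPORT_POST_NODE_EVENT, node, OCS_EVT_SHUTDOWN, NULL);
}
#endif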
/**
 * @brief Return status on a link.
 *
 * @par Description
 * Returns status information about a link.
 *
 * @param xport Pointer to transport object.
 * @param cmd Command to execute.
 * @param result Pointer to result value.
 *
 * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_PORT_STATUS)
 * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_LINK_SPEED, ocs_xport_stats_t *result)
 *     returns the link speed in MB/s
 * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_IS_SUPPORTED_LINK_SPEED, ocs_xport_stats_t *result)
 *     [in] *result is the speed to check in MB/s
 *     returns 1 if supported, 0 if not
 * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_LINK_STATISTICS, ocs_xport_stats_t *result)
 *     returns the link/host port stats
 * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_LINK_STAT_RESET, ocs_xport_stats_t *result)
 *     resets the link/host stats
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_xport_status(ocs_xport_t *xport, ocs_xport_status_e cmd, ocs_xport_stats_t *result)
{
    uint32_t rc = 0;
    ocs_t *ocs = NULL;
    ocs_xport_stats_t value;
    ocs_hw_rtn_e hw_rc;

    ocs_assert(xport, -1);
    ocs_assert(xport->ocs, -1);

    ocs = xport->ocs;

    switch (cmd) {
    case OCS_XPORT_CONFIG_PORT_STATUS:
        ocs_assert(result, -1);
        if (xport->configured_link_state == 0) {
            /*
             * The initial state is offline; configured_link_state is set
             * to online explicitly when the port is brought online.
             */
            xport->configured_link_state = OCS_XPORT_PORT_OFFLINE;
        }
        result->value = xport->configured_link_state;
        break;

    case OCS_XPORT_PORT_STATUS:
        ocs_assert(result, -1);
        /* Determine the port status based on the link speed. */
        hw_rc = ocs_hw_get(&(ocs->hw), OCS_HW_LINK_SPEED, &value.value);
        if (hw_rc == OCS_HW_RTN_SUCCESS) {
            if (value.value == 0) {
                result->value = 0;
            } else {
                result->value = 1;
            }
            rc = 0;
        } else {
            rc = -1;
        }
        break;

    case OCS_XPORT_LINK_SPEED: {
        uint32_t speed;

        ocs_assert(result, -1);
        result->value = 0;

        rc = ocs_hw_get(&ocs->hw, OCS_HW_LINK_SPEED, &speed);
        if (rc == 0) {
            result->value = speed;
        }
        break;
    }

    case OCS_XPORT_IS_SUPPORTED_LINK_SPEED: {
        uint32_t speed;
        uint32_t link_module_type;

        ocs_assert(result, -1);
        speed = result->value;

        rc = ocs_hw_get(&ocs->hw, OCS_HW_LINK_MODULE_TYPE, &link_module_type);
        if (rc == 0) {
            switch (speed) {
            case 1000:  rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_1GB) != 0; break;
            case 2000:  rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_2GB) != 0; break;
            case 4000:  rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_4GB) != 0; break;
            case 8000:  rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_8GB) != 0; break;
            case 10000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_10GB) != 0; break;
            case 16000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_16GB) != 0; break;
            case 32000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_32GB) != 0; break;
            default:    rc = 0; break;
            }
        } else {
            rc = 0;
        }
        break;
    }

    case OCS_XPORT_LINK_STATISTICS:
        ocs_device_lock(ocs);
        ocs_memcpy((void *)result, &ocs->xport->fc_xport_stats, sizeof(ocs_xport_stats_t));
        ocs_device_unlock(ocs);
        break;

    case OCS_XPORT_LINK_STAT_RESET: {
        /* Create a semaphore to synchronize the stat reset process. */
        ocs_sem_init(&(result->stats.semaphore), 0, "fc_stats_reset");

        /* First reset the link stats. */
        if ((rc = ocs_hw_get_link_stats(&ocs->hw, 0, 1, 1, ocs_xport_link_stats_cb, result)) != 0) {
            ocs_log_err(ocs, "%s: Failed to reset link statistics\n", __func__);
            break;
        }

        /* Wait for the semaphore to be signaled when the command completes. */
        /* TODO: Should there be a timeout on this? If so, how long? */
        if (ocs_sem_p(&(result->stats.semaphore), OCS_SEM_FOREVER) != 0) {
            /* Undefined failure */
            ocs_log_test(ocs, "ocs_sem_p failed\n");
            rc = -ENXIO;
            break;
        }

        /* Next reset the host stats. */
        if ((rc = ocs_hw_get_host_stats(&ocs->hw, 1, ocs_xport_host_stats_cb, result)) != 0) {
            ocs_log_err(ocs, "%s: Failed to reset host statistics\n", __func__);
            break;
        }

        /* Wait for the semaphore to be signaled when the command completes. */
        if (ocs_sem_p(&(result->stats.semaphore), OCS_SEM_FOREVER) != 0) {
            /* Undefined failure */
            ocs_log_test(ocs, "ocs_sem_p failed\n");
            rc = -ENXIO;
            break;
        }
        break;
    }

    case OCS_XPORT_IS_QUIESCED:
        ocs_device_lock(ocs);
        result->value = ocs_list_empty(&ocs->domain_list);
        ocs_device_unlock(ocs);
        break;

    default:
        rc = -1;
        break;
    }

    return rc;
}
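/*
 * Illustrative sketch (not part of this driver, hence the #if 0 guard):
 * querying link state through ocs_xport_status(). The surrounding function
 * is hypothetical; the commands and the meaning of result.value follow the
 * documentation above (speeds in MB/s).
 */
#if 0
static void
example_xport_status_usage(ocs_xport_t *xport)
{
    ocs_xport_stats_t result;

    /* Is the link up? (result.value: 1 = up, 0 = down) */
    if (ocs_xport_status(xport, OCS_XPORT_PORT_STATUS, &result) == 0 && result.value) {
        /* Fetch the current link speed. */
        ocs_xport_status(xport, OCS_XPORT_LINK_SPEED, &result);
    }

    /* Check whether 16G is a supported speed on this link module. */
    result.value = 16000;
    if (ocs_xport_status(xport, OCS_XPORT_IS_SUPPORTED_LINK_SPEED, &result) == 1) {
        /* 16G is supported */
    }
}
#endif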
static void
ocs_xport_link_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_link_stat_counts_t *counters, void *arg)
{
    ocs_xport_stats_t *result = arg;

    result->stats.link_stats.link_failure_error_count = counters[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter;
    result->stats.link_stats.loss_of_sync_error_count = counters[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter;
    result->stats.link_stats.primitive_sequence_error_count = counters[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter;
    result->stats.link_stats.invalid_transmission_word_error_count = counters[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter;
    result->stats.link_stats.crc_error_count = counters[OCS_HW_LINK_STAT_CRC_COUNT].counter;

    ocs_sem_v(&(result->stats.semaphore));
}

static void
ocs_xport_host_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_host_stat_counts_t *counters, void *arg)
{
    ocs_xport_stats_t *result = arg;

    result->stats.host_stats.transmit_kbyte_count = counters[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter;
    result->stats.host_stats.receive_kbyte_count = counters[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter;
    result->stats.host_stats.transmit_frame_count = counters[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter;
    result->stats.host_stats.receive_frame_count = counters[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter;

    ocs_sem_v(&(result->stats.semaphore));
}

/**
 * @brief Free a transport object.
 *
 * @par Description
 * The transport object is freed.
 *
 * @param xport Pointer to transport object.
 *
 * @return None.
 */
void
ocs_xport_free(ocs_xport_t *xport)
{
    ocs_t *ocs;
    uint32_t i;

    if (xport) {
        ocs = xport->ocs;
        ocs_io_pool_free(xport->io_pool);
        ocs_node_free_pool(ocs);

        if (mtx_initialized(&xport->io_pending_lock.lock))
            ocs_lock_free(&xport->io_pending_lock);

        for (i = 0; i < SLI4_MAX_FCFI; i++) {
            ocs_lock_free(&xport->fcfi[i].pend_frames_lock);
        }

        ocs_xport_rq_threads_teardown(xport);
        ocs_free(ocs, xport, sizeof(*xport));
    }
}
Index: stable/11
===================================================================
--- stable/11	(revision 344013)
+++ stable/11	(revision 344014)

Property changes on: stable/11
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r336446