Index: head/sys/cam/cam_ccb.h =================================================================== --- head/sys/cam/cam_ccb.h (revision 367043) +++ head/sys/cam/cam_ccb.h (revision 367044) @@ -1,1538 +1,1542 @@ /*- * Data structures and definitions for CAM Control Blocks (CCBs). * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_CCB_H #define _CAM_CAM_CCB_H 1 #include #include #include #include #ifndef _KERNEL #include #endif #include #include #include #include #include /* General allocation length definitions for CCB structures */ #define IOCDBLEN CAM_MAX_CDBLEN /* Space for CDB bytes/pointer */ #define VUHBALEN 14 /* Vendor Unique HBA length */ #define SIM_IDLEN 16 /* ASCII string len for SIM ID */ #define HBA_IDLEN 16 /* ASCII string len for HBA ID */ #define DEV_IDLEN 16 /* ASCII string len for device names */ #define CCB_PERIPH_PRIV_SIZE 2 /* size of peripheral private area */ #define CCB_SIM_PRIV_SIZE 2 /* size of sim private area */ /* Struct definitions for CAM control blocks */ /* Common CCB header */ /* CAM CCB flags */ typedef enum { CAM_CDB_POINTER = 0x00000001,/* The CDB field is a pointer */ CAM_unused1 = 0x00000002, CAM_unused2 = 0x00000004, CAM_NEGOTIATE = 0x00000008,/* * Perform transport negotiation * with this command. 
*/ CAM_DATA_ISPHYS = 0x00000010,/* Data type with physical addrs */ CAM_DIS_AUTOSENSE = 0x00000020,/* Disable autosense feature */ CAM_DIR_BOTH = 0x00000000,/* Data direction (00:IN/OUT) */ CAM_DIR_IN = 0x00000040,/* Data direction (01:DATA IN) */ CAM_DIR_OUT = 0x00000080,/* Data direction (10:DATA OUT) */ CAM_DIR_NONE = 0x000000C0,/* Data direction (11:no data) */ CAM_DIR_MASK = 0x000000C0,/* Data direction Mask */ CAM_DATA_VADDR = 0x00000000,/* Data type (000:Virtual) */ CAM_DATA_PADDR = 0x00000010,/* Data type (001:Physical) */ CAM_DATA_SG = 0x00040000,/* Data type (010:sglist) */ CAM_DATA_SG_PADDR = 0x00040010,/* Data type (011:sglist phys) */ CAM_DATA_BIO = 0x00200000,/* Data type (100:bio) */ CAM_DATA_MASK = 0x00240010,/* Data type mask */ CAM_unused3 = 0x00000100, CAM_unused4 = 0x00000200, CAM_DEV_QFRZDIS = 0x00000400,/* Disable DEV Q freezing */ CAM_DEV_QFREEZE = 0x00000800,/* Freeze DEV Q on execution */ CAM_HIGH_POWER = 0x00001000,/* Command takes a lot of power */ CAM_SENSE_PTR = 0x00002000,/* Sense data is a pointer */ CAM_SENSE_PHYS = 0x00004000,/* Sense pointer is physical addr*/ CAM_TAG_ACTION_VALID = 0x00008000,/* Use the tag action in this ccb*/ CAM_PASS_ERR_RECOVER = 0x00010000,/* Pass driver does err. recovery*/ CAM_DIS_DISCONNECT = 0x00020000,/* Disable disconnect */ CAM_unused5 = 0x00080000, CAM_unused6 = 0x00100000, CAM_CDB_PHYS = 0x00400000,/* CDB poiner is physical */ CAM_unused7 = 0x00800000, /* Phase cognizant mode flags */ CAM_unused8 = 0x01000000, CAM_unused9 = 0x02000000, CAM_unused10 = 0x04000000, CAM_unused11 = 0x08000000, CAM_unused12 = 0x10000000, CAM_unused13 = 0x20000000, CAM_unused14 = 0x40000000, /* Host target Mode flags */ CAM_SEND_SENSE = 0x08000000,/* Send sense data with status */ CAM_unused15 = 0x10000000, CAM_unused16 = 0x20000000, CAM_SEND_STATUS = 0x40000000,/* Send status after data phase */ CAM_UNLOCKED = 0x80000000 /* Call callback without lock. */ } ccb_flags; typedef enum { CAM_USER_DATA_ADDR = 0x00000002,/* Userspace data pointers */ CAM_SG_FORMAT_IOVEC = 0x00000004,/* iovec instead of busdma S/G*/ CAM_UNMAPPED_BUF = 0x00000008 /* use unmapped I/O */ } ccb_xflags; /* XPT Opcodes for xpt_action */ typedef enum { /* Function code flags are bits greater than 0xff */ XPT_FC_QUEUED = 0x100, /* Non-immediate function code */ XPT_FC_USER_CCB = 0x200, XPT_FC_XPT_ONLY = 0x400, /* Only for the transport layer device */ XPT_FC_DEV_QUEUED = 0x800 | XPT_FC_QUEUED, /* Passes through the device queues */ /* Common function commands: 0x00->0x0F */ XPT_NOOP = 0x00, /* Execute Nothing */ XPT_SCSI_IO = 0x01 | XPT_FC_DEV_QUEUED, /* Execute the requested I/O operation */ XPT_GDEV_TYPE = 0x02, /* Get type information for specified device */ XPT_GDEVLIST = 0x03, /* Get a list of peripheral devices */ XPT_PATH_INQ = 0x04, /* Path routing inquiry */ XPT_REL_SIMQ = 0x05, /* Release a frozen device queue */ XPT_SASYNC_CB = 0x06, /* Set Asynchronous Callback Parameters */ XPT_SDEV_TYPE = 0x07, /* Set device type information */ XPT_SCAN_BUS = 0x08 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* (Re)Scan the SCSI Bus */ XPT_DEV_MATCH = 0x09 | XPT_FC_XPT_ONLY, /* Get EDT entries matching the given pattern */ XPT_DEBUG = 0x0a, /* Turn on debugging for a bus, target or lun */ XPT_PATH_STATS = 0x0b, /* Path statistics (error counts, etc.) */ XPT_GDEV_STATS = 0x0c, /* Device statistics (error counts, etc.) 
*/ XPT_DEV_ADVINFO = 0x0e, /* Get/Set Device advanced information */ XPT_ASYNC = 0x0f | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Asynchronous event */ /* SCSI Control Functions: 0x10->0x1F */ XPT_ABORT = 0x10, /* Abort the specified CCB */ XPT_RESET_BUS = 0x11 | XPT_FC_XPT_ONLY, /* Reset the specified SCSI bus */ XPT_RESET_DEV = 0x12 | XPT_FC_DEV_QUEUED, /* Bus Device Reset the specified SCSI device */ XPT_TERM_IO = 0x13, /* Terminate the I/O process */ XPT_SCAN_LUN = 0x14 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Logical Unit */ XPT_GET_TRAN_SETTINGS = 0x15, /* * Get default/user transfer settings * for the target */ XPT_SET_TRAN_SETTINGS = 0x16, /* * Set transfer rate/width * negotiation settings */ XPT_CALC_GEOMETRY = 0x17, /* * Calculate the geometry parameters for * a device give the sector size and * volume size. */ XPT_ATA_IO = 0x18 | XPT_FC_DEV_QUEUED, /* Execute the requested ATA I/O operation */ XPT_GET_SIM_KNOB_OLD = 0x18, /* Compat only */ XPT_SET_SIM_KNOB = 0x19, /* * Set SIM specific knob values. */ XPT_GET_SIM_KNOB = 0x1a, /* * Get SIM specific knob values. */ XPT_SMP_IO = 0x1b | XPT_FC_DEV_QUEUED, /* Serial Management Protocol */ XPT_NVME_IO = 0x1c | XPT_FC_DEV_QUEUED, /* Execute the requested NVMe I/O operation */ XPT_MMC_IO = 0x1d | XPT_FC_DEV_QUEUED, /* Placeholder for MMC / SD / SDIO I/O stuff */ XPT_SCAN_TGT = 0x1e | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Target */ XPT_NVME_ADMIN = 0x1f | XPT_FC_DEV_QUEUED, /* Execute the requested NVMe Admin operation */ /* HBA engine commands 0x20->0x2F */ XPT_ENG_INQ = 0x20 | XPT_FC_XPT_ONLY, /* HBA engine feature inquiry */ XPT_ENG_EXEC = 0x21 | XPT_FC_DEV_QUEUED, /* HBA execute engine request */ /* Target mode commands: 0x30->0x3F */ XPT_EN_LUN = 0x30, /* Enable LUN as a target */ XPT_TARGET_IO = 0x31 | XPT_FC_DEV_QUEUED, /* Execute target I/O request */ XPT_ACCEPT_TARGET_IO = 0x32 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Accept Host Target Mode CDB */ XPT_CONT_TARGET_IO = 0x33 | XPT_FC_DEV_QUEUED, /* Continue Host Target I/O Connection */ XPT_IMMED_NOTIFY = 0x34 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event (obsolete) */ XPT_NOTIFY_ACK = 0x35, /* Acknowledgement of event (obsolete) */ XPT_IMMEDIATE_NOTIFY = 0x36 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event */ XPT_NOTIFY_ACKNOWLEDGE = 0x37 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Acknowledgement of event */ XPT_REPROBE_LUN = 0x38 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Query device capacity and notify GEOM */ /* Vendor Unique codes: 0x80->0x8F */ XPT_VUNIQUE = 0x80 } xpt_opcode; #define XPT_FC_GROUP_MASK 0xF0 #define XPT_FC_GROUP(op) ((op) & XPT_FC_GROUP_MASK) #define XPT_FC_GROUP_COMMON 0x00 #define XPT_FC_GROUP_SCSI_CONTROL 0x10 #define XPT_FC_GROUP_HBA_ENGINE 0x20 #define XPT_FC_GROUP_TMODE 0x30 #define XPT_FC_GROUP_VENDOR_UNIQUE 0x80 #define XPT_FC_IS_DEV_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_DEV_QUEUED) == XPT_FC_DEV_QUEUED) #define XPT_FC_IS_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_QUEUED) != 0) typedef enum { PROTO_UNKNOWN, PROTO_UNSPECIFIED, PROTO_SCSI, /* Small Computer System Interface */ PROTO_ATA, /* AT Attachment */ PROTO_ATAPI, /* AT Attachment Packetized Interface */ PROTO_SATAPM, /* SATA Port Multiplier */ PROTO_SEMB, /* SATA Enclosure Management Bridge */ PROTO_NVME, /* NVME */ PROTO_MMCSD, /* MMC, SD, SDIO */ } cam_proto; typedef enum { XPORT_UNKNOWN, XPORT_UNSPECIFIED, XPORT_SPI, /* SCSI Parallel Interface */ XPORT_FC, /* Fiber Channel */ 
XPORT_SSA, /* Serial Storage Architecture */ XPORT_USB, /* Universal Serial Bus */ XPORT_PPB, /* Parallel Port Bus */ XPORT_ATA, /* AT Attachment */ XPORT_SAS, /* Serial Attached SCSI */ XPORT_SATA, /* Serial AT Attachment */ XPORT_ISCSI, /* iSCSI */ XPORT_SRP, /* SCSI RDMA Protocol */ XPORT_NVME, /* NVMe over PCIe */ XPORT_MMCSD, /* MMC, SD, SDIO card */ } cam_xport; #define XPORT_IS_NVME(t) ((t) == XPORT_NVME) #define XPORT_IS_ATA(t) ((t) == XPORT_ATA || (t) == XPORT_SATA) #define XPORT_IS_SCSI(t) ((t) != XPORT_UNKNOWN && \ (t) != XPORT_UNSPECIFIED && \ !XPORT_IS_ATA(t) && !XPORT_IS_NVME(t)) #define XPORT_DEVSTAT_TYPE(t) (XPORT_IS_ATA(t) ? DEVSTAT_TYPE_IF_IDE : \ XPORT_IS_SCSI(t) ? DEVSTAT_TYPE_IF_SCSI : \ DEVSTAT_TYPE_IF_OTHER) #define PROTO_VERSION_UNKNOWN (UINT_MAX - 1) #define PROTO_VERSION_UNSPECIFIED UINT_MAX #define XPORT_VERSION_UNKNOWN (UINT_MAX - 1) #define XPORT_VERSION_UNSPECIFIED UINT_MAX typedef union { LIST_ENTRY(ccb_hdr) le; SLIST_ENTRY(ccb_hdr) sle; TAILQ_ENTRY(ccb_hdr) tqe; STAILQ_ENTRY(ccb_hdr) stqe; } camq_entry; typedef union { void *ptr; u_long field; u_int8_t bytes[sizeof(uintptr_t)]; } ccb_priv_entry; typedef union { ccb_priv_entry entries[CCB_PERIPH_PRIV_SIZE]; u_int8_t bytes[CCB_PERIPH_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_ppriv_area; typedef union { ccb_priv_entry entries[CCB_SIM_PRIV_SIZE]; u_int8_t bytes[CCB_SIM_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_spriv_area; typedef struct { struct timeval *etime; uintptr_t sim_data; uintptr_t periph_data; } ccb_qos_area; struct ccb_hdr { cam_pinfo pinfo; /* Info for priority scheduling */ camq_entry xpt_links; /* For chaining in the XPT layer */ camq_entry sim_links; /* For chaining in the SIM layer */ camq_entry periph_links; /* For chaining in the type driver */ u_int32_t retry_count; void (*cbfcnp)(struct cam_periph *, union ccb *); /* Callback on completion function */ xpt_opcode func_code; /* XPT function code */ u_int32_t status; /* Status returned by CAM subsystem */ struct cam_path *path; /* Compiled path for this ccb */ path_id_t path_id; /* Path ID for the request */ target_id_t target_id; /* Target device ID */ lun_id_t target_lun; /* Target LUN number */ u_int32_t flags; /* ccb_flags */ u_int32_t xflags; /* Extended flags */ ccb_ppriv_area periph_priv; ccb_spriv_area sim_priv; ccb_qos_area qos; u_int32_t timeout; /* Hard timeout value in mseconds */ struct timeval softtimeout; /* Soft timeout value in sec + usec */ }; /* Get Device Information CCB */ struct ccb_getdev { struct ccb_hdr ccb_h; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; u_int8_t serial_num[252]; u_int8_t inq_flags; u_int8_t serial_num_len; void *padding[2]; }; /* Device Statistics CCB */ struct ccb_getdevstats { struct ccb_hdr ccb_h; int dev_openings; /* Space left for more work on device*/ int dev_active; /* Transactions running on the device */ int allocated; /* CCBs allocated for the device */ int queued; /* CCBs queued to be sent to the device */ int held; /* * CCBs held by peripheral drivers * for this device */ int maxtags; /* * Boundary conditions for number of * tagged operations */ int mintags; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { CAM_GDEVLIST_LAST_DEVICE, CAM_GDEVLIST_LIST_CHANGED, CAM_GDEVLIST_MORE_DEVS, CAM_GDEVLIST_ERROR } ccb_getdevlist_status_e; struct ccb_getdevlist { struct ccb_hdr ccb_h; char periph_name[DEV_IDLEN]; u_int32_t unit_number; unsigned int generation; u_int32_t index; ccb_getdevlist_status_e status; }; typedef enum { 
PERIPH_MATCH_NONE = 0x000, PERIPH_MATCH_PATH = 0x001, PERIPH_MATCH_TARGET = 0x002, PERIPH_MATCH_LUN = 0x004, PERIPH_MATCH_NAME = 0x008, PERIPH_MATCH_UNIT = 0x010, PERIPH_MATCH_ANY = 0x01f } periph_pattern_flags; struct periph_match_pattern { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; periph_pattern_flags flags; }; typedef enum { DEV_MATCH_NONE = 0x000, DEV_MATCH_PATH = 0x001, DEV_MATCH_TARGET = 0x002, DEV_MATCH_LUN = 0x004, DEV_MATCH_INQUIRY = 0x008, DEV_MATCH_DEVID = 0x010, DEV_MATCH_ANY = 0x00f } dev_pattern_flags; struct device_id_match_pattern { uint8_t id_len; uint8_t id[256]; }; struct device_match_pattern { path_id_t path_id; target_id_t target_id; lun_id_t target_lun; dev_pattern_flags flags; union { struct scsi_static_inquiry_pattern inq_pat; struct device_id_match_pattern devid_pat; } data; }; typedef enum { BUS_MATCH_NONE = 0x000, BUS_MATCH_PATH = 0x001, BUS_MATCH_NAME = 0x002, BUS_MATCH_UNIT = 0x004, BUS_MATCH_BUS_ID = 0x008, BUS_MATCH_ANY = 0x00f } bus_pattern_flags; struct bus_match_pattern { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; bus_pattern_flags flags; }; union match_pattern { struct periph_match_pattern periph_pattern; struct device_match_pattern device_pattern; struct bus_match_pattern bus_pattern; }; typedef enum { DEV_MATCH_PERIPH, DEV_MATCH_DEVICE, DEV_MATCH_BUS } dev_match_type; struct dev_match_pattern { dev_match_type type; union match_pattern pattern; }; struct periph_match_result { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; }; typedef enum { DEV_RESULT_NOFLAG = 0x00, DEV_RESULT_UNCONFIGURED = 0x01 } dev_result_flags; struct device_match_result { path_id_t path_id; target_id_t target_id; lun_id_t target_lun; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; dev_result_flags flags; }; struct bus_match_result { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; }; union match_result { struct periph_match_result periph_result; struct device_match_result device_result; struct bus_match_result bus_result; }; struct dev_match_result { dev_match_type type; union match_result result; }; typedef enum { CAM_DEV_MATCH_LAST, CAM_DEV_MATCH_MORE, CAM_DEV_MATCH_LIST_CHANGED, CAM_DEV_MATCH_SIZE_ERROR, CAM_DEV_MATCH_ERROR } ccb_dev_match_status; typedef enum { CAM_DEV_POS_NONE = 0x000, CAM_DEV_POS_BUS = 0x001, CAM_DEV_POS_TARGET = 0x002, CAM_DEV_POS_DEVICE = 0x004, CAM_DEV_POS_PERIPH = 0x008, CAM_DEV_POS_PDPTR = 0x010, CAM_DEV_POS_TYPEMASK = 0xf00, CAM_DEV_POS_EDT = 0x100, CAM_DEV_POS_PDRV = 0x200 } dev_pos_type; struct ccb_dm_cookie { void *bus; void *target; void *device; void *periph; void *pdrv; }; struct ccb_dev_position { u_int generations[4]; #define CAM_BUS_GENERATION 0x00 #define CAM_TARGET_GENERATION 0x01 #define CAM_DEV_GENERATION 0x02 #define CAM_PERIPH_GENERATION 0x03 dev_pos_type position_type; struct ccb_dm_cookie cookie; }; struct ccb_dev_match { struct ccb_hdr ccb_h; ccb_dev_match_status status; u_int32_t num_patterns; u_int32_t pattern_buf_len; struct dev_match_pattern *patterns; u_int32_t num_matches; u_int32_t match_buf_len; struct dev_match_result *matches; struct ccb_dev_position pos; }; /* * Definitions for the path inquiry CCB fields. 
*/ #define CAM_VERSION 0x19 /* Hex value for current version */ typedef enum { PI_MDP_ABLE = 0x80, /* Supports MDP message */ PI_WIDE_32 = 0x40, /* Supports 32 bit wide SCSI */ PI_WIDE_16 = 0x20, /* Supports 16 bit wide SCSI */ PI_SDTR_ABLE = 0x10, /* Supports SDTR message */ PI_LINKED_CDB = 0x08, /* Supports linked CDBs */ PI_SATAPM = 0x04, /* Supports SATA PM */ PI_TAG_ABLE = 0x02, /* Supports tag queue messages */ PI_SOFT_RST = 0x01 /* Supports soft reset alternative */ } pi_inqflag; typedef enum { PIT_PROCESSOR = 0x80, /* Target mode processor mode */ PIT_PHASE = 0x40, /* Target mode phase cog. mode */ PIT_DISCONNECT = 0x20, /* Disconnects supported in target mode */ PIT_TERM_IO = 0x10, /* Terminate I/O message supported in TM */ PIT_GRP_6 = 0x08, /* Group 6 commands supported */ PIT_GRP_7 = 0x04 /* Group 7 commands supported */ } pi_tmflag; typedef enum { PIM_ATA_EXT = 0x200,/* ATA requests can understand ata_ext requests */ PIM_EXTLUNS = 0x100,/* 64bit extended LUNs supported */ PIM_SCANHILO = 0x80, /* Bus scans from high ID to low ID */ PIM_NOREMOVE = 0x40, /* Removeable devices not included in scan */ PIM_NOINITIATOR = 0x20, /* Initiator role not supported. */ PIM_NOBUSRESET = 0x10, /* User has disabled initial BUS RESET */ PIM_NO_6_BYTE = 0x08, /* Do not send 6-byte commands */ PIM_SEQSCAN = 0x04, /* Do bus scans sequentially, not in parallel */ PIM_UNMAPPED = 0x02, PIM_NOSCAN = 0x01 /* SIM does its own scanning */ } pi_miscflag; /* Path Inquiry CCB */ struct ccb_pathinq_settings_spi { u_int8_t ppr_options; }; struct ccb_pathinq_settings_fc { u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int32_t port; /* 24 bit port id, if known */ u_int32_t bitrate; /* Mbps */ }; struct ccb_pathinq_settings_sas { u_int32_t bitrate; /* Mbps */ }; #define NVME_DEV_NAME_LEN 52 struct ccb_pathinq_settings_nvme { uint32_t nsid; /* Namespace ID for this path */ uint32_t domain; uint8_t bus; uint8_t slot; uint8_t function; uint8_t extra; char dev_name[NVME_DEV_NAME_LEN]; /* nvme controller dev name for this device */ }; _Static_assert(sizeof(struct ccb_pathinq_settings_nvme) == 64, "ccb_pathinq_settings_nvme too big"); #define PATHINQ_SETTINGS_SIZE 128 struct ccb_pathinq { struct ccb_hdr ccb_h; u_int8_t version_num; /* Version number for the SIM/HBA */ u_int8_t hba_inquiry; /* Mimic of INQ byte 7 for the HBA */ u_int16_t target_sprt; /* Flags for target mode support */ u_int32_t hba_misc; /* Misc HBA features */ u_int16_t hba_eng_cnt; /* HBA engine count */ /* Vendor Unique capabilities */ u_int8_t vuhba_flags[VUHBALEN]; u_int32_t max_target; /* Maximum supported Target */ u_int32_t max_lun; /* Maximum supported Lun */ u_int32_t async_flags; /* Installed Async handlers */ path_id_t hpath_id; /* Highest Path ID in the subsystem */ target_id_t initiator_id; /* ID of the HBA on the SCSI bus */ char sim_vid[SIM_IDLEN]; /* Vendor ID of the SIM */ char hba_vid[HBA_IDLEN]; /* Vendor ID of the HBA */ char dev_name[DEV_IDLEN];/* Device name for SIM */ u_int32_t unit_number; /* Unit number for SIM */ u_int32_t bus_id; /* Bus ID for SIM */ u_int32_t base_transfer_speed;/* Base bus speed in KB/sec */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { struct ccb_pathinq_settings_spi spi; struct ccb_pathinq_settings_fc fc; struct ccb_pathinq_settings_sas sas; struct ccb_pathinq_settings_nvme nvme; char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE]; } xport_specific; u_int maxio; /* Max supported I/O size, in bytes. 
*/ u_int16_t hba_vendor; /* HBA vendor ID */ u_int16_t hba_device; /* HBA device ID */ u_int16_t hba_subvendor; /* HBA subvendor ID */ u_int16_t hba_subdevice; /* HBA subdevice ID */ }; /* Path Statistics CCB */ struct ccb_pathstats { struct ccb_hdr ccb_h; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { SMP_FLAG_NONE = 0x00, SMP_FLAG_REQ_SG = 0x01, SMP_FLAG_RSP_SG = 0x02 } ccb_smp_pass_flags; /* * Serial Management Protocol CCB * XXX Currently the semantics for this CCB are that it is executed either * by the addressed device, or that device's parent (i.e. an expander for * any device on an expander) if the addressed device doesn't support SMP. * Later, once we have the ability to probe SMP-only devices and put them * in CAM's topology, the CCB will only be executed by the addressed device * if possible. */ struct ccb_smpio { struct ccb_hdr ccb_h; uint8_t *smp_request; int smp_request_len; uint16_t smp_request_sglist_cnt; uint8_t *smp_response; int smp_response_len; uint16_t smp_response_sglist_cnt; ccb_smp_pass_flags flags; }; typedef union { u_int8_t *sense_ptr; /* * Pointer to storage * for sense information */ /* Storage Area for sense information */ struct scsi_sense_data sense_buf; } sense_t; typedef union { u_int8_t *cdb_ptr; /* Pointer to the CDB bytes to send */ /* Area for the CDB send */ u_int8_t cdb_bytes[IOCDBLEN]; } cdb_t; /* * SCSI I/O Request CCB used for the XPT_SCSI_IO and XPT_CONT_TARGET_IO * function codes. */ struct ccb_scsiio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ u_int8_t *req_map; /* Ptr to mapping info */ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ /* Autosense storage */ struct scsi_sense_data sense_data; u_int8_t sense_len; /* Number of bytes to autosense */ u_int8_t cdb_len; /* Number of bytes for the CDB */ u_int16_t sglist_cnt; /* Number of SG list entries */ u_int8_t scsi_status; /* Returned SCSI status */ u_int8_t sense_resid; /* Autosense resid length: 2's comp */ u_int32_t resid; /* Transfer residual length: 2's comp */ cdb_t cdb_io; /* Union for CDB bytes/pointer */ u_int8_t *msg_ptr; /* Pointer to the message buffer */ u_int16_t msg_len; /* Number of bytes for the Message */ u_int8_t tag_action; /* What to do for tag queueing */ /* * The tag action should be either the define below (to send a * non-tagged transaction) or one of the defined scsi tag messages * from scsi_message.h. */ #define CAM_TAG_ACTION_NONE 0x00 + uint8_t priority; /* Command priority for SIMPLE tag */ u_int tag_id; /* tag id from initator (target mode) */ u_int init_id; /* initiator id of who selected */ #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) struct bio *bio; /* Associated bio */ #endif }; static __inline uint8_t * scsiio_cdb_ptr(struct ccb_scsiio *ccb) { return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ? ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes); } /* * ATA I/O Request CCB used for the XPT_ATA_IO function code. 
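An illustrative sketch, not part of this revision, of how a peripheral driver might fill the SCSI I/O CCB above using the cam_fill_csio() helper defined near the end of this header, and then assign the SIMPLE-tag priority field that this change adds; the completion handler mydriver_done and the buf/len arguments are hypothetical. The CDB bytes are touched through scsiio_cdb_ptr() so that both the inline cdb_bytes and CAM_CDB_POINTER layouts work:

static void
mydriver_fill(struct ccb_scsiio *csio, uint8_t *buf, uint32_t len)
{
	cam_fill_csio(csio,
	    /*retries*/ 4,
	    /*cbfcnp*/ mydriver_done,		/* hypothetical completion handler */
	    /*flags*/ CAM_DIR_IN | CAM_TAG_ACTION_VALID,
	    /*tag_action*/ MSG_SIMPLE_Q_TAG,
	    /*data_ptr*/ buf,
	    /*dxfer_len*/ len,
	    /*sense_len*/ SSD_FULL_SIZE,
	    /*cdb_len*/ sizeof(struct scsi_read_capacity),
	    /*timeout*/ 30 * 1000);
	scsiio_cdb_ptr(csio)[0] = READ_CAPACITY;
	csio->priority = 1;	/* cam_fill_csio() resets this to 0 */
}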
*/ struct ccb_ataio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct ata_cmd cmd; /* ATA command register set */ struct ata_res res; /* ATA result register set */ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ u_int32_t resid; /* Transfer residual length: 2's comp */ u_int8_t ata_flags; /* Flags for the rest of the buffer */ #define ATA_FLAG_AUX 0x1 uint32_t aux; uint32_t unused; }; /* * MMC I/O Request CCB used for the XPT_MMC_IO function code. */ struct ccb_mmcio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct mmc_command cmd; struct mmc_command stop; }; struct ccb_accept_tio { struct ccb_hdr ccb_h; cdb_t cdb_io; /* Union for CDB bytes/pointer */ u_int8_t cdb_len; /* Number of bytes for the CDB */ u_int8_t tag_action; /* What to do for tag queueing */ u_int8_t sense_len; /* Number of bytes of Sense Data */ + uint8_t priority; /* Command priority for SIMPLE tag */ u_int tag_id; /* tag id from initator (target mode) */ u_int init_id; /* initiator id of who selected */ struct scsi_sense_data sense_data; }; static __inline uint8_t * atio_cdb_ptr(struct ccb_accept_tio *ccb) { return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ? ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes); } /* Release SIM Queue */ struct ccb_relsim { struct ccb_hdr ccb_h; u_int32_t release_flags; #define RELSIM_ADJUST_OPENINGS 0x01 #define RELSIM_RELEASE_AFTER_TIMEOUT 0x02 #define RELSIM_RELEASE_AFTER_CMDCMPLT 0x04 #define RELSIM_RELEASE_AFTER_QEMPTY 0x08 u_int32_t openings; u_int32_t release_timeout; /* Abstract argument. */ u_int32_t qfrozen_cnt; }; /* * NVMe I/O Request CCB used for the XPT_NVME_IO and XPT_NVME_ADMIN function codes. */ struct ccb_nvmeio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct nvme_command cmd; /* NVME command, per NVME standard */ struct nvme_completion cpl; /* NVME completion, per NVME standard */ uint8_t *data_ptr; /* Ptr to the data buf/SG list */ uint32_t dxfer_len; /* Data transfer length */ uint16_t sglist_cnt; /* Number of SG list entries */ uint16_t unused; /* padding for removed uint32_t */ }; /* * Definitions for the asynchronous callback CCB fields. */ typedef enum { AC_UNIT_ATTENTION = 0x4000,/* Device reported UNIT ATTENTION */ AC_ADVINFO_CHANGED = 0x2000,/* Advance info might have changes */ AC_CONTRACT = 0x1000,/* A contractual callback */ AC_GETDEV_CHANGED = 0x800,/* Getdev info might have changed */ AC_INQ_CHANGED = 0x400,/* Inquiry info might have changed */ AC_TRANSFER_NEG = 0x200,/* New transfer settings in effect */ AC_LOST_DEVICE = 0x100,/* A device went away */ AC_FOUND_DEVICE = 0x080,/* A new device was found */ AC_PATH_DEREGISTERED = 0x040,/* A path has de-registered */ AC_PATH_REGISTERED = 0x020,/* A new path has been registered */ AC_SENT_BDR = 0x010,/* A BDR message was sent to target */ AC_SCSI_AEN = 0x008,/* A SCSI AEN has been received */ AC_UNSOL_RESEL = 0x002,/* Unsolicited reselection occurred */ AC_BUS_RESET = 0x001 /* A SCSI bus reset occurred */ } ac_code; typedef void ac_callback_t (void *softc, u_int32_t code, struct cam_path *path, void *args); /* * Generic Asynchronous callbacks. * * Generic arguments passed bac which are then interpreted between a per-system * contract number. 
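A hedged sketch, not part of this diff, of the target-mode counterpart: a target peripheral reads the incoming CDB from the ccb_accept_tio above through atio_cdb_ptr() and carries the initiator-supplied SIMPLE-tag priority over to the continue-target-I/O CCB it builds with cam_fill_ctio() (defined later in this header); ctio, atio, mytarget_done, buf and len are hypothetical driver-local names:

	uint8_t opcode = atio_cdb_ptr(atio)[0];	/* valid for both CDB layouts */

	cam_fill_ctio(&ctio->csio, /*retries*/ 1, /*cbfcnp*/ mytarget_done,
	    /*flags*/ CAM_DIR_IN | CAM_SEND_STATUS,
	    /*tag_action*/ atio->tag_action,
	    /*tag_id*/ atio->tag_id, /*init_id*/ atio->init_id,
	    /*scsi_status*/ SCSI_STATUS_OK,
	    /*data_ptr*/ buf, /*dxfer_len*/ len, /*timeout*/ 30 * 1000);
	ctio->csio.priority = atio->priority;	/* cam_fill_ctio() zeroes it */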
*/ #define AC_CONTRACT_DATA_MAX (128 - sizeof (u_int64_t)) struct ac_contract { u_int64_t contract_number; u_int8_t contract_data[AC_CONTRACT_DATA_MAX]; }; #define AC_CONTRACT_DEV_CHG 1 struct ac_device_changed { u_int64_t wwpn; u_int32_t port; target_id_t target; u_int8_t arrived; }; /* Set Asynchronous Callback CCB */ struct ccb_setasync { struct ccb_hdr ccb_h; u_int32_t event_enable; /* Async Event enables */ ac_callback_t *callback; void *callback_arg; }; /* Set Device Type CCB */ struct ccb_setdev { struct ccb_hdr ccb_h; u_int8_t dev_type; /* Value for dev type field in EDT */ }; /* SCSI Control Functions */ /* Abort XPT request CCB */ struct ccb_abort { struct ccb_hdr ccb_h; union ccb *abort_ccb; /* Pointer to CCB to abort */ }; /* Reset SCSI Bus CCB */ struct ccb_resetbus { struct ccb_hdr ccb_h; }; /* Reset SCSI Device CCB */ struct ccb_resetdev { struct ccb_hdr ccb_h; }; /* Terminate I/O Process Request CCB */ struct ccb_termio { struct ccb_hdr ccb_h; union ccb *termio_ccb; /* Pointer to CCB to terminate */ }; typedef enum { CTS_TYPE_CURRENT_SETTINGS, CTS_TYPE_USER_SETTINGS } cts_type; struct ccb_trans_settings_scsi { u_int valid; /* Which fields to honor */ #define CTS_SCSI_VALID_TQ 0x01 u_int flags; #define CTS_SCSI_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_ata { u_int valid; /* Which fields to honor */ #define CTS_ATA_VALID_TQ 0x01 u_int flags; #define CTS_ATA_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_spi { u_int valid; /* Which fields to honor */ #define CTS_SPI_VALID_SYNC_RATE 0x01 #define CTS_SPI_VALID_SYNC_OFFSET 0x02 #define CTS_SPI_VALID_BUS_WIDTH 0x04 #define CTS_SPI_VALID_DISC 0x08 #define CTS_SPI_VALID_PPR_OPTIONS 0x10 u_int flags; #define CTS_SPI_FLAGS_DISC_ENB 0x01 u_int sync_period; u_int sync_offset; u_int bus_width; u_int ppr_options; }; struct ccb_trans_settings_fc { u_int valid; /* Which fields to honor */ #define CTS_FC_VALID_WWNN 0x8000 #define CTS_FC_VALID_WWPN 0x4000 #define CTS_FC_VALID_PORT 0x2000 #define CTS_FC_VALID_SPEED 0x1000 u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int32_t port; /* 24 bit port id, if known */ u_int32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_sas { u_int valid; /* Which fields to honor */ #define CTS_SAS_VALID_SPEED 0x1000 u_int32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_pata { u_int valid; /* Which fields to honor */ #define CTS_ATA_VALID_MODE 0x01 #define CTS_ATA_VALID_BYTECOUNT 0x02 #define CTS_ATA_VALID_ATAPI 0x20 #define CTS_ATA_VALID_CAPS 0x40 int mode; /* Mode */ u_int bytecount; /* Length of PIO transaction */ u_int atapi; /* Length of ATAPI CDB */ u_int caps; /* Device and host SATA caps. */ #define CTS_ATA_CAPS_H 0x0000ffff #define CTS_ATA_CAPS_H_DMA48 0x00000001 /* 48-bit DMA */ #define CTS_ATA_CAPS_D 0xffff0000 }; struct ccb_trans_settings_sata { u_int valid; /* Which fields to honor */ #define CTS_SATA_VALID_MODE 0x01 #define CTS_SATA_VALID_BYTECOUNT 0x02 #define CTS_SATA_VALID_REVISION 0x04 #define CTS_SATA_VALID_PM 0x08 #define CTS_SATA_VALID_TAGS 0x10 #define CTS_SATA_VALID_ATAPI 0x20 #define CTS_SATA_VALID_CAPS 0x40 int mode; /* Legacy PATA mode */ u_int bytecount; /* Length of PIO transaction */ int revision; /* SATA revision */ u_int pm_present; /* PM is present (XPT->SIM) */ u_int tags; /* Number of allowed tags */ u_int atapi; /* Length of ATAPI CDB */ u_int caps; /* Device and host SATA caps. 
*/ #define CTS_SATA_CAPS_H 0x0000ffff #define CTS_SATA_CAPS_H_PMREQ 0x00000001 #define CTS_SATA_CAPS_H_APST 0x00000002 #define CTS_SATA_CAPS_H_DMAAA 0x00000010 /* Auto-activation */ #define CTS_SATA_CAPS_H_AN 0x00000020 /* Async. notification */ #define CTS_SATA_CAPS_D 0xffff0000 #define CTS_SATA_CAPS_D_PMREQ 0x00010000 #define CTS_SATA_CAPS_D_APST 0x00020000 }; struct ccb_trans_settings_nvme { u_int valid; /* Which fields to honor */ #define CTS_NVME_VALID_SPEC 0x01 #define CTS_NVME_VALID_CAPS 0x02 #define CTS_NVME_VALID_LINK 0x04 uint32_t spec; /* NVMe spec implemented -- same as vs register */ uint32_t max_xfer; /* Max transfer size (0 -> unlimited */ uint32_t caps; uint8_t lanes; /* Number of PCIe lanes */ uint8_t speed; /* PCIe generation for each lane */ uint8_t max_lanes; /* Number of PCIe lanes */ uint8_t max_speed; /* PCIe generation for each lane */ }; #include struct ccb_trans_settings_mmc { struct mmc_ios ios; #define MMC_CLK (1 << 1) #define MMC_VDD (1 << 2) #define MMC_CS (1 << 3) #define MMC_BW (1 << 4) #define MMC_PM (1 << 5) #define MMC_BT (1 << 6) #define MMC_BM (1 << 7) #define MMC_VCCQ (1 << 8) uint32_t ios_valid; /* The folowing is used only for GET_TRAN_SETTINGS */ uint32_t host_ocr; int host_f_min; int host_f_max; /* Copied from sys/dev/mmc/bridge.h */ #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can do 4-bit data transfers */ #define MMC_CAP_8_BIT_DATA (1 << 1) /* Can do 8-bit data transfers */ #define MMC_CAP_HSPEED (1 << 2) /* Can do High Speed transfers */ #define MMC_CAP_BOOT_NOACC (1 << 4) /* Cannot access boot partitions */ #define MMC_CAP_WAIT_WHILE_BUSY (1 << 5) /* Host waits for busy responses */ #define MMC_CAP_UHS_SDR12 (1 << 6) /* Can do UHS SDR12 */ #define MMC_CAP_UHS_SDR25 (1 << 7) /* Can do UHS SDR25 */ #define MMC_CAP_UHS_SDR50 (1 << 8) /* Can do UHS SDR50 */ #define MMC_CAP_UHS_SDR104 (1 << 9) /* Can do UHS SDR104 */ #define MMC_CAP_UHS_DDR50 (1 << 10) /* Can do UHS DDR50 */ #define MMC_CAP_MMC_DDR52_120 (1 << 11) /* Can do eMMC DDR52 at 1.2 V */ #define MMC_CAP_MMC_DDR52_180 (1 << 12) /* Can do eMMC DDR52 at 1.8 V */ #define MMC_CAP_MMC_DDR52 (MMC_CAP_MMC_DDR52_120 | MMC_CAP_MMC_DDR52_180) #define MMC_CAP_MMC_HS200_120 (1 << 13) /* Can do eMMC HS200 at 1.2 V */ #define MMC_CAP_MMC_HS200_180 (1 << 14) /* Can do eMMC HS200 at 1.8 V */ #define MMC_CAP_MMC_HS200 (MMC_CAP_MMC_HS200_120| MMC_CAP_MMC_HS200_180) #define MMC_CAP_MMC_HS400_120 (1 << 15) /* Can do eMMC HS400 at 1.2 V */ #define MMC_CAP_MMC_HS400_180 (1 << 16) /* Can do eMMC HS400 at 1.8 V */ #define MMC_CAP_MMC_HS400 (MMC_CAP_MMC_HS400_120 | MMC_CAP_MMC_HS400_180) #define MMC_CAP_MMC_HSX00_120 (MMC_CAP_MMC_HS200_120 | MMC_CAP_MMC_HS400_120) #define MMC_CAP_MMC_ENH_STROBE (1 << 17) /* Can do eMMC Enhanced Strobe */ #define MMC_CAP_SIGNALING_120 (1 << 18) /* Can do signaling at 1.2 V */ #define MMC_CAP_SIGNALING_180 (1 << 19) /* Can do signaling at 1.8 V */ #define MMC_CAP_SIGNALING_330 (1 << 20) /* Can do signaling at 3.3 V */ #define MMC_CAP_DRIVER_TYPE_A (1 << 21) /* Can do Driver Type A */ #define MMC_CAP_DRIVER_TYPE_C (1 << 22) /* Can do Driver Type C */ #define MMC_CAP_DRIVER_TYPE_D (1 << 23) /* Can do Driver Type D */ uint32_t host_caps; uint32_t host_max_data; }; /* Get/Set transfer rate/width/disconnection/tag queueing settings */ struct ccb_trans_settings { struct ccb_hdr ccb_h; cts_type type; /* Current or User settings */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_ata 
ata; struct ccb_trans_settings_scsi scsi; struct ccb_trans_settings_nvme nvme; struct ccb_trans_settings_mmc mmc; } proto_specific; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_spi spi; struct ccb_trans_settings_fc fc; struct ccb_trans_settings_sas sas; struct ccb_trans_settings_pata ata; struct ccb_trans_settings_sata sata; struct ccb_trans_settings_nvme nvme; } xport_specific; }; /* * Calculate the geometry parameters for a device * give the block size and volume size in blocks. */ struct ccb_calc_geometry { struct ccb_hdr ccb_h; u_int32_t block_size; u_int64_t volume_size; u_int32_t cylinders; u_int8_t heads; u_int8_t secs_per_track; }; /* * Set or get SIM (and transport) specific knobs */ #define KNOB_VALID_ADDRESS 0x1 #define KNOB_VALID_ROLE 0x2 #define KNOB_ROLE_NONE 0x0 #define KNOB_ROLE_INITIATOR 0x1 #define KNOB_ROLE_TARGET 0x2 #define KNOB_ROLE_BOTH 0x3 struct ccb_sim_knob_settings_spi { u_int valid; u_int initiator_id; u_int role; }; struct ccb_sim_knob_settings_fc { u_int valid; u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int role; }; struct ccb_sim_knob_settings_sas { u_int valid; u_int64_t wwnn; /* world wide node name */ u_int role; }; #define KNOB_SETTINGS_SIZE 128 struct ccb_sim_knob { struct ccb_hdr ccb_h; union { u_int valid; /* Which fields to honor */ struct ccb_sim_knob_settings_spi spi; struct ccb_sim_knob_settings_fc fc; struct ccb_sim_knob_settings_sas sas; char pad[KNOB_SETTINGS_SIZE]; } xport_specific; }; /* * Rescan the given bus, or bus/target/lun */ struct ccb_rescan { struct ccb_hdr ccb_h; cam_flags flags; }; /* * Turn on debugging for the given bus, bus/target, or bus/target/lun. */ struct ccb_debug { struct ccb_hdr ccb_h; cam_debug_flags flags; }; /* Target mode structures. */ struct ccb_en_lun { struct ccb_hdr ccb_h; u_int16_t grp6_len; /* Group 6 VU CDB length */ u_int16_t grp7_len; /* Group 7 VU CDB length */ u_int8_t enable; }; /* old, barely used immediate notify, binary compatibility */ struct ccb_immed_notify { struct ccb_hdr ccb_h; struct scsi_sense_data sense_data; u_int8_t sense_len; /* Number of bytes in sense buffer */ u_int8_t initiator_id; /* Id of initiator that selected */ u_int8_t message_args[7]; /* Message Arguments */ }; struct ccb_notify_ack { struct ccb_hdr ccb_h; u_int16_t seq_id; /* Sequence identifier */ u_int8_t event; /* Event flags */ }; struct ccb_immediate_notify { struct ccb_hdr ccb_h; u_int tag_id; /* Tag for immediate notify */ u_int seq_id; /* Tag for target of notify */ u_int initiator_id; /* Initiator Identifier */ u_int arg; /* Function specific */ }; struct ccb_notify_acknowledge { struct ccb_hdr ccb_h; u_int tag_id; /* Tag for immediate notify */ u_int seq_id; /* Tar for target of notify */ u_int initiator_id; /* Initiator Identifier */ u_int arg; /* Response information */ /* * Lower byte of arg is one of RESPONSE CODE values defined below * (subset of response codes from SPL-4 and FCP-4 specifications), * upper 3 bytes is code-specific ADDITIONAL RESPONSE INFORMATION. */ #define CAM_RSP_TMF_COMPLETE 0x00 #define CAM_RSP_TMF_REJECTED 0x04 #define CAM_RSP_TMF_FAILED 0x05 #define CAM_RSP_TMF_SUCCEEDED 0x08 #define CAM_RSP_TMF_INCORRECT_LUN 0x09 }; /* HBA engine structures. 
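For reference, a minimal sketch, not part of this change, of querying the current transfer settings through the ccb_trans_settings machinery defined above; it assumes a valid cam_path is already held in 'path':

	struct ccb_trans_settings cts;

	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb *)&cts);
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS &&
	    (cts.xport_specific.sas.valid & CTS_SAS_VALID_SPEED) != 0)
		printf("SAS link rate: %u Mb/s\n", cts.xport_specific.sas.bitrate);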
*/ typedef enum { EIT_BUFFER, /* Engine type: buffer memory */ EIT_LOSSLESS, /* Engine type: lossless compression */ EIT_LOSSY, /* Engine type: lossy compression */ EIT_ENCRYPT /* Engine type: encryption */ } ei_type; typedef enum { EAD_VUNIQUE, /* Engine algorithm ID: vendor unique */ EAD_LZ1V1, /* Engine algorithm ID: LZ1 var.1 */ EAD_LZ2V1, /* Engine algorithm ID: LZ2 var.1 */ EAD_LZ2V2 /* Engine algorithm ID: LZ2 var.2 */ } ei_algo; struct ccb_eng_inq { struct ccb_hdr ccb_h; u_int16_t eng_num; /* The engine number for this inquiry */ ei_type eng_type; /* Returned engine type */ ei_algo eng_algo; /* Returned engine algorithm type */ u_int32_t eng_memeory; /* Returned engine memory size */ }; struct ccb_eng_exec { /* This structure must match SCSIIO size */ struct ccb_hdr ccb_h; u_int8_t *pdrv_ptr; /* Ptr used by the peripheral driver */ u_int8_t *req_map; /* Ptr for mapping info on the req. */ u_int8_t *data_ptr; /* Pointer to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ u_int8_t *engdata_ptr; /* Pointer to the engine buffer data */ u_int16_t sglist_cnt; /* Num of scatter gather list entries */ u_int32_t dmax_len; /* Destination data maximum length */ u_int32_t dest_len; /* Destination data length */ int32_t src_resid; /* Source residual length: 2's comp */ u_int32_t timeout; /* Timeout value */ u_int16_t eng_num; /* Engine number for this request */ u_int16_t vu_flags; /* Vendor Unique flags */ }; /* * Definitions for the timeout field in the SCSI I/O CCB. */ #define CAM_TIME_DEFAULT 0x00000000 /* Use SIM default value */ #define CAM_TIME_INFINITY 0xFFFFFFFF /* Infinite timeout */ #define CAM_SUCCESS 0 /* For signaling general success */ #define CAM_FAILURE 1 /* For signaling general failure */ #define CAM_FALSE 0 #define CAM_TRUE 1 #define XPT_CCB_INVALID -1 /* for signaling a bad CCB to free */ /* * CCB for working with advanced device information. This operates in a fashion * similar to XPT_GDEV_TYPE. Specify the target in ccb_h, the buffer * type requested, and provide a buffer size/buffer to write to. If the * buffer is too small, provsiz will be larger than bufsiz. */ struct ccb_dev_advinfo { struct ccb_hdr ccb_h; uint32_t flags; #define CDAI_FLAG_NONE 0x0 /* No flags set */ #define CDAI_FLAG_STORE 0x1 /* If set, action becomes store */ uint32_t buftype; /* IN: Type of data being requested */ /* NB: buftype is interpreted on a per-transport basis */ #define CDAI_TYPE_SCSI_DEVID 1 #define CDAI_TYPE_SERIAL_NUM 2 #define CDAI_TYPE_PHYS_PATH 3 #define CDAI_TYPE_RCAPLONG 4 #define CDAI_TYPE_EXT_INQ 5 #define CDAI_TYPE_NVME_CNTRL 6 /* NVMe Identify Controller data */ #define CDAI_TYPE_NVME_NS 7 /* NVMe Identify Namespace data */ #define CDAI_TYPE_MMC_PARAMS 8 /* MMC/SD ident */ off_t bufsiz; /* IN: Size of external buffer */ #define CAM_SCSI_DEVID_MAXLEN 65536 /* length in buffer is an uint16_t */ off_t provsiz; /* OUT: Size required/used */ uint8_t *buf; /* IN/OUT: Buffer for requested data */ }; /* * CCB for sending async events */ struct ccb_async { struct ccb_hdr ccb_h; uint32_t async_code; off_t async_arg_size; void *async_arg_ptr; }; /* * Union of all CCB types for kernel space allocation. This union should * never be used for manipulating CCBs - its only use is for the allocation * and deallocation of raw CCB space and is the return type of xpt_ccb_alloc * and the argument to xpt_ccb_free. 
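A hedged example, not part of this diff, of the ccb_dev_advinfo pattern described above, retrieving a device serial number; the 'path' variable and the simplified error handling are assumptions:

	struct ccb_dev_advinfo cdai;
	char serial[252];

	memset(&cdai, 0, sizeof(cdai));
	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
	cdai.flags = CDAI_FLAG_NONE;
	cdai.buftype = CDAI_TYPE_SERIAL_NUM;
	cdai.bufsiz = sizeof(serial);
	cdai.buf = (uint8_t *)serial;
	xpt_action((union ccb *)&cdai);
	if (cdai.ccb_h.status == CAM_REQ_CMP && cdai.provsiz > 0) {
		/* serial[] now holds the serial number; provsiz reports the
		   size required, which exceeds bufsiz if it was truncated. */
	}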
*/ union ccb { struct ccb_hdr ccb_h; /* For convenience */ struct ccb_scsiio csio; struct ccb_getdev cgd; struct ccb_getdevlist cgdl; struct ccb_pathinq cpi; struct ccb_relsim crs; struct ccb_setasync csa; struct ccb_setdev csd; struct ccb_pathstats cpis; struct ccb_getdevstats cgds; struct ccb_dev_match cdm; struct ccb_trans_settings cts; struct ccb_calc_geometry ccg; struct ccb_sim_knob knob; struct ccb_abort cab; struct ccb_resetbus crb; struct ccb_resetdev crd; struct ccb_termio tio; struct ccb_accept_tio atio; struct ccb_scsiio ctio; struct ccb_en_lun cel; struct ccb_immed_notify cin; struct ccb_notify_ack cna; struct ccb_immediate_notify cin1; struct ccb_notify_acknowledge cna2; struct ccb_eng_inq cei; struct ccb_eng_exec cee; struct ccb_smpio smpio; struct ccb_rescan crcn; struct ccb_debug cdbg; struct ccb_ataio ataio; struct ccb_dev_advinfo cdai; struct ccb_async casync; struct ccb_nvmeio nvmeio; struct ccb_mmcio mmcio; }; #define CCB_CLEAR_ALL_EXCEPT_HDR(ccbp) \ bzero((char *)(ccbp) + sizeof((ccbp)->ccb_h), \ sizeof(*(ccbp)) - sizeof((ccbp)->ccb_h)) __BEGIN_DECLS static __inline void cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int8_t cdb_len, u_int32_t timeout) { csio->ccb_h.func_code = XPT_SCSI_IO; csio->ccb_h.flags = flags; csio->ccb_h.xflags = 0; csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->sense_len = sense_len; csio->cdb_len = cdb_len; csio->tag_action = tag_action; + csio->priority = 0; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) csio->bio = NULL; #endif } static __inline void cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int tag_id, u_int init_id, u_int scsi_status, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { csio->ccb_h.func_code = XPT_CONT_TARGET_IO; csio->ccb_h.flags = flags; csio->ccb_h.xflags = 0; csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->scsi_status = scsi_status; csio->tag_action = tag_action; + csio->priority = 0; csio->tag_id = tag_id; csio->init_id = init_id; } static __inline void cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action __unused, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { ataio->ccb_h.func_code = XPT_ATA_IO; ataio->ccb_h.flags = flags; ataio->ccb_h.retry_count = retries; ataio->ccb_h.cbfcnp = cbfcnp; ataio->ccb_h.timeout = timeout; ataio->data_ptr = data_ptr; ataio->dxfer_len = dxfer_len; ataio->ata_flags = 0; } static __inline void cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint8_t *smp_request, int smp_request_len, uint8_t *smp_response, int smp_response_len, uint32_t timeout) { #ifdef _KERNEL KASSERT((flags & CAM_DIR_MASK) == CAM_DIR_BOTH, ("direction != CAM_DIR_BOTH")); KASSERT((smp_request != NULL) && (smp_response != NULL), ("need valid request and response buffers")); KASSERT((smp_request_len != 0) && (smp_response_len != 0), ("need non-zero request and response lengths")); #endif /*_KERNEL*/ smpio->ccb_h.func_code = XPT_SMP_IO; 
smpio->ccb_h.flags = flags; smpio->ccb_h.retry_count = retries; smpio->ccb_h.cbfcnp = cbfcnp; smpio->ccb_h.timeout = timeout; smpio->smp_request = smp_request; smpio->smp_request_len = smp_request_len; smpio->smp_response = smp_response; smpio->smp_response_len = smp_response_len; } static __inline void cam_fill_mmcio(struct ccb_mmcio *mmcio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint32_t mmc_opcode, uint32_t mmc_arg, uint32_t mmc_flags, struct mmc_data *mmc_d, uint32_t timeout) { mmcio->ccb_h.func_code = XPT_MMC_IO; mmcio->ccb_h.flags = flags; mmcio->ccb_h.retry_count = retries; mmcio->ccb_h.cbfcnp = cbfcnp; mmcio->ccb_h.timeout = timeout; mmcio->cmd.opcode = mmc_opcode; mmcio->cmd.arg = mmc_arg; mmcio->cmd.flags = mmc_flags; mmcio->stop.opcode = 0; mmcio->stop.arg = 0; mmcio->stop.flags = 0; if (mmc_d != NULL) { mmcio->cmd.data = mmc_d; } else mmcio->cmd.data = NULL; mmcio->cmd.resp[0] = 0; mmcio->cmd.resp[1] = 0; mmcio->cmd.resp[2] = 0; mmcio->cmd.resp[3] = 0; } static __inline void cam_set_ccbstatus(union ccb *ccb, cam_status status) { ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= status; } static __inline cam_status cam_ccb_status(union ccb *ccb) { return ((cam_status)(ccb->ccb_h.status & CAM_STATUS_MASK)); } void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended); static __inline void cam_fill_nvmeio(struct ccb_nvmeio *nvmeio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { nvmeio->ccb_h.func_code = XPT_NVME_IO; nvmeio->ccb_h.flags = flags; nvmeio->ccb_h.retry_count = retries; nvmeio->ccb_h.cbfcnp = cbfcnp; nvmeio->ccb_h.timeout = timeout; nvmeio->data_ptr = data_ptr; nvmeio->dxfer_len = dxfer_len; } static __inline void cam_fill_nvmeadmin(struct ccb_nvmeio *nvmeio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { nvmeio->ccb_h.func_code = XPT_NVME_ADMIN; nvmeio->ccb_h.flags = flags; nvmeio->ccb_h.retry_count = retries; nvmeio->ccb_h.cbfcnp = cbfcnp; nvmeio->ccb_h.timeout = timeout; nvmeio->data_ptr = data_ptr; nvmeio->dxfer_len = dxfer_len; } __END_DECLS #endif /* _CAM_CAM_CCB_H */ Index: head/sys/cam/ctl/ctl.c =================================================================== --- head/sys/cam/ctl/ctl.c (revision 367043) +++ head/sys/cam/ctl/ctl.c (revision 367044) @@ -1,13567 +1,13571 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003-2009 Silicon Graphics International Corp. * Copyright (c) 2012 The FreeBSD Foundation * Copyright (c) 2014-2017 Alexander Motin * Copyright (c) 2017 Jakub Wojciech Klama * Copyright (c) 2018 Marcelo Araujo * All rights reserved. * * Portions of this software were developed by Edward Tomasz Napierala * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. 
* * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id$ */ /* * CAM Target Layer, a SCSI device emulation subsystem. * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct ctl_softc *control_softc = NULL; /* * Template mode pages. */ /* * Note that these are default values only. The actual values will be * filled in when the user does a mode sense. */ const static struct scsi_da_rw_recovery_page rw_er_page_default = { /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, /*read_retry_count*/0, /*correction_span*/0, /*head_offset_count*/0, /*data_strobe_offset_cnt*/0, /*byte8*/SMS_RWER_LBPERE, /*write_retry_count*/0, /*reserved2*/0, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, /*byte3*/SMS_RWER_PER, /*read_retry_count*/0, /*correction_span*/0, /*head_offset_count*/0, /*data_strobe_offset_cnt*/0, /*byte8*/SMS_RWER_LBPERE, /*write_retry_count*/0, /*reserved2*/0, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_format_page format_page_default = { /*page_code*/SMS_FORMAT_DEVICE_PAGE, /*page_length*/sizeof(struct scsi_format_page) - 2, /*tracks_per_zone*/ {0, 0}, /*alt_sectors_per_zone*/ {0, 0}, /*alt_tracks_per_zone*/ {0, 0}, /*alt_tracks_per_lun*/ {0, 0}, /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, /*bytes_per_sector*/ {0, 0}, /*interleave*/ {0, 0}, /*track_skew*/ {0, 0}, /*cylinder_skew*/ {0, 0}, /*flags*/ SFP_HSEC, /*reserved*/ {0, 0, 0} }; const static struct scsi_format_page format_page_changeable = { /*page_code*/SMS_FORMAT_DEVICE_PAGE, /*page_length*/sizeof(struct scsi_format_page) - 2, /*tracks_per_zone*/ {0, 0}, /*alt_sectors_per_zone*/ {0, 0}, /*alt_tracks_per_zone*/ {0, 0}, /*alt_tracks_per_lun*/ {0, 0}, /*sectors_per_track*/ {0, 0}, /*bytes_per_sector*/ {0, 0}, /*interleave*/ {0, 0}, /*track_skew*/ {0, 0}, /*cylinder_skew*/ {0, 0}, /*flags*/ 0, /*reserved*/ {0, 0, 0} }; const static struct scsi_rigid_disk_page rigid_disk_page_default = { /*page_code*/SMS_RIGID_DISK_PAGE, /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, /*cylinders*/ {0, 0, 0}, /*heads*/ CTL_DEFAULT_HEADS, /*start_write_precomp*/ {0, 0, 0}, /*start_reduced_current*/ {0, 0, 0}, /*step_rate*/ {0, 0}, /*landing_zone_cylinder*/ {0, 0, 0}, 
/*rpl*/ SRDP_RPL_DISABLED, /*rotational_offset*/ 0, /*reserved1*/ 0, /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, CTL_DEFAULT_ROTATION_RATE & 0xff}, /*reserved2*/ {0, 0} }; const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { /*page_code*/SMS_RIGID_DISK_PAGE, /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, /*cylinders*/ {0, 0, 0}, /*heads*/ 0, /*start_write_precomp*/ {0, 0, 0}, /*start_reduced_current*/ {0, 0, 0}, /*step_rate*/ {0, 0}, /*landing_zone_cylinder*/ {0, 0, 0}, /*rpl*/ 0, /*rotational_offset*/ 0, /*reserved1*/ 0, /*rotation_rate*/ {0, 0}, /*reserved2*/ {0, 0} }; const static struct scsi_da_verify_recovery_page verify_er_page_default = { /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, /*byte3*/0, /*read_retry_count*/0, /*reserved*/{ 0, 0, 0, 0, 0, 0 }, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_da_verify_recovery_page verify_er_page_changeable = { /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, /*byte3*/SMS_VER_PER, /*read_retry_count*/0, /*reserved*/{ 0, 0, 0, 0, 0, 0 }, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_caching_page caching_page_default = { /*page_code*/SMS_CACHING_PAGE, /*page_length*/sizeof(struct scsi_caching_page) - 2, /*flags1*/ SCP_DISC | SCP_WCE, /*ret_priority*/ 0, /*disable_pf_transfer_len*/ {0xff, 0xff}, /*min_prefetch*/ {0, 0}, /*max_prefetch*/ {0xff, 0xff}, /*max_pf_ceiling*/ {0xff, 0xff}, /*flags2*/ 0, /*cache_segments*/ 0, /*cache_seg_size*/ {0, 0}, /*reserved*/ 0, /*non_cache_seg_size*/ {0, 0, 0} }; const static struct scsi_caching_page caching_page_changeable = { /*page_code*/SMS_CACHING_PAGE, /*page_length*/sizeof(struct scsi_caching_page) - 2, /*flags1*/ SCP_WCE | SCP_RCD, /*ret_priority*/ 0, /*disable_pf_transfer_len*/ {0, 0}, /*min_prefetch*/ {0, 0}, /*max_prefetch*/ {0, 0}, /*max_pf_ceiling*/ {0, 0}, /*flags2*/ 0, /*cache_segments*/ 0, /*cache_seg_size*/ {0, 0}, /*reserved*/ 0, /*non_cache_seg_size*/ {0, 0, 0} }; const static struct scsi_control_page control_page_default = { /*page_code*/SMS_CONTROL_MODE_PAGE, /*page_length*/sizeof(struct scsi_control_page) - 2, /*rlec*/0, /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, /*eca_and_aen*/0, /*flags4*/SCP_TAS, /*aen_holdoff_period*/{0, 0}, /*busy_timeout_period*/{0, 0}, /*extended_selftest_completion_time*/{0, 0} }; const static struct scsi_control_page control_page_changeable = { /*page_code*/SMS_CONTROL_MODE_PAGE, /*page_length*/sizeof(struct scsi_control_page) - 2, /*rlec*/SCP_DSENSE, /*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR, /*eca_and_aen*/SCP_SWP, /*flags4*/0, /*aen_holdoff_period*/{0, 0}, /*busy_timeout_period*/{0, 0}, /*extended_selftest_completion_time*/{0, 0} }; #define CTL_CEM_LEN (sizeof(struct scsi_control_ext_page) - 4) const static struct scsi_control_ext_page control_ext_page_default = { /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, /*subpage_code*/0x01, /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, /*flags*/0, /*prio*/0, /*max_sense*/0 }; const static struct scsi_control_ext_page control_ext_page_changeable = { /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, /*subpage_code*/0x01, /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, /*flags*/0, /*prio*/0, /*max_sense*/0xff }; const static struct scsi_info_exceptions_page ie_page_default = { /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, /*info_flags*/SIEP_FLAGS_EWASC, /*mrie*/SIEP_MRIE_NO, 
/*interval_timer*/{0, 0, 0, 0}, /*report_count*/{0, 0, 0, 1} }; const static struct scsi_info_exceptions_page ie_page_changeable = { /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, /*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST | SIEP_FLAGS_LOGERR, /*mrie*/0x0f, /*interval_timer*/{0xff, 0xff, 0xff, 0xff}, /*report_count*/{0xff, 0xff, 0xff, 0xff} }; #define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) const static struct ctl_logical_block_provisioning_page lbp_page_default = {{ /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, /*subpage_code*/0x02, /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, /*flags*/0, /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /*descr*/{}}, {{/*flags*/0, /*resource*/0x01, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0x02, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0xf1, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0xf2, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}} } }; const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{ /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, /*subpage_code*/0x02, /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, /*flags*/SLBPP_SITUA, /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /*descr*/{}}, {{/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}} } }; const static struct scsi_cddvd_capabilities_page cddvd_page_default = { /*page_code*/SMS_CDDVD_CAPS_PAGE, /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, /*caps1*/0x3f, /*caps2*/0x00, /*caps3*/0xf0, /*caps4*/0x00, /*caps5*/0x29, /*caps6*/0x00, /*obsolete*/{0, 0}, /*nvol_levels*/{0, 0}, /*buffer_size*/{8, 0}, /*obsolete2*/{0, 0}, /*reserved*/0, /*digital*/0, /*obsolete3*/0, /*copy_management*/0, /*reserved2*/0, /*rotation_control*/0, /*cur_write_speed*/0, /*num_speed_descr*/0, }; const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = { /*page_code*/SMS_CDDVD_CAPS_PAGE, /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, /*caps1*/0, /*caps2*/0, /*caps3*/0, /*caps4*/0, /*caps5*/0, /*caps6*/0, /*obsolete*/{0, 0}, /*nvol_levels*/{0, 0}, /*buffer_size*/{0, 0}, /*obsolete2*/{0, 0}, /*reserved*/0, /*digital*/0, /*obsolete3*/0, /*copy_management*/0, /*reserved2*/0, /*rotation_control*/0, /*cur_write_speed*/0, /*num_speed_descr*/0, }; SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer"); static int worker_threads = -1; SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, &worker_threads, 1, "Number of worker threads"); static int ctl_debug = CTL_DEBUG_NONE; SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, &ctl_debug, 0, "Enabled debug flags"); static int ctl_lun_map_size = 1024; SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN, &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)"); #ifdef CTL_TIME_IO static int ctl_time_io_secs = CTL_TIME_IO_DEFAULT_SECS; SYSCTL_INT(_kern_cam_ctl, OID_AUTO, time_io_secs, CTLFLAG_RWTUN, &ctl_time_io_secs, 0, "Log requests taking more seconds"); #endif /* * Maximum number of LUNs we support. MUST be a power of 2. 
*/ #define CTL_DEFAULT_MAX_LUNS 1024 static int ctl_max_luns = CTL_DEFAULT_MAX_LUNS; TUNABLE_INT("kern.cam.ctl.max_luns", &ctl_max_luns); SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_luns, CTLFLAG_RDTUN, &ctl_max_luns, CTL_DEFAULT_MAX_LUNS, "Maximum number of LUNs"); /* * Maximum number of ports registered at one time. */ #define CTL_DEFAULT_MAX_PORTS 256 static int ctl_max_ports = CTL_DEFAULT_MAX_PORTS; TUNABLE_INT("kern.cam.ctl.max_ports", &ctl_max_ports); SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_ports, CTLFLAG_RDTUN, &ctl_max_ports, CTL_DEFAULT_MAX_LUNS, "Maximum number of ports"); /* * Maximum number of initiators we support. */ #define CTL_MAX_INITIATORS (CTL_MAX_INIT_PER_PORT * ctl_max_ports) /* * Supported pages (0x00), Serial number (0x80), Device ID (0x83), * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), * SCSI Ports (0x88), Third-party Copy (0x8F), SCSI Feature Sets (0x92), * Block limits (0xB0), Block Device Characteristics (0xB1) and * Logical Block Provisioning (0xB2) */ #define SCSI_EVPD_NUM_SUPPORTED_PAGES 11 static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, int param); static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest); static int ctl_init(void); static int ctl_shutdown(void); static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries); static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static int ctl_enable_lun(struct ctl_lun *lun); static int ctl_disable_lun(struct ctl_lun *lun); static int ctl_free_lun(struct ctl_lun *lun); static int ctl_do_mode_select(union ctl_io *io); static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, uint64_t sa_res_key, uint8_t type, uint32_t residx, struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, struct scsi_per_res_out_parms* param); static void ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg); static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io); static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); static int ctl_inquiry_std(struct ctl_scsiio *ctsio); static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len); static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq); static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2); static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, 
union ctl_io *pending_io, union ctl_io *ooa_io); static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io **starting_io); static void ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, bool skip); static void ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *io, bool skip); static int ctl_scsiio_lun_check(struct ctl_lun *lun, const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio); static void ctl_failover_lun(union ctl_io *io); static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio); static int ctl_scsiio(struct ctl_scsiio *ctsio); static int ctl_target_reset(union ctl_io *io); static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type); static int ctl_lun_reset(union ctl_io *io); static int ctl_abort_task(union ctl_io *io); static int ctl_abort_task_set(union ctl_io *io); static int ctl_query_task(union ctl_io *io, int task_set); static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, ctl_ua_type ua_type); static int ctl_i_t_nexus_reset(union ctl_io *io); static int ctl_query_async_event(union ctl_io *io); static void ctl_run_task(union ctl_io *io); #ifdef CTL_IO_DELAY static void ctl_datamove_timer_wakeup(void *arg); static void ctl_done_timer_wakeup(void *arg); #endif /* CTL_IO_DELAY */ static void ctl_send_datamove_done(union ctl_io *io, int have_lock); static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); static int ctl_datamove_remote_dm_write_cb(union ctl_io *io); static void ctl_datamove_remote_write(union ctl_io *io); static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); static int ctl_datamove_remote_sgl_setup(union ctl_io *io); static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, ctl_ha_dt_cb callback); static void ctl_datamove_remote_read(union ctl_io *io); static void ctl_datamove_remote(union ctl_io *io); static void ctl_process_done(union ctl_io *io); static void ctl_thresh_thread(void *arg); static void ctl_work_thread(void *arg); static void ctl_enqueue_incoming(union ctl_io *io); static void ctl_enqueue_rtr(union ctl_io *io); static void ctl_enqueue_done(union ctl_io *io); static void ctl_enqueue_isc(union ctl_io *io); static const struct ctl_cmd_entry * ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); static const struct ctl_cmd_entry * ctl_validate_command(struct ctl_scsiio *ctsio); static int ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry); static int ctl_ha_init(void); static int ctl_ha_shutdown(void); static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx); static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx); static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx); static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key); /* * Load the serialization table. This isn't very pretty, but is probably * the easiest way to do it. */ #include "ctl_ser_table.c" /* * We only need to define open, close and ioctl routines for this driver. 
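 * ctl_open() and ctl_close() are no-ops; all of the work is done in
 * ctl_ioctl() on the /dev/cam/ctl device node.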
*/ static struct cdevsw ctl_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = ctl_open, .d_close = ctl_close, .d_ioctl = ctl_ioctl, .d_name = "ctl", }; MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *); static moduledata_t ctl_moduledata = { "ctl", ctl_module_event_handler, NULL }; DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); MODULE_VERSION(ctl, 1); static struct ctl_frontend ha_frontend = { .name = "ha", .init = ctl_ha_init, .shutdown = ctl_ha_shutdown, }; static int ctl_ha_init(void) { struct ctl_softc *softc = control_softc; if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, &softc->othersc_pool) != 0) return (ENOMEM); if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { ctl_pool_free(softc->othersc_pool); return (EIO); } if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) != CTL_HA_STATUS_SUCCESS) { ctl_ha_msg_destroy(softc); ctl_pool_free(softc->othersc_pool); return (EIO); } return (0); }; static int ctl_ha_shutdown(void) { struct ctl_softc *softc = control_softc; struct ctl_port *port; ctl_ha_msg_shutdown(softc); if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS) return (EIO); if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS) return (EIO); ctl_pool_free(softc->othersc_pool); while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) { ctl_port_deregister(port); free(port->port_name, M_CTL); free(port, M_CTL); } return (0); }; static void ctl_ha_datamove(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); struct ctl_sg_entry *sgl; union ctl_ha_msg msg; uint32_t sg_entries_sent; int do_sg_copy, i, j; memset(&msg.dt, 0, sizeof(msg.dt)); msg.hdr.msg_type = CTL_MSG_DATAMOVE; msg.hdr.original_sc = io->io_hdr.remote_io; msg.hdr.serializing_sc = io; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.dt.flags = io->io_hdr.flags; /* * We convert everything into a S/G list here. We can't * pass by reference, only by value between controllers. * So we can't pass a pointer to the S/G list, only as many * S/G entries as we can fit in here. If it's possible for * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, * then we need to break this up into multiple transfers. */ if (io->scsiio.kern_sg_entries == 0) { msg.dt.kern_sg_entries = 1; #if 0 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; } else { /* XXX KDM use busdma here! */ msg.dt.sg_list[0].addr = (void *)vtophys(io->scsiio.kern_data_ptr); } #else KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, ("HA does not support BUS_ADDR")); msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; #endif msg.dt.sg_list[0].len = io->scsiio.kern_data_len; do_sg_copy = 0; } else { msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; do_sg_copy = 1; } msg.dt.kern_data_len = io->scsiio.kern_data_len; msg.dt.kern_total_len = io->scsiio.kern_total_len; msg.dt.kern_data_resid = io->scsiio.kern_data_resid; msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; msg.dt.sg_sequence = 0; /* * Loop until we've sent all of the S/G entries. On the * other end, we'll recompose these S/G entries into one * contiguous list before processing. 
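 * Each CTL_MSG_DATAMOVE message therefore carries only as many entries as
 * fit in msg.dt.sg_list; sg_sequence, sent_sg_entries and sg_last let the
 * other side reassemble the full list in order.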
*/ for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) / sizeof(msg.dt.sg_list[0])), msg.dt.kern_sg_entries - sg_entries_sent); if (do_sg_copy != 0) { sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; for (i = sg_entries_sent, j = 0; i < msg.dt.cur_sg_entries; i++, j++) { #if 0 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { msg.dt.sg_list[j].addr = sgl[i].addr; } else { /* XXX KDM use busdma here! */ msg.dt.sg_list[j].addr = (void *)vtophys(sgl[i].addr); } #else KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, ("HA does not support BUS_ADDR")); msg.dt.sg_list[j].addr = sgl[i].addr; #endif msg.dt.sg_list[j].len = sgl[i].len; } } sg_entries_sent += msg.dt.cur_sg_entries; msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries); if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.dt) - sizeof(msg.dt.sg_list) + sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries, M_WAITOK) > CTL_HA_STATUS_SUCCESS) { io->io_hdr.port_status = 31341; io->scsiio.be_move_done(io); return; } msg.dt.sent_sg_entries = sg_entries_sent; } /* * Officially handover the request from us to peer. * If failover has just happened, then we must return error. * If failover happen just after, then it is not our problem. */ if (lun) mtx_lock(&lun->lun_lock); if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { if (lun) mtx_unlock(&lun->lun_lock); io->io_hdr.port_status = 31342; io->scsiio.be_move_done(io); return; } io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; if (lun) mtx_unlock(&lun->lun_lock); } static void ctl_ha_done(union ctl_io *io) { union ctl_ha_msg msg; if (io->io_hdr.io_type == CTL_IO_SCSI) { memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_FINISH_IO; msg.hdr.original_sc = io->io_hdr.remote_io; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.scsi.scsi_status = io->scsiio.scsi_status; msg.scsi.tag_num = io->scsiio.tag_num; msg.scsi.tag_type = io->scsiio.tag_type; msg.scsi.sense_len = io->scsiio.sense_len; memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, io->scsiio.sense_len); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + msg.scsi.sense_len, M_WAITOK); } ctl_free_io(io); } static void ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, union ctl_ha_msg *msg_info) { struct ctl_scsiio *ctsio; if (msg_info->hdr.original_sc == NULL) { printf("%s: original_sc == NULL!\n", __func__); /* XXX KDM now what? */ return; } ctsio = &msg_info->hdr.original_sc->scsiio; ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; ctsio->io_hdr.status = msg_info->hdr.status; ctsio->scsi_status = msg_info->scsi.scsi_status; ctsio->sense_len = msg_info->scsi.sense_len; memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, msg_info->scsi.sense_len); ctl_enqueue_isc((union ctl_io *)ctsio); } static void ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, union ctl_ha_msg *msg_info) { struct ctl_scsiio *ctsio; if (msg_info->hdr.serializing_sc == NULL) { printf("%s: serializing_sc == NULL!\n", __func__); /* XXX KDM now what? 
*/ return; } ctsio = &msg_info->hdr.serializing_sc->scsiio; ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; ctl_enqueue_isc((union ctl_io *)ctsio); } void ctl_isc_announce_lun(struct ctl_lun *lun) { struct ctl_softc *softc = lun->ctl_softc; union ctl_ha_msg *msg; struct ctl_ha_msg_lun_pr_key pr_key; int i, k; if (softc->ha_link != CTL_HA_LINK_ONLINE) return; mtx_lock(&lun->lun_lock); i = sizeof(msg->lun); if (lun->lun_devid) i += lun->lun_devid->len; i += sizeof(pr_key) * lun->pr_key_count; alloc: mtx_unlock(&lun->lun_lock); msg = malloc(i, M_CTL, M_WAITOK); mtx_lock(&lun->lun_lock); k = sizeof(msg->lun); if (lun->lun_devid) k += lun->lun_devid->len; k += sizeof(pr_key) * lun->pr_key_count; if (i < k) { free(msg, M_CTL); i = k; goto alloc; } bzero(&msg->lun, sizeof(msg->lun)); msg->hdr.msg_type = CTL_MSG_LUN_SYNC; msg->hdr.nexus.targ_lun = lun->lun; msg->hdr.nexus.targ_mapped_lun = lun->lun; msg->lun.flags = lun->flags; msg->lun.pr_generation = lun->pr_generation; msg->lun.pr_res_idx = lun->pr_res_idx; msg->lun.pr_res_type = lun->pr_res_type; msg->lun.pr_key_count = lun->pr_key_count; i = 0; if (lun->lun_devid) { msg->lun.lun_devid_len = lun->lun_devid->len; memcpy(&msg->lun.data[i], lun->lun_devid->data, msg->lun.lun_devid_len); i += msg->lun.lun_devid_len; } for (k = 0; k < CTL_MAX_INITIATORS; k++) { if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0) continue; pr_key.pr_iid = k; memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); i += sizeof(pr_key); } mtx_unlock(&lun->lun_lock); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, M_WAITOK); free(msg, M_CTL); if (lun->flags & CTL_LUN_PRIMARY_SC) { for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { ctl_isc_announce_mode(lun, -1, lun->mode_pages.index[i].page_code & SMPH_PC_MASK, lun->mode_pages.index[i].subpage); } } } void ctl_isc_announce_port(struct ctl_port *port) { struct ctl_softc *softc = port->ctl_softc; union ctl_ha_msg *msg; int i; if (port->targ_port < softc->port_min || port->targ_port >= softc->port_max || softc->ha_link != CTL_HA_LINK_ONLINE) return; i = sizeof(msg->port) + strlen(port->port_name) + 1; if (port->lun_map) i += port->lun_map_size * sizeof(uint32_t); if (port->port_devid) i += port->port_devid->len; if (port->target_devid) i += port->target_devid->len; if (port->init_devid) i += port->init_devid->len; msg = malloc(i, M_CTL, M_WAITOK); bzero(&msg->port, sizeof(msg->port)); msg->hdr.msg_type = CTL_MSG_PORT_SYNC; msg->hdr.nexus.targ_port = port->targ_port; msg->port.port_type = port->port_type; msg->port.physical_port = port->physical_port; msg->port.virtual_port = port->virtual_port; msg->port.status = port->status; i = 0; msg->port.name_len = sprintf(&msg->port.data[i], "%d:%s", softc->ha_id, port->port_name) + 1; i += msg->port.name_len; if (port->lun_map) { msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t); memcpy(&msg->port.data[i], port->lun_map, msg->port.lun_map_len); i += msg->port.lun_map_len; } if (port->port_devid) { msg->port.port_devid_len = port->port_devid->len; memcpy(&msg->port.data[i], port->port_devid->data, msg->port.port_devid_len); i += msg->port.port_devid_len; } if (port->target_devid) { msg->port.target_devid_len = port->target_devid->len; memcpy(&msg->port.data[i], port->target_devid->data, msg->port.target_devid_len); i += msg->port.target_devid_len; } if (port->init_devid) { msg->port.init_devid_len = port->init_devid->len; memcpy(&msg->port.data[i], port->init_devid->data, msg->port.init_devid_len); i += msg->port.init_devid_len; } ctl_ha_msg_send(CTL_HA_CHAN_CTL, 
&msg->port, sizeof(msg->port) + i, M_WAITOK); free(msg, M_CTL); } void ctl_isc_announce_iid(struct ctl_port *port, int iid) { struct ctl_softc *softc = port->ctl_softc; union ctl_ha_msg *msg; int i, l; if (port->targ_port < softc->port_min || port->targ_port >= softc->port_max || softc->ha_link != CTL_HA_LINK_ONLINE) return; mtx_lock(&softc->ctl_lock); i = sizeof(msg->iid); l = 0; if (port->wwpn_iid[iid].name) l = strlen(port->wwpn_iid[iid].name) + 1; i += l; msg = malloc(i, M_CTL, M_NOWAIT); if (msg == NULL) { mtx_unlock(&softc->ctl_lock); return; } bzero(&msg->iid, sizeof(msg->iid)); msg->hdr.msg_type = CTL_MSG_IID_SYNC; msg->hdr.nexus.targ_port = port->targ_port; msg->hdr.nexus.initid = iid; msg->iid.in_use = port->wwpn_iid[iid].in_use; msg->iid.name_len = l; msg->iid.wwpn = port->wwpn_iid[iid].wwpn; if (port->wwpn_iid[iid].name) strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l); mtx_unlock(&softc->ctl_lock); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT); free(msg, M_CTL); } void ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx, uint8_t page, uint8_t subpage) { struct ctl_softc *softc = lun->ctl_softc; union ctl_ha_msg msg; u_int i; if (softc->ha_link != CTL_HA_LINK_ONLINE) return; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == page && lun->mode_pages.index[i].subpage == subpage) break; } if (i == CTL_NUM_MODE_PAGES) return; /* Don't try to replicate pages not present on this device. */ if (lun->mode_pages.index[i].page_data == NULL) return; bzero(&msg.mode, sizeof(msg.mode)); msg.hdr.msg_type = CTL_MSG_MODE_SYNC; msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT; msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.mode.page_code = page; msg.mode.subpage = subpage; msg.mode.page_len = lun->mode_pages.index[i].page_len; memcpy(msg.mode.data, lun->mode_pages.index[i].page_data, msg.mode.page_len); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode), M_WAITOK); } static void ctl_isc_ha_link_up(struct ctl_softc *softc) { struct ctl_port *port; struct ctl_lun *lun; union ctl_ha_msg msg; int i; /* Announce this node parameters to peer for validation. 
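 * The peer checks them in ctl_isc_login() and aborts the HA link if the
 * versions, HA modes or size limits differ, or if both heads claim the
 * same ha_id.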
*/ msg.login.msg_type = CTL_MSG_LOGIN; msg.login.version = CTL_HA_VERSION; msg.login.ha_mode = softc->ha_mode; msg.login.ha_id = softc->ha_id; msg.login.max_luns = ctl_max_luns; msg.login.max_ports = ctl_max_ports; msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login), M_WAITOK); STAILQ_FOREACH(port, &softc->port_list, links) { ctl_isc_announce_port(port); for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { if (port->wwpn_iid[i].in_use) ctl_isc_announce_iid(port, i); } } STAILQ_FOREACH(lun, &softc->lun_list, links) ctl_isc_announce_lun(lun); } static void ctl_isc_ha_link_down(struct ctl_softc *softc) { struct ctl_port *port; struct ctl_lun *lun; union ctl_io *io; int i; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) { lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); } mtx_unlock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); io = ctl_alloc_io(softc->othersc_pool); mtx_lock(&softc->ctl_lock); ctl_zero_io(io); io->io_hdr.msg_type = CTL_MSG_FAILOVER; io->io_hdr.nexus.targ_mapped_lun = lun->lun; ctl_enqueue_isc(io); } STAILQ_FOREACH(port, &softc->port_list, links) { if (port->targ_port >= softc->port_min && port->targ_port < softc->port_max) continue; port->status &= ~CTL_PORT_STATUS_ONLINE; for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { port->wwpn_iid[i].in_use = 0; free(port->wwpn_iid[i].name, M_CTL); port->wwpn_iid[i].name = NULL; } } mtx_unlock(&softc->ctl_lock); } static void ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_lun *lun; uint32_t iid = ctl_get_initindex(&msg->hdr.nexus); mtx_lock(&softc->ctl_lock); if (msg->hdr.nexus.targ_mapped_lun >= ctl_max_luns || (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set) memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8); if (msg->ua.ua_all) { if (msg->ua.ua_set) ctl_est_ua_all(lun, iid, msg->ua.ua_type); else ctl_clr_ua_all(lun, iid, msg->ua.ua_type); } else { if (msg->ua.ua_set) ctl_est_ua(lun, iid, msg->ua.ua_type); else ctl_clr_ua(lun, iid, msg->ua.ua_type); } mtx_unlock(&lun->lun_lock); } static void ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_lun *lun; struct ctl_ha_msg_lun_pr_key pr_key; int i, k; ctl_lun_flags oflags; uint32_t targ_lun; targ_lun = msg->hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); return; } i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0; if (msg->lun.lun_devid_len != i || (i > 0 && memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) { mtx_unlock(&lun->lun_lock); printf("%s: Received conflicting HA LUN %d\n", __func__, targ_lun); return; } else { /* Record whether peer is primary. 
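 * If the peer is primary and this node is not, its persistent reservation
 * state below is taken as authoritative.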
*/ oflags = lun->flags; if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) && (msg->lun.flags & CTL_LUN_DISABLED) == 0) lun->flags |= CTL_LUN_PEER_SC_PRIMARY; else lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; if (oflags != lun->flags) ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); /* If peer is primary and we are not -- use data */ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) { lun->pr_generation = msg->lun.pr_generation; lun->pr_res_idx = msg->lun.pr_res_idx; lun->pr_res_type = msg->lun.pr_res_type; lun->pr_key_count = msg->lun.pr_key_count; for (k = 0; k < CTL_MAX_INITIATORS; k++) ctl_clr_prkey(lun, k); for (k = 0; k < msg->lun.pr_key_count; k++) { memcpy(&pr_key, &msg->lun.data[i], sizeof(pr_key)); ctl_alloc_prkey(lun, pr_key.pr_iid); ctl_set_prkey(lun, pr_key.pr_iid, pr_key.pr_key); i += sizeof(pr_key); } } mtx_unlock(&lun->lun_lock); CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n", __func__, targ_lun, (msg->lun.flags & CTL_LUN_PRIMARY_SC) ? "primary" : "secondary")); /* If we are primary but peer doesn't know -- notify */ if ((lun->flags & CTL_LUN_PRIMARY_SC) && (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0) ctl_isc_announce_lun(lun); } } static void ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_port *port; struct ctl_lun *lun; int i, new; port = softc->ctl_ports[msg->hdr.nexus.targ_port]; if (port == NULL) { CTL_DEBUG_PRINT(("%s: New port %d\n", __func__, msg->hdr.nexus.targ_port)); new = 1; port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO); port->frontend = &ha_frontend; port->targ_port = msg->hdr.nexus.targ_port; port->fe_datamove = ctl_ha_datamove; port->fe_done = ctl_ha_done; } else if (port->frontend == &ha_frontend) { CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__, msg->hdr.nexus.targ_port)); new = 0; } else { printf("%s: Received conflicting HA port %d\n", __func__, msg->hdr.nexus.targ_port); return; } port->port_type = msg->port.port_type; port->physical_port = msg->port.physical_port; port->virtual_port = msg->port.virtual_port; port->status = msg->port.status; i = 0; free(port->port_name, M_CTL); port->port_name = strndup(&msg->port.data[i], msg->port.name_len, M_CTL); i += msg->port.name_len; if (msg->port.lun_map_len != 0) { if (port->lun_map == NULL || port->lun_map_size * sizeof(uint32_t) < msg->port.lun_map_len) { port->lun_map_size = 0; free(port->lun_map, M_CTL); port->lun_map = malloc(msg->port.lun_map_len, M_CTL, M_WAITOK); } memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len); port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t); i += msg->port.lun_map_len; } else { port->lun_map_size = 0; free(port->lun_map, M_CTL); port->lun_map = NULL; } if (msg->port.port_devid_len != 0) { if (port->port_devid == NULL || port->port_devid->len < msg->port.port_devid_len) { free(port->port_devid, M_CTL); port->port_devid = malloc(sizeof(struct ctl_devid) + msg->port.port_devid_len, M_CTL, M_WAITOK); } memcpy(port->port_devid->data, &msg->port.data[i], msg->port.port_devid_len); port->port_devid->len = msg->port.port_devid_len; i += msg->port.port_devid_len; } else { free(port->port_devid, M_CTL); port->port_devid = NULL; } if (msg->port.target_devid_len != 0) { if (port->target_devid == NULL || port->target_devid->len < msg->port.target_devid_len) { free(port->target_devid, M_CTL); port->target_devid = malloc(sizeof(struct ctl_devid) + msg->port.target_devid_len, M_CTL, M_WAITOK); } memcpy(port->target_devid->data, &msg->port.data[i], msg->port.target_devid_len); 
port->target_devid->len = msg->port.target_devid_len; i += msg->port.target_devid_len; } else { free(port->target_devid, M_CTL); port->target_devid = NULL; } if (msg->port.init_devid_len != 0) { if (port->init_devid == NULL || port->init_devid->len < msg->port.init_devid_len) { free(port->init_devid, M_CTL); port->init_devid = malloc(sizeof(struct ctl_devid) + msg->port.init_devid_len, M_CTL, M_WAITOK); } memcpy(port->init_devid->data, &msg->port.data[i], msg->port.init_devid_len); port->init_devid->len = msg->port.init_devid_len; i += msg->port.init_devid_len; } else { free(port->init_devid, M_CTL); port->init_devid = NULL; } if (new) { if (ctl_port_register(port) != 0) { printf("%s: ctl_port_register() failed with error\n", __func__); } } mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; mtx_lock(&lun->lun_lock); ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE); mtx_unlock(&lun->lun_lock); } mtx_unlock(&softc->ctl_lock); } static void ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_port *port; int iid; port = softc->ctl_ports[msg->hdr.nexus.targ_port]; if (port == NULL) { printf("%s: Received IID for unknown port %d\n", __func__, msg->hdr.nexus.targ_port); return; } iid = msg->hdr.nexus.initid; if (port->wwpn_iid[iid].in_use != 0 && msg->iid.in_use == 0) ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); port->wwpn_iid[iid].in_use = msg->iid.in_use; port->wwpn_iid[iid].wwpn = msg->iid.wwpn; free(port->wwpn_iid[iid].name, M_CTL); if (msg->iid.name_len) { port->wwpn_iid[iid].name = strndup(&msg->iid.data[0], msg->iid.name_len, M_CTL); } else port->wwpn_iid[iid].name = NULL; } static void ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { if (msg->login.version != CTL_HA_VERSION) { printf("CTL HA peers have different versions %d != %d\n", msg->login.version, CTL_HA_VERSION); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); return; } if (msg->login.ha_mode != softc->ha_mode) { printf("CTL HA peers have different ha_mode %d != %d\n", msg->login.ha_mode, softc->ha_mode); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); return; } if (msg->login.ha_id == softc->ha_id) { printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); return; } if (msg->login.max_luns != ctl_max_luns || msg->login.max_ports != ctl_max_ports || msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) { printf("CTL HA peers have different limits\n"); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); return; } } static void ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_lun *lun; u_int i; uint32_t initidx, targ_lun; targ_lun = msg->hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); return; } for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == msg->mode.page_code && lun->mode_pages.index[i].subpage == msg->mode.subpage) break; } if (i == CTL_NUM_MODE_PAGES) { mtx_unlock(&lun->lun_lock); return; } memcpy(lun->mode_pages.index[i].page_data, msg->mode.data, lun->mode_pages.index[i].page_len); initidx = ctl_get_initindex(&msg->hdr.nexus); if (initidx != -1) ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); mtx_unlock(&lun->lun_lock); } /* * ISC (Inter Shelf 
Communication) event handler. Events from the HA * subsystem come in here. */ static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) { struct ctl_softc *softc = control_softc; union ctl_io *io; struct ctl_prio *presio; ctl_ha_status isc_status; CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event)); if (event == CTL_HA_EVT_MSG_RECV) { union ctl_ha_msg *msg, msgbuf; if (param > sizeof(msgbuf)) msg = malloc(param, M_CTL, M_WAITOK); else msg = &msgbuf; isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param, M_WAITOK); if (isc_status != CTL_HA_STATUS_SUCCESS) { printf("%s: Error receiving message: %d\n", __func__, isc_status); if (msg != &msgbuf) free(msg, M_CTL); return; } CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->hdr.msg_type)); switch (msg->hdr.msg_type) { case CTL_MSG_SERIALIZE: io = ctl_alloc_io(softc->othersc_pool); ctl_zero_io(io); // populate ctsio from msg io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.msg_type = CTL_MSG_SERIALIZE; io->io_hdr.remote_io = msg->hdr.original_sc; io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | CTL_FLAG_IO_ACTIVE; /* * If we're in serialization-only mode, we don't * want to go through full done processing. Thus * the COPY flag. * * XXX KDM add another flag that is more specific. */ if (softc->ha_mode != CTL_HA_MODE_XFER) io->io_hdr.flags |= CTL_FLAG_INT_COPY; io->io_hdr.nexus = msg->hdr.nexus; + io->scsiio.priority = msg->scsi.priority; io->scsiio.tag_num = msg->scsi.tag_num; io->scsiio.tag_type = msg->scsi.tag_type; #ifdef CTL_TIME_IO io->io_hdr.start_time = time_uptime; getbinuptime(&io->io_hdr.start_bt); #endif /* CTL_TIME_IO */ io->scsiio.cdb_len = msg->scsi.cdb_len; memcpy(io->scsiio.cdb, msg->scsi.cdb, CTL_MAX_CDBLEN); if (softc->ha_mode == CTL_HA_MODE_XFER) { const struct ctl_cmd_entry *entry; entry = ctl_get_cmd_entry(&io->scsiio, NULL); io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; io->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; } ctl_enqueue_isc(io); break; /* Performed on the Originating SC, XFER mode only */ case CTL_MSG_DATAMOVE: { struct ctl_sg_entry *sgl; int i, j; io = msg->hdr.original_sc; if (io == NULL) { printf("%s: original_sc == NULL!\n", __func__); /* XXX KDM do something here */ break; } io->io_hdr.msg_type = CTL_MSG_DATAMOVE; io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; /* * Keep track of this, we need to send it back over * when the datamove is complete. 
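 * (The CTL_MSG_DATAMOVE_DONE handler on the peer looks its request up
 * through hdr.serializing_sc.)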
*/ io->io_hdr.remote_io = msg->hdr.serializing_sc; if (msg->hdr.status == CTL_SUCCESS) io->io_hdr.status = msg->hdr.status; if (msg->dt.sg_sequence == 0) { #ifdef CTL_TIME_IO getbinuptime(&io->io_hdr.dma_start_bt); #endif i = msg->dt.kern_sg_entries + msg->dt.kern_data_len / CTL_HA_DATAMOVE_SEGMENT + 1; sgl = malloc(sizeof(*sgl) * i, M_CTL, M_WAITOK | M_ZERO); CTL_RSGL(io) = sgl; CTL_LSGL(io) = &sgl[msg->dt.kern_sg_entries]; io->scsiio.kern_data_ptr = (uint8_t *)sgl; io->scsiio.kern_sg_entries = msg->dt.kern_sg_entries; io->scsiio.rem_sg_entries = msg->dt.kern_sg_entries; io->scsiio.kern_data_len = msg->dt.kern_data_len; io->scsiio.kern_total_len = msg->dt.kern_total_len; io->scsiio.kern_data_resid = msg->dt.kern_data_resid; io->scsiio.kern_rel_offset = msg->dt.kern_rel_offset; io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR; io->io_hdr.flags |= msg->dt.flags & CTL_FLAG_BUS_ADDR; } else sgl = (struct ctl_sg_entry *) io->scsiio.kern_data_ptr; for (i = msg->dt.sent_sg_entries, j = 0; i < (msg->dt.sent_sg_entries + msg->dt.cur_sg_entries); i++, j++) { sgl[i].addr = msg->dt.sg_list[j].addr; sgl[i].len = msg->dt.sg_list[j].len; } /* * If this is the last piece of the I/O, we've got * the full S/G list. Queue processing in the thread. * Otherwise wait for the next piece. */ if (msg->dt.sg_last != 0) ctl_enqueue_isc(io); break; } /* Performed on the Serializing (primary) SC, XFER mode only */ case CTL_MSG_DATAMOVE_DONE: { if (msg->hdr.serializing_sc == NULL) { printf("%s: serializing_sc == NULL!\n", __func__); /* XXX KDM now what? */ break; } /* * We grab the sense information here in case * there was a failure, so we can return status * back to the initiator. */ io = msg->hdr.serializing_sc; io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; io->io_hdr.port_status = msg->scsi.port_status; io->scsiio.kern_data_resid = msg->scsi.kern_data_resid; if (msg->hdr.status != CTL_STATUS_NONE) { io->io_hdr.status = msg->hdr.status; io->scsiio.scsi_status = msg->scsi.scsi_status; io->scsiio.sense_len = msg->scsi.sense_len; memcpy(&io->scsiio.sense_data, &msg->scsi.sense_data, msg->scsi.sense_len); if (msg->hdr.status == CTL_SUCCESS) io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; } ctl_enqueue_isc(io); break; } /* Preformed on Originating SC, SER_ONLY mode */ case CTL_MSG_R2R: io = msg->hdr.original_sc; if (io == NULL) { printf("%s: original_sc == NULL!\n", __func__); break; } io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; io->io_hdr.msg_type = CTL_MSG_R2R; io->io_hdr.remote_io = msg->hdr.serializing_sc; ctl_enqueue_isc(io); break; /* * Performed on Serializing(i.e. primary SC) SC in SER_ONLY * mode. * Performed on the Originating (i.e. secondary) SC in XFER * mode */ case CTL_MSG_FINISH_IO: if (softc->ha_mode == CTL_HA_MODE_XFER) ctl_isc_handler_finish_xfer(softc, msg); else ctl_isc_handler_finish_ser_only(softc, msg); break; /* Preformed on Originating SC */ case CTL_MSG_BAD_JUJU: io = msg->hdr.original_sc; if (io == NULL) { printf("%s: Bad JUJU!, original_sc is NULL!\n", __func__); break; } ctl_copy_sense_data(msg, io); /* * IO should have already been cleaned up on other * SC so clear this flag so we won't send a message * back to finish the IO there. 
*/ io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; /* io = msg->hdr.serializing_sc; */ io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; ctl_enqueue_isc(io); break; /* Handle resets sent from the other side */ case CTL_MSG_MANAGE_TASKS: { struct ctl_taskio *taskio; taskio = (struct ctl_taskio *)ctl_alloc_io( softc->othersc_pool); ctl_zero_io((union ctl_io *)taskio); taskio->io_hdr.io_type = CTL_IO_TASK; taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; taskio->io_hdr.nexus = msg->hdr.nexus; taskio->task_action = msg->task.task_action; taskio->tag_num = msg->task.tag_num; taskio->tag_type = msg->task.tag_type; #ifdef CTL_TIME_IO taskio->io_hdr.start_time = time_uptime; getbinuptime(&taskio->io_hdr.start_bt); #endif /* CTL_TIME_IO */ ctl_run_task((union ctl_io *)taskio); break; } /* Persistent Reserve action which needs attention */ case CTL_MSG_PERS_ACTION: presio = (struct ctl_prio *)ctl_alloc_io( softc->othersc_pool); ctl_zero_io((union ctl_io *)presio); presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; presio->io_hdr.nexus = msg->hdr.nexus; presio->pr_msg = msg->pr; ctl_enqueue_isc((union ctl_io *)presio); break; case CTL_MSG_UA: ctl_isc_ua(softc, msg, param); break; case CTL_MSG_PORT_SYNC: ctl_isc_port_sync(softc, msg, param); break; case CTL_MSG_LUN_SYNC: ctl_isc_lun_sync(softc, msg, param); break; case CTL_MSG_IID_SYNC: ctl_isc_iid_sync(softc, msg, param); break; case CTL_MSG_LOGIN: ctl_isc_login(softc, msg, param); break; case CTL_MSG_MODE_SYNC: ctl_isc_mode_sync(softc, msg, param); break; default: printf("Received HA message of unknown type %d\n", msg->hdr.msg_type); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); break; } if (msg != &msgbuf) free(msg, M_CTL); } else if (event == CTL_HA_EVT_LINK_CHANGE) { printf("CTL: HA link status changed from %d to %d\n", softc->ha_link, param); if (param == softc->ha_link) return; if (softc->ha_link == CTL_HA_LINK_ONLINE) { softc->ha_link = param; ctl_isc_ha_link_down(softc); } else { softc->ha_link = param; if (softc->ha_link == CTL_HA_LINK_ONLINE) ctl_isc_ha_link_up(softc); } return; } else { printf("ctl_isc_event_handler: Unknown event %d\n", event); return; } } static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) { memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, src->scsi.sense_len); dest->scsiio.scsi_status = src->scsi.scsi_status; dest->scsiio.sense_len = src->scsi.sense_len; dest->io_hdr.status = src->hdr.status; } static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest) { memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, src->scsiio.sense_len); dest->scsi.scsi_status = src->scsiio.scsi_status; dest->scsi.sense_len = src->scsiio.sense_len; dest->hdr.status = src->io_hdr.status; } void ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) { struct ctl_softc *softc = lun->ctl_softc; ctl_ua_type *pu; if (initidx < softc->init_min || initidx >= softc->init_max) return; mtx_assert(&lun->lun_lock, MA_OWNED); pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; if (pu == NULL) return; pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; } void ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua) { int i; mtx_assert(&lun->lun_lock, MA_OWNED); if (lun->pending_ua[port] == NULL) return; for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { if (port * CTL_MAX_INIT_PER_PORT + i == except) continue; lun->pending_ua[port][i] |= ua; } } void ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) { 
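	/*
	 * Establish the UA for every initiator on every port owned by this
	 * head, skipping only the initiator index given in 'except'.
	 */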
struct ctl_softc *softc = lun->ctl_softc; int i; mtx_assert(&lun->lun_lock, MA_OWNED); for (i = softc->port_min; i < softc->port_max; i++) ctl_est_ua_port(lun, i, except, ua); } void ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) { struct ctl_softc *softc = lun->ctl_softc; ctl_ua_type *pu; if (initidx < softc->init_min || initidx >= softc->init_max) return; mtx_assert(&lun->lun_lock, MA_OWNED); pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; if (pu == NULL) return; pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; } void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) { struct ctl_softc *softc = lun->ctl_softc; int i, j; mtx_assert(&lun->lun_lock, MA_OWNED); for (i = softc->port_min; i < softc->port_max; i++) { if (lun->pending_ua[i] == NULL) continue; for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { if (i * CTL_MAX_INIT_PER_PORT + j == except) continue; lun->pending_ua[i][j] &= ~ua; } } } void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, ctl_ua_type ua_type) { struct ctl_lun *lun; mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { mtx_lock(&lun->lun_lock); ctl_clr_ua(lun, initidx, ua_type); mtx_unlock(&lun->lun_lock); } } static int ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) { struct ctl_softc *softc = (struct ctl_softc *)arg1; struct ctl_lun *lun; struct ctl_lun_req ireq; int error, value; value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); mtx_lock(&softc->ctl_lock); if (value == 0) softc->flags |= CTL_FLAG_ACTIVE_SHELF; else softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_unlock(&softc->ctl_lock); bzero(&ireq, sizeof(ireq)); ireq.reqtype = CTL_LUNREQ_MODIFY; ireq.reqdata.modify.lun_id = lun->lun; lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, curthread); if (ireq.status != CTL_LUN_OK) { printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", __func__, ireq.status, ireq.error_str); } mtx_lock(&softc->ctl_lock); } mtx_unlock(&softc->ctl_lock); return (0); } static int ctl_init(void) { struct make_dev_args args; struct ctl_softc *softc; int i, error; softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, M_WAITOK | M_ZERO); make_dev_args_init(&args); args.mda_devsw = &ctl_cdevsw; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = softc; args.mda_si_drv2 = NULL; error = make_dev_s(&args, &softc->dev, "cam/ctl"); if (error != 0) { free(softc, M_DEVBUF); control_softc = NULL; return (error); } sysctl_ctx_init(&softc->sysctl_ctx); softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer"); if (softc->sysctl_tree == NULL) { printf("%s: unable to allocate sysctl tree\n", __func__); destroy_dev(softc->dev); free(softc, M_DEVBUF); control_softc = NULL; return (ENOMEM); } mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); softc->flags = 0; SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); if (ctl_max_luns <= 0 || powerof2(ctl_max_luns) == 0) { printf("Bad value %d for kern.cam.ctl.max_luns, must be a power of two, using %d\n", ctl_max_luns, 
CTL_DEFAULT_MAX_LUNS); ctl_max_luns = CTL_DEFAULT_MAX_LUNS; } softc->ctl_luns = malloc(sizeof(struct ctl_lun *) * ctl_max_luns, M_DEVBUF, M_WAITOK | M_ZERO); softc->ctl_lun_mask = malloc(sizeof(uint32_t) * ((ctl_max_luns + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); if (ctl_max_ports <= 0 || powerof2(ctl_max_ports) == 0) { printf("Bad value %d for kern.cam.ctl.max_ports, must be a power of two, using %d\n", ctl_max_ports, CTL_DEFAULT_MAX_PORTS); ctl_max_ports = CTL_DEFAULT_MAX_PORTS; } softc->ctl_port_mask = malloc(sizeof(uint32_t) * ((ctl_max_ports + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); softc->ctl_ports = malloc(sizeof(struct ctl_port *) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); /* * In Copan's HA scheme, the "master" and "slave" roles are * figured out through the slot the controller is in. Although it * is an active/active system, someone has to be in charge. */ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, "HA head ID (0 - no HA)"); if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { softc->flags |= CTL_FLAG_ACTIVE_SHELF; softc->is_single = 1; softc->port_cnt = ctl_max_ports; softc->port_min = 0; } else { softc->port_cnt = ctl_max_ports / NUM_HA_SHELVES; softc->port_min = (softc->ha_id - 1) * softc->port_cnt; } softc->port_max = softc->port_min + softc->port_cnt; softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, "HA link state (0 - offline, 1 - unknown, 2 - online)"); STAILQ_INIT(&softc->lun_list); STAILQ_INIT(&softc->fe_list); STAILQ_INIT(&softc->port_list); STAILQ_INIT(&softc->be_list); ctl_tpc_init(softc); if (worker_threads <= 0) worker_threads = max(1, mp_ncpus / 4); if (worker_threads > CTL_MAX_THREADS) worker_threads = CTL_MAX_THREADS; for (i = 0; i < worker_threads; i++) { struct ctl_thread *thr = &softc->threads[i]; mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); thr->ctl_softc = softc; STAILQ_INIT(&thr->incoming_queue); STAILQ_INIT(&thr->rtr_queue); STAILQ_INIT(&thr->done_queue); STAILQ_INIT(&thr->isc_queue); error = kproc_kthread_add(ctl_work_thread, thr, &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); if (error != 0) { printf("error creating CTL work thread!\n"); return (error); } } error = kproc_kthread_add(ctl_thresh_thread, softc, &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh"); if (error != 0) { printf("error creating CTL threshold thread!\n"); return (error); } SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); if (softc->is_single == 0) { if (ctl_frontend_register(&ha_frontend) != 0) softc->is_single = 1; } return (0); } static int ctl_shutdown(void) { struct ctl_softc *softc = control_softc; int i; if (softc->is_single == 0) ctl_frontend_deregister(&ha_frontend); destroy_dev(softc->dev); /* Shutdown CTL threads. 
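 * Each worker is woken until its thread pointer clears, then the threshold
 * thread is reaped the same way before the locks and the UMA zone are
 * destroyed.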
*/ softc->shutdown = 1; for (i = 0; i < worker_threads; i++) { struct ctl_thread *thr = &softc->threads[i]; while (thr->thread != NULL) { wakeup(thr); if (thr->thread != NULL) pause("CTL thr shutdown", 1); } mtx_destroy(&thr->queue_lock); } while (softc->thresh_thread != NULL) { wakeup(softc->thresh_thread); if (softc->thresh_thread != NULL) pause("CTL thr shutdown", 1); } ctl_tpc_shutdown(softc); uma_zdestroy(softc->io_zone); mtx_destroy(&softc->ctl_lock); free(softc->ctl_luns, M_DEVBUF); free(softc->ctl_lun_mask, M_DEVBUF); free(softc->ctl_port_mask, M_DEVBUF); free(softc->ctl_ports, M_DEVBUF); sysctl_ctx_free(&softc->sysctl_ctx); free(softc, M_DEVBUF); control_softc = NULL; return (0); } static int ctl_module_event_handler(module_t mod, int what, void *arg) { switch (what) { case MOD_LOAD: return (ctl_init()); case MOD_UNLOAD: return (ctl_shutdown()); default: return (EOPNOTSUPP); } } /* * XXX KDM should we do some access checks here? Bump a reference count to * prevent a CTL module from being unloaded while someone has it open? */ static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) { return (0); } static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) { return (0); } /* * Remove an initiator by port number and initiator ID. * Returns 0 for success, -1 for failure. */ int ctl_remove_initiator(struct ctl_port *port, int iid) { struct ctl_softc *softc = port->ctl_softc; int last; mtx_assert(&softc->ctl_lock, MA_NOTOWNED); if (iid > CTL_MAX_INIT_PER_PORT) { printf("%s: initiator ID %u > maximun %u!\n", __func__, iid, CTL_MAX_INIT_PER_PORT); return (-1); } mtx_lock(&softc->ctl_lock); last = (--port->wwpn_iid[iid].in_use == 0); port->wwpn_iid[iid].last_use = time_uptime; mtx_unlock(&softc->ctl_lock); if (last) ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); ctl_isc_announce_iid(port, iid); return (0); } /* * Add an initiator to the initiator map. * Returns iid for success, < 0 for failure. */ int ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) { struct ctl_softc *softc = port->ctl_softc; time_t best_time; int i, best; mtx_assert(&softc->ctl_lock, MA_NOTOWNED); if (iid >= CTL_MAX_INIT_PER_PORT) { printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); free(name, M_CTL); return (-1); } mtx_lock(&softc->ctl_lock); if (iid < 0 && (wwpn != 0 || name != NULL)) { for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { iid = i; break; } if (name != NULL && port->wwpn_iid[i].name != NULL && strcmp(name, port->wwpn_iid[i].name) == 0) { iid = i; break; } } } if (iid < 0) { for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { if (port->wwpn_iid[i].in_use == 0 && port->wwpn_iid[i].wwpn == 0 && port->wwpn_iid[i].name == NULL) { iid = i; break; } } } if (iid < 0) { best = -1; best_time = INT32_MAX; for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { if (port->wwpn_iid[i].in_use == 0) { if (port->wwpn_iid[i].last_use < best_time) { best = i; best_time = port->wwpn_iid[i].last_use; } } } iid = best; } if (iid < 0) { mtx_unlock(&softc->ctl_lock); free(name, M_CTL); return (-2); } if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { /* * This is not an error yet. 
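 * A repeated login with the same WWPN or the same name just reuses the
 * existing slot; only a different identity at an in-use initiator ID is
 * complained about below.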
*/ if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { #if 0 printf("%s: port %d iid %u WWPN %#jx arrived" " again\n", __func__, port->targ_port, iid, (uintmax_t)wwpn); #endif goto take; } if (name != NULL && port->wwpn_iid[iid].name != NULL && strcmp(name, port->wwpn_iid[iid].name) == 0) { #if 0 printf("%s: port %d iid %u name '%s' arrived" " again\n", __func__, port->targ_port, iid, name); #endif goto take; } /* * This is an error, but what do we do about it? The * driver is telling us we have a new WWPN for this * initiator ID, so we pretty much need to use it. */ printf("%s: port %d iid %u WWPN %#jx '%s' arrived," " but WWPN %#jx '%s' is still at that address\n", __func__, port->targ_port, iid, wwpn, name, (uintmax_t)port->wwpn_iid[iid].wwpn, port->wwpn_iid[iid].name); } take: free(port->wwpn_iid[iid].name, M_CTL); port->wwpn_iid[iid].name = name; port->wwpn_iid[iid].wwpn = wwpn; port->wwpn_iid[iid].in_use++; mtx_unlock(&softc->ctl_lock); ctl_isc_announce_iid(port, iid); return (iid); } static int ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) { int len; switch (port->port_type) { case CTL_PORT_FC: { struct scsi_transportid_fcp *id = (struct scsi_transportid_fcp *)buf; if (port->wwpn_iid[iid].wwpn == 0) return (0); memset(id, 0, sizeof(*id)); id->format_protocol = SCSI_PROTO_FC; scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); return (sizeof(*id)); } case CTL_PORT_ISCSI: { struct scsi_transportid_iscsi_port *id = (struct scsi_transportid_iscsi_port *)buf; if (port->wwpn_iid[iid].name == NULL) return (0); memset(id, 0, 256); id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | SCSI_PROTO_ISCSI; len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; len = roundup2(min(len, 252), 4); scsi_ulto2b(len, id->additional_length); return (sizeof(*id) + len); } case CTL_PORT_SAS: { struct scsi_transportid_sas *id = (struct scsi_transportid_sas *)buf; if (port->wwpn_iid[iid].wwpn == 0) return (0); memset(id, 0, sizeof(*id)); id->format_protocol = SCSI_PROTO_SAS; scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); return (sizeof(*id)); } default: { struct scsi_transportid_spi *id = (struct scsi_transportid_spi *)buf; memset(id, 0, sizeof(*id)); id->format_protocol = SCSI_PROTO_SPI; scsi_ulto2b(iid, id->scsi_addr); scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); return (sizeof(*id)); } } } /* * Serialize a command that went down the "wrong" side, and so was sent to * this controller for execution. The logic is a little different than the * standard case in ctl_scsiio_precheck(). Errors in this case need to get * sent back to the other side, but in the success case, we execute the * command on this side (XFER mode) or tell the other side to execute it * (SER_ONLY mode). */ static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_port *port = CTL_PORT(ctsio); union ctl_ha_msg msg_info; struct ctl_lun *lun; const struct ctl_cmd_entry *entry; union ctl_io *bio; uint32_t targ_lun; targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; /* Make sure that we know about this port. */ if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) { ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, /*retry_count*/ 1); goto badjuju; } /* Make sure that we know about this LUN. 
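 * An unknown or disabled LUN is answered with BUSY, since the mismatch is
 * most likely the result of a race with LUN creation or removal.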
*/ mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); /* * The other node would not send this request to us unless * received announce that we are primary node for this LUN. * If this LUN does not exist now, it is probably result of * a race, so respond to initiator in the most opaque way. */ ctl_set_busy(ctsio); goto badjuju; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); /* * If the LUN is invalid, pretend that it doesn't exist. * It will go away as soon as all pending I/Os completed. */ if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); ctl_set_busy(ctsio); goto badjuju; } entry = ctl_get_cmd_entry(ctsio, NULL); if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { mtx_unlock(&lun->lun_lock); goto badjuju; } CTL_LUN(ctsio) = lun; CTL_BACKEND_LUN(ctsio) = lun->be_lun; /* * Every I/O goes into the OOA queue for a * particular LUN, and stays there until completion. */ #ifdef CTL_TIME_IO if (TAILQ_EMPTY(&lun->ooa_queue)) lun->idle_time += getsbinuptime() - lun->last_busy; #endif TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); bio = (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links); switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { case CTL_ACTION_BLOCK: ctsio->io_hdr.blocker = bio; TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, blocked_links); mtx_unlock(&lun->lun_lock); break; case CTL_ACTION_PASS: case CTL_ACTION_SKIP: if (softc->ha_mode == CTL_HA_MODE_XFER) { ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; ctl_enqueue_rtr((union ctl_io *)ctsio); mtx_unlock(&lun->lun_lock); } else { ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; mtx_unlock(&lun->lun_lock); /* send msg back to other side */ msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; msg_info.hdr.msg_type = CTL_MSG_R2R; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.hdr), M_WAITOK); } break; case CTL_ACTION_OVERLAP: TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); mtx_unlock(&lun->lun_lock); ctl_set_overlapped_cmd(ctsio); goto badjuju; case CTL_ACTION_OVERLAP_TAG: TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); mtx_unlock(&lun->lun_lock); ctl_set_overlapped_tag(ctsio, ctsio->tag_num); goto badjuju; case CTL_ACTION_ERROR: default: TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); mtx_unlock(&lun->lun_lock); ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, /*retry_count*/ 0); badjuju: ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; msg_info.hdr.serializing_sc = NULL; msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.scsi), M_WAITOK); ctl_free_io((union ctl_io *)ctsio); break; } } /* * Returns 0 for success, errno for failure. */ static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) { union ctl_io *io; mtx_lock(&lun->lun_lock); for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, ooa_links)) { struct ctl_ooa_entry *entry; /* * If we've got more than we can fit, just count the * remaining entries. 
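 * The overflow is reported to userland through ooa_hdr->dropped_num and the
 * CTL_OOA_NEED_MORE_SPACE status.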
*/ if (*cur_fill_num >= ooa_hdr->alloc_num) continue; entry = &kern_entries[*cur_fill_num]; entry->tag_num = io->scsiio.tag_num; entry->lun_num = lun->lun; #ifdef CTL_TIME_IO entry->start_bt = io->io_hdr.start_bt; #endif bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); entry->cdb_len = io->scsiio.cdb_len; if (io->io_hdr.blocker != NULL) entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; if (io->io_hdr.flags & CTL_FLAG_ABORT) entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; if (io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_QUEUED; if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_SENT; } mtx_unlock(&lun->lun_lock); } /* * Escape characters that are illegal or not recommended in XML. */ int ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) { char *end = str + size; int retval; retval = 0; for (; *str && str < end; str++) { switch (*str) { case '&': retval = sbuf_printf(sb, "&amp;"); break; case '>': retval = sbuf_printf(sb, "&gt;"); break; case '<': retval = sbuf_printf(sb, "&lt;"); break; default: retval = sbuf_putc(sb, *str); break; } if (retval != 0) break; } return (retval); } static void ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) { struct scsi_vpd_id_descriptor *desc; int i; if (id == NULL || id->len < 4) return; desc = (struct scsi_vpd_id_descriptor *)id->data; switch (desc->id_type & SVPD_ID_TYPE_MASK) { case SVPD_ID_TYPE_T10: sbuf_printf(sb, "t10."); break; case SVPD_ID_TYPE_EUI64: sbuf_printf(sb, "eui."); break; case SVPD_ID_TYPE_NAA: sbuf_printf(sb, "naa."); break; case SVPD_ID_TYPE_SCSI_NAME: break; } switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { case SVPD_ID_CODESET_BINARY: for (i = 0; i < desc->length; i++) sbuf_printf(sb, "%02x", desc->identifier[i]); break; case SVPD_ID_CODESET_ASCII: sbuf_printf(sb, "%.*s", (int)desc->length, (char *)desc->identifier); break; case SVPD_ID_CODESET_UTF8: sbuf_printf(sb, "%s", (char *)desc->identifier); break; } } static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_softc *softc = dev->si_drv1; struct ctl_port *port; struct ctl_lun *lun; int retval; retval = 0; switch (cmd) { case CTL_IO: retval = ctl_ioctl_io(dev, cmd, addr, flag, td); break; case CTL_ENABLE_PORT: case CTL_DISABLE_PORT: case CTL_SET_PORT_WWNS: { struct ctl_port *port; struct ctl_port_entry *entry; entry = (struct ctl_port_entry *)addr; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { int action, done; if (port->targ_port < softc->port_min || port->targ_port >= softc->port_max) continue; action = 0; done = 0; if ((entry->port_type == CTL_PORT_NONE) && (entry->targ_port == port->targ_port)) { /* * If the user only wants to enable or * disable or set WWNs on a specific port, * do the operation and we're done. */ action = 1; done = 1; } else if (entry->port_type & port->port_type) { /* * Compare the user's type mask with the * particular frontend type to see if we * have a match. */ action = 1; done = 0; /* * Make sure the user isn't trying to set * WWNs on multiple ports at the same time. */
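			/* Such a request is rejected below with EINVAL. */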
*/ if (cmd == CTL_SET_PORT_WWNS) { printf("%s: Can't set WWNs on " "multiple ports\n", __func__); retval = EINVAL; break; } } if (action == 0) continue; /* * XXX KDM we have to drop the lock here, because * the online/offline operations can potentially * block. We need to reference count the frontends * so they can't go away, */ if (cmd == CTL_ENABLE_PORT) { mtx_unlock(&softc->ctl_lock); ctl_port_online(port); mtx_lock(&softc->ctl_lock); } else if (cmd == CTL_DISABLE_PORT) { mtx_unlock(&softc->ctl_lock); ctl_port_offline(port); mtx_lock(&softc->ctl_lock); } else if (cmd == CTL_SET_PORT_WWNS) { ctl_port_set_wwns(port, (entry->flags & CTL_PORT_WWNN_VALID) ? 1 : 0, entry->wwnn, (entry->flags & CTL_PORT_WWPN_VALID) ? 1 : 0, entry->wwpn); } if (done != 0) break; } mtx_unlock(&softc->ctl_lock); break; } case CTL_GET_OOA: { struct ctl_ooa *ooa_hdr; struct ctl_ooa_entry *entries; uint32_t cur_fill_num; ooa_hdr = (struct ctl_ooa *)addr; if ((ooa_hdr->alloc_len == 0) || (ooa_hdr->alloc_num == 0)) { printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " "must be non-zero\n", __func__, ooa_hdr->alloc_len, ooa_hdr->alloc_num); retval = EINVAL; break; } if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * sizeof(struct ctl_ooa_entry))) { printf("%s: CTL_GET_OOA: alloc len %u must be alloc " "num %d * sizeof(struct ctl_ooa_entry) %zd\n", __func__, ooa_hdr->alloc_len, ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); retval = EINVAL; break; } entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); if (entries == NULL) { printf("%s: could not allocate %d bytes for OOA " "dump\n", __func__, ooa_hdr->alloc_len); retval = ENOMEM; break; } mtx_lock(&softc->ctl_lock); if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && (ooa_hdr->lun_num >= ctl_max_luns || softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { mtx_unlock(&softc->ctl_lock); free(entries, M_CTL); printf("%s: CTL_GET_OOA: invalid LUN %ju\n", __func__, (uintmax_t)ooa_hdr->lun_num); retval = EINVAL; break; } cur_fill_num = 0; if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { STAILQ_FOREACH(lun, &softc->lun_list, links) { ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, entries); } } else { lun = softc->ctl_luns[ooa_hdr->lun_num]; ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, entries); } mtx_unlock(&softc->ctl_lock); ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); ooa_hdr->fill_len = ooa_hdr->fill_num * sizeof(struct ctl_ooa_entry); retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); if (retval != 0) { printf("%s: error copying out %d bytes for OOA dump\n", __func__, ooa_hdr->fill_len); } getbinuptime(&ooa_hdr->cur_bt); if (cur_fill_num > ooa_hdr->alloc_num) { ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; } else { ooa_hdr->dropped_num = 0; ooa_hdr->status = CTL_OOA_OK; } free(entries, M_CTL); break; } case CTL_DELAY_IO: { struct ctl_io_delay_info *delay_info; delay_info = (struct ctl_io_delay_info *)addr; #ifdef CTL_IO_DELAY mtx_lock(&softc->ctl_lock); if (delay_info->lun_id >= ctl_max_luns || (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { mtx_unlock(&softc->ctl_lock); delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; break; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); delay_info->status = CTL_DELAY_STATUS_OK; switch (delay_info->delay_type) { case CTL_DELAY_TYPE_CONT: case CTL_DELAY_TYPE_ONESHOT: break; default: delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; break; } switch (delay_info->delay_loc) { case CTL_DELAY_LOC_DATAMOVE: 
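The CTL_GET_OOA handler validates the user-supplied sizes before allocating or copying anything: both counts must be non-zero and alloc_len must equal alloc_num times the entry size. A hedged userland sketch of that check, with a hypothetical entry layout and an explicit overflow guard added for illustration:

#include <stdint.h>
#include <stdio.h>

struct ooa_entry {		/* hypothetical fixed-size wire entry */
	uint64_t tag_num;
	uint64_t lun_num;
	uint8_t  cdb[32];
};

/* Return 0 if the request header is self-consistent, -1 otherwise. */
static int
validate_ooa_request(uint32_t alloc_len, uint32_t alloc_num)
{
	if (alloc_len == 0 || alloc_num == 0)
		return (-1);
	/* Reject multiplication overflow before comparing. */
	if (alloc_num > UINT32_MAX / sizeof(struct ooa_entry))
		return (-1);
	if (alloc_len != alloc_num * (uint32_t)sizeof(struct ooa_entry))
		return (-1);
	return (0);
}

int
main(void)
{
	printf("%d\n", validate_ooa_request(3 * sizeof(struct ooa_entry), 3));
	printf("%d\n", validate_ooa_request(100, 3));	/* mismatched: -1 */
	return (0);
}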
lun->delay_info.datamove_type = delay_info->delay_type; lun->delay_info.datamove_delay = delay_info->delay_secs; break; case CTL_DELAY_LOC_DONE: lun->delay_info.done_type = delay_info->delay_type; lun->delay_info.done_delay = delay_info->delay_secs; break; default: delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; break; } mtx_unlock(&lun->lun_lock); #else delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; #endif /* CTL_IO_DELAY */ break; } case CTL_ERROR_INJECT: { struct ctl_error_desc *err_desc, *new_err_desc; err_desc = (struct ctl_error_desc *)addr; new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, M_WAITOK | M_ZERO); bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); mtx_lock(&softc->ctl_lock); if (err_desc->lun_id >= ctl_max_luns || (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { mtx_unlock(&softc->ctl_lock); free(new_err_desc, M_CTL); printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", __func__, (uintmax_t)err_desc->lun_id); retval = EINVAL; break; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); /* * We could do some checking here to verify the validity * of the request, but given the complexity of error * injection requests, the checking logic would be fairly * complex. * * For now, if the request is invalid, it just won't get * executed and might get deleted. */ STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); /* * XXX KDM check to make sure the serial number is unique, * in case we somehow manage to wrap. That shouldn't * happen for a very long time, but it's the right thing to * do. */ new_err_desc->serial = lun->error_serial; err_desc->serial = lun->error_serial; lun->error_serial++; mtx_unlock(&lun->lun_lock); break; } case CTL_ERROR_INJECT_DELETE: { struct ctl_error_desc *delete_desc, *desc, *desc2; int delete_done; delete_desc = (struct ctl_error_desc *)addr; delete_done = 0; mtx_lock(&softc->ctl_lock); if (delete_desc->lun_id >= ctl_max_luns || (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { mtx_unlock(&softc->ctl_lock); printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", __func__, (uintmax_t)delete_desc->lun_id); retval = EINVAL; break; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { if (desc->serial != delete_desc->serial) continue; STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); free(desc, M_CTL); delete_done = 1; } mtx_unlock(&lun->lun_lock); if (delete_done == 0) { printf("%s: CTL_ERROR_INJECT_DELETE: can't find " "error serial %ju on LUN %u\n", __func__, delete_desc->serial, delete_desc->lun_id); retval = EINVAL; break; } break; } case CTL_DUMP_STRUCTS: { int j, k; struct ctl_port *port; struct ctl_frontend *fe; mtx_lock(&softc->ctl_lock); printf("CTL Persistent Reservation information start:\n"); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); if ((lun->flags & CTL_LUN_DISABLED) != 0) { mtx_unlock(&lun->lun_lock); continue; } for (j = 0; j < ctl_max_ports; j++) { if (lun->pr_keys[j] == NULL) continue; for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ if (lun->pr_keys[j][k] == 0) continue; printf(" LUN %ju port %d iid %d key " "%#jx\n", lun->lun, j, k, (uintmax_t)lun->pr_keys[j][k]); } } mtx_unlock(&lun->lun_lock); } printf("CTL Persistent Reservation information end\n"); printf("CTL Ports:\n"); STAILQ_FOREACH(port, &softc->port_list, links) { printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " "%#jx WWPN %#jx\n", port->targ_port, port->port_name, port->frontend->name, port->port_type, port->physical_port, 
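Error-injection descriptors live on a per-LUN STAILQ and are identified by a monotonically increasing serial number, which is what CTL_ERROR_INJECT_DELETE searches for. A compact userland model of that bookkeeping (names are hypothetical):

#include <sys/queue.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct err_desc {
	uint64_t serial;
	STAILQ_ENTRY(err_desc) links;
};

static STAILQ_HEAD(, err_desc) error_list =
    STAILQ_HEAD_INITIALIZER(error_list);
static uint64_t error_serial;

static uint64_t
err_add(void)
{
	struct err_desc *d = calloc(1, sizeof(*d));

	d->serial = error_serial++;
	STAILQ_INSERT_TAIL(&error_list, d, links);
	return (d->serial);
}

static int
err_delete(uint64_t serial)
{
	struct err_desc *d, *tmp;

	STAILQ_FOREACH_SAFE(d, &error_list, links, tmp) {
		if (d->serial != serial)
			continue;
		STAILQ_REMOVE(&error_list, d, err_desc, links);
		free(d);
		return (0);
	}
	return (-1);		/* not found */
}

int
main(void)
{
	uint64_t s = err_add();

	(void)err_add();
	printf("delete %ju -> %d\n", (uintmax_t)s, err_delete(s));
	printf("delete 99 -> %d\n", err_delete(99));
	return (0);
}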
port->virtual_port, (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { if (port->wwpn_iid[j].in_use == 0 && port->wwpn_iid[j].wwpn == 0 && port->wwpn_iid[j].name == NULL) continue; printf(" iid %u use %d WWPN %#jx '%s'\n", j, port->wwpn_iid[j].in_use, (uintmax_t)port->wwpn_iid[j].wwpn, port->wwpn_iid[j].name); } } printf("CTL Port information end\n"); mtx_unlock(&softc->ctl_lock); /* * XXX KDM calling this without a lock. We'd likely want * to drop the lock before calling the frontend's dump * routine anyway. */ printf("CTL Frontends:\n"); STAILQ_FOREACH(fe, &softc->fe_list, links) { printf(" Frontend '%s'\n", fe->name); if (fe->fe_dump != NULL) fe->fe_dump(); } printf("CTL Frontend information end\n"); break; } case CTL_LUN_REQ: { struct ctl_lun_req *lun_req; struct ctl_backend_driver *backend; void *packed; nvlist_t *tmp_args_nvl; size_t packed_len; lun_req = (struct ctl_lun_req *)addr; tmp_args_nvl = lun_req->args_nvl; backend = ctl_backend_find(lun_req->backend); if (backend == NULL) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Backend \"%s\" not found.", lun_req->backend); break; } if (lun_req->args != NULL) { packed = malloc(lun_req->args_len, M_CTL, M_WAITOK); if (copyin(lun_req->args, packed, lun_req->args_len) != 0) { free(packed, M_CTL); lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Cannot copyin args."); break; } lun_req->args_nvl = nvlist_unpack(packed, lun_req->args_len, 0); free(packed, M_CTL); if (lun_req->args_nvl == NULL) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Cannot unpack args nvlist."); break; } } else lun_req->args_nvl = nvlist_create(0); retval = backend->ioctl(dev, cmd, addr, flag, td); nvlist_destroy(lun_req->args_nvl); lun_req->args_nvl = tmp_args_nvl; if (lun_req->result_nvl != NULL) { if (lun_req->result != NULL) { packed = nvlist_pack(lun_req->result_nvl, &packed_len); if (packed == NULL) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Cannot pack result nvlist."); break; } if (packed_len > lun_req->result_len) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Result nvlist too large."); free(packed, M_NVLIST); break; } if (copyout(packed, lun_req->result, packed_len)) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Cannot copyout() the result."); free(packed, M_NVLIST); break; } lun_req->result_len = packed_len; free(packed, M_NVLIST); } nvlist_destroy(lun_req->result_nvl); } break; } case CTL_LUN_LIST: { struct sbuf *sb; struct ctl_lun_list *list; const char *name, *value; void *cookie; int type; list = (struct ctl_lun_list *)addr; /* * Allocate a fixed length sbuf here, based on the length * of the user's buffer. We could allocate an auto-extending * buffer, and then tell the user how much larger our * amount of data is than his buffer, but that presents * some problems: * * 1. The sbuf(9) routines use a blocking malloc, and so * we can't hold a lock while calling them with an * auto-extending buffer. * * 2. There is not currently a LUN reference counting * mechanism, outside of outstanding transactions on * the LUN's OOA queue. So a LUN could go away on us * while we're getting the LUN number, backend-specific * information, etc. Thus, given the way things * currently work, we need to hold the CTL lock while * grabbing LUN information. 
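CTL_LUN_REQ (and CTL_PORT_REQ below) move their arguments and results as packed nvlists: userland packs an nvlist into a flat buffer, the kernel unpacks it, and results travel back the same way. A minimal libnv round trip showing the pack/unpack halves (error handling trimmed):

#include <sys/nv.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

/* Build on FreeBSD with: cc -o nvdemo nvdemo.c -lnv */
int
main(void)
{
	nvlist_t *args, *copy;
	void *packed;
	size_t packed_len;

	args = nvlist_create(0);
	nvlist_add_string(args, "file", "/dev/zvol/tank/vol0");

	/* What a userland tool would pass as args/args_len. */
	packed = nvlist_pack(args, &packed_len);
	if (packed == NULL)
		err(1, "nvlist_pack");

	/* What the receiving side does with the flat buffer. */
	copy = nvlist_unpack(packed, packed_len, 0);
	if (copy == NULL)
		errx(1, "nvlist_unpack failed");
	printf("file = %s\n", nvlist_get_string(copy, "file"));

	nvlist_destroy(copy);
	free(packed);
	nvlist_destroy(args);
	return (0);
}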
* * So, from the user's standpoint, the best thing to do is * allocate what he thinks is a reasonable buffer length, * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, * double the buffer length and try again. (And repeat * that until he succeeds.) */ sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); if (sb == NULL) { list->status = CTL_LUN_LIST_ERROR; snprintf(list->error_str, sizeof(list->error_str), "Unable to allocate %d bytes for LUN list", list->alloc_len); break; } sbuf_printf(sb, "\n"); mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); retval = sbuf_printf(sb, "\n", (uintmax_t)lun->lun); /* * Bail out as soon as we see that we've overfilled * the buffer. */ if (retval != 0) break; retval = sbuf_printf(sb, "\t%s" "\n", (lun->backend == NULL) ? "none" : lun->backend->name); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", lun->be_lun->lun_type); if (retval != 0) break; if (lun->backend == NULL) { retval = sbuf_printf(sb, "\n"); if (retval != 0) break; continue; } retval = sbuf_printf(sb, "\t%ju\n", (lun->be_lun->maxlba > 0) ? lun->be_lun->maxlba + 1 : 0); if (retval != 0) break; retval = sbuf_printf(sb, "\t%u\n", lun->be_lun->blocksize); if (retval != 0) break; retval = sbuf_printf(sb, "\t"); if (retval != 0) break; retval = ctl_sbuf_printf_esc(sb, lun->be_lun->serial_num, sizeof(lun->be_lun->serial_num)); if (retval != 0) break; retval = sbuf_printf(sb, "\n"); if (retval != 0) break; retval = sbuf_printf(sb, "\t"); if (retval != 0) break; retval = ctl_sbuf_printf_esc(sb, lun->be_lun->device_id, sizeof(lun->be_lun->device_id)); if (retval != 0) break; retval = sbuf_printf(sb, "\n"); if (retval != 0) break; if (lun->backend->lun_info != NULL) { retval = lun->backend->lun_info(lun->be_lun, sb); if (retval != 0) break; } cookie = NULL; while ((name = nvlist_next(lun->be_lun->options, &type, &cookie)) != NULL) { sbuf_printf(sb, "\t<%s>", name); if (type == NV_TYPE_STRING) { value = dnvlist_get_string( lun->be_lun->options, name, NULL); if (value != NULL) sbuf_printf(sb, "%s", value); } sbuf_printf(sb, "\n", name); } retval = sbuf_printf(sb, "\n"); if (retval != 0) break; mtx_unlock(&lun->lun_lock); } if (lun != NULL) mtx_unlock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if ((retval != 0) || ((retval = sbuf_printf(sb, "\n")) != 0)) { retval = 0; sbuf_delete(sb); list->status = CTL_LUN_LIST_NEED_MORE_SPACE; snprintf(list->error_str, sizeof(list->error_str), "Out of space, %d bytes is too small", list->alloc_len); break; } sbuf_finish(sb); retval = copyout(sbuf_data(sb), list->lun_xml, sbuf_len(sb) + 1); list->fill_len = sbuf_len(sb) + 1; list->status = CTL_LUN_LIST_OK; sbuf_delete(sb); break; } case CTL_ISCSI: { struct ctl_iscsi *ci; struct ctl_frontend *fe; ci = (struct ctl_iscsi *)addr; fe = ctl_frontend_find("iscsi"); if (fe == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "Frontend \"iscsi\" not found."); break; } retval = fe->ioctl(dev, cmd, addr, flag, td); break; } case CTL_PORT_REQ: { struct ctl_req *req; struct ctl_frontend *fe; void *packed; nvlist_t *tmp_args_nvl; size_t packed_len; req = (struct ctl_req *)addr; tmp_args_nvl = req->args_nvl; fe = ctl_frontend_find(req->driver); if (fe == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Frontend \"%s\" not found.", req->driver); break; } if (req->args != NULL) { packed = malloc(req->args_len, M_CTL, M_WAITOK); if (copyin(req->args, packed, req->args_len) != 0) { 
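The comment above spells out the userland contract for the list ioctls: pick a buffer size, and if the status comes back CTL_LUN_LIST_NEED_MORE_SPACE, double it and retry. The kernel side relies on fixed-length sbufs reporting overflow; a small userland demonstration of that overflow-and-retry behavior with libsbuf:

#include <sys/sbuf.h>
#include <stdio.h>

/* Build on FreeBSD with: cc -o sbufdemo sbufdemo.c -lsbuf */
int
main(void)
{
	struct sbuf *sb;
	int len;

	for (len = 16; len <= 4096; len *= 2) {
		sb = sbuf_new(NULL, NULL, len, SBUF_FIXEDLEN);
		if (sb == NULL)
			return (1);
		if (sbuf_printf(sb, "<ctllunlist>\n") != 0 ||
		    sbuf_printf(sb, "<lun id=\"%d\">backend block</lun>\n", 0) != 0 ||
		    sbuf_printf(sb, "</ctllunlist>\n") != 0) {
			/* Overflow: the caller's cue to double and retry. */
			sbuf_delete(sb);
			continue;
		}
		sbuf_finish(sb);
		printf("fit in %d bytes:\n%s", len, sbuf_data(sb));
		sbuf_delete(sb);
		break;
	}
	return (0);
}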
free(packed, M_CTL); req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Cannot copyin args."); break; } req->args_nvl = nvlist_unpack(packed, req->args_len, 0); free(packed, M_CTL); if (req->args_nvl == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Cannot unpack args nvlist."); break; } } else req->args_nvl = nvlist_create(0); if (fe->ioctl) retval = fe->ioctl(dev, cmd, addr, flag, td); else retval = ENODEV; nvlist_destroy(req->args_nvl); req->args_nvl = tmp_args_nvl; if (req->result_nvl != NULL) { if (req->result != NULL) { packed = nvlist_pack(req->result_nvl, &packed_len); if (packed == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Cannot pack result nvlist."); break; } if (packed_len > req->result_len) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Result nvlist too large."); free(packed, M_NVLIST); break; } if (copyout(packed, req->result, packed_len)) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Cannot copyout() the result."); free(packed, M_NVLIST); break; } req->result_len = packed_len; free(packed, M_NVLIST); } nvlist_destroy(req->result_nvl); } break; } case CTL_PORT_LIST: { struct sbuf *sb; struct ctl_port *port; struct ctl_lun_list *list; const char *name, *value; void *cookie; int j, type; uint32_t plun; list = (struct ctl_lun_list *)addr; sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); if (sb == NULL) { list->status = CTL_LUN_LIST_ERROR; snprintf(list->error_str, sizeof(list->error_str), "Unable to allocate %d bytes for LUN list", list->alloc_len); break; } sbuf_printf(sb, "\n"); mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { retval = sbuf_printf(sb, "\n", (uintmax_t)port->targ_port); /* * Bail out as soon as we see that we've overfilled * the buffer. */ if (retval != 0) break; retval = sbuf_printf(sb, "\t%s" "\n", port->frontend->name); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", port->port_type); if (retval != 0) break; retval = sbuf_printf(sb, "\t%s\n", (port->status & CTL_PORT_STATUS_ONLINE) ? 
"YES" : "NO"); if (retval != 0) break; retval = sbuf_printf(sb, "\t%s\n", port->port_name); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", port->physical_port); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", port->virtual_port); if (retval != 0) break; if (port->target_devid != NULL) { sbuf_printf(sb, "\t"); ctl_id_sbuf(port->target_devid, sb); sbuf_printf(sb, "\n"); } if (port->port_devid != NULL) { sbuf_printf(sb, "\t"); ctl_id_sbuf(port->port_devid, sb); sbuf_printf(sb, "\n"); } if (port->port_info != NULL) { retval = port->port_info(port->onoff_arg, sb); if (retval != 0) break; } cookie = NULL; while ((name = nvlist_next(port->options, &type, &cookie)) != NULL) { sbuf_printf(sb, "\t<%s>", name); if (type == NV_TYPE_STRING) { value = dnvlist_get_string(port->options, name, NULL); if (value != NULL) sbuf_printf(sb, "%s", value); } sbuf_printf(sb, "\n", name); } if (port->lun_map != NULL) { sbuf_printf(sb, "\ton\n"); for (j = 0; j < port->lun_map_size; j++) { plun = ctl_lun_map_from_port(port, j); if (plun == UINT32_MAX) continue; sbuf_printf(sb, "\t%u\n", j, plun); } } for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { if (port->wwpn_iid[j].in_use == 0 || (port->wwpn_iid[j].wwpn == 0 && port->wwpn_iid[j].name == NULL)) continue; if (port->wwpn_iid[j].name != NULL) retval = sbuf_printf(sb, "\t%s\n", j, port->wwpn_iid[j].name); else retval = sbuf_printf(sb, "\tnaa.%08jx\n", j, port->wwpn_iid[j].wwpn); if (retval != 0) break; } if (retval != 0) break; retval = sbuf_printf(sb, "\n"); if (retval != 0) break; } mtx_unlock(&softc->ctl_lock); if ((retval != 0) || ((retval = sbuf_printf(sb, "\n")) != 0)) { retval = 0; sbuf_delete(sb); list->status = CTL_LUN_LIST_NEED_MORE_SPACE; snprintf(list->error_str, sizeof(list->error_str), "Out of space, %d bytes is too small", list->alloc_len); break; } sbuf_finish(sb); retval = copyout(sbuf_data(sb), list->lun_xml, sbuf_len(sb) + 1); list->fill_len = sbuf_len(sb) + 1; list->status = CTL_LUN_LIST_OK; sbuf_delete(sb); break; } case CTL_LUN_MAP: { struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; struct ctl_port *port; mtx_lock(&softc->ctl_lock); if (lm->port < softc->port_min || lm->port >= softc->port_max || (port = softc->ctl_ports[lm->port]) == NULL) { mtx_unlock(&softc->ctl_lock); return (ENXIO); } if (port->status & CTL_PORT_STATUS_ONLINE) { STAILQ_FOREACH(lun, &softc->lun_list, links) { if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; mtx_lock(&lun->lun_lock); ctl_est_ua_port(lun, lm->port, -1, CTL_UA_LUN_CHANGE); mtx_unlock(&lun->lun_lock); } } mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps if (lm->plun != UINT32_MAX) { if (lm->lun == UINT32_MAX) retval = ctl_lun_map_unset(port, lm->plun); else if (lm->lun < ctl_max_luns && softc->ctl_luns[lm->lun] != NULL) retval = ctl_lun_map_set(port, lm->plun, lm->lun); else return (ENXIO); } else { if (lm->lun == UINT32_MAX) retval = ctl_lun_map_deinit(port); else retval = ctl_lun_map_init(port); } if (port->status & CTL_PORT_STATUS_ONLINE) ctl_isc_announce_port(port); break; } case CTL_GET_LUN_STATS: { struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; int i; /* * XXX KDM no locking here. If the LUN list changes, * things can blow up. 
*/ i = 0; stats->status = CTL_SS_OK; stats->fill_len = 0; STAILQ_FOREACH(lun, &softc->lun_list, links) { if (lun->lun < stats->first_item) continue; if (stats->fill_len + sizeof(lun->stats) > stats->alloc_len) { stats->status = CTL_SS_NEED_MORE_SPACE; break; } retval = copyout(&lun->stats, &stats->stats[i++], sizeof(lun->stats)); if (retval != 0) break; stats->fill_len += sizeof(lun->stats); } stats->num_items = softc->num_luns; stats->flags = CTL_STATS_FLAG_NONE; #ifdef CTL_TIME_IO stats->flags |= CTL_STATS_FLAG_TIME_VALID; #endif getnanouptime(&stats->timestamp); break; } case CTL_GET_PORT_STATS: { struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; int i; /* * XXX KDM no locking here. If the LUN list changes, * things can blow up. */ i = 0; stats->status = CTL_SS_OK; stats->fill_len = 0; STAILQ_FOREACH(port, &softc->port_list, links) { if (port->targ_port < stats->first_item) continue; if (stats->fill_len + sizeof(port->stats) > stats->alloc_len) { stats->status = CTL_SS_NEED_MORE_SPACE; break; } retval = copyout(&port->stats, &stats->stats[i++], sizeof(port->stats)); if (retval != 0) break; stats->fill_len += sizeof(port->stats); } stats->num_items = softc->num_ports; stats->flags = CTL_STATS_FLAG_NONE; #ifdef CTL_TIME_IO stats->flags |= CTL_STATS_FLAG_TIME_VALID; #endif getnanouptime(&stats->timestamp); break; } default: { /* XXX KDM should we fix this? */ #if 0 struct ctl_backend_driver *backend; unsigned int type; int found; found = 0; /* * We encode the backend type as the ioctl type for backend * ioctls. So parse it out here, and then search for a * backend of this type. */ type = _IOC_TYPE(cmd); STAILQ_FOREACH(backend, &softc->be_list, links) { if (backend->type == type) { found = 1; break; } } if (found == 0) { printf("ctl: unknown ioctl command %#lx or backend " "%d\n", cmd, type); retval = EINVAL; break; } retval = backend->ioctl(dev, cmd, addr, flag, td); #endif retval = ENOTTY; break; } } return (retval); } uint32_t ctl_get_initindex(struct ctl_nexus *nexus) { return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); } int ctl_lun_map_init(struct ctl_port *port) { struct ctl_softc *softc = port->ctl_softc; struct ctl_lun *lun; int size = ctl_lun_map_size; uint32_t i; if (port->lun_map == NULL || port->lun_map_size < size) { port->lun_map_size = 0; free(port->lun_map, M_CTL); port->lun_map = malloc(size * sizeof(uint32_t), M_CTL, M_NOWAIT); } if (port->lun_map == NULL) return (ENOMEM); for (i = 0; i < size; i++) port->lun_map[i] = UINT32_MAX; port->lun_map_size = size; if (port->status & CTL_PORT_STATUS_ONLINE) { if (port->lun_disable != NULL) { STAILQ_FOREACH(lun, &softc->lun_list, links) port->lun_disable(port->targ_lun_arg, lun->lun); } ctl_isc_announce_port(port); } return (0); } int ctl_lun_map_deinit(struct ctl_port *port) { struct ctl_softc *softc = port->ctl_softc; struct ctl_lun *lun; if (port->lun_map == NULL) return (0); port->lun_map_size = 0; free(port->lun_map, M_CTL); port->lun_map = NULL; if (port->status & CTL_PORT_STATUS_ONLINE) { if (port->lun_enable != NULL) { STAILQ_FOREACH(lun, &softc->lun_list, links) port->lun_enable(port->targ_lun_arg, lun->lun); } ctl_isc_announce_port(port); } return (0); } int ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) { int status; uint32_t old; if (port->lun_map == NULL) { status = ctl_lun_map_init(port); if (status != 0) return (status); } if (plun >= port->lun_map_size) return (EINVAL); old = port->lun_map[plun]; port->lun_map[plun] = glun; if ((port->status & 
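A port's lun_map is an array indexed by port-visible LUN, with UINT32_MAX meaning "not mapped"; the reverse (global LUN to port LUN) lookup is a linear scan. A compact model of those operations, under those assumptions:

#include <stdint.h>
#include <stdio.h>

#define MAP_SIZE	8
#define UNMAPPED	UINT32_MAX

static uint32_t lun_map[MAP_SIZE];

static void
map_init(void)
{
	for (unsigned i = 0; i < MAP_SIZE; i++)
		lun_map[i] = UNMAPPED;
}

/* port LUN -> global LUN (the "from_port" direction). */
static uint32_t
map_from_port(uint32_t plun)
{
	return (plun < MAP_SIZE ? lun_map[plun] : UNMAPPED);
}

/* global LUN -> port LUN (the "to_port" direction, linear scan). */
static uint32_t
map_to_port(uint32_t glun)
{
	for (uint32_t i = 0; i < MAP_SIZE; i++)
		if (lun_map[i] == glun)
			return (i);
	return (UNMAPPED);
}

int
main(void)
{
	map_init();
	lun_map[0] = 17;		/* expose global LUN 17 as LUN 0 */
	printf("port 0 -> global %u\n", map_from_port(0));
	printf("global 17 -> port %u\n", map_to_port(17));
	printf("global 5 -> port %u\n", map_to_port(5));	/* unmapped */
	return (0);
}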
CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) { if (port->lun_enable != NULL) port->lun_enable(port->targ_lun_arg, plun); ctl_isc_announce_port(port); } return (0); } int ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) { uint32_t old; if (port->lun_map == NULL || plun >= port->lun_map_size) return (0); old = port->lun_map[plun]; port->lun_map[plun] = UINT32_MAX; if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) { if (port->lun_disable != NULL) port->lun_disable(port->targ_lun_arg, plun); ctl_isc_announce_port(port); } return (0); } uint32_t ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) { if (port == NULL) return (UINT32_MAX); if (port->lun_map == NULL) return (lun_id); if (lun_id > port->lun_map_size) return (UINT32_MAX); return (port->lun_map[lun_id]); } uint32_t ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) { uint32_t i; if (port == NULL) return (UINT32_MAX); if (port->lun_map == NULL) return (lun_id); for (i = 0; i < port->lun_map_size; i++) { if (port->lun_map[i] == lun_id) return (i); } return (UINT32_MAX); } uint32_t ctl_decode_lun(uint64_t encoded) { uint8_t lun[8]; uint32_t result = 0xffffffff; be64enc(lun, encoded); switch (lun[0] & RPL_LUNDATA_ATYP_MASK) { case RPL_LUNDATA_ATYP_PERIPH: if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) result = lun[1]; break; case RPL_LUNDATA_ATYP_FLAT: if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) result = ((lun[0] & 0x3f) << 8) + lun[1]; break; case RPL_LUNDATA_ATYP_EXTLUN: switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) { case 0x02: switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) { case 0x00: result = lun[1]; break; case 0x10: result = (lun[1] << 16) + (lun[2] << 8) + lun[3]; break; case 0x20: if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0) result = (lun[2] << 24) + (lun[3] << 16) + (lun[4] << 8) + lun[5]; break; } break; case RPL_LUNDATA_EXT_EAM_NOT_SPEC: result = 0xffffffff; break; } break; } return (result); } uint64_t ctl_encode_lun(uint32_t decoded) { uint64_t l = decoded; if (l <= 0xff) return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48)); if (l <= 0x3fff) return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48)); if (l <= 0xffffff) return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) | (l << 32)); return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16)); } int ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) { int i; for (i = first; i < last; i++) { if ((mask[i / 32] & (1 << (i % 32))) == 0) return (i); } return (-1); } int ctl_set_mask(uint32_t *mask, uint32_t bit) { uint32_t chunk, piece; chunk = bit >> 5; piece = bit % (sizeof(uint32_t) * 8); if ((mask[chunk] & (1 << piece)) != 0) return (-1); else mask[chunk] |= (1 << piece); return (0); } int ctl_clear_mask(uint32_t *mask, uint32_t bit) { uint32_t chunk, piece; chunk = bit >> 5; piece = bit % (sizeof(uint32_t) * 8); if ((mask[chunk] & (1 << piece)) == 0) return (-1); else mask[chunk] &= ~(1 << piece); return (0); } int ctl_is_set(uint32_t *mask, uint32_t bit) { uint32_t chunk, piece; chunk = bit >> 5; piece = bit % (sizeof(uint32_t) * 8); if ((mask[chunk] & (1 << piece)) == 0) return (0); else return (1); } static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) { uint64_t *t; t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; if (t == NULL) return (0); return (t[residx % CTL_MAX_INIT_PER_PORT]); } static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) { uint64_t *t; t = 
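ctl_encode_lun() and ctl_decode_lun() convert between a flat 32-bit LUN number and the 8-byte SAM-style LUN field, choosing peripheral, flat, or extended addressing by magnitude. A hedged round-trip sketch covering only the two small encodings:

#include <stdint.h>
#include <stdio.h>

/* Address-method bits in byte 0 of the 8-byte LUN field (SAM). */
#define ATYP_PERIPH	0x00
#define ATYP_FLAT	0x40

/* Flat 32-bit LUN -> 64-bit LUN field, small cases only. */
static uint64_t
lun_encode(uint32_t lun)
{
	uint64_t l = lun;

	if (l <= 0xff)			/* peripheral device addressing */
		return (((uint64_t)ATYP_PERIPH << 56) | (l << 48));
	if (l <= 0x3fff)		/* flat space addressing */
		return (((uint64_t)ATYP_FLAT << 56) | (l << 48));
	return (UINT64_MAX);		/* extended forms omitted here */
}

/* Inverse of the above, again for the two small encodings only. */
static uint32_t
lun_decode(uint64_t encoded)
{
	uint8_t b0 = encoded >> 56, b1 = encoded >> 48;

	switch (b0 & 0xc0) {
	case ATYP_PERIPH:
		return (b1);
	case ATYP_FLAT:
		return (((b0 & 0x3f) << 8) | b1);
	default:
		return (UINT32_MAX);
	}
}

int
main(void)
{
	printf("%u\n", lun_decode(lun_encode(5)));	/* 5 */
	printf("%u\n", lun_decode(lun_encode(300)));	/* 300 */
	return (0);
}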
lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; if (t == NULL) return; t[residx % CTL_MAX_INIT_PER_PORT] = 0; } static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) { uint64_t *p; u_int i; i = residx/CTL_MAX_INIT_PER_PORT; if (lun->pr_keys[i] != NULL) return; mtx_unlock(&lun->lun_lock); p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, M_WAITOK | M_ZERO); mtx_lock(&lun->lun_lock); if (lun->pr_keys[i] == NULL) lun->pr_keys[i] = p; else free(p, M_CTL); } static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) { uint64_t *t; t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; KASSERT(t != NULL, ("prkey %d is not allocated", residx)); t[residx % CTL_MAX_INIT_PER_PORT] = key; } /* * ctl_softc, pool_name, total_ctl_io are passed in. * npool is passed out. */ int ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, uint32_t total_ctl_io, void **npool) { struct ctl_io_pool *pool; pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, M_NOWAIT | M_ZERO); if (pool == NULL) return (ENOMEM); snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); pool->ctl_softc = ctl_softc; #ifdef IO_POOLS pool->zone = uma_zsecond_create(pool->name, NULL, NULL, NULL, NULL, ctl_softc->io_zone); /* uma_prealloc(pool->zone, total_ctl_io); */ #else pool->zone = ctl_softc->io_zone; #endif *npool = pool; return (0); } void ctl_pool_free(struct ctl_io_pool *pool) { if (pool == NULL) return; #ifdef IO_POOLS uma_zdestroy(pool->zone); #endif free(pool, M_CTL); } union ctl_io * ctl_alloc_io(void *pool_ref) { struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; union ctl_io *io; io = uma_zalloc(pool->zone, M_WAITOK); if (io != NULL) { io->io_hdr.pool = pool_ref; CTL_SOFTC(io) = pool->ctl_softc; TAILQ_INIT(&io->io_hdr.blocked_queue); } return (io); } union ctl_io * ctl_alloc_io_nowait(void *pool_ref) { struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; union ctl_io *io; io = uma_zalloc(pool->zone, M_NOWAIT); if (io != NULL) { io->io_hdr.pool = pool_ref; CTL_SOFTC(io) = pool->ctl_softc; TAILQ_INIT(&io->io_hdr.blocked_queue); } return (io); } void ctl_free_io(union ctl_io *io) { struct ctl_io_pool *pool; if (io == NULL) return; pool = (struct ctl_io_pool *)io->io_hdr.pool; uma_zfree(pool->zone, io); } void ctl_zero_io(union ctl_io *io) { struct ctl_io_pool *pool; if (io == NULL) return; /* * May need to preserve linked list pointers at some point too. */ pool = io->io_hdr.pool; memset(io, 0, sizeof(*io)); io->io_hdr.pool = pool; CTL_SOFTC(io) = pool->ctl_softc; TAILQ_INIT(&io->io_hdr.blocked_queue); } int ctl_expand_number(const char *buf, uint64_t *num) { char *endptr; uint64_t number; unsigned shift; number = strtoq(buf, &endptr, 0); switch (tolower((unsigned char)*endptr)) { case 'e': shift = 60; break; case 'p': shift = 50; break; case 't': shift = 40; break; case 'g': shift = 30; break; case 'm': shift = 20; break; case 'k': shift = 10; break; case 'b': case '\0': /* No unit. */ *num = number; return (0); default: /* Unrecognized unit. */ return (-1); } if ((number << shift) >> shift != number) { /* Overflow */ return (-1); } *num = number << shift; return (0); } /* * This routine could be used in the future to load default and/or saved * mode page parameters for a particuar lun. 
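ctl_expand_number() accepts a trailing b/k/m/g/t/p/e unit and rejects values whose shift would overflow. Essentially the same logic, lifted into a self-contained userland program:

#include <ctype.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse "4k", "1G", "10" etc.; return 0 on success, -1 on error. */
static int
expand_number(const char *buf, uint64_t *num)
{
	char *endptr;
	uint64_t number;
	unsigned shift;

	number = strtoull(buf, &endptr, 0);
	switch (tolower((unsigned char)*endptr)) {
	case 'e': shift = 60; break;
	case 'p': shift = 50; break;
	case 't': shift = 40; break;
	case 'g': shift = 30; break;
	case 'm': shift = 20; break;
	case 'k': shift = 10; break;
	case 'b': case '\0':
		*num = number;	/* no unit */
		return (0);
	default:
		return (-1);	/* unrecognized unit */
	}
	/* Shifting back must give the original value, or we overflowed. */
	if ((number << shift) >> shift != number)
		return (-1);
	*num = number << shift;
	return (0);
}

int
main(void)
{
	uint64_t v;

	if (expand_number("4k", &v) == 0)
		printf("4k = %" PRIu64 "\n", v);	/* 4096 */
	printf("overflow: %d\n", expand_number("99e", &v));
	return (0);
}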
*/ static int ctl_init_page_index(struct ctl_lun *lun) { int i, page_code; struct ctl_page_index *page_index; const char *value; uint64_t ival; memcpy(&lun->mode_pages.index, page_index_template, sizeof(page_index_template)); for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; page_code = page_index->page_code & SMPH_PC_MASK; switch (page_code) { case SMS_RW_ERROR_RECOVERY_PAGE: { KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], &rw_er_page_default, sizeof(rw_er_page_default)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], &rw_er_page_changeable, sizeof(rw_er_page_changeable)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], &rw_er_page_default, sizeof(rw_er_page_default)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], &rw_er_page_default, sizeof(rw_er_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.rw_er_page; break; } case SMS_FORMAT_DEVICE_PAGE: { struct scsi_format_page *format_page; KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); /* * Sectors per track are set above. Bytes per * sector need to be set here on a per-LUN basis. */ memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], &format_page_default, sizeof(format_page_default)); memcpy(&lun->mode_pages.format_page[ CTL_PAGE_CHANGEABLE], &format_page_changeable, sizeof(format_page_changeable)); memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], &format_page_default, sizeof(format_page_default)); memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], &format_page_default, sizeof(format_page_default)); format_page = &lun->mode_pages.format_page[ CTL_PAGE_CURRENT]; scsi_ulto2b(lun->be_lun->blocksize, format_page->bytes_per_sector); format_page = &lun->mode_pages.format_page[ CTL_PAGE_DEFAULT]; scsi_ulto2b(lun->be_lun->blocksize, format_page->bytes_per_sector); format_page = &lun->mode_pages.format_page[ CTL_PAGE_SAVED]; scsi_ulto2b(lun->be_lun->blocksize, format_page->bytes_per_sector); page_index->page_data = (uint8_t *)lun->mode_pages.format_page; break; } case SMS_RIGID_DISK_PAGE: { struct scsi_rigid_disk_page *rigid_disk_page; uint32_t sectors_per_cylinder; uint64_t cylinders; #ifndef __XSCALE__ int shift; #endif /* !__XSCALE__ */ KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); /* * Rotation rate and sectors per track are set * above. We calculate the cylinders here based on * capacity. Due to the number of heads and * sectors per track we're using, smaller arrays * may turn out to have 0 cylinders. Linux and * FreeBSD don't pay attention to these mode pages * to figure out capacity, but Solaris does. It * seems to deal with 0 cylinders just fine, and * works out a fake geometry based on the capacity. 
*/ memcpy(&lun->mode_pages.rigid_disk_page[ CTL_PAGE_DEFAULT], &rigid_disk_page_default, sizeof(rigid_disk_page_default)); memcpy(&lun->mode_pages.rigid_disk_page[ CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, sizeof(rigid_disk_page_changeable)); sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * CTL_DEFAULT_HEADS; /* * The divide method here will be more accurate, * probably, but results in floating point being * used in the kernel on i386 (__udivdi3()). On the * XScale, though, __udivdi3() is implemented in * software. * * The shift method for cylinder calculation is * accurate if sectors_per_cylinder is a power of * 2. Otherwise it might be slightly off -- you * might have a bit of a truncation problem. */ #ifdef __XSCALE__ cylinders = (lun->be_lun->maxlba + 1) / sectors_per_cylinder; #else for (shift = 31; shift > 0; shift--) { if (sectors_per_cylinder & (1 << shift)) break; } cylinders = (lun->be_lun->maxlba + 1) >> shift; #endif /* * We've basically got 3 bytes, or 24 bits for the * cylinder size in the mode page. If we're over, * just round down to 2^24. */ if (cylinders > 0xffffff) cylinders = 0xffffff; rigid_disk_page = &lun->mode_pages.rigid_disk_page[ CTL_PAGE_DEFAULT]; scsi_ulto3b(cylinders, rigid_disk_page->cylinders); if ((value = dnvlist_get_string(lun->be_lun->options, "rpm", NULL)) != NULL) { scsi_ulto2b(strtol(value, NULL, 0), rigid_disk_page->rotation_rate); } memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], sizeof(rigid_disk_page_default)); memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], sizeof(rigid_disk_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.rigid_disk_page; break; } case SMS_VERIFY_ERROR_RECOVERY_PAGE: { KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], &verify_er_page_default, sizeof(verify_er_page_default)); memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], &verify_er_page_changeable, sizeof(verify_er_page_changeable)); memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], &verify_er_page_default, sizeof(verify_er_page_default)); memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], &verify_er_page_default, sizeof(verify_er_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.verify_er_page; break; } case SMS_CACHING_PAGE: { struct scsi_caching_page *caching_page; KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], &caching_page_default, sizeof(caching_page_default)); memcpy(&lun->mode_pages.caching_page[ CTL_PAGE_CHANGEABLE], &caching_page_changeable, sizeof(caching_page_changeable)); memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], &caching_page_default, sizeof(caching_page_default)); caching_page = &lun->mode_pages.caching_page[ CTL_PAGE_SAVED]; value = dnvlist_get_string(lun->be_lun->options, "writecache", NULL); if (value != NULL && strcmp(value, "off") == 0) caching_page->flags1 &= ~SCP_WCE; value = dnvlist_get_string(lun->be_lun->options, "readcache", NULL); if (value != NULL && strcmp(value, "off") == 0) caching_page->flags1 |= SCP_RCD; memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], &lun->mode_pages.caching_page[CTL_PAGE_SAVED], sizeof(caching_page_default)); page_index->page_data = (uint8_t 
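The geometry code above prefers a shift to a 64-bit divide when computing cylinders, which is exact only when sectors-per-cylinder is a power of two. A small demonstration with illustrative numbers (CTL's actual defaults may differ):

#include <stdint.h>
#include <stdio.h>

/* Compare the exact divide with the power-of-two shift approximation. */
static void
cylinders(uint64_t maxlba, uint32_t spc)
{
	int shift;

	for (shift = 31; shift > 0; shift--)
		if (spc & (1u << shift))
			break;
	printf("spc %6u: divide %8ju  shift %8ju\n", (unsigned)spc,
	    (uintmax_t)((maxlba + 1) / spc),
	    (uintmax_t)((maxlba + 1) >> shift));
}

int
main(void)
{
	const uint64_t maxlba = 41943039;	/* illustrative ~20 GB disk */

	cylinders(maxlba, 32768);	/* power of two: both values agree */
	cylinders(maxlba, 16065);	/* not a power of two: they diverge */
	return (0);
}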
*)lun->mode_pages.caching_page; break; } case SMS_CONTROL_MODE_PAGE: { switch (page_index->subpage) { case SMS_SUBPAGE_PAGE_0: { struct scsi_control_page *control_page; memcpy(&lun->mode_pages.control_page[ CTL_PAGE_DEFAULT], &control_page_default, sizeof(control_page_default)); memcpy(&lun->mode_pages.control_page[ CTL_PAGE_CHANGEABLE], &control_page_changeable, sizeof(control_page_changeable)); memcpy(&lun->mode_pages.control_page[ CTL_PAGE_SAVED], &control_page_default, sizeof(control_page_default)); control_page = &lun->mode_pages.control_page[ CTL_PAGE_SAVED]; value = dnvlist_get_string(lun->be_lun->options, "reordering", NULL); if (value != NULL && strcmp(value, "unrestricted") == 0) { control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK; control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED; } memcpy(&lun->mode_pages.control_page[ CTL_PAGE_CURRENT], &lun->mode_pages.control_page[ CTL_PAGE_SAVED], sizeof(control_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.control_page; break; } case 0x01: memcpy(&lun->mode_pages.control_ext_page[ CTL_PAGE_DEFAULT], &control_ext_page_default, sizeof(control_ext_page_default)); memcpy(&lun->mode_pages.control_ext_page[ CTL_PAGE_CHANGEABLE], &control_ext_page_changeable, sizeof(control_ext_page_changeable)); memcpy(&lun->mode_pages.control_ext_page[ CTL_PAGE_SAVED], &control_ext_page_default, sizeof(control_ext_page_default)); memcpy(&lun->mode_pages.control_ext_page[ CTL_PAGE_CURRENT], &lun->mode_pages.control_ext_page[ CTL_PAGE_SAVED], sizeof(control_ext_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.control_ext_page; break; default: panic("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code); } break; } case SMS_INFO_EXCEPTIONS_PAGE: { switch (page_index->subpage) { case SMS_SUBPAGE_PAGE_0: memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], &ie_page_default, sizeof(ie_page_default)); memcpy(&lun->mode_pages.ie_page[ CTL_PAGE_CHANGEABLE], &ie_page_changeable, sizeof(ie_page_changeable)); memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], &ie_page_default, sizeof(ie_page_default)); memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], &ie_page_default, sizeof(ie_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.ie_page; break; case 0x02: { struct ctl_logical_block_provisioning_page *page; memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], &lbp_page_default, sizeof(lbp_page_default)); memcpy(&lun->mode_pages.lbp_page[ CTL_PAGE_CHANGEABLE], &lbp_page_changeable, sizeof(lbp_page_changeable)); memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], &lbp_page_default, sizeof(lbp_page_default)); page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; value = dnvlist_get_string(lun->be_lun->options, "avail-threshold", NULL); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[0].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_DEC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[0].count); } value = dnvlist_get_string(lun->be_lun->options, "used-threshold", NULL); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[1].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_INC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[1].count); } value = dnvlist_get_string(lun->be_lun->options, "pool-avail-threshold", NULL); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[2].flags |= SLBPPD_ENABLED | 
SLBPPD_ARMING_DEC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[2].count); } value = dnvlist_get_string(lun->be_lun->options, "pool-used-threshold", NULL); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[3].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_INC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[3].count); } memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], sizeof(lbp_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.lbp_page; break; } default: panic("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code); } break; } case SMS_CDDVD_CAPS_PAGE:{ KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], &cddvd_page_default, sizeof(cddvd_page_default)); memcpy(&lun->mode_pages.cddvd_page[ CTL_PAGE_CHANGEABLE], &cddvd_page_changeable, sizeof(cddvd_page_changeable)); memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], &cddvd_page_default, sizeof(cddvd_page_default)); memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], sizeof(cddvd_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.cddvd_page; break; } default: panic("invalid page code value %#x", page_code); } } return (CTL_RETVAL_COMPLETE); } static int ctl_init_log_page_index(struct ctl_lun *lun) { struct ctl_page_index *page_index; int i, j, k, prev; memcpy(&lun->log_pages.index, log_page_index_template, sizeof(log_page_index_template)); prev = -1; for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { page_index = &lun->log_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && lun->backend->lun_attr == NULL) continue; if (page_index->page_code != prev) { lun->log_pages.pages_page[j] = page_index->page_code; prev = page_index->page_code; j++; } lun->log_pages.subpages_page[k*2] = page_index->page_code; lun->log_pages.subpages_page[k*2+1] = page_index->subpage; k++; } lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; lun->log_pages.index[0].page_len = j; lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; lun->log_pages.index[1].page_len = k * 2; lun->log_pages.index[2].page_data = (uint8_t *)&lun->log_pages.temp_page; lun->log_pages.index[2].page_len = sizeof(lun->log_pages.temp_page); lun->log_pages.index[3].page_data = &lun->log_pages.lbp_page[0]; lun->log_pages.index[3].page_len = 12*CTL_NUM_LBP_PARAMS; lun->log_pages.index[4].page_data = (uint8_t *)&lun->log_pages.stat_page; lun->log_pages.index[4].page_len = sizeof(lun->log_pages.stat_page); lun->log_pages.index[5].page_data = (uint8_t *)&lun->log_pages.ie_page; lun->log_pages.index[5].page_len = sizeof(lun->log_pages.ie_page); return (CTL_RETVAL_COMPLETE); } static int hex2bin(const char *str, uint8_t *buf, int buf_size) { int i; u_char c; memset(buf, 0, buf_size); while (isspace(str[0])) str++; if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) str += 2; buf_size 
*= 2; for (i = 0; str[i] != 0 && i < buf_size; i++) { while (str[i] == '-') /* Skip dashes in UUIDs. */ str++; c = str[i]; if (isdigit(c)) c -= '0'; else if (isalpha(c)) c -= isupper(c) ? 'A' - 10 : 'a' - 10; else break; if (c >= 16) break; if ((i & 1) == 0) buf[i / 2] |= (c << 4); else buf[i / 2] |= c; } return ((i + 1) / 2); } /* * Add LUN. * * Returns 0 for success, non-zero (errno) for failure. */ int ctl_add_lun(struct ctl_be_lun *be_lun) { struct ctl_softc *ctl_softc = control_softc; struct ctl_lun *nlun, *lun; struct scsi_vpd_id_descriptor *desc; struct scsi_vpd_id_t10 *t10id; const char *eui, *naa, *scsiname, *uuid, *vendor, *value; int lun_number; int devidlen, idlen1, idlen2 = 0, len; /* * We support only Direct Access, CD-ROM or Processor LUN types. */ switch (be_lun->lun_type) { case T_DIRECT: case T_PROCESSOR: case T_CDROM: break; case T_SEQUENTIAL: case T_CHANGER: default: return (EINVAL); } lun = malloc(sizeof(*lun), M_CTL, M_WAITOK | M_ZERO); lun->pending_sense = malloc(sizeof(struct scsi_sense_data *) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); lun->pending_ua = malloc(sizeof(ctl_ua_type *) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); lun->pr_keys = malloc(sizeof(uint64_t *) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); /* Generate LUN ID. */ devidlen = max(CTL_DEVID_MIN_LEN, strnlen(be_lun->device_id, CTL_DEVID_LEN)); idlen1 = sizeof(*t10id) + devidlen; len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; scsiname = dnvlist_get_string(be_lun->options, "scsiname", NULL); if (scsiname != NULL) { idlen2 = roundup2(strlen(scsiname) + 1, 4); len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; } eui = dnvlist_get_string(be_lun->options, "eui", NULL); if (eui != NULL) { len += sizeof(struct scsi_vpd_id_descriptor) + 16; } naa = dnvlist_get_string(be_lun->options, "naa", NULL); if (naa != NULL) { len += sizeof(struct scsi_vpd_id_descriptor) + 16; } uuid = dnvlist_get_string(be_lun->options, "uuid", NULL); if (uuid != NULL) { len += sizeof(struct scsi_vpd_id_descriptor) + 18; } lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; desc->proto_codeset = SVPD_ID_CODESET_ASCII; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; desc->length = idlen1; t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; memset(t10id->vendor, ' ', sizeof(t10id->vendor)); if ((vendor = dnvlist_get_string(be_lun->options, "vendor", NULL)) == NULL) { strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); } else { strncpy(t10id->vendor, vendor, min(sizeof(t10id->vendor), strlen(vendor))); } strncpy((char *)t10id->vendor_spec_id, (char *)be_lun->device_id, devidlen); if (scsiname != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_UTF8; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_SCSI_NAME; desc->length = idlen2; strlcpy(desc->identifier, scsiname, idlen2); } if (eui != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64; desc->length = hex2bin(eui, desc->identifier, 16); desc->length = desc->length > 12 ? 16 : (desc->length > 8 ? 
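hex2bin() turns the "eui", "naa" and "uuid" option strings into raw identifier bytes, tolerating a 0x prefix and the dashes found in UUIDs. A simplified stand-alone sketch of that conversion:

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Convert a hex string ("0x" prefix and '-' separators allowed) to bytes. */
static int
hex2bin(const char *str, uint8_t *buf, int buf_size)
{
	int nibbles = 0;

	memset(buf, 0, buf_size);
	if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
		str += 2;
	for (; *str != '\0' && nibbles < buf_size * 2; str++) {
		uint8_t c = *str;

		if (c == '-')			/* dashes in UUIDs */
			continue;
		if (isdigit(c))
			c -= '0';
		else if (isxdigit(c))
			c = tolower(c) - 'a' + 10;
		else
			break;
		if ((nibbles & 1) == 0)
			buf[nibbles / 2] |= c << 4;
		else
			buf[nibbles / 2] |= c;
		nibbles++;
	}
	return ((nibbles + 1) / 2);
}

int
main(void)
{
	uint8_t id[16];
	int len = hex2bin("0x0090fb-3344", id, sizeof(id));

	for (int i = 0; i < len; i++)
		printf("%02x", id[i]);
	printf(" (%d bytes)\n", len);
	return (0);
}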
12 : 8); len -= 16 - desc->length; } if (naa != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_NAA; desc->length = hex2bin(naa, desc->identifier, 16); desc->length = desc->length > 8 ? 16 : 8; len -= 16 - desc->length; } if (uuid != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_UUID; desc->identifier[0] = 0x10; hex2bin(uuid, &desc->identifier[2], 16); desc->length = 18; } lun->lun_devid->len = len; mtx_lock(&ctl_softc->ctl_lock); /* * See if the caller requested a particular LUN number. If so, see * if it is available. Otherwise, allocate the first available LUN. */ if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { if ((be_lun->req_lun_id > (ctl_max_luns - 1)) || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { mtx_unlock(&ctl_softc->ctl_lock); if (be_lun->req_lun_id > (ctl_max_luns - 1)) { printf("ctl: requested LUN ID %d is higher " "than ctl_max_luns - 1 (%d)\n", be_lun->req_lun_id, ctl_max_luns - 1); } else { /* * XXX KDM return an error, or just assign * another LUN ID in this case?? */ printf("ctl: requested LUN ID %d is already " "in use\n", be_lun->req_lun_id); } fail: free(lun->lun_devid, M_CTL); free(lun, M_CTL); return (ENOSPC); } lun_number = be_lun->req_lun_id; } else { lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, ctl_max_luns); if (lun_number == -1) { mtx_unlock(&ctl_softc->ctl_lock); printf("ctl: can't allocate LUN, out of LUNs\n"); goto fail; } } ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); mtx_unlock(&ctl_softc->ctl_lock); mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); lun->lun = lun_number; lun->be_lun = be_lun; /* * The processor LUN is always enabled. Disk LUNs come on line * disabled, and must be enabled by the backend. */ lun->flags |= CTL_LUN_DISABLED; lun->backend = be_lun->be; be_lun->ctl_lun = lun; be_lun->lun_id = lun_number; if (be_lun->flags & CTL_LUN_FLAG_EJECTED) lun->flags |= CTL_LUN_EJECTED; if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) lun->flags |= CTL_LUN_NO_MEDIA; if (be_lun->flags & CTL_LUN_FLAG_STOPPED) lun->flags |= CTL_LUN_STOPPED; if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) lun->flags |= CTL_LUN_PRIMARY_SC; value = dnvlist_get_string(be_lun->options, "removable", NULL); if (value != NULL) { if (strcmp(value, "on") == 0) lun->flags |= CTL_LUN_REMOVABLE; } else if (be_lun->lun_type == T_CDROM) lun->flags |= CTL_LUN_REMOVABLE; lun->ctl_softc = ctl_softc; #ifdef CTL_TIME_IO lun->last_busy = getsbinuptime(); #endif TAILQ_INIT(&lun->ooa_queue); STAILQ_INIT(&lun->error_list); lun->ie_reported = 1; callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); ctl_tpc_lun_init(lun); if (lun->flags & CTL_LUN_REMOVABLE) { lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, M_CTL, M_WAITOK); } /* * Initialize the mode and log page index. */ ctl_init_page_index(lun); ctl_init_log_page_index(lun); /* Setup statistics gathering */ lun->stats.item = lun_number; /* * Now, before we insert this lun on the lun list, set the lun * inventory changed UA for all other luns. 
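LUN numbers come from ctl_lun_mask, a bitmap in which ctl_ffz() finds the first free slot and ctl_set_mask()/ctl_clear_mask() claim and release it. A compact model of that allocator:

#include <stdint.h>
#include <stdio.h>

#define MAX_IDS	64
static uint32_t id_mask[(MAX_IDS + 31) / 32];

/* First zero bit in [first, last), or -1 if all are taken. */
static int
ffz(uint32_t *mask, uint32_t first, uint32_t last)
{
	for (uint32_t i = first; i < last; i++)
		if ((mask[i / 32] & (1u << (i % 32))) == 0)
			return (i);
	return (-1);
}

static void
set_bit(uint32_t *mask, uint32_t bit)
{
	mask[bit / 32] |= 1u << (bit % 32);
}

static int
alloc_id(void)
{
	int id = ffz(id_mask, 0, MAX_IDS);

	if (id >= 0)
		set_bit(id_mask, id);
	return (id);
}

int
main(void)
{
	for (int i = 0; i < 3; i++)
		printf("allocated id %d\n", alloc_id());	/* 0, 1, 2 */
	return (0);
}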
*/ mtx_lock(&ctl_softc->ctl_lock); STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { mtx_lock(&nlun->lun_lock); ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); mtx_unlock(&nlun->lun_lock); } STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); ctl_softc->ctl_luns[lun_number] = lun; ctl_softc->num_luns++; mtx_unlock(&ctl_softc->ctl_lock); /* * We successfully added the LUN, attempt to enable it. */ if (ctl_enable_lun(lun) != 0) { printf("%s: ctl_enable_lun() failed!\n", __func__); mtx_lock(&ctl_softc->ctl_lock); STAILQ_REMOVE(&ctl_softc->lun_list, lun, ctl_lun, links); ctl_clear_mask(ctl_softc->ctl_lun_mask, lun_number); ctl_softc->ctl_luns[lun_number] = NULL; ctl_softc->num_luns--; mtx_unlock(&ctl_softc->ctl_lock); free(lun->lun_devid, M_CTL); free(lun, M_CTL); return (EIO); } return (0); } /* * Free LUN that has no active requests. */ static int ctl_free_lun(struct ctl_lun *lun) { struct ctl_softc *softc = lun->ctl_softc; struct ctl_lun *nlun; int i; KASSERT(TAILQ_EMPTY(&lun->ooa_queue), ("Freeing a LUN %p with outstanding I/O!\n", lun)); mtx_lock(&softc->ctl_lock); STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); ctl_clear_mask(softc->ctl_lun_mask, lun->lun); softc->ctl_luns[lun->lun] = NULL; softc->num_luns--; STAILQ_FOREACH(nlun, &softc->lun_list, links) { mtx_lock(&nlun->lun_lock); ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); mtx_unlock(&nlun->lun_lock); } mtx_unlock(&softc->ctl_lock); /* * Tell the backend to free resources, if this LUN has a backend. */ lun->be_lun->lun_shutdown(lun->be_lun); lun->ie_reportcnt = UINT32_MAX; callout_drain(&lun->ie_callout); ctl_tpc_lun_shutdown(lun); mtx_destroy(&lun->lun_lock); free(lun->lun_devid, M_CTL); for (i = 0; i < ctl_max_ports; i++) free(lun->pending_ua[i], M_CTL); free(lun->pending_ua, M_DEVBUF); for (i = 0; i < ctl_max_ports; i++) free(lun->pr_keys[i], M_CTL); free(lun->pr_keys, M_DEVBUF); free(lun->write_buffer, M_CTL); free(lun->prevent, M_CTL); free(lun, M_CTL); return (0); } static int ctl_enable_lun(struct ctl_lun *lun) { struct ctl_softc *softc; struct ctl_port *port, *nport; int retval; softc = lun->ctl_softc; mtx_lock(&softc->ctl_lock); mtx_lock(&lun->lun_lock); KASSERT((lun->flags & CTL_LUN_DISABLED) != 0, ("%s: LUN not disabled", __func__)); lun->flags &= ~CTL_LUN_DISABLED; mtx_unlock(&lun->lun_lock); STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || port->lun_map != NULL || port->lun_enable == NULL) continue; /* * Drop the lock while we call the FETD's enable routine. * This can lead to a callback into CTL (at least in the * case of the internal initiator frontend. 
*/ mtx_unlock(&softc->ctl_lock); retval = port->lun_enable(port->targ_lun_arg, lun->lun); mtx_lock(&softc->ctl_lock); if (retval != 0) { printf("%s: FETD %s port %d returned error " "%d for lun_enable on lun %jd\n", __func__, port->port_name, port->targ_port, retval, (intmax_t)lun->lun); } } mtx_unlock(&softc->ctl_lock); ctl_isc_announce_lun(lun); return (0); } static int ctl_disable_lun(struct ctl_lun *lun) { struct ctl_softc *softc; struct ctl_port *port; int retval; softc = lun->ctl_softc; mtx_lock(&softc->ctl_lock); mtx_lock(&lun->lun_lock); KASSERT((lun->flags & CTL_LUN_DISABLED) == 0, ("%s: LUN not enabled", __func__)); lun->flags |= CTL_LUN_DISABLED; mtx_unlock(&lun->lun_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || port->lun_map != NULL || port->lun_disable == NULL) continue; /* * Drop the lock before we call the frontend's disable * routine, to avoid lock order reversals. * * XXX KDM what happens if the frontend list changes while * we're traversing it? It's unlikely, but should be handled. */ mtx_unlock(&softc->ctl_lock); retval = port->lun_disable(port->targ_lun_arg, lun->lun); mtx_lock(&softc->ctl_lock); if (retval != 0) { printf("%s: FETD %s port %d returned error " "%d for lun_disable on lun %jd\n", __func__, port->port_name, port->targ_port, retval, (intmax_t)lun->lun); } } mtx_unlock(&softc->ctl_lock); ctl_isc_announce_lun(lun); return (0); } int ctl_start_lun(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags &= ~CTL_LUN_STOPPED; mtx_unlock(&lun->lun_lock); return (0); } int ctl_stop_lun(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_STOPPED; mtx_unlock(&lun->lun_lock); return (0); } int ctl_lun_no_media(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_NO_MEDIA; mtx_unlock(&lun->lun_lock); return (0); } int ctl_lun_has_media(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; union ctl_ha_msg msg; mtx_lock(&lun->lun_lock); lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); if (lun->flags & CTL_LUN_REMOVABLE) ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); mtx_unlock(&lun->lun_lock); if ((lun->flags & CTL_LUN_REMOVABLE) && lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { bzero(&msg.ua, sizeof(msg.ua)); msg.hdr.msg_type = CTL_MSG_UA; msg.hdr.nexus.initid = -1; msg.hdr.nexus.targ_port = -1; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.ua.ua_all = 1; msg.ua.ua_set = 1; msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), M_WAITOK); } return (0); } int ctl_lun_ejected(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_EJECTED; mtx_unlock(&lun->lun_lock); return (0); } int ctl_lun_primary(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_PRIMARY_SC; ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); mtx_unlock(&lun->lun_lock); ctl_isc_announce_lun(lun); return (0); } int ctl_lun_secondary(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags &= ~CTL_LUN_PRIMARY_SC; ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); mtx_unlock(&lun->lun_lock); 
ctl_isc_announce_lun(lun); return (0); } /* * Remove LUN. If there are active requests, wait for completion. * * Returns 0 for success, non-zero (errno) for failure. * Completion is reported to backed via the lun_shutdown() method. */ int ctl_remove_lun(struct ctl_be_lun *be_lun) { struct ctl_lun *lun; lun = (struct ctl_lun *)be_lun->ctl_lun; ctl_disable_lun(lun); mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_INVALID; /* * If there is nothing in the OOA queue, go ahead and free the LUN. * If we have something in the OOA queue, we'll free it when the * last I/O completes. */ if (TAILQ_EMPTY(&lun->ooa_queue)) { mtx_unlock(&lun->lun_lock); ctl_free_lun(lun); } else mtx_unlock(&lun->lun_lock); return (0); } void ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; union ctl_ha_msg msg; mtx_lock(&lun->lun_lock); ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE); mtx_unlock(&lun->lun_lock); if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { /* Send msg to other side. */ bzero(&msg.ua, sizeof(msg.ua)); msg.hdr.msg_type = CTL_MSG_UA; msg.hdr.nexus.initid = -1; msg.hdr.nexus.targ_port = -1; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.ua.ua_all = 1; msg.ua.ua_set = 1; msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), M_WAITOK); } } /* * Backend "memory move is complete" callback for requests that never * make it down to say RAIDCore's configuration code. */ int ctl_config_move_done(union ctl_io *io) { int retval; CTL_DEBUG_PRINT(("ctl_config_move_done\n")); KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); if ((io->io_hdr.port_status != 0) && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ io->io_hdr.port_status); } else if (io->scsiio.kern_data_resid != 0 && (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { ctl_set_invalid_field_ciu(&io->scsiio); } if (ctl_debug & CTL_DEBUG_CDB_DATA) ctl_data_print(io); if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { /* * XXX KDM just assuming a single pointer here, and not a * S/G list. If we start using S/G lists for config data, * we'll need to know how to clean them up here as well. */ if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) free(io->scsiio.kern_data_ptr, M_CTL); ctl_done(io); retval = CTL_RETVAL_COMPLETE; } else { /* * XXX KDM now we need to continue data movement. Some * options: * - call ctl_scsiio() again? We don't do this for data * writes, because for those at least we know ahead of * time where the write will go and how long it is. For * config writes, though, that information is largely * contained within the write itself, thus we need to * parse out the data again. * * - Call some other function once the data is in? */ /* * XXX KDM call ctl_scsiio() again for now, and check flag * bits to see whether we're allocated or not. */ retval = ctl_scsiio(&io->scsiio); } return (retval); } /* * This gets called by a backend driver when it is done with a * data_submit method. 
*/ void ctl_data_submit_done(union ctl_io *io) { /* * If the IO_CONT flag is set, we need to call the supplied * function to continue processing the I/O, instead of completing * the I/O just yet. * * If there is an error, though, we don't want to keep processing. * Instead, just send status back to the initiator. */ if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { io->scsiio.io_cont(io); return; } ctl_done(io); } /* * This gets called by a backend driver when it is done with a * configuration write. */ void ctl_config_write_done(union ctl_io *io) { uint8_t *buf; /* * If the IO_CONT flag is set, we need to call the supplied * function to continue processing the I/O, instead of completing * the I/O just yet. * * If there is an error, though, we don't want to keep processing. * Instead, just send status back to the initiator. */ if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { io->scsiio.io_cont(io); return; } /* * Since a configuration write can be done for commands that actually * have data allocated, like write buffer, and commands that have * no data, like start/stop unit, we need to check here. */ if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) buf = io->scsiio.kern_data_ptr; else buf = NULL; ctl_done(io); if (buf) free(buf, M_CTL); } void ctl_config_read_done(union ctl_io *io) { uint8_t *buf; /* * If there is some error -- we are done, skip data transfer. */ if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) buf = io->scsiio.kern_data_ptr; else buf = NULL; ctl_done(io); if (buf) free(buf, M_CTL); return; } /* * If the IO_CONT flag is set, we need to call the supplied * function to continue processing the I/O, instead of completing * the I/O just yet. */ if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { io->scsiio.io_cont(io); return; } ctl_datamove(io); } /* * SCSI release command. */ int ctl_scsi_release(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); uint32_t residx; CTL_DEBUG_PRINT(("ctl_scsi_release\n")); residx = ctl_get_initindex(&ctsio->io_hdr.nexus); /* * XXX KDM right now, we only support LUN reservation. We don't * support 3rd party reservations, or extent reservations, which * might actually need the parameter list. If we've gotten this * far, we've got a LUN reservation. Anything else got kicked out * above. So, according to SPC, ignore the length. */ mtx_lock(&lun->lun_lock); /* * According to SPC, it is not an error for an initiator to attempt * to release a reservation on a LUN that isn't reserved, or that * is reserved by another initiator. The reservation can only be * released, though, by the initiator who made it or by one of * several reset type events.
*/ if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) lun->flags &= ~CTL_LUN_RESERVED; mtx_unlock(&lun->lun_lock); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_scsi_reserve(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); uint32_t residx; CTL_DEBUG_PRINT(("ctl_reserve\n")); residx = ctl_get_initindex(&ctsio->io_hdr.nexus); /* * XXX KDM right now, we only support LUN reservation. We don't * support 3rd party reservations, or extent reservations, which * might actually need the parameter list. If we've gotten this * far, we've got a LUN reservation. Anything else got kicked out * above. So, according to SPC, ignore the length. */ mtx_lock(&lun->lun_lock); if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { ctl_set_reservation_conflict(ctsio); goto bailout; } /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */ if (lun->flags & CTL_LUN_PR_RESERVED) { ctl_set_success(ctsio); goto bailout; } lun->flags |= CTL_LUN_RESERVED; lun->res_idx = residx; ctl_set_success(ctsio); bailout: mtx_unlock(&lun->lun_lock); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_start_stop(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_start_stop_unit *cdb; int retval; CTL_DEBUG_PRINT(("ctl_start_stop\n")); cdb = (struct scsi_start_stop_unit *)ctsio->cdb; if ((cdb->how & SSS_PC_MASK) == 0) { if ((lun->flags & CTL_LUN_PR_RESERVED) && (cdb->how & SSS_START) == 0) { uint32_t residx; residx = ctl_get_initindex(&ctsio->io_hdr.nexus); if (ctl_get_prkey(lun, residx) == 0 || (lun->pr_res_idx != residx && lun->pr_res_type < 4)) { ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } if ((cdb->how & SSS_LOEJ) && (lun->flags & CTL_LUN_REMOVABLE) == 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 4, /*bit_valid*/ 1, /*bit*/ 1); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) && lun->prevent_count > 0) { /* "Medium removal prevented" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ? SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST, /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); } int ctl_prevent_allow(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_prevent *cdb; int retval; uint32_t initidx; CTL_DEBUG_PRINT(("ctl_prevent_allow\n")); cdb = (struct scsi_prevent *)ctsio->cdb; if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) { ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); mtx_lock(&lun->lun_lock); if ((cdb->how & PR_PREVENT) && ctl_is_set(lun->prevent, initidx) == 0) { ctl_set_mask(lun->prevent, initidx); lun->prevent_count++; } else if ((cdb->how & PR_PREVENT) == 0 && ctl_is_set(lun->prevent, initidx)) { ctl_clear_mask(lun->prevent, initidx); lun->prevent_count--; } mtx_unlock(&lun->lun_lock); retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); } /* * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but * we don't really do anything with the LBA and length fields if the user * passes them in. Instead we'll just flush out the cache for the entire * LUN. 
*/ int ctl_sync_cache(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; uint64_t starting_lba; uint32_t block_count; int retval; uint8_t byte2; CTL_DEBUG_PRINT(("ctl_sync_cache\n")); retval = 0; switch (ctsio->cdb[0]) { case SYNCHRONIZE_CACHE: { struct scsi_sync_cache *cdb; cdb = (struct scsi_sync_cache *)ctsio->cdb; starting_lba = scsi_4btoul(cdb->begin_lba); block_count = scsi_2btoul(cdb->lb_count); byte2 = cdb->byte2; break; } case SYNCHRONIZE_CACHE_16: { struct scsi_sync_cache_16 *cdb; cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; starting_lba = scsi_8btou64(cdb->begin_lba); block_count = scsi_4btoul(cdb->lb_count); byte2 = cdb->byte2; break; } default: ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); goto bailout; break; /* NOTREACHED */ } /* * We check the LBA and length, but don't do anything with them. * A SYNCHRONIZE CACHE will cause the entire cache for this lun to * get flushed. This check will just help satisfy anyone who wants * to see an error for an out of range LBA. */ if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { ctl_set_lba_out_of_range(ctsio, MAX(starting_lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); goto bailout; } lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = starting_lba; lbalen->len = block_count; lbalen->flags = byte2; retval = lun->backend->config_write((union ctl_io *)ctsio); bailout: return (retval); } int ctl_format(struct ctl_scsiio *ctsio) { struct scsi_format *cdb; int length, defect_list_len; CTL_DEBUG_PRINT(("ctl_format\n")); cdb = (struct scsi_format *)ctsio->cdb; length = 0; if (cdb->byte2 & SF_FMTDATA) { if (cdb->byte2 & SF_LONGLIST) length = sizeof(struct scsi_format_header_long); else length = sizeof(struct scsi_format_header_short); } if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) && (length > 0)) { ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); ctsio->kern_data_len = length; ctsio->kern_total_len = length; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } defect_list_len = 0; if (cdb->byte2 & SF_FMTDATA) { if (cdb->byte2 & SF_LONGLIST) { struct scsi_format_header_long *header; header = (struct scsi_format_header_long *) ctsio->kern_data_ptr; defect_list_len = scsi_4btoul(header->defect_list_len); if (defect_list_len != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); goto bailout; } } else { struct scsi_format_header_short *header; header = (struct scsi_format_header_short *) ctsio->kern_data_ptr; defect_list_len = scsi_2btoul(header->defect_list_len); if (defect_list_len != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); goto bailout; } } } ctl_set_success(ctsio); bailout: if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { free(ctsio->kern_data_ptr, M_CTL); ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; } ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_buffer(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); uint64_t buffer_offset; uint32_t len; uint8_t byte2; static uint8_t descr[4]; static uint8_t echo_descr[4] = { 0 }; CTL_DEBUG_PRINT(("ctl_read_buffer\n")); switch (ctsio->cdb[0]) { case READ_BUFFER: { struct scsi_read_buffer *cdb; cdb = (struct scsi_read_buffer 
*)ctsio->cdb; buffer_offset = scsi_3btoul(cdb->offset); len = scsi_3btoul(cdb->length); byte2 = cdb->byte2; break; } case READ_BUFFER_16: { struct scsi_read_buffer_16 *cdb; cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; buffer_offset = scsi_8btou64(cdb->offset); len = scsi_4btoul(cdb->length); byte2 = cdb->byte2; break; } default: /* This shouldn't happen. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (buffer_offset > CTL_WRITE_BUFFER_SIZE || buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 6, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { descr[0] = 0; scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); ctsio->kern_data_ptr = descr; len = min(len, sizeof(descr)); } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { ctsio->kern_data_ptr = echo_descr; len = min(len, sizeof(echo_descr)); } else { if (lun->write_buffer == NULL) { lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, M_CTL, M_WAITOK); } ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; } ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctl_set_success(ctsio); ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_write_buffer(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_write_buffer *cdb; int buffer_offset, len; CTL_DEBUG_PRINT(("ctl_write_buffer\n")); cdb = (struct scsi_write_buffer *)ctsio->cdb; len = scsi_3btoul(cdb->length); buffer_offset = scsi_3btoul(cdb->offset); if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 6, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * If we've got a kernel request that hasn't been malloced yet, * malloc it and tell the caller the data buffer is here. 
*/ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { if (lun->write_buffer == NULL) { lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, M_CTL, M_WAITOK); } ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_write_same_cont(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); struct ctl_scsiio *ctsio; struct ctl_lba_len_flags *lbalen; int retval; ctsio = &io->scsiio; ctsio->io_hdr.status = CTL_STATUS_NONE; lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba += lbalen->len; if ((lun->be_lun->maxlba + 1) - lbalen->lba <= UINT32_MAX) { ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; lbalen->len = (lun->be_lun->maxlba + 1) - lbalen->lba; } CTL_DEBUG_PRINT(("ctl_write_same_cont: calling config_write()\n")); retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); } int ctl_write_same(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; const char *val; uint64_t lba, ival; uint32_t num_blocks; int len, retval; uint8_t byte2; CTL_DEBUG_PRINT(("ctl_write_same\n")); switch (ctsio->cdb[0]) { case WRITE_SAME_10: { struct scsi_write_same_10 *cdb; cdb = (struct scsi_write_same_10 *)ctsio->cdb; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); byte2 = cdb->byte2; break; } case WRITE_SAME_16: { struct scsi_write_same_16 *cdb; cdb = (struct scsi_write_same_16 *)ctsio->cdb; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); byte2 = cdb->byte2; break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* ANCHOR flag can be used only together with UNMAP */ if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Zero number of blocks means "to the last logical block" */ if (num_blocks == 0) { ival = UINT64_MAX; val = dnvlist_get_string(lun->be_lun->options, "write_same_max_lba", NULL); if (val != NULL) ctl_expand_number(val, &ival); if ((lun->be_lun->maxlba + 1) - lba > ival) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ ctsio->cdb[0] == WRITE_SAME_10 ? 
7 : 10, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; ctsio->io_cont = ctl_write_same_cont; num_blocks = 1 << 31; } else num_blocks = (lun->be_lun->maxlba + 1) - lba; } len = lun->be_lun->blocksize; /* * If we've got a kernel request that hasn't been malloced yet, * malloc it and tell the caller the data buffer is here. */ if ((byte2 & SWS_NDOB) == 0 && (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; lbalen->flags = byte2; retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); } int ctl_unmap(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_unmap *cdb; struct ctl_ptr_len_flags *ptrlen; struct scsi_unmap_header *hdr; struct scsi_unmap_desc *buf, *end, *endnz, *range; uint64_t lba; uint32_t num_blocks; int len, retval; uint8_t byte2; CTL_DEBUG_PRINT(("ctl_unmap\n")); cdb = (struct scsi_unmap *)ctsio->cdb; len = scsi_2btoul(cdb->length); byte2 = cdb->byte2; /* * If we've got a kernel request that hasn't been malloced yet, * malloc it and tell the caller the data buffer is here. */ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } len = ctsio->kern_total_len - ctsio->kern_data_resid; hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; if (len < sizeof (*hdr) || len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); goto done; } len = scsi_2btoul(hdr->desc_length); buf = (struct scsi_unmap_desc *)(hdr + 1); end = buf + len / sizeof(*buf); endnz = buf; for (range = buf; range < end; range++) { lba = scsi_8btou64(range->lba); num_blocks = scsi_4btoul(range->length); if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (num_blocks != 0) endnz = range + 1; } /* * Block backend can not handle zero last range. * Filter it out and return if there is nothing left. 
*/ len = (uint8_t *)endnz - (uint8_t *)buf; if (len == 0) { ctl_set_success(ctsio); goto done; } mtx_lock(&lun->lun_lock); ptrlen = (struct ctl_ptr_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; ptrlen->ptr = (void *)buf; ptrlen->len = len; ptrlen->flags = byte2; ctl_try_unblock_others(lun, (union ctl_io *)ctsio, FALSE); mtx_unlock(&lun->lun_lock); retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); done: if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { free(ctsio->kern_data_ptr, M_CTL); ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; } ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_default_page_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, uint8_t *page_ptr) { struct ctl_lun *lun = CTL_LUN(ctsio); uint8_t *current_cp; int set_ua; uint32_t initidx; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); set_ua = 0; current_cp = (page_index->page_data + (page_index->page_len * CTL_PAGE_CURRENT)); mtx_lock(&lun->lun_lock); if (memcmp(current_cp, page_ptr, page_index->page_len)) { memcpy(current_cp, page_ptr, page_index->page_len); set_ua = 1; } if (set_ua != 0) ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); mtx_unlock(&lun->lun_lock); if (set_ua) { ctl_isc_announce_mode(lun, ctl_get_initindex(&ctsio->io_hdr.nexus), page_index->page_code, page_index->subpage); } return (CTL_RETVAL_COMPLETE); } static void ctl_ie_timer(void *arg) { struct ctl_lun *lun = arg; uint64_t t; if (lun->ie_asc == 0) return; if (lun->MODE_IE.mrie == SIEP_MRIE_UA) ctl_est_ua_all(lun, -1, CTL_UA_IE); else lun->ie_reported = 0; if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { lun->ie_reportcnt++; t = scsi_4btoul(lun->MODE_IE.interval_timer); if (t == 0 || t == UINT32_MAX) t = 3000; /* 5 min */ callout_schedule(&lun->ie_callout, t * hz / 10); } } int ctl_ie_page_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, uint8_t *page_ptr) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_info_exceptions_page *pg; uint64_t t; (void)ctl_default_page_handler(ctsio, page_index, page_ptr); pg = (struct scsi_info_exceptions_page *)page_ptr; mtx_lock(&lun->lun_lock); if (pg->info_flags & SIEP_FLAGS_TEST) { lun->ie_asc = 0x5d; lun->ie_ascq = 0xff; if (pg->mrie == SIEP_MRIE_UA) { ctl_est_ua_all(lun, -1, CTL_UA_IE); lun->ie_reported = 1; } else { ctl_clr_ua_all(lun, -1, CTL_UA_IE); lun->ie_reported = -1; } lun->ie_reportcnt = 1; if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { lun->ie_reportcnt++; t = scsi_4btoul(pg->interval_timer); if (t == 0 || t == UINT32_MAX) t = 3000; /* 5 min */ callout_reset(&lun->ie_callout, t * hz / 10, ctl_ie_timer, lun); } } else { lun->ie_asc = 0; lun->ie_ascq = 0; lun->ie_reported = 1; ctl_clr_ua_all(lun, -1, CTL_UA_IE); lun->ie_reportcnt = UINT32_MAX; callout_stop(&lun->ie_callout); } mtx_unlock(&lun->lun_lock); return (CTL_RETVAL_COMPLETE); } static int ctl_do_mode_select(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); struct scsi_mode_page_header *page_header; struct ctl_page_index *page_index; struct ctl_scsiio *ctsio; int page_len, page_len_offset, page_len_size; union ctl_modepage_info *modepage_info; uint16_t *len_left, *len_used; int retval, i; ctsio = &io->scsiio; page_index = NULL; page_len = 0; modepage_info = (union ctl_modepage_info *) ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; len_left = &modepage_info->header.len_left; len_used = &modepage_info->header.len_used; do_next_page: page_header = (struct scsi_mode_page_header *) (ctsio->kern_data_ptr + *len_used); 
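/* Validate the remaining parameter list length before trusting the page header: an empty list means we are done, and anything shorter than a (sub)page header is a parameter list length error. */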
if (*len_left == 0) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } else if (*len_left < sizeof(struct scsi_mode_page_header)) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } else if ((page_header->page_code & SMPH_SPF) && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * XXX KDM should we do something with the block descriptor? */ for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; if ((page_index->page_code & SMPH_PC_MASK) != (page_header->page_code & SMPH_PC_MASK)) continue; /* * If neither page has a subpage code, then we've got a * match. */ if (((page_index->page_code & SMPH_SPF) == 0) && ((page_header->page_code & SMPH_SPF) == 0)) { page_len = page_header->page_length; break; } /* * If both pages have subpages, then the subpage numbers * have to match. */ if ((page_index->page_code & SMPH_SPF) && (page_header->page_code & SMPH_SPF)) { struct scsi_mode_page_header_sp *sph; sph = (struct scsi_mode_page_header_sp *)page_header; if (page_index->subpage == sph->subpage) { page_len = scsi_2btoul(sph->page_length); break; } } } /* * If we couldn't find the page, or if we don't have a mode select * handler for it, send back an error to the user. */ if ((i >= CTL_NUM_MODE_PAGES) || (page_index->select_handler == NULL)) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ *len_used, /*bit_valid*/ 0, /*bit*/ 0); free(ctsio->kern_data_ptr, M_CTL); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (page_index->page_code & SMPH_SPF) { page_len_offset = 2; page_len_size = 2; } else { page_len_size = 1; page_len_offset = 1; } /* * If the length the initiator gives us isn't the one we specify in * the mode page header, or if they didn't specify enough data in * the CDB to avoid truncating this page, kick out the request. */ if (page_len != page_index->page_len - page_len_offset - page_len_size) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ *len_used + page_len_offset, /*bit_valid*/ 0, /*bit*/ 0); free(ctsio->kern_data_ptr, M_CTL); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (*len_left < page_index->page_len) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Run through the mode page, checking to make sure that the bits * the user changed are actually legal for him to change. */ for (i = 0; i < page_index->page_len; i++) { uint8_t *user_byte, *change_mask, *current_byte; int bad_bit; int j; user_byte = (uint8_t *)page_header + i; change_mask = page_index->page_data + (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; current_byte = page_index->page_data + (page_index->page_len * CTL_PAGE_CURRENT) + i; /* * Check to see whether the user set any bits in this byte * that he is not allowed to set. 
*/ if ((*user_byte & ~(*change_mask)) == (*current_byte & ~(*change_mask))) continue; /* * Go through bit by bit to determine which one is illegal. */ bad_bit = 0; for (j = 7; j >= 0; j--) { if ((((1 << j) & ~(*change_mask)) & *user_byte) != (((1 << j) & ~(*change_mask)) & *current_byte)) { bad_bit = j; break; } } ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ *len_used + i, /*bit_valid*/ 1, /*bit*/ bad_bit); free(ctsio->kern_data_ptr, M_CTL); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Decrement these before we call the page handler, since we may * end up getting called back one way or another before the handler * returns to this context. */ *len_left -= page_index->page_len; *len_used += page_index->page_len; retval = page_index->select_handler(ctsio, page_index, (uint8_t *)page_header); /* * If the page handler returns CTL_RETVAL_QUEUED, then we need to * wait until this queued command completes to finish processing * the mode page. If it returns anything other than * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have * already set the sense information, freed the data pointer, and * completed the io for us. */ if (retval != CTL_RETVAL_COMPLETE) goto bailout_no_done; /* * If the initiator sent us more than one page, parse the next one. */ if (*len_left > 0) goto do_next_page; ctl_set_success(ctsio); free(ctsio->kern_data_ptr, M_CTL); ctl_done((union ctl_io *)ctsio); bailout_no_done: return (CTL_RETVAL_COMPLETE); } int ctl_mode_select(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); union ctl_modepage_info *modepage_info; int bd_len, i, header_size, param_len, rtd; uint32_t initidx; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); switch (ctsio->cdb[0]) { case MODE_SELECT_6: { struct scsi_mode_select_6 *cdb; cdb = (struct scsi_mode_select_6 *)ctsio->cdb; rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; param_len = cdb->length; header_size = sizeof(struct scsi_mode_header_6); break; } case MODE_SELECT_10: { struct scsi_mode_select_10 *cdb; cdb = (struct scsi_mode_select_10 *)ctsio->cdb; rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; param_len = scsi_2btoul(cdb->length); header_size = sizeof(struct scsi_mode_header_10); break; } default: ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (rtd) { if (param_len != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, /*command*/ 1, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Revert to defaults. */ ctl_init_page_index(lun); mtx_lock(&lun->lun_lock); ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); mtx_unlock(&lun->lun_lock); for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { ctl_isc_announce_mode(lun, -1, lun->mode_pages.index[i].page_code & SMPH_PC_MASK, lun->mode_pages.index[i].subpage); } ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * From SPC-3: * "A parameter list length of zero indicates that the Data-Out Buffer * shall be empty. This condition shall not be considered as an error." */ if (param_len == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Since we'll hit this the first time through, prior to * allocation, we don't need to free a data buffer here. */ if (param_len < header_size) { ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Allocate the data buffer and grab the user's data.
In theory, * we shouldn't have to sanity check the parameter list length here * because the maximum size is 64K. We should be able to malloc * that much without too many problems. */ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); ctsio->kern_data_len = param_len; ctsio->kern_total_len = param_len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } switch (ctsio->cdb[0]) { case MODE_SELECT_6: { struct scsi_mode_header_6 *mh6; mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; bd_len = mh6->blk_desc_len; break; } case MODE_SELECT_10: { struct scsi_mode_header_10 *mh10; mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; bd_len = scsi_2btoul(mh10->blk_desc_len); break; } default: panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); } if (param_len < (header_size + bd_len)) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Set the IO_CONT flag, so that if this I/O gets passed to * ctl_config_write_done(), it'll get passed back to * ctl_do_mode_select() for further processing, or completion if * we're all done. */ ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; ctsio->io_cont = ctl_do_mode_select; modepage_info = (union ctl_modepage_info *) ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; memset(modepage_info, 0, sizeof(*modepage_info)); modepage_info->header.len_left = param_len - header_size - bd_len; modepage_info->header.len_used = header_size + bd_len; return (ctl_do_mode_select((union ctl_io *)ctsio)); } int ctl_mode_sense(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); int pc, page_code, llba, subpage; int alloc_len, page_len, header_len, bd_len, total_len; void *block_desc; struct ctl_page_index *page_index; llba = 0; CTL_DEBUG_PRINT(("ctl_mode_sense\n")); switch (ctsio->cdb[0]) { case MODE_SENSE_6: { struct scsi_mode_sense_6 *cdb; cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; header_len = sizeof(struct scsi_mode_hdr_6); if (cdb->byte2 & SMS_DBD) bd_len = 0; else bd_len = sizeof(struct scsi_mode_block_descr); header_len += bd_len; pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; page_code = cdb->page & SMS_PAGE_CODE; subpage = cdb->subpage; alloc_len = cdb->length; break; } case MODE_SENSE_10: { struct scsi_mode_sense_10 *cdb; cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; header_len = sizeof(struct scsi_mode_hdr_10); if (cdb->byte2 & SMS_DBD) { bd_len = 0; } else if (lun->be_lun->lun_type == T_DIRECT) { if (cdb->byte2 & SMS10_LLBAA) { llba = 1; bd_len = sizeof(struct scsi_mode_block_descr_dlong); } else bd_len = sizeof(struct scsi_mode_block_descr_dshort); } else bd_len = sizeof(struct scsi_mode_block_descr); header_len += bd_len; pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; page_code = cdb->page & SMS_PAGE_CODE; subpage = cdb->subpage; alloc_len = scsi_2btoul(cdb->length); break; } default: ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* * We have to make a first pass through to calculate the size of * the pages that match the user's query. Then we allocate enough * memory to hold it, and actually copy the data into the buffer. 
*/ switch (page_code) { case SMS_ALL_PAGES_PAGE: { u_int i; page_len = 0; /* * At the moment, values other than 0 and 0xff here are * reserved according to SPC-3. */ if ((subpage != SMS_SUBPAGE_PAGE_0) && (subpage != SMS_SUBPAGE_ALL)) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 3, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; /* Make sure the page is supported for this dev type */ if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* * We don't use this subpage if the user didn't * request all subpages. */ if ((page_index->subpage != 0) && (subpage == SMS_SUBPAGE_PAGE_0)) continue; page_len += page_index->page_len; } break; } default: { u_int i; page_len = 0; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; /* Make sure the page is supported for this dev type */ if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* Look for the right page code */ if ((page_index->page_code & SMPH_PC_MASK) != page_code) continue; /* Look for the right subpage or the subpage wildcard*/ if ((page_index->subpage != subpage) && (subpage != SMS_SUBPAGE_ALL)) continue; page_len += page_index->page_len; } if (page_len == 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 5); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } break; } } total_len = header_len + page_len; ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; switch (ctsio->cdb[0]) { case MODE_SENSE_6: { struct scsi_mode_hdr_6 *header; header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; header->datalen = MIN(total_len - 1, 254); if (lun->be_lun->lun_type == T_DIRECT) { header->dev_specific = 0x10; /* DPOFUA */ if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) header->dev_specific |= 0x80; /* WP */ } header->block_descr_len = bd_len; block_desc = &header[1]; break; } case MODE_SENSE_10: { struct scsi_mode_hdr_10 *header; int datalen; header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; datalen = MIN(total_len - 2, 65533); scsi_ulto2b(datalen, header->datalen); if (lun->be_lun->lun_type == T_DIRECT) { header->dev_specific = 0x10; /* DPOFUA */ if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) header->dev_specific |= 0x80; /* WP */ } if (llba) header->flags |= SMH_LONGLBA; scsi_ulto2b(bd_len, header->block_descr_len); block_desc = &header[1]; break; } default: panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); } /* * If we've got a disk, use its blocksize in the block * descriptor. Otherwise, just set it to 0. 
*/ if (bd_len > 0) { if (lun->be_lun->lun_type == T_DIRECT) { if (llba) { struct scsi_mode_block_descr_dlong *bd = block_desc; if (lun->be_lun->maxlba != 0) scsi_u64to8b(lun->be_lun->maxlba + 1, bd->num_blocks); scsi_ulto4b(lun->be_lun->blocksize, bd->block_len); } else { struct scsi_mode_block_descr_dshort *bd = block_desc; if (lun->be_lun->maxlba != 0) scsi_ulto4b(MIN(lun->be_lun->maxlba+1, UINT32_MAX), bd->num_blocks); scsi_ulto3b(lun->be_lun->blocksize, bd->block_len); } } else { struct scsi_mode_block_descr *bd = block_desc; scsi_ulto3b(0, bd->block_len); } } switch (page_code) { case SMS_ALL_PAGES_PAGE: { int i, data_used; data_used = header_len; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { struct ctl_page_index *page_index; page_index = &lun->mode_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* * We don't use this subpage if the user didn't * request all subpages. We already checked (above) * to make sure the user only specified a subpage * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. */ if ((page_index->subpage != 0) && (subpage == SMS_SUBPAGE_PAGE_0)) continue; /* * Call the handler, if it exists, to update the * page to the latest values. */ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index,pc); memcpy(ctsio->kern_data_ptr + data_used, page_index->page_data + (page_index->page_len * pc), page_index->page_len); data_used += page_index->page_len; } break; } default: { int i, data_used; data_used = header_len; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { struct ctl_page_index *page_index; page_index = &lun->mode_pages.index[i]; /* Look for the right page code */ if ((page_index->page_code & SMPH_PC_MASK) != page_code) continue; /* Look for the right subpage or the subpage wildcard*/ if ((page_index->subpage != subpage) && (subpage != SMS_SUBPAGE_ALL)) continue; /* Make sure the page is supported for this dev type */ if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* * Call the handler, if it exists, to update the * page to the latest values. 
*/ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index,pc); memcpy(ctsio->kern_data_ptr + data_used, page_index->page_data + (page_index->page_len * pc), page_index->page_len); data_used += page_index->page_len; } break; } } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_temp_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_log_temperature *data; const char *value; data = (struct scsi_log_temperature *)page_index->page_data; scsi_ulto2b(SLP_TEMPERATURE, data->hdr.param_code); data->hdr.param_control = SLP_LBIN; data->hdr.param_len = sizeof(struct scsi_log_temperature) - sizeof(struct scsi_log_param_header); if ((value = dnvlist_get_string(lun->be_lun->options, "temperature", NULL)) != NULL) data->temperature = strtol(value, NULL, 0); else data->temperature = 0xff; data++; scsi_ulto2b(SLP_REFTEMPERATURE, data->hdr.param_code); data->hdr.param_control = SLP_LBIN; data->hdr.param_len = sizeof(struct scsi_log_temperature) - sizeof(struct scsi_log_param_header); if ((value = dnvlist_get_string(lun->be_lun->options, "reftemperature", NULL)) != NULL) data->temperature = strtol(value, NULL, 0); else data->temperature = 0xff; return (0); } int ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_log_param_header *phdr; uint8_t *data; uint64_t val; data = page_index->page_data; if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun, "blocksavail")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x0001, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun, "blocksused")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x0002, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x01; /* per-LUN */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun, "poolblocksavail")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x00f1, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun, "poolblocksused")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x00f2, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } page_index->page_len = data - page_index->page_data; return (0); } int ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun = CTL_LUN(ctsio); struct stat_page *data; struct bintime *t; data = (struct stat_page *)page_index->page_data; scsi_ulto2b(SLP_SAP, 
data->sap.hdr.param_code); data->sap.hdr.param_control = SLP_LBIN; data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - sizeof(struct scsi_log_param_header); scsi_u64to8b(lun->stats.operations[CTL_STATS_READ], data->sap.read_num); scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE], data->sap.write_num); if (lun->be_lun->blocksize > 0) { scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] / lun->be_lun->blocksize, data->sap.recvieved_lba); scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] / lun->be_lun->blocksize, data->sap.transmitted_lba); } t = &lun->stats.time[CTL_STATS_READ]; scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), data->sap.read_int); t = &lun->stats.time[CTL_STATS_WRITE]; scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), data->sap.write_int); scsi_u64to8b(0, data->sap.weighted_num); scsi_u64to8b(0, data->sap.weighted_int); scsi_ulto2b(SLP_IT, data->it.hdr.param_code); data->it.hdr.param_control = SLP_LBIN; data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - sizeof(struct scsi_log_param_header); #ifdef CTL_TIME_IO scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); #endif scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); data->ti.hdr.param_control = SLP_LBIN; data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - sizeof(struct scsi_log_param_header); scsi_ulto4b(3, data->ti.exponent); scsi_ulto4b(1, data->ti.integer); return (0); } int ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_log_informational_exceptions *data; const char *value; data = (struct scsi_log_informational_exceptions *)page_index->page_data; scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code); data->hdr.param_control = SLP_LBIN; data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) - sizeof(struct scsi_log_param_header); data->ie_asc = lun->ie_asc; data->ie_ascq = lun->ie_ascq; if ((value = dnvlist_get_string(lun->be_lun->options, "temperature", NULL)) != NULL) data->temperature = strtol(value, NULL, 0); else data->temperature = 0xff; return (0); } int ctl_log_sense(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); int i, pc, page_code, subpage; int alloc_len, total_len; struct ctl_page_index *page_index; struct scsi_log_sense *cdb; struct scsi_log_header *header; CTL_DEBUG_PRINT(("ctl_log_sense\n")); cdb = (struct scsi_log_sense *)ctsio->cdb; pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; page_code = cdb->page & SLS_PAGE_CODE; subpage = cdb->subpage; alloc_len = scsi_2btoul(cdb->length); page_index = NULL; for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { page_index = &lun->log_pages.index[i]; /* Look for the right page code */ if ((page_index->page_code & SL_PAGE_CODE) != page_code) continue; /* Look for the right subpage or the subpage wildcard*/ if (page_index->subpage != subpage) continue; break; } if (i >= CTL_NUM_LOG_PAGES) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(struct scsi_log_header) + page_index->page_len; ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; header = (struct scsi_log_header *)ctsio->kern_data_ptr; header->page = page_index->page_code; if (page_index->page_code ==
SLS_LOGICAL_BLOCK_PROVISIONING) header->page |= SL_DS; if (page_index->subpage) { header->page |= SL_SPF; header->subpage = page_index->subpage; } scsi_ulto2b(page_index->page_len, header->datalen); /* * Call the handler, if it exists, to update the * page to the latest values. */ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index, pc); memcpy(header + 1, page_index->page_data, page_index->page_len); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_capacity(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_read_capacity *cdb; struct scsi_read_capacity_data *data; uint32_t lba; CTL_DEBUG_PRINT(("ctl_read_capacity\n")); cdb = (struct scsi_read_capacity *)ctsio->cdb; lba = scsi_4btoul(cdb->addr); if (((cdb->pmi & SRC_PMI) == 0) && (lba != 0)) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; ctsio->kern_data_len = sizeof(*data); ctsio->kern_total_len = sizeof(*data); ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * If the maximum LBA is greater than 0xfffffffe, the user must * issue a SERVICE ACTION IN (16) command, with the read capacity * service action set. */ if (lun->be_lun->maxlba > 0xfffffffe) scsi_ulto4b(0xffffffff, data->addr); else scsi_ulto4b(lun->be_lun->maxlba, data->addr); /* * XXX KDM this may not be 512 bytes... */ scsi_ulto4b(lun->be_lun->blocksize, data->length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_capacity_16(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_read_capacity_16 *cdb; struct scsi_read_capacity_data_long *data; uint64_t lba; uint32_t alloc_len; CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; alloc_len = scsi_4btoul(cdb->alloc_len); lba = scsi_8btou64(cdb->addr); if ((cdb->reladr & SRC16_PMI) && (lba != 0)) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(sizeof(*data), alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; scsi_u64to8b(lun->be_lun->maxlba, data->addr); /* XXX KDM this may not be 512 bytes...
*/ scsi_ulto4b(lun->be_lun->blocksize, data->length); data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_get_lba_status(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_get_lba_status *cdb; struct scsi_get_lba_status_data *data; struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t alloc_len, total_len; int retval; CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); cdb = (struct scsi_get_lba_status *)ctsio->cdb; lba = scsi_8btou64(cdb->addr); alloc_len = scsi_4btoul(cdb->alloc_len); if (lba > lun->be_lun->maxlba) { ctl_set_lba_out_of_range(ctsio, lba); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(*data) + sizeof(data->descr[0]); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* Fill dummy data in case backend can't tell anything. */ scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); scsi_u64to8b(lba, data->descr[0].addr); scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), data->descr[0].length); data->descr[0].status = 0; /* Mapped or unknown. */ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = total_len; lbalen->flags = 0; retval = lun->backend->config_read((union ctl_io *)ctsio); return (retval); } int ctl_read_defect(struct ctl_scsiio *ctsio) { struct scsi_read_defect_data_10 *ccb10; struct scsi_read_defect_data_12 *ccb12; struct scsi_read_defect_data_hdr_10 *data10; struct scsi_read_defect_data_hdr_12 *data12; uint32_t alloc_len, data_len; uint8_t format; CTL_DEBUG_PRINT(("ctl_read_defect\n")); if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; format = ccb10->format; alloc_len = scsi_2btoul(ccb10->alloc_length); data_len = sizeof(*data10); } else { ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; format = ccb12->format; alloc_len = scsi_4btoul(ccb12->alloc_length); data_len = sizeof(*data12); } if (alloc_len == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { data10 = (struct scsi_read_defect_data_hdr_10 *) ctsio->kern_data_ptr; data10->format = format; scsi_ulto2b(0, data10->length); } else { data12 = (struct scsi_read_defect_data_hdr_12 *) ctsio->kern_data_ptr; data12->format = format; scsi_ulto2b(0, data12->generation); scsi_ulto4b(0, data12->length); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_report_ident_info(struct ctl_scsiio *ctsio) { struct 
ctl_lun *lun = CTL_LUN(ctsio); struct scsi_report_ident_info *cdb; struct scsi_report_ident_info_data *rii_ptr; struct scsi_report_ident_info_descr *riid_ptr; const char *oii, *otii; int retval, alloc_len, total_len = 0, len = 0; CTL_DEBUG_PRINT(("ctl_report_ident_info\n")); cdb = (struct scsi_report_ident_info *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; total_len = sizeof(struct scsi_report_ident_info_data); switch (cdb->type) { case RII_LUII: oii = dnvlist_get_string(lun->be_lun->options, "ident_info", NULL); if (oii) len = strlen(oii); /* Approximately */ break; case RII_LUTII: otii = dnvlist_get_string(lun->be_lun->options, "text_ident_info", NULL); if (otii) len = strlen(otii) + 1; /* NULL-terminated */ break; case RII_IIS: len = 2 * sizeof(struct scsi_report_ident_info_descr); break; default: ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 11, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return(retval); } total_len += len; alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; rii_ptr = (struct scsi_report_ident_info_data *)ctsio->kern_data_ptr; switch (cdb->type) { case RII_LUII: if (oii) { if (oii[0] == '0' && oii[1] == 'x') len = hex2bin(oii, (uint8_t *)(rii_ptr + 1), len); else strncpy((uint8_t *)(rii_ptr + 1), oii, len); } break; case RII_LUTII: if (otii) strlcpy((uint8_t *)(rii_ptr + 1), otii, len); break; case RII_IIS: riid_ptr = (struct scsi_report_ident_info_descr *)(rii_ptr + 1); riid_ptr->type = RII_LUII; scsi_ulto2b(0xffff, riid_ptr->length); riid_ptr++; riid_ptr->type = RII_LUTII; scsi_ulto2b(0xffff, riid_ptr->length); } scsi_ulto2b(len, rii_ptr->length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return(retval); } int ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_maintenance_in *cdb; int retval; int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; int num_ha_groups, num_target_ports, shared_group; struct ctl_port *port; struct scsi_target_group_data *rtg_ptr; struct scsi_target_group_data_extended *rtg_ext_ptr; struct scsi_target_port_group_descriptor *tpg_desc; CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); cdb = (struct scsi_maintenance_in *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; switch (cdb->byte2 & STG_PDF_MASK) { case STG_PDF_LENGTH: ext = 0; break; case STG_PDF_EXTENDED: ext = 1; break; default: ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 5); ctl_done((union ctl_io *)ctsio); return(retval); } num_target_ports = 0; shared_group = (softc->is_single != 0); mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; num_target_ports++; if (port->status & CTL_PORT_STATUS_HA_SHARED) shared_group = 1; } mtx_unlock(&softc->ctl_lock); num_ha_groups = (softc->is_single) ? 
0 : NUM_HA_SHELVES; if (ext) total_len = sizeof(struct scsi_target_group_data_extended); else total_len = sizeof(struct scsi_target_group_data); total_len += sizeof(struct scsi_target_port_group_descriptor) * (shared_group + num_ha_groups) + sizeof(struct scsi_target_port_descriptor) * num_target_ports; alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; if (ext) { rtg_ext_ptr = (struct scsi_target_group_data_extended *) ctsio->kern_data_ptr; scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); rtg_ext_ptr->format_type = 0x10; rtg_ext_ptr->implicit_transition_time = 0; tpg_desc = &rtg_ext_ptr->groups[0]; } else { rtg_ptr = (struct scsi_target_group_data *) ctsio->kern_data_ptr; scsi_ulto4b(total_len - 4, rtg_ptr->length); tpg_desc = &rtg_ptr->groups[0]; } mtx_lock(&softc->ctl_lock); pg = softc->port_min / softc->port_cnt; if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { /* Some shelf is known to be primary. */ if (softc->ha_link == CTL_HA_LINK_OFFLINE) os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) os = TPG_ASYMMETRIC_ACCESS_STANDBY; else os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; if (lun->flags & CTL_LUN_PRIMARY_SC) { ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } else { ts = os; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } } else { /* No known primary shelf. */ if (softc->ha_link == CTL_HA_LINK_OFFLINE) { ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } else { ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; } } if (shared_group) { tpg_desc->pref_state = ts; tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | TPG_U_SUP | TPG_T_SUP; scsi_ulto2b(1, tpg_desc->target_port_group); tpg_desc->status = TPG_IMPLICIT; pc = 0; STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (!softc->is_single && (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) continue; if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. relative_target_port_identifier); pc++; } tpg_desc->target_port_count = pc; tpg_desc = (struct scsi_target_port_group_descriptor *) &tpg_desc->descriptors[pc]; } for (g = 0; g < num_ha_groups; g++) { tpg_desc->pref_state = (g == pg) ? ts : os; tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | TPG_U_SUP | TPG_T_SUP; scsi_ulto2b(2 + g, tpg_desc->target_port_group); tpg_desc->status = TPG_IMPLICIT; pc = 0; STAILQ_FOREACH(port, &softc->port_list, links) { if (port->targ_port < g * softc->port_cnt || port->targ_port >= (g + 1) * softc->port_cnt) continue; if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (port->status & CTL_PORT_STATUS_HA_SHARED) continue; if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 
relative_target_port_identifier); pc++; } tpg_desc->target_port_count = pc; tpg_desc = (struct scsi_target_port_group_descriptor *) &tpg_desc->descriptors[pc]; } mtx_unlock(&softc->ctl_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return(retval); } int ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_report_supported_opcodes *cdb; const struct ctl_cmd_entry *entry, *sentry; struct scsi_report_supported_opcodes_all *all; struct scsi_report_supported_opcodes_descr *descr; struct scsi_report_supported_opcodes_one *one; int retval; int alloc_len, total_len; int opcode, service_action, i, j, num; CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; opcode = cdb->requested_opcode; service_action = scsi_2btoul(cdb->requested_service_action); switch (cdb->options & RSO_OPTIONS_MASK) { case RSO_OPTIONS_ALL: num = 0; for (i = 0; i < 256; i++) { entry = &ctl_cmd_table[i]; if (entry->flags & CTL_CMD_FLAG_SA5) { for (j = 0; j < 32; j++) { sentry = &((const struct ctl_cmd_entry *) entry->execute)[j]; if (ctl_cmd_applicable( lun->be_lun->lun_type, sentry)) num++; } } else { if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) num++; } } total_len = sizeof(struct scsi_report_supported_opcodes_all) + num * sizeof(struct scsi_report_supported_opcodes_descr); break; case RSO_OPTIONS_OC: if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; break; case RSO_OPTIONS_OC_SA: if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || service_action >= 32) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* FALLTHROUGH */ case RSO_OPTIONS_OC_ASA: total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; break; default: ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; switch (cdb->options & RSO_OPTIONS_MASK) { case RSO_OPTIONS_ALL: all = (struct scsi_report_supported_opcodes_all *) ctsio->kern_data_ptr; num = 0; for (i = 0; i < 256; i++) { entry = &ctl_cmd_table[i]; if (entry->flags & CTL_CMD_FLAG_SA5) { for (j = 0; j < 32; j++) { sentry = &((const struct ctl_cmd_entry *) entry->execute)[j]; if (!ctl_cmd_applicable( lun->be_lun->lun_type, sentry)) continue; descr = &all->descr[num++]; descr->opcode = i; scsi_ulto2b(j, descr->service_action); descr->flags = RSO_SERVACTV; scsi_ulto2b(sentry->length, descr->cdb_length); } } else { if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) continue; descr = &all->descr[num++]; descr->opcode = i; scsi_ulto2b(0, descr->service_action); descr->flags = 0; scsi_ulto2b(entry->length, descr->cdb_length); } } scsi_ulto4b( num * sizeof(struct 
scsi_report_supported_opcodes_descr), all->length); break; case RSO_OPTIONS_OC: one = (struct scsi_report_supported_opcodes_one *) ctsio->kern_data_ptr; entry = &ctl_cmd_table[opcode]; goto fill_one; case RSO_OPTIONS_OC_SA: one = (struct scsi_report_supported_opcodes_one *) ctsio->kern_data_ptr; entry = &ctl_cmd_table[opcode]; entry = &((const struct ctl_cmd_entry *) entry->execute)[service_action]; fill_one: if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { one->support = 3; scsi_ulto2b(entry->length, one->cdb_length); one->cdb_usage[0] = opcode; memcpy(&one->cdb_usage[1], entry->usage, entry->length - 1); } else one->support = 1; break; case RSO_OPTIONS_OC_ASA: one = (struct scsi_report_supported_opcodes_one *) ctsio->kern_data_ptr; entry = &ctl_cmd_table[opcode]; if (entry->flags & CTL_CMD_FLAG_SA5) { entry = &((const struct ctl_cmd_entry *) entry->execute)[service_action]; } else if (service_action != 0) { one->support = 1; break; } goto fill_one; } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return(retval); } int ctl_report_supported_tmf(struct ctl_scsiio *ctsio) { struct scsi_report_supported_tmf *cdb; struct scsi_report_supported_tmf_ext_data *data; int retval; int alloc_len, total_len; CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; if (cdb->options & RST_REPD) total_len = sizeof(struct scsi_report_supported_tmf_ext_data); else total_len = sizeof(struct scsi_report_supported_tmf_data); alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr; data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | RST_TRS; data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; data->length = total_len - 4; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_report_timestamp(struct ctl_scsiio *ctsio) { struct scsi_report_timestamp *cdb; struct scsi_report_timestamp_data *data; struct timeval tv; int64_t timestamp; int retval; int alloc_len, total_len; CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); cdb = (struct scsi_report_timestamp *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; total_len = sizeof(struct scsi_report_timestamp_data); alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; scsi_ulto2b(sizeof(*data) - 2, data->length); data->origin = RTS_ORIG_OUTSIDE; getmicrotime(&tv); timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; scsi_ulto4b(timestamp >> 16, data->timestamp); scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct 
scsi_per_res_in *cdb; int alloc_len, total_len = 0; /* struct scsi_per_res_in_rsrv in_data; */ uint64_t key; CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); cdb = (struct scsi_per_res_in *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); retry: mtx_lock(&lun->lun_lock); switch (cdb->action) { case SPRI_RK: /* read keys */ total_len = sizeof(struct scsi_per_res_in_keys) + lun->pr_key_count * sizeof(struct scsi_per_res_key); break; case SPRI_RR: /* read reservation */ if (lun->flags & CTL_LUN_PR_RESERVED) total_len = sizeof(struct scsi_per_res_in_rsrv); else total_len = sizeof(struct scsi_per_res_in_header); break; case SPRI_RC: /* report capabilities */ total_len = sizeof(struct scsi_per_res_cap); break; case SPRI_RS: /* read full status */ total_len = sizeof(struct scsi_per_res_in_header) + (sizeof(struct scsi_per_res_in_full_desc) + 256) * lun->pr_key_count; break; default: panic("%s: Invalid PR type %#x", __func__, cdb->action); } mtx_unlock(&lun->lun_lock); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; mtx_lock(&lun->lun_lock); switch (cdb->action) { case SPRI_RK: { // read keys struct scsi_per_res_in_keys *res_keys; int i, key_count; res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) */ if (total_len != (sizeof(struct scsi_per_res_in_keys) + (lun->pr_key_count * sizeof(struct scsi_per_res_key)))){ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation length changed, retrying\n", __func__); goto retry; } scsi_ulto4b(lun->pr_generation, res_keys->header.generation); scsi_ulto4b(sizeof(struct scsi_per_res_key) * lun->pr_key_count, res_keys->header.length); for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { if ((key = ctl_get_prkey(lun, i)) == 0) continue; /* * We used lun->pr_key_count to calculate the * size to allocate. If it turns out the number of * initiators with the registered flag set is * larger than that (i.e. they haven't been kept in * sync), we've got a problem. */ if (key_count >= lun->pr_key_count) { key_count++; continue; } scsi_u64to8b(key, res_keys->keys[key_count].key); key_count++; } break; } case SPRI_RR: { // read reservation struct scsi_per_res_in_rsrv *res; int tmp_len, header_only; res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; scsi_ulto4b(lun->pr_generation, res->header.generation); if (lun->flags & CTL_LUN_PR_RESERVED) { tmp_len = sizeof(struct scsi_per_res_in_rsrv); scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), res->header.length); header_only = 0; } else { tmp_len = sizeof(struct scsi_per_res_in_header); scsi_ulto4b(0, res->header.length); header_only = 1; } /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) */ if (tmp_len != total_len) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation status changed, retrying\n", __func__); goto retry; } /* * No reservation held, so we're done. 
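 *
 * A self-contained sketch of the "size under the lock, drop the lock to
 * allocate, re-check and retry" pattern used throughout this handler is
 * given in the disabled block below.  It is illustrative only: pr_state,
 * alloc_pr_buffer and the pthread mutex are stand-ins, not CTL symbols.
 */
#if 0	/* illustrative sketch, never compiled */
#include <pthread.h>
#include <stdlib.h>

/* Made-up state; key_count may change whenever the lock is dropped,
 * just like lun->pr_key_count above. */
struct pr_state {
	pthread_mutex_t	lock;
	int		key_count;
};

static void *
alloc_pr_buffer(struct pr_state *st, size_t entry_size, size_t *lenp)
{
	size_t len;
	void *buf;

retry:
	pthread_mutex_lock(&st->lock);
	len = 8 + (size_t)st->key_count * entry_size;	/* header + entries */
	pthread_mutex_unlock(&st->lock);

	buf = calloc(1, len);		/* may sleep; state can change here */

	pthread_mutex_lock(&st->lock);
	if (len != 8 + (size_t)st->key_count * entry_size) {
		pthread_mutex_unlock(&st->lock);
		free(buf);
		goto retry;		/* sizing no longer valid, try again */
	}
	*lenp = len;
	return (buf);			/* lock still held for the caller */
}
#endif
	/*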
*/ if (header_only != 0) break; /* * If the registration is an All Registrants type, the key * is 0, since it doesn't really matter. */ if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), res->data.reservation); } res->data.scopetype = lun->pr_res_type; break; } case SPRI_RC: //report capabilities { struct scsi_per_res_cap *res_cap; uint16_t type_mask; res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; scsi_ulto2b(sizeof(*res_cap), res_cap->length); res_cap->flags1 = SPRI_CRH; res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; type_mask = SPRI_TM_WR_EX_AR | SPRI_TM_EX_AC_RO | SPRI_TM_WR_EX_RO | SPRI_TM_EX_AC | SPRI_TM_WR_EX | SPRI_TM_EX_AC_AR; scsi_ulto2b(type_mask, res_cap->type_mask); break; } case SPRI_RS: { // read full status struct scsi_per_res_in_full *res_status; struct scsi_per_res_in_full_desc *res_desc; struct ctl_port *port; int i, len; res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) */ if (total_len < (sizeof(struct scsi_per_res_in_header) + (sizeof(struct scsi_per_res_in_full_desc) + 256) * lun->pr_key_count)){ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation length changed, retrying\n", __func__); goto retry; } scsi_ulto4b(lun->pr_generation, res_status->header.generation); res_desc = &res_status->desc[0]; for (i = 0; i < CTL_MAX_INITIATORS; i++) { if ((key = ctl_get_prkey(lun, i)) == 0) continue; scsi_u64to8b(key, res_desc->res_key.key); if ((lun->flags & CTL_LUN_PR_RESERVED) && (lun->pr_res_idx == i || lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { res_desc->flags = SPRI_FULL_R_HOLDER; res_desc->scopetype = lun->pr_res_type; } scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, res_desc->rel_trgt_port_id); len = 0; port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; if (port != NULL) len = ctl_create_iid(port, i % CTL_MAX_INIT_PER_PORT, res_desc->transport_id); scsi_ulto4b(len, res_desc->additional_length); res_desc = (struct scsi_per_res_in_full_desc *) &res_desc->transport_id[len]; } scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], res_status->header.length); break; } default: panic("%s: Invalid PR type %#x", __func__, cdb->action); } mtx_unlock(&lun->lun_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if * it should return. 
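 *
 * The READ FULL STATUS code above appends variable-size descriptors and
 * then derives the header's ADDITIONAL LENGTH from a pointer difference.
 * A minimal stand-alone sketch of that technique follows; put_be32() is
 * a local stand-in for the driver's scsi_ulto4b(), and the structure and
 * function names are made up for the example.
 */
#if 0	/* illustrative sketch, never compiled */
#include <stdint.h>
#include <string.h>

static void
put_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

struct list_hdr {
	uint8_t	generation[4];
	uint8_t	length[4];	/* bytes of descriptor data that follow */
};

static size_t
build_desc_list(uint8_t *buf, const uint8_t *ids[], const size_t id_len[],
    int n)
{
	struct list_hdr *h = (struct list_hdr *)buf;
	uint8_t *p = buf + sizeof(*h);

	for (int i = 0; i < n; i++) {
		memcpy(p, ids[i], id_len[i]);
		p += id_len[i];			/* next descriptor follows */
	}
	put_be32((uint32_t)(p - (buf + sizeof(*h))), h->length);
	return ((size_t)(p - buf));		/* total bytes produced */
}
#endif
	/*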
*/ static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, uint64_t sa_res_key, uint8_t type, uint32_t residx, struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, struct scsi_per_res_out_parms* param) { union ctl_ha_msg persis_io; int i; mtx_lock(&lun->lun_lock); if (sa_res_key == 0) { if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { /* validate scope and type */ if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (1); } if (type>8 || type==2 || type==4 || type==0) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } /* * Unregister everybody else and build UA for * them */ for(i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_key_count = 1; lun->pr_res_type = type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; lun->pr_generation++; mtx_unlock(&lun->lun_lock); /* send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else { /* not all registrants */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS || !(lun->flags & CTL_LUN_PR_RESERVED)) { int found = 0; if (res_key == sa_res_key) { /* special case */ /* * The spec implies this is not good but doesn't * say what to do. There are two choices either * generate a res conflict or check condition * with illegal field in parameter data. Since * that is what is done when the sa_res_key is * zero I'll take that approach since this has * to do with the sa_res_key. 
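 *
 * The "type>8 || type==2 || type==4 || type==0" tests in this file reject
 * every persistent reservation TYPE code that SPC leaves reserved.  A
 * self-contained restatement with the SPC type values spelled out is in
 * the disabled block below; pr_type_valid() is an illustrative name, not
 * a CTL function.
 */
#if 0	/* illustrative sketch, never compiled */
#include <stdbool.h>
#include <stdint.h>

enum pr_type {
	PR_WR_EX	= 0x01,	/* Write Exclusive */
	PR_EX_AC	= 0x03,	/* Exclusive Access */
	PR_WR_EX_RO	= 0x05,	/* Write Exclusive - Registrants Only */
	PR_EX_AC_RO	= 0x06,	/* Exclusive Access - Registrants Only */
	PR_WR_EX_AR	= 0x07,	/* Write Exclusive - All Registrants */
	PR_EX_AC_AR	= 0x08	/* Exclusive Access - All Registrants */
};

static bool
pr_type_valid(uint8_t type)
{
	switch (type) {
	case PR_WR_EX:
	case PR_EX_AC:
	case PR_WR_EX_RO:
	case PR_EX_AC_RO:
	case PR_WR_EX_AR:
	case PR_EX_AC_AR:
		return (true);
	default:		/* 0, 2, 4 and anything above 8 are reserved */
		return (false);
	}
}
#endif
	/*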
*/ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (ctl_get_prkey(lun, i) != sa_res_key) continue; found = 1; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } if (!found) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lun->pr_generation++; mtx_unlock(&lun->lun_lock); /* send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else { /* Reserved but not all registrants */ /* sa_res_key is res holder */ if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { /* validate scope and type */ if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (1); } if (type>8 || type==2 || type==4 || type==0) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } /* * Do the following: * if sa_res_key != res_key remove all * registrants w/sa_res_key and generate UA * for these registrants(Registrations * Preempted) if it wasn't an exclusive * reservation generate UA(Reservations * Preempted) for all other registered nexuses * if the type has changed. Establish the new * reservation and holder. If res_key and * sa_res_key are the same do the above * except don't unregister the res holder. 
*/ for(i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; if (sa_res_key == ctl_get_prkey(lun, i)) { ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } else if (type != lun->pr_res_type && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; else lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else { /* * sa_res_key is not the res holder just * remove registrants */ int found=0; for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (sa_res_key != ctl_get_prkey(lun, i)) continue; found = 1; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } if (!found) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (1); } lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } } return (0); } static void ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) { uint64_t sa_res_key; int i; sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS || lun->pr_res_idx == CTL_PR_NO_RESERVATION || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { if (sa_res_key == 0) { /* * Unregister everybody else and build UA for * them */ for(i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == msg->pr.pr_info.residx || ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_key_count = 1; lun->pr_res_type = msg->pr.pr_info.res_type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = msg->pr.pr_info.residx; } else { for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (sa_res_key == ctl_get_prkey(lun, i)) continue; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } } } else { for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == msg->pr.pr_info.residx || ctl_get_prkey(lun, i) == 0) continue; if (sa_res_key == ctl_get_prkey(lun, i)) { ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } else if (msg->pr.pr_info.res_type != lun->pr_res_type && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = msg->pr.pr_info.res_type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = msg->pr.pr_info.residx; else lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; } 
lun->pr_generation++; } int ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); int retval; u_int32_t param_len; struct scsi_per_res_out *cdb; struct scsi_per_res_out_parms* param; uint32_t residx; uint64_t res_key, sa_res_key, key; uint8_t type; union ctl_ha_msg persis_io; int i; CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); cdb = (struct scsi_per_res_out *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; /* * We only support whole-LUN scope. The scope & type are ignored for * register, register and ignore existing key and clear. * We sometimes ignore scope and type on preempts too!! * Verify reservation type here as well. */ type = cdb->scope_type & SPR_TYPE_MASK; if ((cdb->action == SPRO_RESERVE) || (cdb->action == SPRO_RELEASE)) { if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (type>8 || type==2 || type==4 || type==0) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } param_len = scsi_4btoul(cdb->length); if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); ctsio->kern_data_len = param_len; ctsio->kern_total_len = param_len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; residx = ctl_get_initindex(&ctsio->io_hdr.nexus); res_key = scsi_8btou64(param->res_key.key); sa_res_key = scsi_8btou64(param->serv_act_res_key); /* * Validate the reservation key here except for SPRO_REG_IGNO * This must be done for all other service actions */ if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { mtx_lock(&lun->lun_lock); if ((key = ctl_get_prkey(lun, residx)) != 0) { if (res_key != key) { /* * The current key passed in doesn't match * the one the initiator previously * registered. */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { /* * We are not registered */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } else if (res_key != 0) { /* * We are not registered and trying to register but * the register key isn't zero. */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } mtx_unlock(&lun->lun_lock); } switch (cdb->action & SPRO_ACTION_MASK) { case SPRO_REGISTER: case SPRO_REG_IGNO: { /* * We don't support any of these options, as we report in * the read capabilities request (see * ctl_persistent_reserve_in(), above). 
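 *
 * PERSISTENT RESERVE OUT is handled in two passes: the first entry
 * allocates a buffer and starts the data-out transfer, and the handler
 * is re-entered once the parameter list has arrived.  A stand-alone
 * sketch of that pattern is in the disabled block; struct req,
 * handle_pr_out() and start_dataout() are made-up stand-ins for the
 * CTL_FLAG_ALLOCATED check and ctl_datamove().
 */
#if 0	/* illustrative sketch, never compiled */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct req {
	bool	 data_present;	/* has the parameter list arrived yet? */
	uint8_t	*buf;
	size_t	 len;
};

static void
start_dataout(struct req *r)
{
	(void)r;	/* transport-specific; completion re-enters handler */
}

static int
handle_pr_out(struct req *r, size_t param_len)
{
	if (!r->data_present) {
		r->buf = malloc(param_len);
		r->len = param_len;
		start_dataout(r);
		return (0);	/* first pass: just fetch the parameters */
	}
	/* second pass: validate r->buf and apply the service action */
	return (1);
}
#endif
	/*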
*/ if ((param->flags & SPR_SPEC_I_PT) || (param->flags & SPR_ALL_TG_PT) || (param->flags & SPR_APTPL)) { int bit_ptr; if (param->flags & SPR_APTPL) bit_ptr = 0; else if (param->flags & SPR_ALL_TG_PT) bit_ptr = 2; else /* SPR_SPEC_I_PT */ bit_ptr = 3; free(ctsio->kern_data_ptr, M_CTL); ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 20, /*bit_valid*/ 1, /*bit*/ bit_ptr); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } mtx_lock(&lun->lun_lock); /* * The initiator wants to clear the * key/unregister. */ if (sa_res_key == 0) { if ((res_key == 0 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO && ctl_get_prkey(lun, residx) == 0)) { mtx_unlock(&lun->lun_lock); goto done; } ctl_clr_prkey(lun, residx); lun->pr_key_count--; if (residx == lun->pr_res_idx) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO) && lun->pr_key_count) { /* * If the reservation is a registrants * only type we need to generate a UA * for other registered inits. The * sense code should be RESERVATIONS * RELEASED */ for (i = softc->init_min; i < softc->init_max; i++){ if (ctl_get_prkey(lun, i) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = 0; } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { if (lun->pr_key_count==0) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_type = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; } } lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; persis_io.pr.pr_info.residx = residx; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else /* sa_res_key != 0 */ { /* * If we aren't registered currently then increment * the key count and set the registered flag. 
*/ ctl_alloc_prkey(lun, residx); if (ctl_get_prkey(lun, residx) == 0) lun->pr_key_count++; ctl_set_prkey(lun, residx, sa_res_key); lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_REG_KEY; persis_io.pr.pr_info.residx = residx; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } break; } case SPRO_RESERVE: mtx_lock(&lun->lun_lock); if (lun->flags & CTL_LUN_PR_RESERVED) { /* * if this isn't the reservation holder and it's * not a "all registrants" type or if the type is * different then we have a conflict */ if ((lun->pr_res_idx != residx && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) || lun->pr_res_type != type) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } mtx_unlock(&lun->lun_lock); } else /* create a reservation */ { /* * If it's not an "all registrants" type record * reservation holder */ if (type != SPR_TYPE_WR_EX_AR && type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; /* Res holder */ else lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; lun->flags |= CTL_LUN_PR_RESERVED; lun->pr_res_type = type; mtx_unlock(&lun->lun_lock); /* send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_RESERVE; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } break; case SPRO_RELEASE: mtx_lock(&lun->lun_lock); if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { /* No reservation exists return good status */ mtx_unlock(&lun->lun_lock); goto done; } /* * Is this nexus a reservation holder? */ if (lun->pr_res_idx != residx && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { /* * not a res holder return good status but * do nothing */ mtx_unlock(&lun->lun_lock); goto done; } if (lun->pr_res_type != type) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_illegal_pr_release(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* okay to release */ lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; lun->pr_res_type = 0; /* * If this isn't an exclusive access reservation and NUAR * is not set, generate UA for all other registrants. 
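 *
 * A compact restatement of the rule applied below is in the disabled
 * block: on RELEASE, other registrants get a "reservations released"
 * unit attention unless the reservation was one of the two plain
 * exclusive types or the NUAR mode bit suppresses it.  The names are
 * illustrative, not CTL symbols.
 */
#if 0	/* illustrative sketch, never compiled */
#include <stdbool.h>
#include <stdint.h>

#define PR_WR_EX	0x01	/* Write Exclusive */
#define PR_EX_AC	0x03	/* Exclusive Access */

static bool
release_needs_ua(uint8_t type, bool nuar_set)
{
	if (nuar_set)
		return (false);
	return (type != PR_EX_AC && type != PR_WR_EX);
}
#endif
	/*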
		 */
		if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX &&
		    (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) {
			for (i = softc->init_min; i < softc->init_max; i++) {
				if (i == residx || ctl_get_prkey(lun, i) == 0)
					continue;
				ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
			}
		}
		mtx_unlock(&lun->lun_lock);

		/* Send msg to other side */
		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_RELEASE;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
		    sizeof(persis_io.pr), M_WAITOK);
		break;
	case SPRO_CLEAR:
		/* send msg to other side */
		mtx_lock(&lun->lun_lock);
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_type = 0;
		lun->pr_key_count = 0;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;

		ctl_clr_prkey(lun, residx);
		for (i = 0; i < CTL_MAX_INITIATORS; i++)
			if (ctl_get_prkey(lun, i) != 0) {
				ctl_clr_prkey(lun, i);
				ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
			}
		lun->pr_generation++;
		mtx_unlock(&lun->lun_lock);

		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_CLEAR;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
		    sizeof(persis_io.pr), M_WAITOK);
		break;
	case SPRO_PREEMPT:
	case SPRO_PRE_ABO: {
		int nretval;

		nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
		    residx, ctsio, cdb, param);
		if (nretval != 0)
			return (CTL_RETVAL_COMPLETE);
		break;
	}
	default:
		panic("%s: Invalid PR type %#x", __func__, cdb->action);
	}

done:
	free(ctsio->kern_data_ptr, M_CTL);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);

	return (retval);
}

/*
 * This routine is for handling a message from the other SC pertaining to
 * persistent reserve out. All the error checking will have been done
 * so only performing the action need be done here to keep the two
 * in sync.
 */
static void
ctl_hndl_per_res_out_on_other_sc(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg;
	struct ctl_lun *lun;
	int i;
	uint32_t residx, targ_lun;

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	residx = ctl_get_initindex(&msg->hdr.nexus);
	switch(msg->pr.pr_info.action) {
	case CTL_PR_REG_KEY:
		ctl_alloc_prkey(lun, msg->pr.pr_info.residx);
		if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0)
			lun->pr_key_count++;
		ctl_set_prkey(lun, msg->pr.pr_info.residx,
		    scsi_8btou64(msg->pr.pr_info.sa_res_key));
		lun->pr_generation++;
		break;

	case CTL_PR_UNREG_KEY:
		ctl_clr_prkey(lun, msg->pr.pr_info.residx);
		lun->pr_key_count--;

		/* XXX Need to see if the reservation has been released */
		/* if so do we need to generate UA? */
		if (msg->pr.pr_info.residx == lun->pr_res_idx) {
			lun->flags &= ~CTL_LUN_PR_RESERVED;
			lun->pr_res_idx = CTL_PR_NO_RESERVATION;

			if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
			     lun->pr_res_type == SPR_TYPE_EX_AC_RO) &&
			    lun->pr_key_count) {
				/*
				 * If the reservation is a registrants
				 * only type we need to generate a UA
				 * for other registered inits.
The * sense code should be RESERVATIONS * RELEASED */ for (i = softc->init_min; i < softc->init_max; i++) { if (ctl_get_prkey(lun, i) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = 0; } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { if (lun->pr_key_count==0) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_type = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; } } lun->pr_generation++; break; case CTL_PR_RESERVE: lun->flags |= CTL_LUN_PR_RESERVED; lun->pr_res_type = msg->pr.pr_info.res_type; lun->pr_res_idx = msg->pr.pr_info.residx; break; case CTL_PR_RELEASE: /* * If this isn't an exclusive access reservation and NUAR * is not set, generate UA for all other registrants. */ if (lun->pr_res_type != SPR_TYPE_EX_AC && lun->pr_res_type != SPR_TYPE_WR_EX && (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { for (i = softc->init_min; i < softc->init_max; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; lun->pr_res_type = 0; break; case CTL_PR_PREEMPT: ctl_pro_preempt_other(lun, msg); break; case CTL_PR_CLEAR: lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_type = 0; lun->pr_key_count = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; for (i=0; i < CTL_MAX_INITIATORS; i++) { if (ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_generation++; break; } mtx_unlock(&lun->lun_lock); } int ctl_read_write(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int flags, retval; int isread; CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); flags = 0; isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; switch (ctsio->cdb[0]) { case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb; cdb = (struct scsi_rw_6 *)ctsio->cdb; lba = scsi_3btoul(cdb->addr); /* only 5 bits are valid in the most significant address byte */ lba &= 0x1fffff; num_blocks = cdb->length; /* * This is correct according to SBC-2. 
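 *
 * A stand-alone sketch of the READ(6)/WRITE(6) decode rules used here
 * (21-bit LBA, transfer length 0 meaning 256 blocks) is in the disabled
 * block below; struct rw6_cdb and decode_rw6() are illustrative names,
 * not the scsi_rw_6 structure used by the driver.
 */
#if 0	/* illustrative sketch, never compiled */
#include <stdint.h>

struct rw6_cdb {
	uint8_t	opcode;
	uint8_t	addr[3];	/* top 3 bits of addr[0] are not LBA bits */
	uint8_t	length;		/* 0 means 256 blocks (SBC) */
	uint8_t	control;
};

static void
decode_rw6(const struct rw6_cdb *cdb, uint64_t *lba, uint32_t *nblocks)
{
	*lba = ((uint64_t)cdb->addr[0] << 16 |
	    (uint64_t)cdb->addr[1] << 8 | cdb->addr[2]) & 0x1fffff;
	*nblocks = cdb->length ? cdb->length : 256;
}
#endif
	/*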
*/ if (num_blocks == 0) num_blocks = 256; break; } case READ_10: case WRITE_10: { struct scsi_rw_10 *cdb; cdb = (struct scsi_rw_10 *)ctsio->cdb; if (cdb->byte2 & SRW10_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW10_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); break; } case WRITE_VERIFY_10: { struct scsi_write_verify_10 *cdb; cdb = (struct scsi_write_verify_10 *)ctsio->cdb; flags |= CTL_LLF_FUA; if (cdb->byte2 & SWV_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); break; } case READ_12: case WRITE_12: { struct scsi_rw_12 *cdb; cdb = (struct scsi_rw_12 *)ctsio->cdb; if (cdb->byte2 & SRW12_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW12_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case WRITE_VERIFY_12: { struct scsi_write_verify_12 *cdb; cdb = (struct scsi_write_verify_12 *)ctsio->cdb; flags |= CTL_LLF_FUA; if (cdb->byte2 & SWV_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case READ_16: case WRITE_16: { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)ctsio->cdb; if (cdb->byte2 & SRW12_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW12_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case WRITE_ATOMIC_16: { struct scsi_write_atomic_16 *cdb; if (lun->be_lun->atomicblock == 0) { ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; if (cdb->byte2 & SRW12_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW12_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_2btoul(cdb->length); if (num_blocks > lun->be_lun->atomicblock) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } break; } case WRITE_VERIFY_16: { struct scsi_write_verify_16 *cdb; cdb = (struct scsi_write_verify_16 *)ctsio->cdb; flags |= CTL_LLF_FUA; if (cdb->byte2 & SWV_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * According to SBC-3, a transfer length of 0 is not an error. * Note that this cannot happen with WRITE(6) or READ(6), since 0 * translates to 256 blocks for those commands. */ if (num_blocks == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Set FUA and/or DPO if caches are disabled. 
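 *
 * A stand-alone restatement of the wrap-around bounds test used a few
 * lines above is given in the disabled block before the cache handling
 * continues: if lba + num_blocks is smaller than lba, the 64-bit addition
 * wrapped and the range is invalid.  lba_range_ok() is an illustrative
 * name, not a CTL function.
 */
#if 0	/* illustrative sketch, never compiled */
#include <stdbool.h>
#include <stdint.h>

static bool
lba_range_ok(uint64_t lba, uint32_t nblocks, uint64_t maxlba)
{
	if (lba + nblocks < lba)		/* addition wrapped around */
		return (false);
	return (lba + nblocks <= maxlba + 1);	/* last block within range */
}
#endif
	/*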
*/ if (isread) { if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0) flags |= CTL_LLF_FUA | CTL_LLF_DPO; } else { if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) flags |= CTL_LLF_FUA; } lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; ctsio->kern_rel_offset = 0; CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } static int ctl_cnw_cont(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); struct ctl_scsiio *ctsio; struct ctl_lba_len_flags *lbalen; int retval; ctsio = &io->scsiio; ctsio->io_hdr.status = CTL_STATUS_NONE; ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->flags &= ~CTL_LLF_COMPARE; lbalen->flags |= CTL_LLF_WRITE; CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_cnw(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int flags, retval; CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); flags = 0; switch (ctsio->cdb[0]) { case COMPARE_AND_WRITE: { struct scsi_compare_and_write *cdb; cdb = (struct scsi_compare_and_write *)ctsio->cdb; if (cdb->byte2 & SRW10_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW10_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = cdb->length; break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * According to SBC-3, a transfer length of 0 is not an error. */ if (num_blocks == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Set FUA if write cache is disabled. */ if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) flags |= CTL_LLF_FUA; ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; ctsio->kern_rel_offset = 0; /* * Set the IO_CONT flag, so that if this I/O gets passed to * ctl_data_submit_done(), it'll get passed back to * ctl_ctl_cnw_cont() for further processing. 
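 *
 * COMPARE AND WRITE carries 2 * num_blocks of data: the first half is
 * compared with the medium and the second half is written only on a
 * match, which is why kern_total_len is doubled above and the I/O is
 * re-dispatched as a WRITE from the continuation routine.  A stand-alone
 * sketch follows; compare_and_write() here is illustrative and operates
 * on plain buffers, not on the CTL backend.
 */
#if 0	/* illustrative sketch, never compiled */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool
compare_and_write(uint8_t *medium, const uint8_t *payload, size_t blk_bytes)
{
	const uint8_t *verify = payload;		/* first half */
	const uint8_t *wdata = payload + blk_bytes;	/* second half */

	if (memcmp(medium, verify, blk_bytes) != 0)
		return (false);		/* miscompare: nothing written */
	memcpy(medium, wdata, blk_bytes);
	return (true);
}
#endif
	/*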
*/ ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; ctsio->io_cont = ctl_cnw_cont; lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; lbalen->flags = CTL_LLF_COMPARE | flags; CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_verify(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int bytchk, flags; int retval; CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); bytchk = 0; flags = CTL_LLF_FUA; switch (ctsio->cdb[0]) { case VERIFY_10: { struct scsi_verify_10 *cdb; cdb = (struct scsi_verify_10 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); break; } case VERIFY_12: { struct scsi_verify_12 *cdb; cdb = (struct scsi_verify_12 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case VERIFY_16: { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * According to SBC-3, a transfer length of 0 is not an error. */ if (num_blocks == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; if (bytchk) { lbalen->flags = CTL_LLF_COMPARE | flags; ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; } else { lbalen->flags = CTL_LLF_VERIFY | flags; ctsio->kern_total_len = 0; } ctsio->kern_rel_offset = 0; CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_report_luns(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_port *port = CTL_PORT(ctsio); struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio); struct scsi_report_luns *cdb; struct scsi_report_luns_data *lun_data; int num_filled, num_luns, num_port_luns, retval; uint32_t alloc_len, lun_datalen; uint32_t initidx, targ_lun_id, lun_id; retval = CTL_RETVAL_COMPLETE; cdb = (struct scsi_report_luns *)ctsio->cdb; CTL_DEBUG_PRINT(("ctl_report_luns\n")); num_luns = 0; num_port_luns = port->lun_map ? 
port->lun_map_size : ctl_max_luns; mtx_lock(&softc->ctl_lock); for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) { if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX) num_luns++; } mtx_unlock(&softc->ctl_lock); switch (cdb->select_report) { case RPL_REPORT_DEFAULT: case RPL_REPORT_ALL: case RPL_REPORT_NONSUBSID: break; case RPL_REPORT_WELLKNOWN: case RPL_REPORT_ADMIN: case RPL_REPORT_CONGLOM: num_luns = 0; break; default: ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (retval); break; /* NOTREACHED */ } alloc_len = scsi_4btoul(cdb->length); /* * The initiator has to allocate at least 16 bytes for this request, * so he can at least get the header and the first LUN. Otherwise * we reject the request (per SPC-3 rev 14, section 6.21). */ if (alloc_len < (sizeof(struct scsi_report_luns_data) + sizeof(struct scsi_report_luns_lundata))) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 6, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (retval); } lun_datalen = sizeof(*lun_data) + (num_luns * sizeof(struct scsi_report_luns_lundata)); ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); mtx_lock(&softc->ctl_lock); for (targ_lun_id = 0, num_filled = 0; targ_lun_id < num_port_luns && num_filled < num_luns; targ_lun_id++) { lun_id = ctl_lun_map_from_port(port, targ_lun_id); if (lun_id == UINT32_MAX) continue; lun = softc->ctl_luns[lun_id]; if (lun == NULL) continue; be64enc(lun_data->luns[num_filled++].lundata, ctl_encode_lun(targ_lun_id)); /* * According to SPC-3, rev 14 section 6.21: * * "The execution of a REPORT LUNS command to any valid and * installed logical unit shall clear the REPORTED LUNS DATA * HAS CHANGED unit attention condition for all logical * units of that target with respect to the requesting * initiator. A valid and installed logical unit is one * having a PERIPHERAL QUALIFIER of 000b in the standard * INQUIRY data (see 6.4.2)." * * If request_lun is NULL, the LUN this report luns command * was issued to is either disabled or doesn't exist. In that * case, we shouldn't clear any pending lun change unit * attention. */ if (request_lun != NULL) { mtx_lock(&lun->lun_lock); ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); mtx_unlock(&lun->lun_lock); } } mtx_unlock(&softc->ctl_lock); /* * It's quite possible that we've returned fewer LUNs than we allocated * space for. Trim it. */ lun_datalen = sizeof(*lun_data) + (num_filled * sizeof(struct scsi_report_luns_lundata)); ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(lun_datalen, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * We set this to the actual data length, regardless of how much * space we actually have to return results. If the user looks at * this value, he'll know whether or not he allocated enough space * and reissue the command if necessary. We don't support well * known logical units, so if the user asks for that, return none. */ scsi_ulto4b(lun_datalen - 8, lun_data->length); /* * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy * this request. 
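 *
 * A stand-alone sketch of the REPORT LUNS parameter data produced above
 * (8-byte header whose LUN LIST LENGTH counts only the entries, data
 * truncated to the allocation length while the full length is still
 * reported) is in the disabled block.  put_be32() stands in for
 * scsi_ulto4b(), and the entries are written as plain big-endian numbers
 * rather than the SAM encoding produced by ctl_encode_lun().
 */
#if 0	/* illustrative sketch, never compiled */
#include <stdint.h>
#include <string.h>

static void
put_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static size_t
build_report_luns(uint8_t *buf, size_t alloc_len, const uint64_t *luns, int n)
{
	size_t total = 8 + (size_t)n * 8;
	size_t copy = total < alloc_len ? total : alloc_len;

	if (alloc_len < 8)
		return (0);			/* SPC requires at least 16 */
	memset(buf, 0, copy);
	put_be32((uint32_t)(n * 8), &buf[0]);	/* LUN LIST LENGTH */
	for (int i = 0; i < n; i++) {
		size_t off = 8 + (size_t)i * 8;
		if (off + 8 > copy)
			break;			/* truncated to alloc_len */
		for (int b = 0; b < 8; b++)
			buf[off + b] = (uint8_t)(luns[i] >> (56 - 8 * b));
	}
	return (copy);
}
#endif
	/*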
*/ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_request_sense(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_request_sense *cdb; struct scsi_sense_data *sense_ptr, *ps; uint32_t initidx; int have_error; u_int sense_len = SSD_FULL_SIZE; scsi_sense_data_type sense_format; ctl_ua_type ua_type; uint8_t asc = 0, ascq = 0; cdb = (struct scsi_request_sense *)ctsio->cdb; CTL_DEBUG_PRINT(("ctl_request_sense\n")); /* * Determine which sense format the user wants. */ if (cdb->byte2 & SRS_DESC) sense_format = SSD_TYPE_DESC; else sense_format = SSD_TYPE_FIXED; ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; /* * struct scsi_sense_data, which is currently set to 256 bytes, is * larger than the largest allowed value for the length field in the * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. */ ctsio->kern_data_len = cdb->length; ctsio->kern_total_len = cdb->length; /* * If we don't have a LUN, we don't have any pending sense. */ if (lun == NULL || ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && softc->ha_link < CTL_HA_LINK_UNKNOWN)) { /* "Logical unit not supported" */ ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format, /*current_error*/ 1, /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, /*asc*/ 0x25, /*ascq*/ 0x00, SSD_ELEM_NONE); goto send; } have_error = 0; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); /* * Check for pending sense, and then for pending unit attentions. * Pending sense gets returned first, then pending unit attentions. */ mtx_lock(&lun->lun_lock); ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; if (ps != NULL) ps += initidx % CTL_MAX_INIT_PER_PORT; if (ps != NULL && ps->error_code != 0) { scsi_sense_data_type stored_format; /* * Check to see which sense format was used for the stored * sense data. */ stored_format = scsi_sense_type(ps); /* * If the user requested a different sense format than the * one we stored, then we need to convert it to the other * format. If we're going from descriptor to fixed format * sense data, we may lose things in translation, depending * on what options were used. * * If the stored format is SSD_TYPE_NONE (i.e. invalid), * for some reason we'll just copy it out as-is. */ if ((stored_format == SSD_TYPE_FIXED) && (sense_format == SSD_TYPE_DESC)) ctl_sense_to_desc((struct scsi_sense_data_fixed *) ps, (struct scsi_sense_data_desc *)sense_ptr); else if ((stored_format == SSD_TYPE_DESC) && (sense_format == SSD_TYPE_FIXED)) ctl_sense_to_fixed((struct scsi_sense_data_desc *) ps, (struct scsi_sense_data_fixed *)sense_ptr); else memcpy(sense_ptr, ps, sizeof(*sense_ptr)); ps->error_code = 0; have_error = 1; } else { ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len, sense_format); if (ua_type != CTL_UA_NONE) have_error = 1; } if (have_error == 0) { /* * Report informational exception if have one and allowed. */ if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { asc = lun->ie_asc; ascq = lun->ie_ascq; } ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format, /*current_error*/ 1, /*sense_key*/ SSD_KEY_NO_SENSE, /*asc*/ asc, /*ascq*/ ascq, SSD_ELEM_NONE); } mtx_unlock(&lun->lun_lock); send: /* * We report the SCSI status as OK, since the status of the command * itself is OK. We're reporting sense as parameter data. 
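 *
 * The order of precedence implemented above, reduced to a stand-alone
 * sketch: stored sense from an earlier command wins, then any pending
 * unit attention, and otherwise NO SENSE (possibly carrying an
 * informational exception ASC/ASCQ) is returned.  The names below are
 * illustrative only.
 */
#if 0	/* illustrative sketch, never compiled */
#include <stdbool.h>

enum sense_source { SNS_PENDING_SENSE, SNS_UNIT_ATTENTION, SNS_NO_SENSE };

static enum sense_source
pick_sense_source(bool have_pending_sense, bool have_unit_attention)
{
	if (have_pending_sense)
		return (SNS_PENDING_SENSE);
	if (have_unit_attention)
		return (SNS_UNIT_ATTENTION);
	return (SNS_NO_SENSE);
}
#endif
	/*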
*/ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_tur(struct ctl_scsiio *ctsio) { CTL_DEBUG_PRINT(("ctl_tur\n")); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x00, the Supported VPD Pages page. */ static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_supported_pages *pages; int sup_page_size; int p; sup_page_size = sizeof(struct scsi_vpd_supported_pages) * SCSI_EVPD_NUM_SUPPORTED_PAGES; ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(sup_page_size, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) pages->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; p = 0; /* Supported VPD pages */ pages->page_list[p++] = SVPD_SUPPORTED_PAGES; /* Serial Number */ pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; /* Device Identification */ pages->page_list[p++] = SVPD_DEVICE_ID; /* Extended INQUIRY Data */ pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; /* Mode Page Policy */ pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; /* SCSI Ports */ pages->page_list[p++] = SVPD_SCSI_PORTS; /* Third-party Copy */ pages->page_list[p++] = SVPD_SCSI_TPC; /* SCSI Feature Sets */ pages->page_list[p++] = SVPD_SCSI_SFS; if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { /* Block limits */ pages->page_list[p++] = SVPD_BLOCK_LIMITS; /* Block Device Characteristics */ pages->page_list[p++] = SVPD_BDC; /* Logical Block Provisioning */ pages->page_list[p++] = SVPD_LBP; } pages->length = p; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x80, the Unit Serial Number page. */ static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_unit_serial_number *sn_ptr; int data_len; data_len = 4 + CTL_SN_LEN; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; sn_ptr->length = CTL_SN_LEN; /* * If we don't have a LUN, we just leave the serial number as * all spaces. 
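 *
 * A stand-alone sketch of the Unit Serial Number page (0x80) built here
 * is in the disabled block: a 4-byte header followed by the serial in
 * ASCII, space-padded when unknown.  build_vpd_80() is an illustrative
 * name and hard-codes a connected, direct-access peripheral byte.
 */
#if 0	/* illustrative sketch, never compiled */
#include <stdint.h>
#include <string.h>

static size_t
build_vpd_80(uint8_t *buf, const char *serial, size_t sn_len)
{
	buf[0] = 0x00;			/* qualifier: connected; type: direct access */
	buf[1] = 0x80;			/* page code */
	buf[2] = 0x00;
	buf[3] = (uint8_t)sn_len;	/* page length */
	memset(buf + 4, ' ', sn_len);	/* default: all ASCII spaces */
	if (serial != NULL)
		memcpy(buf + 4, serial, strnlen(serial, sn_len));
	return (4 + sn_len);
}
#endif
	/*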
*/ if (lun != NULL) { strncpy((char *)sn_ptr->serial_num, (char *)lun->be_lun->serial_num, CTL_SN_LEN); } else memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x86, the Extended INQUIRY Data page. */ static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_extended_inquiry_data *eid_ptr; int data_len; data_len = sizeof(struct scsi_vpd_extended_inquiry_data); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. */ if (lun != NULL) eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; scsi_ulto2b(data_len - 4, eid_ptr->page_length); /* * We support head of queue, ordered and simple tags. */ eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; /* * Volatile cache supported. */ eid_ptr->flags3 = SVPD_EID_V_SUP; /* * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit * attention for a particular IT nexus on all LUNs once we report * it to that nexus once. This bit is required as of SPC-4. */ eid_ptr->flags4 = SVPD_EID_LUICLR; /* * We support revert to defaults (RTD) bit in MODE SELECT. */ eid_ptr->flags5 = SVPD_EID_RTD_SUP; /* * XXX KDM in order to correctly answer this, we would need * information from the SIM to determine how much sense data it * can send. So this would really be a path inquiry field, most * likely. This can be set to a maximum of 252 according to SPC-4, * but the hardware may or may not be able to support that much. * 0 just means that the maximum sense data length is not reported. */ eid_ptr->max_sense_length = 0; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_mode_page_policy *mpp_ptr; int data_len; data_len = sizeof(struct scsi_vpd_mode_page_policy) + sizeof(struct scsi_vpd_mode_page_policy_descr); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. 
*/ if (lun != NULL) mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; scsi_ulto2b(data_len - 4, mpp_ptr->page_length); mpp_ptr->descr[0].page_code = 0x3f; mpp_ptr->descr[0].subpage_code = 0xff; mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x83, the Device Identification page. */ static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_port *port = CTL_PORT(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_device_id *devid_ptr; struct scsi_vpd_id_descriptor *desc; int data_len, g; uint8_t proto; data_len = sizeof(struct scsi_vpd_device_id) + sizeof(struct scsi_vpd_id_descriptor) + sizeof(struct scsi_vpd_id_rel_trgt_port_id) + sizeof(struct scsi_vpd_id_descriptor) + sizeof(struct scsi_vpd_id_trgt_port_grp_id); if (lun && lun->lun_devid) data_len += lun->lun_devid->len; if (port && port->port_devid) data_len += port->port_devid->len; if (port && port->target_devid) data_len += port->target_devid->len; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. */ if (lun != NULL) devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; devid_ptr->page_code = SVPD_DEVICE_ID; scsi_ulto2b(data_len - 4, devid_ptr->length); if (port && port->port_type == CTL_PORT_FC) proto = SCSI_PROTO_FC << 4; else if (port && port->port_type == CTL_PORT_SAS) proto = SCSI_PROTO_SAS << 4; else if (port && port->port_type == CTL_PORT_ISCSI) proto = SCSI_PROTO_ISCSI << 4; else proto = SCSI_PROTO_SPI << 4; desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; /* * We're using a LUN association here. i.e., this device ID is a * per-LUN identifier. */ if (lun && lun->lun_devid) { memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + lun->lun_devid->len); } /* * This is for the WWPN which is a port association. 
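 *
 * Parsing aside (not part of this change; the loop is only an
 * illustrative sketch): an initiator walks the descriptor list the same
 * way it is built here, each entry being a fixed header followed by
 * `length` identifier bytes:
 *
 *	struct scsi_vpd_id_descriptor *d =
 *	    (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;
 *	uint8_t *end = (uint8_t *)devid_ptr->desc_list +
 *	    scsi_2btoul(devid_ptr->length);
 *
 *	while ((uint8_t *)d < end) {
 *		(inspect d->id_type, d->identifier, ...)
 *		d = (struct scsi_vpd_id_descriptor *)
 *		    (&d->identifier[0] + d->length);
 *	}
 *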
*/ if (port && port->port_devid) { memcpy(desc, port->port_devid->data, port->port_devid->len); desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + port->port_devid->len); } /* * This is for the Relative Target Port(type 4h) identifier */ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_RELTARG; desc->length = 4; scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + sizeof(struct scsi_vpd_id_rel_trgt_port_id)); /* * This is for the Target Port Group(type 5h) identifier */ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_TPORTGRP; desc->length = 4; if (softc->is_single || (port && port->status & CTL_PORT_STATUS_HA_SHARED)) g = 1; else g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; scsi_ulto2b(g, &desc->identifier[2]); desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + sizeof(struct scsi_vpd_id_trgt_port_grp_id)); /* * This is for the Target identifier */ if (port && port->target_devid) { memcpy(desc, port->target_devid->data, port->target_devid->len); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_scsi_ports *sp; struct scsi_vpd_port_designation *pd; struct scsi_vpd_port_designation_cont *pdc; struct ctl_port *port; int data_len, num_target_ports, iid_len, id_len; num_target_ports = 0; iid_len = 0; id_len = 0; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (lun != NULL && ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; num_target_ports++; if (port->init_devid) iid_len += port->init_devid->len; if (port->port_devid) id_len += port->port_devid->len; } mtx_unlock(&softc->ctl_lock); data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_ports * (sizeof(struct scsi_vpd_port_designation) + sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. 
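 *
 * Sizing aside (not part of this change; the numbers are purely
 * hypothetical): as in the other VPD handlers above, the complete page
 * is always built and only the transfer is truncated to the initiator's
 * ALLOCATION LENGTH, so PAGE LENGTH still describes the full page:
 *
 *	data_len = 60;					full page as built
 *	alloc_len = 16;					from the INQUIRY CDB
 *	kern_data_len = min(data_len, alloc_len);	only 16 bytes move
 *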
*/ if (lun != NULL) sp->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; sp->page_code = SVPD_SCSI_PORTS; scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), sp->page_length); pd = &sp->design[0]; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (lun != NULL && ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; scsi_ulto2b(port->targ_port, pd->relative_port_id); if (port->init_devid) { iid_len = port->init_devid->len; memcpy(pd->initiator_transportid, port->init_devid->data, port->init_devid->len); } else iid_len = 0; scsi_ulto2b(iid_len, pd->initiator_transportid_length); pdc = (struct scsi_vpd_port_designation_cont *) (&pd->initiator_transportid[iid_len]); if (port->port_devid) { id_len = port->port_devid->len; memcpy(pdc->target_port_descriptors, port->port_devid->data, port->port_devid->len); } else id_len = 0; scsi_ulto2b(id_len, pdc->target_port_descriptors_length); pd = (struct scsi_vpd_port_designation *) ((uint8_t *)pdc->target_port_descriptors + id_len); } mtx_unlock(&softc->ctl_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_sfs *sfs_ptr; int sfs_page_size, n; sfs_page_size = sizeof(*sfs_ptr) + 5 * 2; ctsio->kern_data_ptr = malloc(sfs_page_size, M_CTL, M_WAITOK | M_ZERO); sfs_ptr = (struct scsi_vpd_sfs *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(sfs_page_size, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) sfs_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else sfs_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; sfs_ptr->page_code = SVPD_SCSI_SFS; n = 0; /* Discovery 2016 */ scsi_ulto2b(0x0001, &sfs_ptr->codes[2 * n++]); if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { /* SBC Base 2016 */ scsi_ulto2b(0x0101, &sfs_ptr->codes[2 * n++]); /* SBC Base 2010 */ scsi_ulto2b(0x0102, &sfs_ptr->codes[2 * n++]); if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { /* Basic Provisioning 2016 */ scsi_ulto2b(0x0103, &sfs_ptr->codes[2 * n++]); } /* Drive Maintenance 2016 */ //scsi_ulto2b(0x0104, &sfs_ptr->codes[2 * n++]); } scsi_ulto2b(4 + 2 * n, sfs_ptr->page_length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_block_limits *bl_ptr; const char *val; uint64_t ival; ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. 
The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; bl_ptr->page_code = SVPD_BLOCK_LIMITS; scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); bl_ptr->max_cmp_write_len = 0xff; scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); if (lun != NULL) { scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { ival = 0xffffffff; val = dnvlist_get_string(lun->be_lun->options, "unmap_max_lba", NULL); if (val != NULL) ctl_expand_number(val, &ival); scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt); ival = 0xffffffff; val = dnvlist_get_string(lun->be_lun->options, "unmap_max_descr", NULL); if (val != NULL) ctl_expand_number(val, &ival); scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt); if (lun->be_lun->ublockexp != 0) { scsi_ulto4b((1 << lun->be_lun->ublockexp), bl_ptr->opt_unmap_grain); scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, bl_ptr->unmap_grain_align); } } scsi_ulto4b(lun->be_lun->atomicblock, bl_ptr->max_atomic_transfer_length); scsi_ulto4b(0, bl_ptr->atomic_alignment); scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); ival = UINT64_MAX; val = dnvlist_get_string(lun->be_lun->options, "write_same_max_lba", NULL); if (val != NULL) ctl_expand_number(val, &ival); scsi_u64to8b(ival, bl_ptr->max_write_same_length); if (lun->be_lun->maxlba + 1 > ival) bl_ptr->flags |= SVPD_BL_WSNZ; } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_block_device_characteristics *bdc_ptr; const char *value; u_int i; ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. 
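 *
 * Decoding aside (not part of this change): byte 0 of each of these
 * pages packs the peripheral qualifier into bits 7-5 and the device
 * type into bits 4-0, for example:
 *
 *	(SID_QUAL_LU_CONNECTED << 5) | T_DIRECT  == 0x00
 *	(SID_QUAL_LU_OFFLINE << 5)   | T_DIRECT  == 0x20
 *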
*/ if (lun != NULL) bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; bdc_ptr->page_code = SVPD_BDC; scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); if (lun != NULL && (value = dnvlist_get_string(lun->be_lun->options, "rpm", NULL)) != NULL) i = strtol(value, NULL, 0); else i = CTL_DEFAULT_ROTATION_RATE; scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); if (lun != NULL && (value = dnvlist_get_string(lun->be_lun->options, "formfactor", NULL)) != NULL) i = strtol(value, NULL, 0); else i = 0; bdc_ptr->wab_wac_ff = (i & 0x0f); bdc_ptr->flags = SVPD_RBWZ | SVPD_FUAB | SVPD_VBULS; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_logical_block_prov *lbp_ptr; const char *value; ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; lbp_ptr->page_code = SVPD_LBP; scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; value = dnvlist_get_string(lun->be_lun->options, "provisioning_type", NULL); if (value != NULL) { if (strcmp(value, "resource") == 0) lbp_ptr->prov_type = SVPD_LBP_RESOURCE; else if (strcmp(value, "thin") == 0) lbp_ptr->prov_type = SVPD_LBP_THIN; } else lbp_ptr->prov_type = SVPD_LBP_THIN; } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * INQUIRY with the EVPD bit set. 
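 *
 * CDB aside (not part of this change; the example bytes are purely
 * illustrative): the request decoded below is the 6-byte INQUIRY CDB
 * with the EVPD bit set, e.g. asking for the Device Identification
 * page with a 255-byte allocation length:
 *
 *	0x12  0x01  0x83  0x00 0xff  0x00
 *	 op   EVPD  page  alloc len  ctrl
 *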
*/ static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_inquiry *cdb; int alloc_len, retval; cdb = (struct scsi_inquiry *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); switch (cdb->page_code) { case SVPD_SUPPORTED_PAGES: retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); break; case SVPD_UNIT_SERIAL_NUMBER: retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); break; case SVPD_DEVICE_ID: retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); break; case SVPD_EXTENDED_INQUIRY_DATA: retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); break; case SVPD_MODE_PAGE_POLICY: retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); break; case SVPD_SCSI_PORTS: retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); break; case SVPD_SCSI_TPC: retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); break; case SVPD_SCSI_SFS: retval = ctl_inquiry_evpd_sfs(ctsio, alloc_len); break; case SVPD_BLOCK_LIMITS: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); break; case SVPD_BDC: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); break; case SVPD_LBP: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); break; default: err: ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } /* * Standard INQUIRY data. */ static int ctl_inquiry_std(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_port *port = CTL_PORT(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_inquiry_data *inq_ptr; struct scsi_inquiry *cdb; const char *val; uint32_t alloc_len, data_len; ctl_port_type port_type; port_type = port->port_type; if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) port_type = CTL_PORT_SCSI; cdb = (struct scsi_inquiry *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); /* * We malloc the full inquiry data size here and fill it * in. If the user only asks for less, we'll give him * that much. */ data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; if (lun != NULL) { if ((lun->flags & CTL_LUN_PRIMARY_SC) || softc->ha_link >= CTL_HA_LINK_UNKNOWN) { inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; } else { inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | lun->be_lun->lun_type; } if (lun->flags & CTL_LUN_REMOVABLE) inq_ptr->dev_qual2 |= SID_RMB; } else inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; /* RMB in byte 2 is 0 */ inq_ptr->version = SCSI_REV_SPC5; /* * According to SAM-3, even if a device only supports a single * level of LUN addressing, it should still set the HISUP bit: * * 4.9.1 Logical unit numbers overview * * All logical unit number formats described in this standard are * hierarchical in structure even when only a single level in that * hierarchy is used. The HISUP bit shall be set to one in the * standard INQUIRY data (see SPC-2) when any logical unit number * format described in this standard is used. Non-hierarchical * formats are outside the scope of this standard. 
* * Therefore we set the HiSup bit here. * * The response format is 2, per SPC-3. */ inq_ptr->response_format = SID_HiSup | 2; inq_ptr->additional_length = data_len - (offsetof(struct scsi_inquiry_data, additional_length) + 1); CTL_DEBUG_PRINT(("additional_length = %d\n", inq_ptr->additional_length)); inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; if (port_type == CTL_PORT_SCSI) inq_ptr->spc2_flags = SPC2_SID_ADDR16; inq_ptr->spc2_flags |= SPC2_SID_MultiP; inq_ptr->flags = SID_CmdQue; if (port_type == CTL_PORT_SCSI) inq_ptr->flags |= SID_WBus16 | SID_Sync; /* * Per SPC-3, unused bytes in ASCII strings are filled with spaces. * We have 8 bytes for the vendor name, and 16 bytes for the device * name and 4 bytes for the revision. */ if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, "vendor", NULL)) == NULL) { strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); } else { memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); strncpy(inq_ptr->vendor, val, min(sizeof(inq_ptr->vendor), strlen(val))); } if (lun == NULL) { strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, sizeof(inq_ptr->product)); } else if ((val = dnvlist_get_string(lun->be_lun->options, "product", NULL)) == NULL) { switch (lun->be_lun->lun_type) { case T_DIRECT: strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, sizeof(inq_ptr->product)); break; case T_PROCESSOR: strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, sizeof(inq_ptr->product)); break; case T_CDROM: strncpy(inq_ptr->product, CTL_CDROM_PRODUCT, sizeof(inq_ptr->product)); break; default: strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, sizeof(inq_ptr->product)); break; } } else { memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); strncpy(inq_ptr->product, val, min(sizeof(inq_ptr->product), strlen(val))); } /* * XXX make this a macro somewhere so it automatically gets * incremented when we make changes. */ if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, "revision", NULL)) == NULL) { strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); } else { memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); strncpy(inq_ptr->revision, val, min(sizeof(inq_ptr->revision), strlen(val))); } /* * For parallel SCSI, we support double transition and single * transition clocking. We also support QAS (Quick Arbitration * and Selection) and Information Unit transfers on both the * control and array devices. 
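 *
 * Encoding aside (not part of this change, shown only as a rough
 * sketch): the version descriptors stored just below are plain SPC
 * version descriptor codes, and scsi_ulto2b() stores a 16-bit value
 * big-endian as SCSI requires, roughly:
 *
 *	bytes[0] = (val >> 8) & 0xff;
 *	bytes[1] = val & 0xff;
 *
 * so scsi_ulto2b(0x05C0, inq_ptr->version2) produces the bytes 05 C0.
 *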
*/ if (port_type == CTL_PORT_SCSI) inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | SID_SPI_IUS; /* SAM-6 (no version claimed) */ scsi_ulto2b(0x00C0, inq_ptr->version1); /* SPC-5 (no version claimed) */ scsi_ulto2b(0x05C0, inq_ptr->version2); if (port_type == CTL_PORT_FC) { /* FCP-2 ANSI INCITS.350:2003 */ scsi_ulto2b(0x0917, inq_ptr->version3); } else if (port_type == CTL_PORT_SCSI) { /* SPI-4 ANSI INCITS.362:200x */ scsi_ulto2b(0x0B56, inq_ptr->version3); } else if (port_type == CTL_PORT_ISCSI) { /* iSCSI (no version claimed) */ scsi_ulto2b(0x0960, inq_ptr->version3); } else if (port_type == CTL_PORT_SAS) { /* SAS (no version claimed) */ scsi_ulto2b(0x0BE0, inq_ptr->version3); } else if (port_type == CTL_PORT_UMASS) { /* USB Mass Storage Class Bulk-Only Transport, Revision 1.0 */ scsi_ulto2b(0x1730, inq_ptr->version3); } if (lun == NULL) { /* SBC-4 (no version claimed) */ scsi_ulto2b(0x0600, inq_ptr->version4); } else { switch (lun->be_lun->lun_type) { case T_DIRECT: /* SBC-4 (no version claimed) */ scsi_ulto2b(0x0600, inq_ptr->version4); break; case T_PROCESSOR: break; case T_CDROM: /* MMC-6 (no version claimed) */ scsi_ulto2b(0x04E0, inq_ptr->version4); break; default: break; } } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_inquiry(struct ctl_scsiio *ctsio) { struct scsi_inquiry *cdb; int retval; CTL_DEBUG_PRINT(("ctl_inquiry\n")); cdb = (struct scsi_inquiry *)ctsio->cdb; if (cdb->byte2 & SI_EVPD) retval = ctl_inquiry_evpd(ctsio); else if (cdb->page_code == 0) retval = ctl_inquiry_std(ctsio); else { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } return (retval); } int ctl_get_config(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_get_config_header *hdr; struct scsi_get_config_feature *feature; struct scsi_get_config *cdb; uint32_t alloc_len, data_len; int rt, starting; cdb = (struct scsi_get_config *)ctsio->cdb; rt = (cdb->rt & SGC_RT_MASK); starting = scsi_2btoul(cdb->starting_feature); alloc_len = scsi_2btoul(cdb->length); data_len = sizeof(struct scsi_get_config_header) + sizeof(struct scsi_get_config_feature) + 8 + sizeof(struct scsi_get_config_feature) + 8 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 8 + sizeof(struct scsi_get_config_feature) + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr; if (lun->flags & CTL_LUN_NO_MEDIA) scsi_ulto2b(0x0000, hdr->current_profile); else scsi_ulto2b(0x0010, hdr->current_profile); feature = (struct scsi_get_config_feature *)(hdr + 1); if (starting > 0x003b) goto done; if (starting > 0x003a) goto f3b; if (starting > 0x002b) goto f3a; if (starting > 0x002a) goto f2b; if (starting > 0x001f) goto f2a; if (starting > 0x001e) goto f1f; if (starting > 0x001d) goto f1e; if (starting > 0x0010) goto f1d; if (starting > 0x0003) goto f10; if (starting > 0x0002) goto f3; if 
(starting > 0x0001) goto f2; if (starting > 0x0000) goto f1; /* Profile List */ scsi_ulto2b(0x0000, feature->feature_code); feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT; feature->add_length = 8; scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */ feature->feature_data[2] = 0x00; scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */ feature->feature_data[6] = 0x01; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f1: /* Core */ scsi_ulto2b(0x0001, feature->feature_code); feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT; feature->add_length = 8; scsi_ulto4b(0x00000000, &feature->feature_data[0]); feature->feature_data[4] = 0x03; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f2: /* Morphing */ scsi_ulto2b(0x0002, feature->feature_code); feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x02; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f3: /* Removable Medium */ scsi_ulto2b(0x0003, feature->feature_code); feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x39; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA)) goto done; f10: /* Random Read */ scsi_ulto2b(0x0010, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 8; scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]); scsi_ulto2b(1, &feature->feature_data[4]); feature->feature_data[6] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f1d: /* Multi-Read */ scsi_ulto2b(0x001D, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 0; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f1e: /* CD Read */ scsi_ulto2b(0x001E, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f1f: /* DVD Read */ scsi_ulto2b(0x001F, feature->feature_code); feature->flags = 0x08; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x01; feature->feature_data[2] = 0x03; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f2a: /* DVD+RW */ scsi_ulto2b(0x002A, feature->feature_code); feature->flags = 0x04; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature->feature_data[1] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f2b: /* DVD+R */ scsi_ulto2b(0x002B, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f3a: /* DVD+RW Dual Layer */ scsi_ulto2b(0x003A, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 
feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature->feature_data[1] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f3b: /* DVD+R Dual Layer */ scsi_ulto2b(0x003B, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; done: data_len = (uint8_t *)feature - (uint8_t *)hdr; if (rt == SGC_RT_SPECIFIC && data_len > 4) { feature = (struct scsi_get_config_feature *)(hdr + 1); if (scsi_2btoul(feature->feature_code) == starting) feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; data_len = (uint8_t *)feature - (uint8_t *)hdr; } scsi_ulto4b(data_len - 4, hdr->data_length); ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_get_event_status(struct ctl_scsiio *ctsio) { struct scsi_get_event_status_header *hdr; struct scsi_get_event_status *cdb; uint32_t alloc_len, data_len; cdb = (struct scsi_get_event_status *)ctsio->cdb; if ((cdb->byte2 & SGESN_POLLED) == 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } alloc_len = scsi_2btoul(cdb->length); data_len = sizeof(struct scsi_get_event_status_header); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr; scsi_ulto2b(0, hdr->descr_length); hdr->nea_class = SGESN_NEA; hdr->supported_class = 0; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_mechanism_status(struct ctl_scsiio *ctsio) { struct scsi_mechanism_status_header *hdr; struct scsi_mechanism_status *cdb; uint32_t alloc_len, data_len; cdb = (struct scsi_mechanism_status *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); data_len = sizeof(struct scsi_mechanism_status_header); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr; hdr->state1 = 0x00; hdr->state2 = 0xe0; scsi_ulto3b(0, hdr->lba); hdr->slots_num = 0; scsi_ulto2b(0, hdr->slots_length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static void ctl_ultomsf(uint32_t lba, uint8_t *buf) { lba += 150; buf[0] = 0; buf[1] = bin2bcd((lba / 75) / 60); buf[2] = bin2bcd((lba / 75) % 60); buf[3] = bin2bcd(lba % 75); } int ctl_read_toc(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_read_toc_hdr *hdr; struct scsi_read_toc_type01_descr *descr; struct scsi_read_toc *cdb; uint32_t alloc_len, data_len; int 
format, msf; cdb = (struct scsi_read_toc *)ctsio->cdb; msf = (cdb->byte2 & CD_MSF) != 0; format = cdb->format; alloc_len = scsi_2btoul(cdb->data_len); data_len = sizeof(struct scsi_read_toc_hdr); if (format == 0) data_len += 2 * sizeof(struct scsi_read_toc_type01_descr); else data_len += sizeof(struct scsi_read_toc_type01_descr); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr; if (format == 0) { scsi_ulto2b(0x12, hdr->data_length); hdr->first = 1; hdr->last = 1; descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); descr->addr_ctl = 0x14; descr->track_number = 1; if (msf) ctl_ultomsf(0, descr->track_start); else scsi_ulto4b(0, descr->track_start); descr++; descr->addr_ctl = 0x14; descr->track_number = 0xaa; if (msf) ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start); else scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start); } else { scsi_ulto2b(0x0a, hdr->data_length); hdr->first = 1; hdr->last = 1; descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); descr->addr_ctl = 0x14; descr->track_number = 1; if (msf) ctl_ultomsf(0, descr->track_start); else scsi_ulto4b(0, descr->track_start); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * For known CDB types, parse the LBA and length. */ static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) { if (io->io_hdr.io_type != CTL_IO_SCSI) return (1); switch (io->scsiio.cdb[0]) { case COMPARE_AND_WRITE: { struct scsi_compare_and_write *cdb; cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = cdb->length; break; } case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb; cdb = (struct scsi_rw_6 *)io->scsiio.cdb; *lba = scsi_3btoul(cdb->addr); /* only 5 bits are valid in the most significant address byte */ *lba &= 0x1fffff; *len = cdb->length; break; } case READ_10: case WRITE_10: { struct scsi_rw_10 *cdb; cdb = (struct scsi_rw_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case WRITE_VERIFY_10: { struct scsi_write_verify_10 *cdb; cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case READ_12: case WRITE_12: { struct scsi_rw_12 *cdb; cdb = (struct scsi_rw_12 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case WRITE_VERIFY_12: { struct scsi_write_verify_12 *cdb; cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case READ_16: case WRITE_16: { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case WRITE_ATOMIC_16: { struct scsi_write_atomic_16 *cdb; cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case WRITE_VERIFY_16: { struct scsi_write_verify_16 *cdb; cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case WRITE_SAME_10: { struct scsi_write_same_10 *cdb; cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = 
scsi_2btoul(cdb->length); break; } case WRITE_SAME_16: { struct scsi_write_same_16 *cdb; cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case VERIFY_10: { struct scsi_verify_10 *cdb; cdb = (struct scsi_verify_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case VERIFY_12: { struct scsi_verify_12 *cdb; cdb = (struct scsi_verify_12 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case VERIFY_16: { struct scsi_verify_16 *cdb; cdb = (struct scsi_verify_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case UNMAP: { *lba = 0; *len = UINT64_MAX; break; } case SERVICE_ACTION_IN: { /* GET LBA STATUS */ struct scsi_get_lba_status *cdb; cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = UINT32_MAX; break; } default: return (1); break; /* NOTREACHED */ } return (0); } static ctl_action ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, bool seq) { uint64_t endlba1, endlba2; endlba1 = lba1 + len1 - (seq ? 0 : 1); endlba2 = lba2 + len2 - 1; if ((endlba1 < lba2) || (endlba2 < lba1)) return (CTL_ACTION_PASS); else return (CTL_ACTION_BLOCK); } static int ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) { struct ctl_ptr_len_flags *ptrlen; struct scsi_unmap_desc *buf, *end, *range; uint64_t lba; uint32_t len; /* If not UNMAP -- go other way. */ if (io->io_hdr.io_type != CTL_IO_SCSI || io->scsiio.cdb[0] != UNMAP) return (CTL_ACTION_ERROR); /* If UNMAP without data -- block and wait for data. */ ptrlen = (struct ctl_ptr_len_flags *) &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || ptrlen->ptr == NULL) return (CTL_ACTION_BLOCK); /* UNMAP with data -- check for collision. */ buf = (struct scsi_unmap_desc *)ptrlen->ptr; end = buf + ptrlen->len / sizeof(*buf); for (range = buf; range < end; range++) { lba = scsi_8btou64(range->lba); len = scsi_4btoul(range->length); if ((lba < lba2 + len2) && (lba + len > lba2)) return (CTL_ACTION_BLOCK); } return (CTL_ACTION_PASS); } static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) { uint64_t lba1, lba2; uint64_t len1, len2; int retval; if (ctl_get_lba_len(io2, &lba2, &len2) != 0) return (CTL_ACTION_ERROR); retval = ctl_extent_check_unmap(io1, lba2, len2); if (retval != CTL_ACTION_ERROR) return (retval); if (ctl_get_lba_len(io1, &lba1, &len1) != 0) return (CTL_ACTION_ERROR); if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) seq = FALSE; return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); } static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) { uint64_t lba1, lba2; uint64_t len1, len2; if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) return (CTL_ACTION_PASS); if (ctl_get_lba_len(io1, &lba1, &len1) != 0) return (CTL_ACTION_ERROR); if (ctl_get_lba_len(io2, &lba2, &len2) != 0) return (CTL_ACTION_ERROR); if (lba1 + len1 == lba2) return (CTL_ACTION_BLOCK); return (CTL_ACTION_PASS); } static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io *ooa_io) { const struct ctl_cmd_entry *pending_entry, *ooa_entry; const ctl_serialize_action *serialize_row; /* * Aborted commands are not going to be executed and may even * not report completion, so we don't care about their order. * Let them complete ASAP to clean the OOA queue. 
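 *
 * Worked aside (not part of this change): the extent check defined
 * above treats ranges as closed intervals, e.g.:
 *
 *	ctl_extent_check_lba(0, 8, 8, 8, false) == CTL_ACTION_PASS
 *		LBAs 0-7 vs. 8-15 are disjoint
 *	ctl_extent_check_lba(0, 8, 7, 8, false) == CTL_ACTION_BLOCK
 *		LBAs 0-7 vs. 7-14 share LBA 7
 *	ctl_extent_check_lba(0, 8, 8, 8, true)  == CTL_ACTION_BLOCK
 *		with seq set, an exactly adjacent follow-on range still
 *		blocks so that sequential ordering is preserved
 *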
*/ if (pending_io->io_hdr.flags & CTL_FLAG_ABORT) return (CTL_ACTION_SKIP); /* * The initiator attempted multiple untagged commands at the same * time. Can't do that. */ if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) && ((pending_io->io_hdr.nexus.targ_port == ooa_io->io_hdr.nexus.targ_port) && (pending_io->io_hdr.nexus.initid == ooa_io->io_hdr.nexus.initid)) && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | CTL_FLAG_STATUS_SENT)) == 0)) return (CTL_ACTION_OVERLAP); /* * The initiator attempted to send multiple tagged commands with * the same ID. (It's fine if different initiators have the same * tag ID.) * * Even if all of those conditions are true, we don't kill the I/O * if the command ahead of us has been aborted. We won't end up * sending it to the FETD, and it's perfectly legal to resend a * command with the same tag number as long as the previous * instance of this tag number has been aborted somehow. */ if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) && ((pending_io->io_hdr.nexus.targ_port == ooa_io->io_hdr.nexus.targ_port) && (pending_io->io_hdr.nexus.initid == ooa_io->io_hdr.nexus.initid)) && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | CTL_FLAG_STATUS_SENT)) == 0)) return (CTL_ACTION_OVERLAP_TAG); /* * If we get a head of queue tag, SAM-3 says that we should * immediately execute it. * * What happens if this command would normally block for some other * reason? e.g. a request sense with a head of queue tag * immediately after a write. Normally that would block, but this * will result in its getting executed immediately... * * We currently return "pass" instead of "skip", so we'll end up * going through the rest of the queue to check for overlapped tags. * * XXX KDM check for other types of blockage first?? */ if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) return (CTL_ACTION_PASS); /* * Ordered tags have to block until all items ahead of them * have completed. If we get called with an ordered tag, we always * block, if something else is ahead of us in the queue. */ if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) return (CTL_ACTION_BLOCK); /* * Simple tags get blocked until all head of queue and ordered tags * ahead of them have completed. I'm lumping untagged commands in * with simple tags here. XXX KDM is that the right thing to do? 
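 *
 * Summary aside (not part of this change): the tag handling above and
 * below works out to roughly the following:
 *
 *	HEAD OF QUEUE	-> CTL_ACTION_PASS (runs immediately)
 *	ORDERED		-> CTL_ACTION_BLOCK (waits for everything ahead)
 *	SIMPLE/untagged	-> CTL_ACTION_BLOCK only behind HEAD OF QUEUE or
 *			   ORDERED tags, otherwise the per-opcode
 *			   serialization table decides
 *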
*/ if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) return (CTL_ACTION_BLOCK); pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT, ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p", __func__, pending_entry->seridx, pending_io->scsiio.cdb[0], pending_io->scsiio.cdb[1], pending_io)); ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); if (ooa_entry->seridx == CTL_SERIDX_INVLD) return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */ KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT, ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p", __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0], ooa_io->scsiio.cdb[1], ooa_io)); serialize_row = ctl_serialize_table[ooa_entry->seridx]; switch (serialize_row[pending_entry->seridx]) { case CTL_SER_BLOCK: return (CTL_ACTION_BLOCK); case CTL_SER_EXTENT: return (ctl_extent_check(ooa_io, pending_io, (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); case CTL_SER_EXTENTOPT: if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) return (ctl_extent_check(ooa_io, pending_io, (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); return (CTL_ACTION_PASS); case CTL_SER_EXTENTSEQ: if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) return (ctl_extent_check_seq(ooa_io, pending_io)); return (CTL_ACTION_PASS); case CTL_SER_PASS: return (CTL_ACTION_PASS); case CTL_SER_BLOCKOPT: if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) return (CTL_ACTION_BLOCK); return (CTL_ACTION_PASS); case CTL_SER_SKIP: return (CTL_ACTION_SKIP); default: panic("%s: Invalid serialization value %d for %d => %d", __func__, serialize_row[pending_entry->seridx], pending_entry->seridx, ooa_entry->seridx); } return (CTL_ACTION_ERROR); } /* * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. * Assumptions: * - pending_io is generally either incoming, or on the blocked queue * - starting I/O is the I/O we want to start the check with. */ static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io **starting_io) { union ctl_io *ooa_io; ctl_action action; mtx_assert(&lun->lun_lock, MA_OWNED); /* * Run back along the OOA queue, starting with the current * blocked I/O and going through every I/O before it on the * queue. If starting_io is NULL, we'll just end up returning * CTL_ACTION_PASS. */ for (ooa_io = *starting_io; ooa_io != NULL; ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, ooa_links)){ action = ctl_check_for_blockage(lun, pending_io, ooa_io); if (action != CTL_ACTION_PASS) { *starting_io = ooa_io; return (action); } } *starting_io = NULL; return (CTL_ACTION_PASS); } /* * Try to unblock the specified I/O. * * skip parameter allows explicitly skip present blocker of the I/O, * starting from the previous one on OOA queue. It can be used when * we know for sure that the blocker I/O does no longer count. 
*/ static void ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, bool skip) { struct ctl_softc *softc = lun->ctl_softc; union ctl_io *bio, *obio; const struct ctl_cmd_entry *entry; union ctl_ha_msg msg_info; ctl_action action; mtx_assert(&lun->lun_lock, MA_OWNED); if (io->io_hdr.blocker == NULL) return; obio = bio = io->io_hdr.blocker; if (skip) bio = (union ctl_io *)TAILQ_PREV(&bio->io_hdr, ctl_ooaq, ooa_links); action = ctl_check_ooa(lun, io, &bio); if (action == CTL_ACTION_BLOCK) { /* Still blocked, but may be by different I/O now. */ if (bio != obio) { TAILQ_REMOVE(&obio->io_hdr.blocked_queue, &io->io_hdr, blocked_links); TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &io->io_hdr, blocked_links); io->io_hdr.blocker = bio; } return; } /* No longer blocked, one way or another. */ TAILQ_REMOVE(&obio->io_hdr.blocked_queue, &io->io_hdr, blocked_links); io->io_hdr.blocker = NULL; switch (action) { case CTL_ACTION_OVERLAP: ctl_set_overlapped_cmd(&io->scsiio); goto error; case CTL_ACTION_OVERLAP_TAG: ctl_set_overlapped_tag(&io->scsiio, io->scsiio.tag_num & 0xff); goto error; case CTL_ACTION_PASS: case CTL_ACTION_SKIP: /* Serializing commands from the other SC retire there. */ if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && (softc->ha_mode != CTL_HA_MODE_XFER)) { io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; msg_info.hdr.original_sc = io->io_hdr.remote_io; msg_info.hdr.serializing_sc = io; msg_info.hdr.msg_type = CTL_MSG_R2R; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.hdr), M_NOWAIT); break; } /* * Check this I/O for LUN state changes that may have happened * while this command was blocked. The LUN state may have been * changed by a command ahead of us in the queue. */ entry = ctl_get_cmd_entry(&io->scsiio, NULL); if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { ctl_done(io); break; } io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; ctl_enqueue_rtr(io); break; case CTL_ACTION_ERROR: default: ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 0, /*retry_count*/ 0); error: /* Serializing commands from the other SC are done here. */ if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && (softc->ha_mode != CTL_HA_MODE_XFER)) { ctl_try_unblock_others(lun, io, TRUE); TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); ctl_copy_sense_data_back(io, &msg_info); msg_info.hdr.original_sc = io->io_hdr.remote_io; msg_info.hdr.serializing_sc = NULL; msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.scsi), M_WAITOK); ctl_free_io(io); break; } ctl_done(io); break; } } /* * Try to unblock I/Os blocked by the specified I/O. * * skip parameter allows explicitly skip the specified I/O as blocker, * starting from the previous one on the OOA queue. It can be used when * we know for sure that the specified I/O does no longer count (done). * It has to be still on OOA queue though so that we know where to start. 
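 *
 * Implementation aside (not part of this change; the loop is only a
 * rough sketch): the function below saves the next element before each
 * call, since ctl_try_unblock_io() may remove the current I/O from the
 * blocked queue.  It is the usual safe-iteration idiom, roughly:
 *
 *	struct ctl_io_hdr *hdr, *next_hdr;
 *
 *	TAILQ_FOREACH_SAFE(hdr, &bio->io_hdr.blocked_queue, blocked_links,
 *	    next_hdr)
 *		ctl_try_unblock_io(lun, (union ctl_io *)hdr, skip);
 *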
*/ static void ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *bio, bool skip) { union ctl_io *io, *next_io; mtx_assert(&lun->lun_lock, MA_OWNED); for (io = (union ctl_io *)TAILQ_FIRST(&bio->io_hdr.blocked_queue); io != NULL; io = next_io) { next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, blocked_links); KASSERT(io->io_hdr.blocker != NULL, ("I/O %p on blocked list without blocker", io)); ctl_try_unblock_io(lun, io, skip); } KASSERT(!skip || TAILQ_EMPTY(&bio->io_hdr.blocked_queue), ("blocked_queue is not empty after skipping %p", bio)); } /* * This routine (with one exception) checks LUN flags that can be set by * commands ahead of us in the OOA queue. These flags have to be checked * when a command initially comes in, and when we pull a command off the * blocked queue and are preparing to execute it. The reason we have to * check these flags for commands on the blocked queue is that the LUN * state may have been changed by a command ahead of us while we're on the * blocked queue. * * Ordering is somewhat important with these checks, so please pay * careful attention to the placement of any new checks. */ static int ctl_scsiio_lun_check(struct ctl_lun *lun, const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) { struct ctl_softc *softc = lun->ctl_softc; int retval; uint32_t residx; retval = 0; mtx_assert(&lun->lun_lock, MA_OWNED); /* * If this shelf is a secondary shelf controller, we may have to * reject some commands disallowed by HA mode and link state. */ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { if (softc->ha_link == CTL_HA_LINK_OFFLINE && (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { ctl_set_lun_unavail(ctsio); retval = 1; goto bailout; } if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { ctl_set_lun_transit(ctsio); retval = 1; goto bailout; } if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { ctl_set_lun_standby(ctsio); retval = 1; goto bailout; } /* The rest of checks are only done on executing side */ if (softc->ha_mode == CTL_HA_MODE_XFER) goto bailout; } if (entry->pattern & CTL_LUN_PAT_WRITE) { if (lun->be_lun && lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { ctl_set_hw_write_protected(ctsio); retval = 1; goto bailout; } if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_DATA_PROTECT, /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); retval = 1; goto bailout; } } /* * Check for a reservation conflict. If this command isn't allowed * even on reserved LUNs, and if this initiator isn't the one who * reserved us, reject the command with a reservation conflict. */ residx = ctl_get_initindex(&ctsio->io_hdr.nexus); if ((lun->flags & CTL_LUN_RESERVED) && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { if (lun->res_idx != residx) { ctl_set_reservation_conflict(ctsio); retval = 1; goto bailout; } } if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { /* No reservation or command is allowed. */; } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && (lun->pr_res_type == SPR_TYPE_WR_EX || lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { /* The command is allowed for Write Exclusive resv. */; } else { /* * if we aren't registered or it's a res holder type * reservation and this isn't the res holder then set a * conflict. 
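 *
 * Decoding aside (not part of this change): the "pr_res_type < 4" test
 * below covers the single-holder types SPR_TYPE_WR_EX (1h) and
 * SPR_TYPE_EX_AC (3h); for the registrants-only and all-registrants
 * types (5h and up) any registered initiator may proceed, roughly:
 *
 *	registered, type 5h (WR_EX_RO), not holder  -> allowed
 *	registered, type 1h (WR_EX), not holder     -> conflict
 *	not registered, any type                    -> conflict
 *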
*/ if (ctl_get_prkey(lun, residx) == 0 || (residx != lun->pr_res_idx && lun->pr_res_type < 4)) { ctl_set_reservation_conflict(ctsio); retval = 1; goto bailout; } } if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { if (lun->flags & CTL_LUN_EJECTED) ctl_set_lun_ejected(ctsio); else if (lun->flags & CTL_LUN_NO_MEDIA) { if (lun->flags & CTL_LUN_REMOVABLE) ctl_set_lun_no_media(ctsio); else ctl_set_lun_int_reqd(ctsio); } else if (lun->flags & CTL_LUN_STOPPED) ctl_set_lun_stopped(ctsio); else goto bailout; retval = 1; goto bailout; } bailout: return (retval); } static void ctl_failover_io(union ctl_io *io, int have_lock) { ctl_set_busy(&io->scsiio); ctl_done(io); } static void ctl_failover_lun(union ctl_io *rio) { struct ctl_softc *softc = CTL_SOFTC(rio); struct ctl_lun *lun; struct ctl_io_hdr *io, *next_io; uint32_t targ_lun; targ_lun = rio->io_hdr.nexus.targ_mapped_lun; CTL_DEBUG_PRINT(("FAILOVER for lun %u\n", targ_lun)); /* Find and lock the LUN. */ mtx_lock(&softc->ctl_lock); if (targ_lun > ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); return; } if (softc->ha_mode == CTL_HA_MODE_XFER) { TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { /* We are master */ if (io->flags & CTL_FLAG_FROM_OTHER_SC) { if (io->flags & CTL_FLAG_IO_ACTIVE) { io->flags |= CTL_FLAG_ABORT; io->flags |= CTL_FLAG_FAILOVER; ctl_try_unblock_io(lun, (union ctl_io *)io, FALSE); } else { /* This can be only due to DATAMOVE */ io->msg_type = CTL_MSG_DATAMOVE_DONE; io->flags &= ~CTL_FLAG_DMA_INPROG; io->flags |= CTL_FLAG_IO_ACTIVE; io->port_status = 31340; ctl_enqueue_isc((union ctl_io *)io); } } else /* We are slave */ if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; if (io->flags & CTL_FLAG_IO_ACTIVE) { io->flags |= CTL_FLAG_FAILOVER; } else { ctl_set_busy(&((union ctl_io *)io)-> scsiio); ctl_done((union ctl_io *)io); } } } } else { /* SERIALIZE modes */ TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { /* We are master */ if (io->flags & CTL_FLAG_FROM_OTHER_SC) { if (io->blocker != NULL) { TAILQ_REMOVE(&io->blocker->io_hdr.blocked_queue, io, blocked_links); io->blocker = NULL; } ctl_try_unblock_others(lun, (union ctl_io *)io, TRUE); TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); ctl_free_io((union ctl_io *)io); } else /* We are slave */ if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { ctl_set_busy(&((union ctl_io *)io)-> scsiio); ctl_done((union ctl_io *)io); } } } } mtx_unlock(&lun->lun_lock); } static int ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) { struct ctl_lun *lun; const struct ctl_cmd_entry *entry; union ctl_io *bio; uint32_t initidx, targ_lun; int retval = 0; lun = NULL; targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; if (targ_lun < ctl_max_luns) lun = softc->ctl_luns[targ_lun]; if (lun) { /* * If the LUN is invalid, pretend that it doesn't exist. * It will go away as soon as all pending I/O has been * completed. */ mtx_lock(&lun->lun_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); lun = NULL; } } CTL_LUN(ctsio) = lun; if (lun) { CTL_BACKEND_LUN(ctsio) = lun->be_lun; /* * Every I/O goes into the OOA queue for a particular LUN, * and stays there until completion. 
 */
#ifdef CTL_TIME_IO
		if (TAILQ_EMPTY(&lun->ooa_queue))
			lun->idle_time += getsbinuptime() - lun->last_busy;
#endif
		TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
	}

	/* Get command entry and return error if it is unsupported. */
	entry = ctl_validate_command(ctsio);
	if (entry == NULL) {
		if (lun)
			mtx_unlock(&lun->lun_lock);
		return (retval);
	}

	ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
	ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;

	/*
	 * Check to see whether we can send this command to LUNs that don't
	 * exist.  This should pretty much only be the case for inquiry
	 * and request sense.  Further checks, below, really require having
	 * a LUN, so we can't really check the command anymore.  Just put
	 * it on the rtr queue.
	 */
	if (lun == NULL) {
		if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
			return (retval);
		}

		ctl_set_unsupported_lun(ctsio);
		ctl_done((union ctl_io *)ctsio);
		CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
		return (retval);
	} else {
		/*
		 * Make sure we support this particular command on this LUN.
		 * e.g., we don't support writes to the control LUN.
		 */
		if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
			mtx_unlock(&lun->lun_lock);
			ctl_set_invalid_opcode(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);

	/*
	 * If we've got a request sense, it'll clear the contingent
	 * allegiance condition.  Otherwise, if we have a CA condition for
	 * this initiator, clear it, because it sent down a command other
	 * than request sense.
	 */
	if (ctsio->cdb[0] != REQUEST_SENSE) {
		struct scsi_sense_data *ps;

		ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT];
		if (ps != NULL)
			ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0;
	}

	/*
	 * If the command has this flag set, it handles its own unit
	 * attention reporting, we shouldn't do anything.  Otherwise we
	 * check for any pending unit attentions, and send them back to the
	 * initiator.  We only do this when a command initially comes in,
	 * not when we pull it off the blocked queue.
	 *
	 * According to SAM-3, section 5.3.2, the order that things get
	 * presented back to the host is basically unit attentions caused
	 * by some sort of reset event, busy status, reservation conflicts
	 * or task set full, and finally any other status.
	 *
	 * One issue here is that some of the unit attentions we report
	 * don't fall into the "reset" category (e.g. "reported luns data
	 * has changed").  So reporting it here, before the reservation
	 * check, may be technically wrong.  I guess the only thing to do
	 * would be to check for and report the reset events here, and then
	 * check for the other unit attention types after we check for a
	 * reservation conflict.
	 *
	 * XXX KDM need to fix this
	 */
	if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
		ctl_ua_type ua_type;
		u_int sense_len = 0;

		ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data,
		    &sense_len, SSD_TYPE_NONE);
		if (ua_type != CTL_UA_NONE) {
			mtx_unlock(&lun->lun_lock);
			ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
			ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
			ctsio->sense_len = sense_len;
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}

	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
		mtx_unlock(&lun->lun_lock);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	/*
	 * XXX CHD this is where we want to send IO to other side if
	 * this LUN is secondary on this SC.
We will need to make a copy * of the IO and flag the IO on this side as SENT_2OTHER and the flag * the copy we send as FROM_OTHER. * We also need to stuff the address of the original IO so we can * find it easily. Something similar will need be done on the other * side so when we are done we can find the copy. */ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { union ctl_ha_msg msg_info; int isc_retval; ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; mtx_unlock(&lun->lun_lock); msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; msg_info.hdr.original_sc = (union ctl_io *)ctsio; msg_info.hdr.serializing_sc = NULL; msg_info.hdr.nexus = ctsio->io_hdr.nexus; msg_info.scsi.tag_num = ctsio->tag_num; msg_info.scsi.tag_type = ctsio->tag_type; - msg_info.scsi.cdb_len = ctsio->cdb_len; memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); + msg_info.scsi.cdb_len = ctsio->cdb_len; + msg_info.scsi.priority = ctsio->priority; if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { ctl_set_busy(ctsio); ctl_done((union ctl_io *)ctsio); return (retval); } return (retval); } bio = (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links); switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { case CTL_ACTION_BLOCK: ctsio->io_hdr.blocker = bio; TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, blocked_links); mtx_unlock(&lun->lun_lock); return (retval); case CTL_ACTION_PASS: case CTL_ACTION_SKIP: ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; mtx_unlock(&lun->lun_lock); ctl_enqueue_rtr((union ctl_io *)ctsio); break; case CTL_ACTION_OVERLAP: mtx_unlock(&lun->lun_lock); ctl_set_overlapped_cmd(ctsio); ctl_done((union ctl_io *)ctsio); break; case CTL_ACTION_OVERLAP_TAG: mtx_unlock(&lun->lun_lock); ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); ctl_done((union ctl_io *)ctsio); break; case CTL_ACTION_ERROR: default: mtx_unlock(&lun->lun_lock); ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, /*retry_count*/ 0); ctl_done((union ctl_io *)ctsio); break; } return (retval); } const struct ctl_cmd_entry * ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) { const struct ctl_cmd_entry *entry; int service_action; entry = &ctl_cmd_table[ctsio->cdb[0]]; if (sa) *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); if (entry->flags & CTL_CMD_FLAG_SA5) { service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; entry = &((const struct ctl_cmd_entry *) entry->execute)[service_action]; } return (entry); } const struct ctl_cmd_entry * ctl_validate_command(struct ctl_scsiio *ctsio) { const struct ctl_cmd_entry *entry; int i, sa; uint8_t diff; entry = ctl_get_cmd_entry(ctsio, &sa); if (entry->execute == NULL) { if (sa) ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 4); else ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (NULL); } KASSERT(entry->length > 0, ("Not defined length for command 0x%02x/0x%02x", ctsio->cdb[0], ctsio->cdb[1])); for (i = 1; i < entry->length; i++) { diff = ctsio->cdb[i] & ~entry->usage[i - 1]; if (diff == 0) continue; ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ i, /*bit_valid*/ 1, /*bit*/ fls(diff) - 1); ctl_done((union ctl_io *)ctsio); return (NULL); } return (entry); } static int ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) { 
switch (lun_type) { case T_DIRECT: if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) return (0); break; case T_PROCESSOR: if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) return (0); break; case T_CDROM: if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) return (0); break; default: return (0); } return (1); } static int ctl_scsiio(struct ctl_scsiio *ctsio) { int retval; const struct ctl_cmd_entry *entry; retval = CTL_RETVAL_COMPLETE; CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); entry = ctl_get_cmd_entry(ctsio, NULL); /* * If this I/O has been aborted, just send it straight to * ctl_done() without executing it. */ if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { ctl_done((union ctl_io *)ctsio); goto bailout; } /* * All the checks should have been handled by ctl_scsiio_precheck(). * We should be clear now to just execute the I/O. */ retval = entry->execute(ctsio); bailout: return (retval); } static int ctl_target_reset(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_port *port = CTL_PORT(io); struct ctl_lun *lun; uint32_t initidx; ctl_ua_type ua_type; if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = io->taskio.task_action; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_WAITOK); } initidx = ctl_get_initindex(&io->io_hdr.nexus); if (io->taskio.task_action == CTL_TASK_TARGET_RESET) ua_type = CTL_UA_TARG_RESET; else ua_type = CTL_UA_BUS_RESET; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { if (port != NULL && ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; ctl_do_lun_reset(lun, initidx, ua_type); } mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } /* * The LUN should always be set. The I/O is optional, and is used to * distinguish between I/Os sent by this initiator, and by other * initiators. We set unit attention for initiators other than this one. * SAM-3 is vague on this point. It does say that a unit attention should * be established for other initiators when a LUN is reset (see section * 5.7.3), but it doesn't specifically say that the unit attention should * be established for this particular initiator when a LUN is reset. Here * is the relevant text, from SAM-3 rev 8: * * 5.7.2 When a SCSI initiator port aborts its own tasks * * When a SCSI initiator port causes its own task(s) to be aborted, no * notification that the task(s) have been aborted shall be returned to * the SCSI initiator port other than the completion response for the * command or task management function action that caused the task(s) to * be aborted and notification(s) associated with related effects of the * action (e.g., a reset unit attention condition). * * XXX KDM for now, we're setting unit attention for all initiators. */ static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type) { union ctl_io *xio; int i; mtx_lock(&lun->lun_lock); /* Abort tasks. */ for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; ctl_try_unblock_io(lun, xio, FALSE); } /* Clear CA. */ for (i = 0; i < ctl_max_ports; i++) { free(lun->pending_sense[i], M_CTL); lun->pending_sense[i] = NULL; } /* Clear reservation. 
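	 * (This is the SPC-2 RESERVE/RELEASE reservation tracked by
	 * CTL_LUN_RESERVED; persistent reservations are kept separately and
	 * are not released by a LUN reset.)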
*/ lun->flags &= ~CTL_LUN_RESERVED; /* Clear prevent media removal. */ if (lun->prevent) { for (i = 0; i < CTL_MAX_INITIATORS; i++) ctl_clear_mask(lun->prevent, i); lun->prevent_count = 0; } /* Clear TPC status */ ctl_tpc_lun_clear(lun, -1); /* Establish UA. */ #if 0 ctl_est_ua_all(lun, initidx, ua_type); #else ctl_est_ua_all(lun, -1, ua_type); #endif mtx_unlock(&lun->lun_lock); } static int ctl_lun_reset(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_lun *lun; uint32_t targ_lun, initidx; targ_lun = io->io_hdr.nexus.targ_mapped_lun; initidx = ctl_get_initindex(&io->io_hdr.nexus); mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } ctl_do_lun_reset(lun, initidx, CTL_UA_LUN_RESET); mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { union ctl_ha_msg msg_info; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_LUN_RESET; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_WAITOK); } return (0); } static void ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, int other_sc) { union ctl_io *xio; mtx_assert(&lun->lun_lock, MA_OWNED); /* * Run through the OOA queue and attempt to find the given I/O. * The target port, initiator ID, tag type and tag number have to * match the values that we got from the initiator. If we have an * untagged command to abort, simply abort the first untagged command * we come to. We only allow one untagged command at a time of course. */ for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { if ((targ_port == UINT32_MAX || targ_port == xio->io_hdr.nexus.targ_port) && (init_id == UINT32_MAX || init_id == xio->io_hdr.nexus.initid)) { if (targ_port != xio->io_hdr.nexus.targ_port || init_id != xio->io_hdr.nexus.initid) xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; xio->io_hdr.flags |= CTL_FLAG_ABORT; if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = xio->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_ABORT_TASK; msg_info.task.tag_num = xio->scsiio.tag_num; msg_info.task.tag_type = xio->scsiio.tag_type; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_NOWAIT); } ctl_try_unblock_io(lun, xio, FALSE); } } } static int ctl_abort_task_set(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_lun *lun; uint32_t targ_lun; /* * Look up the LUN. 
*/ targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.initid, (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); } else { /* CTL_TASK_CLEAR_TASK_SET */ ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); } mtx_unlock(&lun->lun_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, ctl_ua_type ua_type) { struct ctl_lun *lun; struct scsi_sense_data *ps; uint32_t p, i; p = initidx / CTL_MAX_INIT_PER_PORT; i = initidx % CTL_MAX_INIT_PER_PORT; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); /* Abort tasks. */ ctl_abort_tasks_lun(lun, p, i, 1); /* Clear CA. */ ps = lun->pending_sense[p]; if (ps != NULL) ps[i].error_code = 0; /* Clear reservation. */ if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) lun->flags &= ~CTL_LUN_RESERVED; /* Clear prevent media removal. */ if (lun->prevent && ctl_is_set(lun->prevent, initidx)) { ctl_clear_mask(lun->prevent, initidx); lun->prevent_count--; } /* Clear TPC status */ ctl_tpc_lun_clear(lun, initidx); /* Establish UA. */ ctl_est_ua(lun, initidx, ua_type); mtx_unlock(&lun->lun_lock); } mtx_unlock(&softc->ctl_lock); } static int ctl_i_t_nexus_reset(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); uint32_t initidx; if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_WAITOK); } initidx = ctl_get_initindex(&io->io_hdr.nexus); ctl_i_t_nexus_loss(softc, initidx, CTL_UA_I_T_NEXUS_LOSS); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static int ctl_abort_task(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); union ctl_io *xio; struct ctl_lun *lun; uint32_t targ_lun; /* * Look up the LUN. */ targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); /* * Run through the OOA queue and attempt to find the given I/O. * The target port, initiator ID, tag type and tag number have to * match the values that we got from the initiator. If we have an * untagged command to abort, simply abort the first untagged command * we come to. We only allow one untagged command at a time of course. */ for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) || (xio->io_hdr.flags & CTL_FLAG_ABORT)) continue; /* * If the abort says that the task is untagged, the * task in the queue must be untagged. 
Otherwise, * we just check to see whether the tag numbers * match. This is because the QLogic firmware * doesn't pass back the tag type in an abort * request. */ #if 0 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) || (xio->scsiio.tag_num == io->taskio.tag_num)) { #else /* * XXX KDM we've got problems with FC, because it * doesn't send down a tag type with aborts. So we * can only really go by the tag number... * This may cause problems with parallel SCSI. * Need to figure that out!! */ if (xio->scsiio.tag_num == io->taskio.tag_num) { #endif xio->io_hdr.flags |= CTL_FLAG_ABORT; if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && !(lun->flags & CTL_LUN_PRIMARY_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_ABORT_TASK; msg_info.task.tag_num = io->taskio.tag_num; msg_info.task.tag_type = io->taskio.tag_type; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_NOWAIT); } ctl_try_unblock_io(lun, xio, FALSE); } } mtx_unlock(&lun->lun_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static int ctl_query_task(union ctl_io *io, int task_set) { struct ctl_softc *softc = CTL_SOFTC(io); union ctl_io *xio; struct ctl_lun *lun; int found = 0; uint32_t targ_lun; targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) || (xio->io_hdr.flags & CTL_FLAG_ABORT)) continue; if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { found = 1; break; } } mtx_unlock(&lun->lun_lock); if (found) io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; else io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static int ctl_query_async_event(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_lun *lun; ctl_ua_type ua; uint32_t targ_lun, initidx; targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); initidx = ctl_get_initindex(&io->io_hdr.nexus); ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); mtx_unlock(&lun->lun_lock); if (ua != CTL_UA_NONE) io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; else io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static void ctl_run_task(union ctl_io *io) { int retval = 1; CTL_DEBUG_PRINT(("ctl_run_task\n")); KASSERT(io->io_hdr.io_type == CTL_IO_TASK, ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type)); io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); switch (io->taskio.task_action) { case CTL_TASK_ABORT_TASK: retval = ctl_abort_task(io); break; case CTL_TASK_ABORT_TASK_SET: case CTL_TASK_CLEAR_TASK_SET: retval = ctl_abort_task_set(io); break; 
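	/*
	 * The cases below that only break (CLEAR ACA and the port
	 * login/logout events) are accepted but not acted on: retval stays
	 * at 1, so the I/O completes with CTL_ERROR and the FUNCTION NOT
	 * SUPPORTED task status set above.
	 */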
case CTL_TASK_CLEAR_ACA: break; case CTL_TASK_I_T_NEXUS_RESET: retval = ctl_i_t_nexus_reset(io); break; case CTL_TASK_LUN_RESET: retval = ctl_lun_reset(io); break; case CTL_TASK_TARGET_RESET: case CTL_TASK_BUS_RESET: retval = ctl_target_reset(io); break; case CTL_TASK_PORT_LOGIN: break; case CTL_TASK_PORT_LOGOUT: break; case CTL_TASK_QUERY_TASK: retval = ctl_query_task(io, 0); break; case CTL_TASK_QUERY_TASK_SET: retval = ctl_query_task(io, 1); break; case CTL_TASK_QUERY_ASYNC_EVENT: retval = ctl_query_async_event(io); break; default: printf("%s: got unknown task management event %d\n", __func__, io->taskio.task_action); break; } if (retval == 0) io->io_hdr.status = CTL_SUCCESS; else io->io_hdr.status = CTL_ERROR; ctl_done(io); } /* * For HA operation. Handle commands that come in from the other * controller. */ static void ctl_handle_isc(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_lun *lun; const struct ctl_cmd_entry *entry; uint32_t targ_lun; targ_lun = io->io_hdr.nexus.targ_mapped_lun; switch (io->io_hdr.msg_type) { case CTL_MSG_SERIALIZE: ctl_serialize_other_sc_cmd(&io->scsiio); break; case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */ entry = ctl_get_cmd_entry(&io->scsiio, NULL); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { ctl_done(io); break; } mtx_lock(&lun->lun_lock); if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { mtx_unlock(&lun->lun_lock); ctl_done(io); break; } io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; mtx_unlock(&lun->lun_lock); ctl_enqueue_rtr(io); break; case CTL_MSG_FINISH_IO: if (softc->ha_mode == CTL_HA_MODE_XFER) { ctl_done(io); break; } if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { ctl_free_io(io); break; } mtx_lock(&lun->lun_lock); ctl_try_unblock_others(lun, io, TRUE); TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); mtx_unlock(&lun->lun_lock); ctl_free_io(io); break; case CTL_MSG_PERS_ACTION: ctl_hndl_per_res_out_on_other_sc(io); ctl_free_io(io); break; case CTL_MSG_BAD_JUJU: ctl_done(io); break; case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ ctl_datamove_remote(io); break; case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ io->scsiio.be_move_done(io); break; case CTL_MSG_FAILOVER: ctl_failover_lun(io); ctl_free_io(io); break; default: printf("%s: Invalid message type %d\n", __func__, io->io_hdr.msg_type); ctl_free_io(io); break; } } /* * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if * there is no match. */ static ctl_lun_error_pattern ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) { const struct ctl_cmd_entry *entry; ctl_lun_error_pattern filtered_pattern, pattern; pattern = desc->error_pattern; /* * XXX KDM we need more data passed into this function to match a * custom pattern, and we actually need to implement custom pattern * matching. */ if (pattern & CTL_LUN_PAT_CMD) return (CTL_LUN_PAT_CMD); if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) return (CTL_LUN_PAT_ANY); entry = ctl_get_cmd_entry(ctsio, NULL); filtered_pattern = entry->pattern & pattern; /* * If the user requested specific flags in the pattern (e.g. * CTL_LUN_PAT_RANGE), make sure the command supports all of those * flags. * * If the user did not specify any flags, it doesn't matter whether * or not the command supports the flags. 
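	 *
	 * For example, a descriptor that asks for CTL_LUN_PAT_RANGE only
	 * matches commands whose table entry also advertises an LBA range;
	 * for any other command the masked comparison below yields
	 * CTL_LUN_PAT_NONE and the descriptor is skipped.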
*/ if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != (pattern & ~CTL_LUN_PAT_MASK)) return (CTL_LUN_PAT_NONE); /* * If the user asked for a range check, see if the requested LBA * range overlaps with this command's LBA range. */ if (filtered_pattern & CTL_LUN_PAT_RANGE) { uint64_t lba1; uint64_t len1; ctl_action action; int retval; retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); if (retval != 0) return (CTL_LUN_PAT_NONE); action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, desc->lba_range.len, FALSE); /* * A "pass" means that the LBA ranges don't overlap, so * this doesn't match the user's range criteria. */ if (action == CTL_ACTION_PASS) return (CTL_LUN_PAT_NONE); } return (filtered_pattern); } static void ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) { struct ctl_error_desc *desc, *desc2; mtx_assert(&lun->lun_lock, MA_OWNED); STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { ctl_lun_error_pattern pattern; /* * Check to see whether this particular command matches * the pattern in the descriptor. */ pattern = ctl_cmd_pattern_match(&io->scsiio, desc); if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) continue; switch (desc->lun_error & CTL_LUN_INJ_TYPE) { case CTL_LUN_INJ_ABORTED: ctl_set_aborted(&io->scsiio); break; case CTL_LUN_INJ_MEDIUM_ERR: ctl_set_medium_error(&io->scsiio, (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_OUT); break; case CTL_LUN_INJ_UA: /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET * OCCURRED */ ctl_set_ua(&io->scsiio, 0x29, 0x00); break; case CTL_LUN_INJ_CUSTOM: /* * We're assuming the user knows what he is doing. * Just copy the sense information without doing * checks. */ bcopy(&desc->custom_sense, &io->scsiio.sense_data, MIN(sizeof(desc->custom_sense), sizeof(io->scsiio.sense_data))); io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; io->scsiio.sense_len = SSD_FULL_SIZE; io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; break; case CTL_LUN_INJ_NONE: default: /* * If this is an error injection type we don't know * about, clear the continuous flag (if it is set) * so it will get deleted below. */ desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; break; } /* * By default, each error injection action is a one-shot */ if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) continue; STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); free(desc, M_CTL); } } #ifdef CTL_IO_DELAY static void ctl_datamove_timer_wakeup(void *arg) { union ctl_io *io; io = (union ctl_io *)arg; ctl_datamove(io); } #endif /* CTL_IO_DELAY */ void ctl_datamove(union ctl_io *io) { void (*fe_datamove)(union ctl_io *io); mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); CTL_DEBUG_PRINT(("ctl_datamove\n")); /* No data transferred yet. Frontend must update this when done. 
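	 * kern_data_resid starts out equal to kern_data_len here; the
	 * frontend is expected to subtract whatever it actually moves (the
	 * camsim frontend's cfcs_datamove(), for instance, does
	 * "kern_data_resid -= len_to_copy" for each segment it copies).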
*/ io->scsiio.kern_data_resid = io->scsiio.kern_data_len; #ifdef CTL_TIME_IO if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { char str[256]; char path_str[64]; struct sbuf sb; ctl_scsi_path_string(io, path_str, sizeof(path_str)); sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); sbuf_cat(&sb, path_str); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: ctl_scsi_command_string(&io->scsiio, NULL, &sb); sbuf_printf(&sb, "\n"); sbuf_cat(&sb, path_str); - sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", - io->scsiio.tag_num, io->scsiio.tag_type); + sbuf_printf(&sb, "Tag: 0x%04x/%d, Prio: %d\n", + io->scsiio.tag_num, io->scsiio.tag_type, + io->scsiio.priority); break; case CTL_IO_TASK: - sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " - "Tag Type: %d\n", io->taskio.task_action, + sbuf_printf(&sb, "Task Action: %d Tag: 0x%04x/%d\n", + io->taskio.task_action, io->taskio.tag_num, io->taskio.tag_type); break; default: panic("%s: Invalid CTL I/O type %d\n", __func__, io->io_hdr.io_type); } sbuf_cat(&sb, path_str); sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", (intmax_t)time_uptime - io->io_hdr.start_time); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); } #endif /* CTL_TIME_IO */ #ifdef CTL_IO_DELAY if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; } else { struct ctl_lun *lun; lun = CTL_LUN(io); if ((lun != NULL) && (lun->delay_info.datamove_delay > 0)) { callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; callout_reset(&io->io_hdr.delay_callout, lun->delay_info.datamove_delay * hz, ctl_datamove_timer_wakeup, io); if (lun->delay_info.datamove_type == CTL_DELAY_TYPE_ONESHOT) lun->delay_info.datamove_delay = 0; return; } } #endif /* * This command has been aborted. Set the port status, so we fail * the data move. */ if (io->io_hdr.flags & CTL_FLAG_ABORT) { printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", io->scsiio.tag_num, io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun); io->io_hdr.port_status = 31337; /* * Note that the backend, in this case, will get the * callback in its context. In other cases it may get * called in the frontend's interrupt thread context. */ io->scsiio.be_move_done(io); return; } /* Don't confuse frontend with zero length data move. 
*/ if (io->scsiio.kern_data_len == 0) { io->scsiio.be_move_done(io); return; } fe_datamove = CTL_PORT(io)->fe_datamove; fe_datamove(io); } static void ctl_send_datamove_done(union ctl_io *io, int have_lock) { union ctl_ha_msg msg; #ifdef CTL_TIME_IO struct bintime cur_bt; #endif memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; msg.hdr.original_sc = io; msg.hdr.serializing_sc = io->io_hdr.remote_io; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.scsi.kern_data_resid = io->scsiio.kern_data_resid; msg.scsi.tag_num = io->scsiio.tag_num; msg.scsi.tag_type = io->scsiio.tag_type; msg.scsi.scsi_status = io->scsiio.scsi_status; memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, io->scsiio.sense_len); msg.scsi.sense_len = io->scsiio.sense_len; msg.scsi.port_status = io->io_hdr.port_status; io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { ctl_failover_io(io, /*have_lock*/ have_lock); return; } ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + msg.scsi.sense_len, M_WAITOK); #ifdef CTL_TIME_IO getbinuptime(&cur_bt); bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); bintime_add(&io->io_hdr.dma_bt, &cur_bt); #endif io->io_hdr.num_dmas++; } /* * The DMA to the remote side is done, now we need to tell the other side * we're done so it can continue with its data movement. */ static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) { union ctl_io *io; uint32_t i; io = rq->context; if (rq->ret != CTL_HA_STATUS_SUCCESS) { printf("%s: ISC DMA write failed with error %d", __func__, rq->ret); ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ rq->ret); } ctl_dt_req_free(rq); for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(CTL_LSGLT(io)[i].addr, M_CTL); free(CTL_RSGL(io), M_CTL); CTL_RSGL(io) = NULL; CTL_LSGL(io) = NULL; /* * The data is in local and remote memory, so now we need to send * status (good or back) back to the other side. */ ctl_send_datamove_done(io, /*have_lock*/ 0); } /* * We've moved the data from the host/controller into local memory. Now we * need to push it over to the remote controller's memory. */ static int ctl_datamove_remote_dm_write_cb(union ctl_io *io) { int retval; retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, ctl_datamove_remote_write_cb); return (retval); } static void ctl_datamove_remote_write(union ctl_io *io) { int retval; void (*fe_datamove)(union ctl_io *io); /* * - Get the data from the host/HBA into local memory. * - DMA memory from the local controller to the remote controller. * - Send status back to the remote controller. */ retval = ctl_datamove_remote_sgl_setup(io); if (retval != 0) return; /* Switch the pointer over so the FETD knows what to do */ io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io); /* * Use a custom move done callback, since we need to send completion * back to the other controller, not to the backend on this side. */ io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; fe_datamove = CTL_PORT(io)->fe_datamove; fe_datamove(io); } static int ctl_datamove_remote_dm_read_cb(union ctl_io *io) { uint32_t i; for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(CTL_LSGLT(io)[i].addr, M_CTL); free(CTL_RSGL(io), M_CTL); CTL_RSGL(io) = NULL; CTL_LSGL(io) = NULL; /* * The read is done, now we need to send status (good or bad) back * to the other side. 
*/ ctl_send_datamove_done(io, /*have_lock*/ 0); return (0); } static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) { union ctl_io *io; void (*fe_datamove)(union ctl_io *io); io = rq->context; if (rq->ret != CTL_HA_STATUS_SUCCESS) { printf("%s: ISC DMA read failed with error %d\n", __func__, rq->ret); ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ rq->ret); } ctl_dt_req_free(rq); /* Switch the pointer over so the FETD knows what to do */ io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io); /* * Use a custom move done callback, since we need to send completion * back to the other controller, not to the backend on this side. */ io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; /* XXX KDM add checks like the ones in ctl_datamove? */ fe_datamove = CTL_PORT(io)->fe_datamove; fe_datamove(io); } static int ctl_datamove_remote_sgl_setup(union ctl_io *io) { struct ctl_sg_entry *local_sglist; uint32_t len_to_go; int retval; int i; retval = 0; local_sglist = CTL_LSGL(io); len_to_go = io->scsiio.kern_data_len; /* * The difficult thing here is that the size of the various * S/G segments may be different than the size from the * remote controller. That'll make it harder when DMAing * the data back to the other side. */ for (i = 0; len_to_go > 0; i++) { local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); local_sglist[i].addr = malloc(local_sglist[i].len, M_CTL, M_WAITOK); len_to_go -= local_sglist[i].len; } /* * Reset the number of S/G entries accordingly. The original * number of S/G entries is available in rem_sg_entries. */ io->scsiio.kern_sg_entries = i; return (retval); } static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, ctl_ha_dt_cb callback) { struct ctl_ha_dt_req *rq; struct ctl_sg_entry *remote_sglist, *local_sglist; uint32_t local_used, remote_used, total_used; int i, j, isc_ret; rq = ctl_dt_req_alloc(); /* * If we failed to allocate the request, and if the DMA didn't fail * anyway, set busy status. This is just a resource allocation * failure. */ if ((rq == NULL) && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) ctl_set_busy(&io->scsiio); if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { if (rq != NULL) ctl_dt_req_free(rq); /* * The data move failed. We need to return status back * to the other controller. No point in trying to DMA * data to the remote controller. */ ctl_send_datamove_done(io, /*have_lock*/ 0); return (1); } local_sglist = CTL_LSGL(io); remote_sglist = CTL_RSGL(io); local_used = 0; remote_used = 0; total_used = 0; /* * Pull/push the data over the wire from/to the other controller. * This takes into account the possibility that the local and * remote sglists may not be identical in terms of the size of * the elements and the number of elements. * * One fundamental assumption here is that the length allocated for * both the local and remote sglists is identical. Otherwise, we've * essentially got a coding error of some sort. */ isc_ret = CTL_HA_STATUS_SUCCESS; for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { uint32_t cur_len; uint8_t *tmp_ptr; rq->command = command; rq->context = io; /* * Both pointers should be aligned. But it is possible * that the allocation length is not. They should both * also have enough slack left over at the end, though, * to round up to the next 8 byte boundary. 
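		 *
		 * Each pass of the loop transfers MIN(local bytes left in
		 * segment i, remote bytes left in segment j) and advances
		 * whichever segment was exhausted.  For example, one 128k
		 * local segment against two 64k remote segments is covered
		 * in two passes: i stays put while j advances.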
*/ cur_len = MIN(local_sglist[i].len - local_used, remote_sglist[j].len - remote_used); rq->size = cur_len; tmp_ptr = (uint8_t *)local_sglist[i].addr; tmp_ptr += local_used; #if 0 /* Use physical addresses when talking to ISC hardware */ if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { /* XXX KDM use busdma */ rq->local = vtophys(tmp_ptr); } else rq->local = tmp_ptr; #else KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, ("HA does not support BUS_ADDR")); rq->local = tmp_ptr; #endif tmp_ptr = (uint8_t *)remote_sglist[j].addr; tmp_ptr += remote_used; rq->remote = tmp_ptr; rq->callback = NULL; local_used += cur_len; if (local_used >= local_sglist[i].len) { i++; local_used = 0; } remote_used += cur_len; if (remote_used >= remote_sglist[j].len) { j++; remote_used = 0; } total_used += cur_len; if (total_used >= io->scsiio.kern_data_len) rq->callback = callback; isc_ret = ctl_dt_single(rq); if (isc_ret > CTL_HA_STATUS_SUCCESS) break; } if (isc_ret != CTL_HA_STATUS_WAIT) { rq->ret = isc_ret; callback(rq); } return (0); } static void ctl_datamove_remote_read(union ctl_io *io) { int retval; uint32_t i; /* * This will send an error to the other controller in the case of a * failure. */ retval = ctl_datamove_remote_sgl_setup(io); if (retval != 0) return; retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, ctl_datamove_remote_read_cb); if (retval != 0) { /* * Make sure we free memory if there was an error.. The * ctl_datamove_remote_xfer() function will send the * datamove done message, or call the callback with an * error if there is a problem. */ for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(CTL_LSGLT(io)[i].addr, M_CTL); free(CTL_RSGL(io), M_CTL); CTL_RSGL(io) = NULL; CTL_LSGL(io) = NULL; } } /* * Process a datamove request from the other controller. This is used for * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory * first. Once that is complete, the data gets DMAed into the remote * controller's memory. For reads, we DMA from the remote controller's * memory into our memory first, and then move it out to the FETD. */ static void ctl_datamove_remote(union ctl_io *io) { mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { ctl_failover_io(io, /*have_lock*/ 0); return; } /* * Note that we look for an aborted I/O here, but don't do some of * the other checks that ctl_datamove() normally does. * We don't need to run the datamove delay code, since that should * have been done if need be on the other controller. 
*/ if (io->io_hdr.flags & CTL_FLAG_ABORT) { printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, io->scsiio.tag_num, io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun); io->io_hdr.port_status = 31338; ctl_send_datamove_done(io, /*have_lock*/ 0); return; } if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) ctl_datamove_remote_write(io); else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ctl_datamove_remote_read(io); else { io->io_hdr.port_status = 31339; ctl_send_datamove_done(io, /*have_lock*/ 0); } } static void ctl_process_done(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_port *port = CTL_PORT(io); struct ctl_lun *lun = CTL_LUN(io); void (*fe_done)(union ctl_io *io); union ctl_ha_msg msg; CTL_DEBUG_PRINT(("ctl_process_done\n")); fe_done = port->fe_done; #ifdef CTL_TIME_IO if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { char str[256]; char path_str[64]; struct sbuf sb; ctl_scsi_path_string(io, path_str, sizeof(path_str)); sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); sbuf_cat(&sb, path_str); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: ctl_scsi_command_string(&io->scsiio, NULL, &sb); sbuf_printf(&sb, "\n"); sbuf_cat(&sb, path_str); - sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", - io->scsiio.tag_num, io->scsiio.tag_type); + sbuf_printf(&sb, "Tag: 0x%04x/%d, Prio: %d\n", + io->scsiio.tag_num, io->scsiio.tag_type, + io->scsiio.priority); break; case CTL_IO_TASK: - sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " - "Tag Type: %d\n", io->taskio.task_action, + sbuf_printf(&sb, "Task Action: %d Tag: 0x%04x/%d\n", + io->taskio.task_action, io->taskio.tag_num, io->taskio.tag_type); break; default: panic("%s: Invalid CTL I/O type %d\n", __func__, io->io_hdr.io_type); } sbuf_cat(&sb, path_str); sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", (intmax_t)time_uptime - io->io_hdr.start_time); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); } #endif /* CTL_TIME_IO */ switch (io->io_hdr.io_type) { case CTL_IO_SCSI: break; case CTL_IO_TASK: if (ctl_debug & CTL_DEBUG_INFO) ctl_io_error_print(io, NULL); fe_done(io); return; default: panic("%s: Invalid CTL I/O type %d\n", __func__, io->io_hdr.io_type); } if (lun == NULL) { CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", io->io_hdr.nexus.targ_mapped_lun)); goto bailout; } mtx_lock(&lun->lun_lock); /* * Check to see if we have any informational exception and status * of this command can be modified to report it in form of either * RECOVERED ERROR or NO SENSE, depending on MRIE mode page field. */ if (lun->ie_reported == 0 && lun->ie_asc != 0 && io->io_hdr.status == CTL_SUCCESS && (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) { uint8_t mrie = lun->MODE_IE.mrie; uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) || (lun->MODE_VER.byte3 & SMS_VER_PER)); if (((mrie == SIEP_MRIE_REC_COND && per) || mrie == SIEP_MRIE_REC_UNCOND || mrie == SIEP_MRIE_NO_SENSE) && (ctl_get_cmd_entry(&io->scsiio, NULL)->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { ctl_set_sense(&io->scsiio, /*current_error*/ 1, /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ? SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR, /*asc*/ lun->ie_asc, /*ascq*/ lun->ie_ascq, SSD_ELEM_NONE); lun->ie_reported = 1; } } else if (lun->ie_reported < 0) lun->ie_reported = 0; /* * Check to see if we have any errors to inject here. We only * inject errors for commands that don't already have errors set. 
*/ if (!STAILQ_EMPTY(&lun->error_list) && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) ctl_inject_error(lun, io); /* * XXX KDM how do we treat commands that aren't completed * successfully? * * XXX KDM should we also track I/O latency? */ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && io->io_hdr.io_type == CTL_IO_SCSI) { int type; #ifdef CTL_TIME_IO struct bintime bt; getbinuptime(&bt); bintime_sub(&bt, &io->io_hdr.start_bt); #endif if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) type = CTL_STATS_READ; else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) type = CTL_STATS_WRITE; else type = CTL_STATS_NO_IO; lun->stats.bytes[type] += io->scsiio.kern_total_len; lun->stats.operations[type] ++; lun->stats.dmas[type] += io->io_hdr.num_dmas; #ifdef CTL_TIME_IO bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt); bintime_add(&lun->stats.time[type], &bt); #endif mtx_lock(&port->port_lock); port->stats.bytes[type] += io->scsiio.kern_total_len; port->stats.operations[type] ++; port->stats.dmas[type] += io->io_hdr.num_dmas; #ifdef CTL_TIME_IO bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt); bintime_add(&port->stats.time[type], &bt); #endif mtx_unlock(&port->port_lock); } /* * Run through the blocked queue of this I/O and see if anything * can be unblocked, now that this I/O is done and will be removed. * We need to do it before removal to have OOA position to start. */ ctl_try_unblock_others(lun, io, TRUE); /* * Remove this from the OOA queue. */ TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); #ifdef CTL_TIME_IO if (TAILQ_EMPTY(&lun->ooa_queue)) lun->last_busy = getsbinuptime(); #endif /* * If the LUN has been invalidated, free it if there is nothing * left on its OOA queue. */ if ((lun->flags & CTL_LUN_INVALID) && TAILQ_EMPTY(&lun->ooa_queue)) { mtx_unlock(&lun->lun_lock); ctl_free_lun(lun); } else mtx_unlock(&lun->lun_lock); bailout: /* * If this command has been aborted, make sure we set the status * properly. The FETD is responsible for freeing the I/O and doing * whatever it needs to do to clean up its state. */ if (io->io_hdr.flags & CTL_FLAG_ABORT) ctl_set_task_aborted(&io->scsiio); /* * If enabled, print command error status. */ if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && (ctl_debug & CTL_DEBUG_INFO) != 0) ctl_io_error_print(io, NULL); /* * Tell the FETD or the other shelf controller we're done with this * command. Note that only SCSI commands get to this point. Task * management commands are completed above. */ if ((softc->ha_mode != CTL_HA_MODE_XFER) && (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) { memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_FINISH_IO; msg.hdr.serializing_sc = io->io_hdr.remote_io; msg.hdr.nexus = io->io_hdr.nexus; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.scsi) - sizeof(msg.scsi.sense_data), M_WAITOK); } fe_done(io); } /* * Front end should call this if it doesn't do autosense. When the request * sense comes back in from the initiator, we'll dequeue this and send it. 
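 *
 * The sense data is stashed in lun->pending_sense[], indexed by port and
 * then by initiator within the port; ctl_scsiio_precheck() clears that
 * slot again as soon as the initiator sends anything other than a
 * REQUEST SENSE.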
*/ int ctl_queue_sense(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_port *port = CTL_PORT(io); struct ctl_lun *lun; struct scsi_sense_data *ps; uint32_t initidx, p, targ_lun; CTL_DEBUG_PRINT(("ctl_queue_sense\n")); targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); /* * LUN lookup will likely move to the ctl_work_thread() once we * have our new queueing infrastructure (that doesn't put things on * a per-LUN queue initially). That is so that we can handle * things like an INQUIRY to a LUN that we don't have enabled. We * can't deal with that right now. * If we don't have a LUN for this, just toss the sense information. */ mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); goto bailout; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); initidx = ctl_get_initindex(&io->io_hdr.nexus); p = initidx / CTL_MAX_INIT_PER_PORT; if (lun->pending_sense[p] == NULL) { lun->pending_sense[p] = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT, M_CTL, M_NOWAIT | M_ZERO); } if ((ps = lun->pending_sense[p]) != NULL) { ps += initidx % CTL_MAX_INIT_PER_PORT; memset(ps, 0, sizeof(*ps)); memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len); } mtx_unlock(&lun->lun_lock); bailout: ctl_free_io(io); return (CTL_RETVAL_COMPLETE); } /* * Primary command inlet from frontend ports. All SCSI and task I/O * requests must go through this function. */ int ctl_queue(union ctl_io *io) { struct ctl_port *port = CTL_PORT(io); CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); #ifdef CTL_TIME_IO io->io_hdr.start_time = time_uptime; getbinuptime(&io->io_hdr.start_bt); #endif /* CTL_TIME_IO */ /* Map FE-specific LUN ID into global one. */ io->io_hdr.nexus.targ_mapped_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: case CTL_IO_TASK: if (ctl_debug & CTL_DEBUG_CDB) ctl_io_print(io); ctl_enqueue_incoming(io); break; default: printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); return (EINVAL); } return (CTL_RETVAL_COMPLETE); } #ifdef CTL_IO_DELAY static void ctl_done_timer_wakeup(void *arg) { union ctl_io *io; io = (union ctl_io *)arg; ctl_done(io); } #endif /* CTL_IO_DELAY */ void ctl_serseq_done(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); if (lun->be_lun == NULL || lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF) return; mtx_lock(&lun->lun_lock); io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE; ctl_try_unblock_others(lun, io, FALSE); mtx_unlock(&lun->lun_lock); } void ctl_done(union ctl_io *io) { /* * Enable this to catch duplicate completion issues. */ #if 0 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { printf("%s: type %d msg %d cdb %x iptl: " "%u:%u:%u tag 0x%04x " "flag %#x status %x\n", __func__, io->io_hdr.io_type, io->io_hdr.msg_type, io->scsiio.cdb[0], io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun, (io->io_hdr.io_type == CTL_IO_TASK) ? io->taskio.tag_num : io->scsiio.tag_num, io->io_hdr.flags, io->io_hdr.status); } else io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; #endif /* * This is an internal copy of an I/O, and should not go through * the normal done processing logic. 
*/ if (io->io_hdr.flags & CTL_FLAG_INT_COPY) return; #ifdef CTL_IO_DELAY if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; } else { struct ctl_lun *lun = CTL_LUN(io); if ((lun != NULL) && (lun->delay_info.done_delay > 0)) { callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; callout_reset(&io->io_hdr.delay_callout, lun->delay_info.done_delay * hz, ctl_done_timer_wakeup, io); if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) lun->delay_info.done_delay = 0; return; } } #endif /* CTL_IO_DELAY */ ctl_enqueue_done(io); } static void ctl_work_thread(void *arg) { struct ctl_thread *thr = (struct ctl_thread *)arg; struct ctl_softc *softc = thr->ctl_softc; union ctl_io *io; int retval; CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); thread_lock(curthread); sched_prio(curthread, PUSER - 1); thread_unlock(curthread); while (!softc->shutdown) { /* * We handle the queues in this order: * - ISC * - done queue (to free up resources, unblock other commands) * - incoming queue * - RtR queue * * If those queues are empty, we break out of the loop and * go to sleep. */ mtx_lock(&thr->queue_lock); io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->isc_queue, links); mtx_unlock(&thr->queue_lock); ctl_handle_isc(io); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->done_queue, links); /* clear any blocked commands, call fe_done */ mtx_unlock(&thr->queue_lock); ctl_process_done(io); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); mtx_unlock(&thr->queue_lock); if (io->io_hdr.io_type == CTL_IO_TASK) ctl_run_task(io); else ctl_scsiio_precheck(softc, &io->scsiio); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); mtx_unlock(&thr->queue_lock); retval = ctl_scsiio(&io->scsiio); if (retval != CTL_RETVAL_COMPLETE) CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); continue; } /* Sleep until we have something to do. 
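		 * mtx_sleep() with PDROP releases queue_lock for the sleep
		 * and does not retake it on wakeup; the wakeup(thr) calls in
		 * the ctl_enqueue_*() functions wake us up, and the top of
		 * the loop reacquires the lock.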
*/ mtx_sleep(thr, &thr->queue_lock, PDROP, "-", 0); } thr->thread = NULL; kthread_exit(); } static void ctl_thresh_thread(void *arg) { struct ctl_softc *softc = (struct ctl_softc *)arg; struct ctl_lun *lun; struct ctl_logical_block_provisioning_page *page; const char *attr; union ctl_ha_msg msg; uint64_t thres, val; int i, e, set; CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); thread_lock(curthread); sched_prio(curthread, PUSER - 1); thread_unlock(curthread); while (!softc->shutdown) { mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { if ((lun->flags & CTL_LUN_DISABLED) || (lun->flags & CTL_LUN_NO_MEDIA) || lun->backend->lun_attr == NULL) continue; if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && softc->ha_mode == CTL_HA_MODE_XFER) continue; if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0) continue; e = 0; page = &lun->MODE_LBP; for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) continue; thres = scsi_4btoul(page->descr[i].count); thres <<= CTL_LBP_EXPONENT; switch (page->descr[i].resource) { case 0x01: attr = "blocksavail"; break; case 0x02: attr = "blocksused"; break; case 0xf1: attr = "poolblocksavail"; break; case 0xf2: attr = "poolblocksused"; break; default: continue; } mtx_unlock(&softc->ctl_lock); // XXX val = lun->backend->lun_attr(lun->be_lun, attr); mtx_lock(&softc->ctl_lock); if (val == UINT64_MAX) continue; if ((page->descr[i].flags & SLBPPD_ARMING_MASK) == SLBPPD_ARMING_INC) e = (val >= thres); else e = (val <= thres); if (e) break; } mtx_lock(&lun->lun_lock); if (e) { scsi_u64to8b((uint8_t *)&page->descr[i] - (uint8_t *)page, lun->ua_tpt_info); if (lun->lasttpt == 0 || time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { lun->lasttpt = time_uptime; ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); set = 1; } else set = 0; } else { lun->lasttpt = 0; ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); set = -1; } mtx_unlock(&lun->lun_lock); if (set != 0 && lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { /* Send msg to other side. 
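				 * ctl_ha_msg_send() may sleep (M_WAITOK), so
				 * ctl_lock is dropped around the call, just
				 * as it is around the lun_attr() call above.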
*/ bzero(&msg.ua, sizeof(msg.ua)); msg.hdr.msg_type = CTL_MSG_UA; msg.hdr.nexus.initid = -1; msg.hdr.nexus.targ_port = -1; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.ua.ua_all = 1; msg.ua.ua_set = (set > 0); msg.ua.ua_type = CTL_UA_THIN_PROV_THRES; memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8); mtx_unlock(&softc->ctl_lock); // XXX ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), M_WAITOK); mtx_lock(&softc->ctl_lock); } } mtx_sleep(&softc->thresh_thread, &softc->ctl_lock, PDROP, "-", CTL_LBP_PERIOD * hz); } softc->thresh_thread = NULL; kthread_exit(); } static void ctl_enqueue_incoming(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_thread *thr; u_int idx; idx = (io->io_hdr.nexus.targ_port * 127 + io->io_hdr.nexus.initid) % worker_threads; thr = &softc->threads[idx]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_rtr(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_done(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_isc(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } /* * vim: ts=8 */ Index: head/sys/cam/ctl/ctl_frontend_cam_sim.c =================================================================== --- head/sys/cam/ctl/ctl_frontend_cam_sim.c (revision 367043) +++ head/sys/cam/ctl/ctl_frontend_cam_sim.c (revision 367044) @@ -1,790 +1,791 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_cam_sim.c#4 $ */ /* * CTL frontend to CAM SIM interface. This allows access to CTL LUNs via * the da(4) and pass(4) drivers from inside the system. * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define io_ptr spriv_ptr1 struct cfcs_io { union ccb *ccb; }; struct cfcs_softc { struct ctl_port port; char port_name[32]; struct cam_sim *sim; struct cam_devq *devq; struct cam_path *path; uint64_t wwnn; uint64_t wwpn; uint32_t cur_tag_num; int online; }; /* * We can't handle CCBs with these flags. For the most part, we just don't * handle physical addresses yet. That would require mapping things in * order to do the copy. */ #define CFCS_BAD_CCB_FLAGS (CAM_DATA_ISPHYS | CAM_CDB_PHYS | CAM_SENSE_PTR | \ CAM_SENSE_PHYS) static int cfcs_init(void); static int cfcs_shutdown(void); static void cfcs_poll(struct cam_sim *sim); static void cfcs_online(void *arg); static void cfcs_offline(void *arg); static void cfcs_datamove(union ctl_io *io); static void cfcs_done(union ctl_io *io); void cfcs_action(struct cam_sim *sim, union ccb *ccb); struct cfcs_softc cfcs_softc; /* * This is primarily intended to allow for error injection to test the CAM * sense data and sense residual handling code. This sets the maximum * amount of SCSI sense data that we will report to CAM. */ static int cfcs_max_sense = sizeof(struct scsi_sense_data); SYSCTL_NODE(_kern_cam, OID_AUTO, ctl2cam, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer SIM frontend"); SYSCTL_INT(_kern_cam_ctl2cam, OID_AUTO, max_sense, CTLFLAG_RW, &cfcs_max_sense, 0, "Maximum sense data size"); static struct ctl_frontend cfcs_frontend = { .name = "camsim", .init = cfcs_init, .shutdown = cfcs_shutdown, }; CTL_FRONTEND_DECLARE(ctlcfcs, cfcs_frontend); static int cfcs_init(void) { struct cfcs_softc *softc; struct ctl_port *port; int retval; softc = &cfcs_softc; bzero(softc, sizeof(*softc)); port = &softc->port; port->frontend = &cfcs_frontend; port->port_type = CTL_PORT_INTERNAL; /* XXX KDM what should the real number be here? */ port->num_requested_ctl_io = 4096; snprintf(softc->port_name, sizeof(softc->port_name), "camsim"); port->port_name = softc->port_name; port->port_online = cfcs_online; port->port_offline = cfcs_offline; port->onoff_arg = softc; port->fe_datamove = cfcs_datamove; port->fe_done = cfcs_done; port->targ_port = -1; retval = ctl_port_register(port); if (retval != 0) { printf("%s: ctl_port_register() failed with error %d!\n", __func__, retval); return (retval); } /* * If the CTL frontend didn't tell us what our WWNN/WWPN is, go * ahead and set something random. 
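	 * The WWPN is then derived from that WWNN by adding the target
	 * port number plus one, so the two names stay related but distinct.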
*/ if (port->wwnn == 0) { uint64_t random_bits; arc4rand(&random_bits, sizeof(random_bits), 0); softc->wwnn = (random_bits & 0x0000000fffffff00ULL) | /* Company ID */ 0x5000000000000000ULL | /* NL-Port */ 0x0300; softc->wwpn = softc->wwnn + port->targ_port + 1; ctl_port_set_wwns(port, true, softc->wwnn, true, softc->wwpn); } else { softc->wwnn = port->wwnn; softc->wwpn = port->wwpn; } softc->devq = cam_simq_alloc(port->num_requested_ctl_io); if (softc->devq == NULL) { printf("%s: error allocating devq\n", __func__); retval = ENOMEM; goto bailout; } softc->sim = cam_sim_alloc(cfcs_action, cfcs_poll, softc->port_name, softc, /*unit*/ 0, NULL, 1, port->num_requested_ctl_io, softc->devq); if (softc->sim == NULL) { printf("%s: error allocating SIM\n", __func__); retval = ENOMEM; goto bailout; } if (xpt_bus_register(softc->sim, NULL, 0) != CAM_SUCCESS) { printf("%s: error registering SIM\n", __func__); retval = ENOMEM; goto bailout; } if (xpt_create_path(&softc->path, /*periph*/NULL, cam_sim_path(softc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { printf("%s: error creating path\n", __func__); xpt_bus_deregister(cam_sim_path(softc->sim)); retval = EINVAL; goto bailout; } return (retval); bailout: if (softc->sim) cam_sim_free(softc->sim, /*free_devq*/ TRUE); else if (softc->devq) cam_simq_free(softc->devq); return (retval); } static int cfcs_shutdown(void) { struct cfcs_softc *softc = &cfcs_softc; struct ctl_port *port = &softc->port; int error; ctl_port_offline(port); xpt_free_path(softc->path); xpt_bus_deregister(cam_sim_path(softc->sim)); cam_sim_free(softc->sim, /*free_devq*/ TRUE); if ((error = ctl_port_deregister(port)) != 0) printf("%s: cam_sim port deregistration failed\n", __func__); return (error); } static void cfcs_poll(struct cam_sim *sim) { } static void cfcs_onoffline(void *arg, int online) { struct cfcs_softc *softc = (struct cfcs_softc *)arg; union ccb *ccb; softc->online = online; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { printf("%s: unable to allocate CCB for rescan\n", __func__); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(softc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { printf("%s: can't allocate path for rescan\n", __func__); xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } static void cfcs_online(void *arg) { cfcs_onoffline(arg, /*online*/ 1); } static void cfcs_offline(void *arg) { cfcs_onoffline(arg, /*online*/ 0); } /* * This function is very similar to ctl_ioctl_do_datamove(). Is there a * way to combine the functionality? * * XXX KDM may need to move this into a thread. We're doing a bcopy in the * caller's context, which will usually be the backend. That may not be a * good thing. */ static void cfcs_datamove(union ctl_io *io) { union ccb *ccb; bus_dma_segment_t cam_sg_entry, *cam_sglist; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; int cam_sg_count, ctl_sg_count, cam_sg_start; int cam_sg_offset; int len_to_copy; int ctl_watermark, cam_watermark; int i, j; ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; /* * Note that we have a check in cfcs_action() to make sure that any * CCBs with "bad" flags are returned with CAM_REQ_INVALID. This * is just to make sure no one removes that check without updating * this code to provide the additional functionality necessary to * support those modes of operation. 
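	 * (The rejected flags are the physical-address and sense-pointer
	 * variants; supporting them would mean mapping those buffers before
	 * the bcopy() loop below can touch them.)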
*/ KASSERT(((ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS) == 0), ("invalid " "CAM flags %#x", (ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS))); /* * Simplify things on both sides by putting single buffers into a * single entry S/G list. */ switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) { case CAM_DATA_SG: { int len_seen; cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr; cam_sg_count = ccb->csio.sglist_cnt; cam_sg_start = cam_sg_count; cam_sg_offset = 0; for (i = 0, len_seen = 0; i < cam_sg_count; i++) { if ((len_seen + cam_sglist[i].ds_len) >= io->scsiio.kern_rel_offset) { cam_sg_start = i; cam_sg_offset = io->scsiio.kern_rel_offset - len_seen; break; } len_seen += cam_sglist[i].ds_len; } break; } case CAM_DATA_VADDR: cam_sglist = &cam_sg_entry; cam_sglist[0].ds_len = ccb->csio.dxfer_len; cam_sglist[0].ds_addr = (bus_addr_t)(uintptr_t)ccb->csio.data_ptr; cam_sg_count = 1; cam_sg_start = 0; cam_sg_offset = io->scsiio.kern_rel_offset; break; default: panic("Invalid CAM flags %#x", ccb->ccb_h.flags); } if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; ctl_sg_count = io->scsiio.kern_sg_entries; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = io->scsiio.kern_data_len; ctl_sg_count = 1; } ctl_watermark = 0; cam_watermark = cam_sg_offset; for (i = cam_sg_start, j = 0; i < cam_sg_count && j < ctl_sg_count;) { uint8_t *cam_ptr, *ctl_ptr; len_to_copy = MIN(cam_sglist[i].ds_len - cam_watermark, ctl_sglist[j].len - ctl_watermark); cam_ptr = (uint8_t *)(uintptr_t)cam_sglist[i].ds_addr; cam_ptr = cam_ptr + cam_watermark; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { /* * XXX KDM fix this! */ panic("need to implement bus address support"); #if 0 kern_ptr = bus_to_virt(kern_sglist[j].addr); #endif } else ctl_ptr = (uint8_t *)ctl_sglist[j].addr; ctl_ptr = ctl_ptr + ctl_watermark; if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) { CTL_DEBUG_PRINT(("%s: copying %d bytes to CAM\n", __func__, len_to_copy)); CTL_DEBUG_PRINT(("%s: from %p to %p\n", ctl_ptr, __func__, cam_ptr)); bcopy(ctl_ptr, cam_ptr, len_to_copy); } else { CTL_DEBUG_PRINT(("%s: copying %d bytes from CAM\n", __func__, len_to_copy)); CTL_DEBUG_PRINT(("%s: from %p to %p\n", cam_ptr, __func__, ctl_ptr)); bcopy(cam_ptr, ctl_ptr, len_to_copy); } io->scsiio.ext_data_filled += len_to_copy; io->scsiio.kern_data_resid -= len_to_copy; cam_watermark += len_to_copy; if (cam_sglist[i].ds_len == cam_watermark) { i++; cam_watermark = 0; } ctl_watermark += len_to_copy; if (ctl_sglist[j].len == ctl_watermark) { j++; ctl_watermark = 0; } } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = NULL; io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; ccb->csio.resid = ccb->csio.dxfer_len - io->scsiio.ext_data_filled; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(ccb); } io->scsiio.be_move_done(io); } static void cfcs_done(union ctl_io *io) { union ccb *ccb; ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; if (ccb == NULL) { ctl_free_io(io); return; } /* * At this point we should have status. If we don't, that's a bug. */ KASSERT(((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE), ("invalid CTL status %#x", io->io_hdr.status)); /* * Translate CTL status to CAM status. 
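	 * CTL_SUCCESS becomes CAM_REQ_CMP, CTL_SCSI_ERROR carries the SCSI
	 * status and autosense data across, CTL_CMD_ABORTED becomes
	 * CAM_REQ_ABORTED, and anything else is reported as CAM_REQ_CMP_ERR;
	 * any completion other than CAM_REQ_CMP also freezes the device
	 * queue below.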
*/ if (ccb->ccb_h.func_code == XPT_SCSI_IO) { ccb->csio.resid = ccb->csio.dxfer_len - io->scsiio.ext_data_filled; } ccb->ccb_h.status &= ~CAM_STATUS_MASK; switch (io->io_hdr.status & CTL_STATUS_MASK) { case CTL_SUCCESS: ccb->ccb_h.status |= CAM_REQ_CMP; break; case CTL_SCSI_ERROR: ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; ccb->csio.scsi_status = io->scsiio.scsi_status; bcopy(&io->scsiio.sense_data, &ccb->csio.sense_data, min(io->scsiio.sense_len, ccb->csio.sense_len)); if (ccb->csio.sense_len > io->scsiio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - io->scsiio.sense_len; else ccb->csio.sense_resid = 0; if ((ccb->csio.sense_len - ccb->csio.sense_resid) > cfcs_max_sense) { ccb->csio.sense_resid = ccb->csio.sense_len - cfcs_max_sense; } break; case CTL_CMD_ABORTED: ccb->ccb_h.status |= CAM_REQ_ABORTED; break; case CTL_ERROR: default: ccb->ccb_h.status |= CAM_REQ_CMP_ERR; break; } ctl_free_io(io); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } xpt_done(ccb); } void cfcs_action(struct cam_sim *sim, union ccb *ccb) { struct cfcs_softc *softc; int err; softc = (struct cfcs_softc *)cam_sim_softc(sim); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: { union ctl_io *io; struct ccb_scsiio *csio; csio = &ccb->csio; /* * Catch CCB flags, like physical address flags, that * indicate situations we currently can't handle. */ if (ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS) { ccb->ccb_h.status = CAM_REQ_INVALID; printf("%s: bad CCB flags %#x (all flags %#x)\n", __func__, ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS, ccb->ccb_h.flags); xpt_done(ccb); return; } /* * If we aren't online, there are no devices to see. */ if (softc->online == 0) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io == NULL) { printf("%s: can't allocate ctl_io\n", __func__); ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } ctl_zero_io(io); /* Save pointers on both sides */ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb; ccb->ccb_h.io_ptr = io; /* * Only SCSI I/O comes down this path, resets, etc. come * down via the XPT_RESET_BUS/LUN CCBs below. */ io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid = 1; io->io_hdr.nexus.targ_port = softc->port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); + io->scsiio.priority = csio->priority; /* * This tag scheme isn't the best, since we could in theory * have a very long-lived I/O and tag collision, especially * in a high I/O environment. But it should work well * enough for now. Since we're using unsigned ints, * they'll just wrap around. 
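 * The tag comes from a per-softc counter bumped with
 * atomic_fetchadd_32(), so no lock is needed here and the counter
 * silently wraps at 2^32.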
*/ io->scsiio.tag_num = atomic_fetchadd_32(&softc->cur_tag_num, 1); csio->tag_id = io->scsiio.tag_num; switch (csio->tag_action) { case CAM_TAG_ACTION_NONE: io->scsiio.tag_type = CTL_TAG_UNTAGGED; break; case MSG_SIMPLE_TASK: io->scsiio.tag_type = CTL_TAG_SIMPLE; break; case MSG_HEAD_OF_QUEUE_TASK: io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case MSG_ORDERED_TASK: io->scsiio.tag_type = CTL_TAG_ORDERED; break; case MSG_ACA_TASK: io->scsiio.tag_type = CTL_TAG_ACA; break; default: io->scsiio.tag_type = CTL_TAG_UNTAGGED; printf("%s: unhandled tag type %#x!!\n", __func__, csio->tag_action); break; } if (csio->cdb_len > sizeof(io->scsiio.cdb)) { printf("%s: WARNING: CDB len %d > ctl_io space %zd\n", __func__, csio->cdb_len, sizeof(io->scsiio.cdb)); } io->scsiio.cdb_len = min(csio->cdb_len, sizeof(io->scsiio.cdb)); bcopy(scsiio_cdb_ptr(csio), io->scsiio.cdb, io->scsiio.cdb_len); ccb->ccb_h.status |= CAM_SIM_QUEUED; err = ctl_queue(io); if (err != CTL_RETVAL_COMPLETE) { printf("%s: func %d: error %d returned by " "ctl_queue()!\n", __func__, ccb->ccb_h.func_code, err); ctl_free_io(io); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } break; } case XPT_ABORT: { union ctl_io *io; union ccb *abort_ccb; abort_ccb = ccb->cab.abort_ccb; if (abort_ccb->ccb_h.func_code != XPT_SCSI_IO) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); } /* * If we aren't online, there are no devices to talk to. */ if (softc->online == 0) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io == NULL) { ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } ctl_zero_io(io); /* Save pointers on both sides */ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb; ccb->ccb_h.io_ptr = io; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid = 1; io->io_hdr.nexus.targ_port = softc->port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); io->taskio.task_action = CTL_TASK_ABORT_TASK; io->taskio.tag_num = abort_ccb->csio.tag_id; switch (abort_ccb->csio.tag_action) { case CAM_TAG_ACTION_NONE: io->taskio.tag_type = CTL_TAG_UNTAGGED; break; case MSG_SIMPLE_TASK: io->taskio.tag_type = CTL_TAG_SIMPLE; break; case MSG_HEAD_OF_QUEUE_TASK: io->taskio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case MSG_ORDERED_TASK: io->taskio.tag_type = CTL_TAG_ORDERED; break; case MSG_ACA_TASK: io->taskio.tag_type = CTL_TAG_ACA; break; default: io->taskio.tag_type = CTL_TAG_UNTAGGED; printf("%s: unhandled tag type %#x!!\n", __func__, abort_ccb->csio.tag_action); break; } err = ctl_queue(io); if (err != CTL_RETVAL_COMPLETE) { printf("%s func %d: error %d returned by " "ctl_queue()!\n", __func__, ccb->ccb_h.func_code, err); ctl_free_io(io); } break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_fc *fc; cts = &ccb->cts; scsi = &cts->proto_specific.scsi; fc = &cts->xport_specific.fc; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_FC; cts->transport_version = 0; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; fc->valid = CTS_FC_VALID_SPEED; fc->bitrate = 800000; fc->wwnn = softc->wwnn; fc->wwpn = softc->wwpn; fc->port = softc->port.targ_port; fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_SET_TRAN_SETTINGS: /* XXX KDM should we actually do 
something here? */ ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_RESET_BUS: case XPT_RESET_DEV: { union ctl_io *io; /* * If we aren't online, there are no devices to talk to. */ if (softc->online == 0) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io == NULL) { ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } ctl_zero_io(io); /* Save pointers on both sides */ if (ccb->ccb_h.func_code == XPT_RESET_DEV) io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb; ccb->ccb_h.io_ptr = io; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid = 1; io->io_hdr.nexus.targ_port = softc->port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); if (ccb->ccb_h.func_code == XPT_RESET_BUS) io->taskio.task_action = CTL_TASK_BUS_RESET; else io->taskio.task_action = CTL_TASK_LUN_RESET; err = ctl_queue(io); if (err != CTL_RETVAL_COMPLETE) { printf("%s func %d: error %d returned by " "ctl_queue()!\n", __func__, ccb->ccb_h.func_code, err); ctl_free_io(io); } break; } case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi; cpi = &ccb->cpi; cpi->version_num = 0; cpi->hba_inquiry = PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_EXTLUNS; cpi->hba_eng_cnt = 0; cpi->max_target = 0; cpi->max_lun = 1024; /* Do we really have a limit? */ cpi->maxio = 1024 * 1024; cpi->async_flags = 0; cpi->hpath_id = 0; cpi->initiator_id = 1; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "FreeBSD", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = 0; cpi->bus_id = 0; cpi->base_transfer_speed = 800000; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC2; /* * Pretend to be Fibre Channel. */ cpi->transport = XPORT_FC; cpi->transport_version = 0; cpi->xport_specific.fc.wwnn = softc->wwnn; cpi->xport_specific.fc.wwpn = softc->wwpn; cpi->xport_specific.fc.port = softc->port.targ_port; cpi->xport_specific.fc.bitrate = 8 * 1000 * 1000; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_PROVIDE_FAIL; printf("%s: unsupported CCB type %#x\n", __func__, ccb->ccb_h.func_code); xpt_done(ccb); break; } } Index: head/sys/cam/ctl/ctl_frontend_iscsi.c =================================================================== --- head/sys/cam/ctl/ctl_frontend_iscsi.c (revision 367043) +++ head/sys/cam/ctl/ctl_frontend_iscsi.c (revision 367044) @@ -1,3039 +1,3041 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2012 The FreeBSD Foundation * All rights reserved. * * This software was developed by Edward Tomasz Napierala under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * CTL frontend for the iSCSI protocol. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ICL_KERNEL_PROXY #include #endif #ifdef ICL_KERNEL_PROXY FEATURE(cfiscsi_kernel_proxy, "iSCSI target built with ICL_KERNEL_PROXY"); #endif static MALLOC_DEFINE(M_CFISCSI, "cfiscsi", "Memory used for CTL iSCSI frontend"); static uma_zone_t cfiscsi_data_wait_zone; SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, iscsi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer iSCSI Frontend"); static int debug = 1; SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, debug, CTLFLAG_RWTUN, &debug, 1, "Enable debug messages"); static int ping_timeout = 5; SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, ping_timeout, CTLFLAG_RWTUN, &ping_timeout, 5, "Interval between ping (NOP-Out) requests, in seconds"); static int login_timeout = 60; SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, login_timeout, CTLFLAG_RWTUN, &login_timeout, 60, "Time to wait for ctld(8) to finish Login Phase, in seconds"); static int maxtags = 256; SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, maxtags, CTLFLAG_RWTUN, &maxtags, 0, "Max number of requests queued by initiator"); #define CFISCSI_DEBUG(X, ...) \ do { \ if (debug > 1) { \ printf("%s: " X "\n", \ __func__, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_WARN(X, ...) \ do { \ if (debug > 0) { \ printf("WARNING: %s: " X "\n", \ __func__, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_SESSION_DEBUG(S, X, ...) \ do { \ if (debug > 1) { \ printf("%s: %s (%s): " X "\n", \ __func__, S->cs_initiator_addr, \ S->cs_initiator_name, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_SESSION_WARN(S, X, ...) 
\ do { \ if (debug > 0) { \ printf("WARNING: %s (%s): " X "\n", \ S->cs_initiator_addr, \ S->cs_initiator_name, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_SESSION_LOCK(X) mtx_lock(&X->cs_lock) #define CFISCSI_SESSION_UNLOCK(X) mtx_unlock(&X->cs_lock) #define CFISCSI_SESSION_LOCK_ASSERT(X) mtx_assert(&X->cs_lock, MA_OWNED) #define CONN_SESSION(X) ((struct cfiscsi_session *)(X)->ic_prv0) #define PDU_SESSION(X) CONN_SESSION((X)->ip_conn) struct cfiscsi_priv { void *request; uint32_t expdatasn; uint32_t r2tsn; }; #define PRIV(io) \ ((struct cfiscsi_priv *)&(io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND]) #define PRIV_REQUEST(io) PRIV(io)->request #define PRIV_EXPDATASN(io) PRIV(io)->expdatasn #define PRIV_R2TSN(io) PRIV(io)->r2tsn static int cfiscsi_init(void); static int cfiscsi_shutdown(void); static void cfiscsi_online(void *arg); static void cfiscsi_offline(void *arg); static int cfiscsi_info(void *arg, struct sbuf *sb); static int cfiscsi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static void cfiscsi_datamove(union ctl_io *io); static void cfiscsi_datamove_in(union ctl_io *io); static void cfiscsi_datamove_out(union ctl_io *io); static void cfiscsi_done(union ctl_io *io); static bool cfiscsi_pdu_update_cmdsn(const struct icl_pdu *request); static void cfiscsi_pdu_handle_nop_out(struct icl_pdu *request); static void cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request); static void cfiscsi_pdu_handle_task_request(struct icl_pdu *request); static void cfiscsi_pdu_handle_data_out(struct icl_pdu *request); static void cfiscsi_pdu_handle_logout_request(struct icl_pdu *request); static void cfiscsi_session_terminate(struct cfiscsi_session *cs); static struct cfiscsi_data_wait *cfiscsi_data_wait_new( struct cfiscsi_session *cs, union ctl_io *io, uint32_t initiator_task_tag, uint32_t *target_transfer_tagp); static void cfiscsi_data_wait_free(struct cfiscsi_session *cs, struct cfiscsi_data_wait *cdw); static struct cfiscsi_target *cfiscsi_target_find(struct cfiscsi_softc *softc, const char *name, uint16_t tag); static struct cfiscsi_target *cfiscsi_target_find_or_create( struct cfiscsi_softc *softc, const char *name, const char *alias, uint16_t tag); static void cfiscsi_target_release(struct cfiscsi_target *ct); static void cfiscsi_session_delete(struct cfiscsi_session *cs); static struct cfiscsi_softc cfiscsi_softc; static struct ctl_frontend cfiscsi_frontend = { .name = "iscsi", .init = cfiscsi_init, .ioctl = cfiscsi_ioctl, .shutdown = cfiscsi_shutdown, }; CTL_FRONTEND_DECLARE(cfiscsi, cfiscsi_frontend); MODULE_DEPEND(cfiscsi, icl, 1, 1, 1); static struct icl_pdu * cfiscsi_pdu_new_response(struct icl_pdu *request, int flags) { return (icl_pdu_new(request->ip_conn, flags)); } static bool cfiscsi_pdu_update_cmdsn(const struct icl_pdu *request) { const struct iscsi_bhs_scsi_command *bhssc; struct cfiscsi_session *cs; uint32_t cmdsn, curcmdsn; cs = PDU_SESSION(request); /* * Every incoming PDU - not just NOP-Out - resets the ping timer. * The purpose of the timeout is to reset the connection when it stalls; * we don't want this to happen when NOP-In or NOP-Out ends up delayed * in some queue. */ cs->cs_timeout = 0; /* * Immediate commands carry cmdsn, but it is neither incremented nor * verified. */ if (request->ip_bhs->bhs_opcode & ISCSI_BHS_OPCODE_IMMEDIATE) return (false); /* * Data-Out PDUs don't contain CmdSN. 
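 * Their ordering is checked separately, against the DataSN kept in the
 * matching cfiscsi_data_wait structure.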
*/ if (request->ip_bhs->bhs_opcode == ISCSI_BHS_OPCODE_SCSI_DATA_OUT) return (false); /* * We're only using fields common for all the request * (initiator -> target) PDUs. */ bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs; curcmdsn = cmdsn = ntohl(bhssc->bhssc_cmdsn); /* * Increment session cmdsn and exit if we received the expected value. */ do { if (atomic_fcmpset_32(&cs->cs_cmdsn, &curcmdsn, cmdsn + 1)) return (false); } while (curcmdsn == cmdsn); /* * The target MUST silently ignore any non-immediate command outside * of this range. */ if (ISCSI_SNLT(cmdsn, curcmdsn) || ISCSI_SNGT(cmdsn, curcmdsn - 1 + maxtags)) { CFISCSI_SESSION_WARN(cs, "received PDU with CmdSN %u, " "while expected %u", cmdsn, curcmdsn); return (true); } /* * We don't support multiple connections now, so any discontinuity in * CmdSN means lost PDUs. Since we don't support PDU retransmission -- * terminate the connection. */ CFISCSI_SESSION_WARN(cs, "received PDU with CmdSN %u, " "while expected %u; dropping connection", cmdsn, curcmdsn); cfiscsi_session_terminate(cs); return (true); } static void cfiscsi_pdu_handle(struct icl_pdu *request) { struct cfiscsi_session *cs; bool ignore; cs = PDU_SESSION(request); ignore = cfiscsi_pdu_update_cmdsn(request); if (ignore) { icl_pdu_free(request); return; } /* * Handle the PDU; this includes e.g. receiving the remaining * part of PDU and submitting the SCSI command to CTL * or queueing a reply. The handling routine is responsible * for freeing the PDU when it's no longer needed. */ switch (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) { case ISCSI_BHS_OPCODE_NOP_OUT: cfiscsi_pdu_handle_nop_out(request); break; case ISCSI_BHS_OPCODE_SCSI_COMMAND: cfiscsi_pdu_handle_scsi_command(request); break; case ISCSI_BHS_OPCODE_TASK_REQUEST: cfiscsi_pdu_handle_task_request(request); break; case ISCSI_BHS_OPCODE_SCSI_DATA_OUT: cfiscsi_pdu_handle_data_out(request); break; case ISCSI_BHS_OPCODE_LOGOUT_REQUEST: cfiscsi_pdu_handle_logout_request(request); break; default: CFISCSI_SESSION_WARN(cs, "received PDU with unsupported " "opcode 0x%x; dropping connection", request->ip_bhs->bhs_opcode); icl_pdu_free(request); cfiscsi_session_terminate(cs); } } static void cfiscsi_receive_callback(struct icl_pdu *request) { #ifdef ICL_KERNEL_PROXY struct cfiscsi_session *cs; cs = PDU_SESSION(request); if (cs->cs_waiting_for_ctld || cs->cs_login_phase) { if (cs->cs_login_pdu == NULL) cs->cs_login_pdu = request; else icl_pdu_free(request); cv_signal(&cs->cs_login_cv); return; } #endif cfiscsi_pdu_handle(request); } static void cfiscsi_error_callback(struct icl_conn *ic) { struct cfiscsi_session *cs; cs = CONN_SESSION(ic); CFISCSI_SESSION_WARN(cs, "connection error; dropping connection"); cfiscsi_session_terminate(cs); } static int cfiscsi_pdu_prepare(struct icl_pdu *response) { struct cfiscsi_session *cs; struct iscsi_bhs_scsi_response *bhssr; bool advance_statsn = true; uint32_t cmdsn; cs = PDU_SESSION(response); CFISCSI_SESSION_LOCK_ASSERT(cs); /* * We're only using fields common for all the response * (target -> initiator) PDUs. */ bhssr = (struct iscsi_bhs_scsi_response *)response->ip_bhs; /* * 10.8.3: "The StatSN for this connection is not advanced * after this PDU is sent." */ if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_R2T) advance_statsn = false; /* * 10.19.2: "However, when the Initiator Task Tag is set to 0xffffffff, * StatSN for the connection is not advanced after this PDU is sent." 
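 * This is how the unsolicited NOP-In pings sent from cfiscsi_callout()
 * are tagged.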
*/ if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_NOP_IN && bhssr->bhssr_initiator_task_tag == 0xffffffff) advance_statsn = false; /* * See the comment below - StatSN is not meaningful and must * not be advanced. */ if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_SCSI_DATA_IN && (bhssr->bhssr_flags & BHSDI_FLAGS_S) == 0) advance_statsn = false; /* * 10.7.3: "The fields StatSN, Status, and Residual Count * only have meaningful content if the S bit is set to 1." */ if (bhssr->bhssr_opcode != ISCSI_BHS_OPCODE_SCSI_DATA_IN || (bhssr->bhssr_flags & BHSDI_FLAGS_S)) bhssr->bhssr_statsn = htonl(cs->cs_statsn); cmdsn = cs->cs_cmdsn; bhssr->bhssr_expcmdsn = htonl(cmdsn); bhssr->bhssr_maxcmdsn = htonl(cmdsn - 1 + imax(0, maxtags - cs->cs_outstanding_ctl_pdus)); if (advance_statsn) cs->cs_statsn++; return (0); } static void cfiscsi_pdu_queue(struct icl_pdu *response) { struct cfiscsi_session *cs; cs = PDU_SESSION(response); CFISCSI_SESSION_LOCK(cs); cfiscsi_pdu_prepare(response); icl_pdu_queue(response); CFISCSI_SESSION_UNLOCK(cs); } static void cfiscsi_pdu_queue_cb(struct icl_pdu *response, icl_pdu_cb cb) { struct cfiscsi_session *cs = PDU_SESSION(response); CFISCSI_SESSION_LOCK(cs); cfiscsi_pdu_prepare(response); icl_pdu_queue_cb(response, cb); CFISCSI_SESSION_UNLOCK(cs); } static void cfiscsi_pdu_handle_nop_out(struct icl_pdu *request) { struct cfiscsi_session *cs; struct iscsi_bhs_nop_out *bhsno; struct iscsi_bhs_nop_in *bhsni; struct icl_pdu *response; void *data = NULL; size_t datasize; int error; cs = PDU_SESSION(request); bhsno = (struct iscsi_bhs_nop_out *)request->ip_bhs; if (bhsno->bhsno_initiator_task_tag == 0xffffffff) { /* * Nothing to do, iscsi_pdu_update_statsn() already * zeroed the timeout. */ icl_pdu_free(request); return; } datasize = icl_pdu_data_segment_length(request); if (datasize > 0) { data = malloc(datasize, M_CFISCSI, M_NOWAIT | M_ZERO); if (data == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } icl_pdu_get_data(request, 0, data, datasize); } response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "droppping connection"); free(data, M_CFISCSI); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhsni = (struct iscsi_bhs_nop_in *)response->ip_bhs; bhsni->bhsni_opcode = ISCSI_BHS_OPCODE_NOP_IN; bhsni->bhsni_flags = 0x80; bhsni->bhsni_initiator_task_tag = bhsno->bhsno_initiator_task_tag; bhsni->bhsni_target_transfer_tag = 0xffffffff; if (datasize > 0) { error = icl_pdu_append_data(response, data, datasize, M_NOWAIT); if (error != 0) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); free(data, M_CFISCSI); icl_pdu_free(request); icl_pdu_free(response); cfiscsi_session_terminate(cs); return; } free(data, M_CFISCSI); } icl_pdu_free(request); cfiscsi_pdu_queue(response); } static void cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request) { struct iscsi_bhs_scsi_command *bhssc; struct cfiscsi_session *cs; union ctl_io *io; int error; cs = PDU_SESSION(request); bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs; //CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x", // bhssc->bhssc_initiator_task_tag); if (request->ip_data_len > 0 && cs->cs_immediate_data == false) { CFISCSI_SESSION_WARN(cs, "unsolicited data with " "ImmediateData=No; dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } io = 
ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); ctl_zero_io(io); PRIV_REQUEST(io) = request; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid = cs->cs_ctl_initid; io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun(be64toh(bhssc->bhssc_lun)); + io->scsiio.priority = (bhssc->bhssc_pri & BHSSC_PRI_MASK) >> + BHSSC_PRI_SHIFT; io->scsiio.tag_num = bhssc->bhssc_initiator_task_tag; switch ((bhssc->bhssc_flags & BHSSC_FLAGS_ATTR)) { case BHSSC_FLAGS_ATTR_UNTAGGED: io->scsiio.tag_type = CTL_TAG_UNTAGGED; break; case BHSSC_FLAGS_ATTR_SIMPLE: io->scsiio.tag_type = CTL_TAG_SIMPLE; break; case BHSSC_FLAGS_ATTR_ORDERED: io->scsiio.tag_type = CTL_TAG_ORDERED; break; case BHSSC_FLAGS_ATTR_HOQ: io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case BHSSC_FLAGS_ATTR_ACA: io->scsiio.tag_type = CTL_TAG_ACA; break; default: io->scsiio.tag_type = CTL_TAG_UNTAGGED; CFISCSI_SESSION_WARN(cs, "unhandled tag type %d", bhssc->bhssc_flags & BHSSC_FLAGS_ATTR); break; } io->scsiio.cdb_len = sizeof(bhssc->bhssc_cdb); /* Which is 16. */ memcpy(io->scsiio.cdb, bhssc->bhssc_cdb, sizeof(bhssc->bhssc_cdb)); refcount_acquire(&cs->cs_outstanding_ctl_pdus); error = ctl_queue(io); if (error != CTL_RETVAL_COMPLETE) { CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d; " "dropping connection", error); ctl_free_io(io); refcount_release(&cs->cs_outstanding_ctl_pdus); icl_pdu_free(request); cfiscsi_session_terminate(cs); } } static void cfiscsi_pdu_handle_task_request(struct icl_pdu *request) { struct iscsi_bhs_task_management_request *bhstmr; struct iscsi_bhs_task_management_response *bhstmr2; struct icl_pdu *response; struct cfiscsi_session *cs; union ctl_io *io; int error; cs = PDU_SESSION(request); bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs; io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); ctl_zero_io(io); PRIV_REQUEST(io) = request; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid = cs->cs_ctl_initid; io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun(be64toh(bhstmr->bhstmr_lun)); io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */ switch (bhstmr->bhstmr_function & ~0x80) { case BHSTMR_FUNCTION_ABORT_TASK: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_ABORT_TASK"); #endif io->taskio.task_action = CTL_TASK_ABORT_TASK; io->taskio.tag_num = bhstmr->bhstmr_referenced_task_tag; break; case BHSTMR_FUNCTION_ABORT_TASK_SET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_ABORT_TASK_SET"); #endif io->taskio.task_action = CTL_TASK_ABORT_TASK_SET; break; case BHSTMR_FUNCTION_CLEAR_TASK_SET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_CLEAR_TASK_SET"); #endif io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET; break; case BHSTMR_FUNCTION_LOGICAL_UNIT_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_LOGICAL_UNIT_RESET"); #endif io->taskio.task_action = CTL_TASK_LUN_RESET; break; case BHSTMR_FUNCTION_TARGET_WARM_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_TARGET_WARM_RESET"); #endif io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case BHSTMR_FUNCTION_TARGET_COLD_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_TARGET_COLD_RESET"); #endif io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case BHSTMR_FUNCTION_QUERY_TASK: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_TASK"); #endif io->taskio.task_action = CTL_TASK_QUERY_TASK; io->taskio.tag_num = bhstmr->bhstmr_referenced_task_tag; break; case BHSTMR_FUNCTION_QUERY_TASK_SET: #if 
0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_TASK_SET"); #endif io->taskio.task_action = CTL_TASK_QUERY_TASK_SET; break; case BHSTMR_FUNCTION_I_T_NEXUS_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_I_T_NEXUS_RESET"); #endif io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET; break; case BHSTMR_FUNCTION_QUERY_ASYNC_EVENT: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_ASYNC_EVENT"); #endif io->taskio.task_action = CTL_TASK_QUERY_ASYNC_EVENT; break; default: CFISCSI_SESSION_DEBUG(cs, "unsupported function 0x%x", bhstmr->bhstmr_function & ~0x80); ctl_free_io(io); response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhstmr2 = (struct iscsi_bhs_task_management_response *) response->ip_bhs; bhstmr2->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_RESPONSE; bhstmr2->bhstmr_flags = 0x80; bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED; bhstmr2->bhstmr_initiator_task_tag = bhstmr->bhstmr_initiator_task_tag; icl_pdu_free(request); cfiscsi_pdu_queue(response); return; } refcount_acquire(&cs->cs_outstanding_ctl_pdus); error = ctl_queue(io); if (error != CTL_RETVAL_COMPLETE) { CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d; " "dropping connection", error); ctl_free_io(io); refcount_release(&cs->cs_outstanding_ctl_pdus); icl_pdu_free(request); cfiscsi_session_terminate(cs); } } static bool cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *cdw) { struct iscsi_bhs_data_out *bhsdo; struct cfiscsi_session *cs; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; size_t copy_len, len, off, buffer_offset; int ctl_sg_count; union ctl_io *io; cs = PDU_SESSION(request); KASSERT((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT || (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("bad opcode 0x%x", request->ip_bhs->bhs_opcode)); /* * We're only using fields common for Data-Out and SCSI Command PDUs. */ bhsdo = (struct iscsi_bhs_data_out *)request->ip_bhs; io = cdw->cdw_ctl_io; KASSERT((io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_IN, ("CTL_FLAG_DATA_IN")); #if 0 CFISCSI_SESSION_DEBUG(cs, "received %zd bytes out of %d", request->ip_data_len, io->scsiio.kern_total_len); #endif if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; ctl_sg_count = io->scsiio.kern_sg_entries; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = io->scsiio.kern_data_len; ctl_sg_count = 1; } if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT) buffer_offset = ntohl(bhsdo->bhsdo_buffer_offset); else buffer_offset = 0; len = icl_pdu_data_segment_length(request); /* * Make sure the offset, as sent by the initiator, matches the offset * we're supposed to be at in the scatter-gather list. 
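 * If it does not, the initiator is writing somewhere we did not ask it
 * to; the check below sets a data phase error and drops the connection.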
*/ if (buffer_offset > io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled || buffer_offset + len <= io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled) { CFISCSI_SESSION_WARN(cs, "received bad buffer offset %zd, " "expected %zd; dropping connection", buffer_offset, (size_t)io->scsiio.kern_rel_offset + (size_t)io->scsiio.ext_data_filled); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } /* * This is the offset within the PDU data segment, as opposed * to buffer_offset, which is the offset within the task (SCSI * command). */ off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled - buffer_offset; /* * Iterate over the scatter/gather segments, filling them with data * from the PDU data segment. Note that this can get called multiple * times for one SCSI command; the cdw structure holds state for the * scatter/gather list. */ for (;;) { KASSERT(cdw->cdw_sg_index < ctl_sg_count, ("cdw->cdw_sg_index >= ctl_sg_count")); if (cdw->cdw_sg_len == 0) { cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr; cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len; } KASSERT(off <= len, ("len > off")); copy_len = len - off; if (copy_len > cdw->cdw_sg_len) copy_len = cdw->cdw_sg_len; icl_pdu_get_data(request, off, cdw->cdw_sg_addr, copy_len); cdw->cdw_sg_addr += copy_len; cdw->cdw_sg_len -= copy_len; off += copy_len; io->scsiio.ext_data_filled += copy_len; io->scsiio.kern_data_resid -= copy_len; if (cdw->cdw_sg_len == 0) { /* * End of current segment. */ if (cdw->cdw_sg_index == ctl_sg_count - 1) { /* * Last segment in scatter/gather list. */ break; } cdw->cdw_sg_index++; } if (off == len) { /* * End of PDU payload. */ break; } } if (len > off) { /* * In case of unsolicited data, it's possible that the buffer * provided by CTL is smaller than negotiated FirstBurstLength. * Just ignore the superfluous data; will ask for them with R2T * on next call to cfiscsi_datamove(). * * This obviously can only happen with SCSI Command PDU. */ if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND) return (true); CFISCSI_SESSION_WARN(cs, "received too much data: got %zd bytes, " "expected %zd; dropping connection", icl_pdu_data_segment_length(request), off); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } if (io->scsiio.ext_data_filled == cdw->cdw_r2t_end && (bhsdo->bhsdo_flags & BHSDO_FLAGS_F) == 0) { CFISCSI_SESSION_WARN(cs, "got the final packet without " "the F flag; flags = 0x%x; dropping connection", bhsdo->bhsdo_flags); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } if (io->scsiio.ext_data_filled != cdw->cdw_r2t_end && (bhsdo->bhsdo_flags & BHSDO_FLAGS_F) != 0) { if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT) { CFISCSI_SESSION_WARN(cs, "got the final packet, but the " "transmitted size was %zd bytes instead of %d; " "dropping connection", (size_t)io->scsiio.ext_data_filled, cdw->cdw_r2t_end); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } else { /* * For SCSI Command PDU, this just means we need to * solicit more data by sending R2T. 
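 * Returning false tells the caller that this transfer is not complete
 * yet and more data is still expected.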
*/ return (false); } } if (io->scsiio.ext_data_filled == cdw->cdw_r2t_end) { #if 0 CFISCSI_SESSION_DEBUG(cs, "no longer expecting Data-Out with target " "transfer tag 0x%x", cdw->cdw_target_transfer_tag); #endif return (true); } return (false); } static void cfiscsi_pdu_handle_data_out(struct icl_pdu *request) { struct iscsi_bhs_data_out *bhsdo; struct cfiscsi_session *cs; struct cfiscsi_data_wait *cdw = NULL; union ctl_io *io; bool done; cs = PDU_SESSION(request); bhsdo = (struct iscsi_bhs_data_out *)request->ip_bhs; CFISCSI_SESSION_LOCK(cs); TAILQ_FOREACH(cdw, &cs->cs_waiting_for_data_out, cdw_next) { #if 0 CFISCSI_SESSION_DEBUG(cs, "have ttt 0x%x, itt 0x%x; looking for " "ttt 0x%x, itt 0x%x", bhsdo->bhsdo_target_transfer_tag, bhsdo->bhsdo_initiator_task_tag, cdw->cdw_target_transfer_tag, cdw->cdw_initiator_task_tag)); #endif if (bhsdo->bhsdo_target_transfer_tag == cdw->cdw_target_transfer_tag) break; } CFISCSI_SESSION_UNLOCK(cs); if (cdw == NULL) { CFISCSI_SESSION_WARN(cs, "data transfer tag 0x%x, initiator task tag " "0x%x, not found; dropping connection", bhsdo->bhsdo_target_transfer_tag, bhsdo->bhsdo_initiator_task_tag); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } if (cdw->cdw_datasn != ntohl(bhsdo->bhsdo_datasn)) { CFISCSI_SESSION_WARN(cs, "received Data-Out PDU with " "DataSN %u, while expected %u; dropping connection", ntohl(bhsdo->bhsdo_datasn), cdw->cdw_datasn); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } cdw->cdw_datasn++; io = cdw->cdw_ctl_io; KASSERT((io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_IN, ("CTL_FLAG_DATA_IN")); done = cfiscsi_handle_data_segment(request, cdw); if (done) { CFISCSI_SESSION_LOCK(cs); TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next); CFISCSI_SESSION_UNLOCK(cs); done = (io->scsiio.ext_data_filled != cdw->cdw_r2t_end || io->scsiio.ext_data_filled == io->scsiio.kern_data_len); cfiscsi_data_wait_free(cs, cdw); io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; if (done) io->scsiio.be_move_done(io); else cfiscsi_datamove_out(io); } icl_pdu_free(request); } static void cfiscsi_pdu_handle_logout_request(struct icl_pdu *request) { struct iscsi_bhs_logout_request *bhslr; struct iscsi_bhs_logout_response *bhslr2; struct icl_pdu *response; struct cfiscsi_session *cs; cs = PDU_SESSION(request); bhslr = (struct iscsi_bhs_logout_request *)request->ip_bhs; switch (bhslr->bhslr_reason & 0x7f) { case BHSLR_REASON_CLOSE_SESSION: case BHSLR_REASON_CLOSE_CONNECTION: response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_DEBUG(cs, "failed to allocate memory"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhslr2 = (struct iscsi_bhs_logout_response *)response->ip_bhs; bhslr2->bhslr_opcode = ISCSI_BHS_OPCODE_LOGOUT_RESPONSE; bhslr2->bhslr_flags = 0x80; bhslr2->bhslr_response = BHSLR_RESPONSE_CLOSED_SUCCESSFULLY; bhslr2->bhslr_initiator_task_tag = bhslr->bhslr_initiator_task_tag; icl_pdu_free(request); cfiscsi_pdu_queue(response); cfiscsi_session_terminate(cs); break; case BHSLR_REASON_REMOVE_FOR_RECOVERY: response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhslr2 = (struct iscsi_bhs_logout_response *)response->ip_bhs; bhslr2->bhslr_opcode = ISCSI_BHS_OPCODE_LOGOUT_RESPONSE; bhslr2->bhslr_flags = 0x80; bhslr2->bhslr_response = BHSLR_RESPONSE_RECOVERY_NOT_SUPPORTED; bhslr2->bhslr_initiator_task_tag = 
bhslr->bhslr_initiator_task_tag; icl_pdu_free(request); cfiscsi_pdu_queue(response); break; default: CFISCSI_SESSION_WARN(cs, "invalid reason 0%x; dropping connection", bhslr->bhslr_reason); icl_pdu_free(request); cfiscsi_session_terminate(cs); break; } } static void cfiscsi_callout(void *context) { struct icl_pdu *cp; struct iscsi_bhs_nop_in *bhsni; struct cfiscsi_session *cs; cs = context; if (cs->cs_terminating) return; callout_schedule(&cs->cs_callout, 1 * hz); atomic_add_int(&cs->cs_timeout, 1); #ifdef ICL_KERNEL_PROXY if (cs->cs_waiting_for_ctld || cs->cs_login_phase) { if (login_timeout > 0 && cs->cs_timeout > login_timeout) { CFISCSI_SESSION_WARN(cs, "login timed out after " "%d seconds; dropping connection", cs->cs_timeout); cfiscsi_session_terminate(cs); } return; } #endif if (ping_timeout <= 0) { /* * Pings are disabled. Don't send NOP-In in this case; * user might have disabled pings to work around problems * with certain initiators that can't properly handle * NOP-In, such as iPXE. Reset the timeout, to avoid * triggering reconnection, should the user decide to * reenable them. */ cs->cs_timeout = 0; return; } if (cs->cs_timeout >= ping_timeout) { CFISCSI_SESSION_WARN(cs, "no ping reply (NOP-Out) after %d seconds; " "dropping connection", ping_timeout); cfiscsi_session_terminate(cs); return; } /* * If the ping was reset less than one second ago - which means * that we've received some PDU during the last second - assume * the traffic flows correctly and don't bother sending a NOP-Out. * * (It's 2 - one for one second, and one for incrementing is_timeout * earlier in this routine.) */ if (cs->cs_timeout < 2) return; cp = icl_pdu_new(cs->cs_conn, M_NOWAIT); if (cp == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory"); return; } bhsni = (struct iscsi_bhs_nop_in *)cp->ip_bhs; bhsni->bhsni_opcode = ISCSI_BHS_OPCODE_NOP_IN; bhsni->bhsni_flags = 0x80; bhsni->bhsni_initiator_task_tag = 0xffffffff; cfiscsi_pdu_queue(cp); } static struct cfiscsi_data_wait * cfiscsi_data_wait_new(struct cfiscsi_session *cs, union ctl_io *io, uint32_t initiator_task_tag, uint32_t *target_transfer_tagp) { struct cfiscsi_data_wait *cdw; int error; cdw = uma_zalloc(cfiscsi_data_wait_zone, M_NOWAIT | M_ZERO); if (cdw == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate %zd bytes", sizeof(*cdw)); return (NULL); } error = icl_conn_transfer_setup(cs->cs_conn, io, target_transfer_tagp, &cdw->cdw_icl_prv); if (error != 0) { CFISCSI_SESSION_WARN(cs, "icl_conn_transfer_setup() failed with error %d", error); uma_zfree(cfiscsi_data_wait_zone, cdw); return (NULL); } cdw->cdw_ctl_io = io; cdw->cdw_target_transfer_tag = *target_transfer_tagp; cdw->cdw_initiator_task_tag = initiator_task_tag; return (cdw); } static void cfiscsi_data_wait_free(struct cfiscsi_session *cs, struct cfiscsi_data_wait *cdw) { icl_conn_transfer_done(cs->cs_conn, cdw->cdw_icl_prv); uma_zfree(cfiscsi_data_wait_zone, cdw); } static void cfiscsi_session_terminate_tasks(struct cfiscsi_session *cs) { struct cfiscsi_data_wait *cdw; union ctl_io *io; int error, last, wait; if (cs->cs_target == NULL) return; /* No target yet, so nothing to do. 
*/ io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); ctl_zero_io(io); PRIV_REQUEST(io) = cs; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid = cs->cs_ctl_initid; io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port; io->io_hdr.nexus.targ_lun = 0; io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */ io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET; wait = cs->cs_outstanding_ctl_pdus; refcount_acquire(&cs->cs_outstanding_ctl_pdus); error = ctl_queue(io); if (error != CTL_RETVAL_COMPLETE) { CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d", error); refcount_release(&cs->cs_outstanding_ctl_pdus); ctl_free_io(io); } CFISCSI_SESSION_LOCK(cs); while ((cdw = TAILQ_FIRST(&cs->cs_waiting_for_data_out)) != NULL) { TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next); CFISCSI_SESSION_UNLOCK(cs); /* * Set nonzero port status; this prevents backends from * assuming that the data transfer actually succeeded * and writing uninitialized data to disk. */ cdw->cdw_ctl_io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; cdw->cdw_ctl_io->scsiio.io_hdr.port_status = 42; cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io); cfiscsi_data_wait_free(cs, cdw); CFISCSI_SESSION_LOCK(cs); } CFISCSI_SESSION_UNLOCK(cs); /* * Wait for CTL to terminate all the tasks. */ if (wait > 0) CFISCSI_SESSION_WARN(cs, "waiting for CTL to terminate %d tasks", wait); for (;;) { refcount_acquire(&cs->cs_outstanding_ctl_pdus); last = refcount_release(&cs->cs_outstanding_ctl_pdus); if (last != 0) break; tsleep(__DEVOLATILE(void *, &cs->cs_outstanding_ctl_pdus), 0, "cfiscsi_terminate", hz / 100); } if (wait > 0) CFISCSI_SESSION_WARN(cs, "tasks terminated"); } static void cfiscsi_maintenance_thread(void *arg) { struct cfiscsi_session *cs; cs = arg; for (;;) { CFISCSI_SESSION_LOCK(cs); if (cs->cs_terminating == false || cs->cs_handoff_in_progress) cv_wait(&cs->cs_maintenance_cv, &cs->cs_lock); CFISCSI_SESSION_UNLOCK(cs); if (cs->cs_terminating && cs->cs_handoff_in_progress == false) { /* * We used to wait up to 30 seconds to deliver queued * PDUs to the initiator. We also tried hard to deliver * SCSI Responses for the aborted PDUs. We don't do * that anymore. We might need to revisit that. */ callout_drain(&cs->cs_callout); icl_conn_close(cs->cs_conn); /* * At this point ICL receive thread is no longer * running; no new tasks can be queued. 
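 * cfiscsi_session_terminate_tasks() queues an I_T NEXUS RESET, fails
 * any pending Data-Out waiters, and waits for all outstanding CTL
 * requests to drain before the session is freed below.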
*/ cfiscsi_session_terminate_tasks(cs); cfiscsi_session_delete(cs); kthread_exit(); return; } CFISCSI_SESSION_DEBUG(cs, "nothing to do"); } } static void cfiscsi_session_terminate(struct cfiscsi_session *cs) { cs->cs_terminating = true; cv_signal(&cs->cs_maintenance_cv); #ifdef ICL_KERNEL_PROXY cv_signal(&cs->cs_login_cv); #endif } static int cfiscsi_session_register_initiator(struct cfiscsi_session *cs) { struct cfiscsi_target *ct; char *name; int i; KASSERT(cs->cs_ctl_initid == -1, ("already registered")); ct = cs->cs_target; name = strdup(cs->cs_initiator_id, M_CTL); i = ctl_add_initiator(&ct->ct_port, -1, 0, name); if (i < 0) { CFISCSI_SESSION_WARN(cs, "ctl_add_initiator failed with error %d", i); cs->cs_ctl_initid = -1; return (1); } cs->cs_ctl_initid = i; #if 0 CFISCSI_SESSION_DEBUG(cs, "added initiator id %d", i); #endif return (0); } static void cfiscsi_session_unregister_initiator(struct cfiscsi_session *cs) { int error; if (cs->cs_ctl_initid == -1) return; error = ctl_remove_initiator(&cs->cs_target->ct_port, cs->cs_ctl_initid); if (error != 0) { CFISCSI_SESSION_WARN(cs, "ctl_remove_initiator failed with error %d", error); } cs->cs_ctl_initid = -1; } static struct cfiscsi_session * cfiscsi_session_new(struct cfiscsi_softc *softc, const char *offload) { struct cfiscsi_session *cs; int error; cs = malloc(sizeof(*cs), M_CFISCSI, M_NOWAIT | M_ZERO); if (cs == NULL) { CFISCSI_WARN("malloc failed"); return (NULL); } cs->cs_ctl_initid = -1; refcount_init(&cs->cs_outstanding_ctl_pdus, 0); TAILQ_INIT(&cs->cs_waiting_for_data_out); mtx_init(&cs->cs_lock, "cfiscsi_lock", NULL, MTX_DEF); cv_init(&cs->cs_maintenance_cv, "cfiscsi_mt"); #ifdef ICL_KERNEL_PROXY cv_init(&cs->cs_login_cv, "cfiscsi_login"); #endif /* * The purpose of this is to avoid racing with session shutdown. * Otherwise we could have the maintenance thread call icl_conn_close() * before we call icl_conn_handoff(). */ cs->cs_handoff_in_progress = true; cs->cs_conn = icl_new_conn(offload, false, "cfiscsi", &cs->cs_lock); if (cs->cs_conn == NULL) { free(cs, M_CFISCSI); return (NULL); } cs->cs_conn->ic_receive = cfiscsi_receive_callback; cs->cs_conn->ic_error = cfiscsi_error_callback; cs->cs_conn->ic_prv0 = cs; error = kthread_add(cfiscsi_maintenance_thread, cs, NULL, NULL, 0, 0, "cfiscsimt"); if (error != 0) { CFISCSI_SESSION_WARN(cs, "kthread_add(9) failed with error %d", error); free(cs, M_CFISCSI); return (NULL); } mtx_lock(&softc->lock); cs->cs_id = ++softc->last_session_id; TAILQ_INSERT_TAIL(&softc->sessions, cs, cs_next); mtx_unlock(&softc->lock); /* * Start pinging the initiator. 
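 * cfiscsi_callout() reschedules itself every second; if ping_timeout
 * is positive and no PDU arrives for that many seconds, the connection
 * is dropped.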
*/ callout_init(&cs->cs_callout, 1); callout_reset(&cs->cs_callout, 1 * hz, cfiscsi_callout, cs); return (cs); } static void cfiscsi_session_delete(struct cfiscsi_session *cs) { struct cfiscsi_softc *softc; softc = &cfiscsi_softc; KASSERT(cs->cs_outstanding_ctl_pdus == 0, ("destroying session with outstanding CTL pdus")); KASSERT(TAILQ_EMPTY(&cs->cs_waiting_for_data_out), ("destroying session with non-empty queue")); mtx_lock(&softc->lock); TAILQ_REMOVE(&softc->sessions, cs, cs_next); mtx_unlock(&softc->lock); cfiscsi_session_unregister_initiator(cs); if (cs->cs_target != NULL) cfiscsi_target_release(cs->cs_target); icl_conn_close(cs->cs_conn); icl_conn_free(cs->cs_conn); free(cs, M_CFISCSI); cv_signal(&softc->sessions_cv); } static int cfiscsi_init(void) { struct cfiscsi_softc *softc; softc = &cfiscsi_softc; bzero(softc, sizeof(*softc)); mtx_init(&softc->lock, "cfiscsi", NULL, MTX_DEF); cv_init(&softc->sessions_cv, "cfiscsi_sessions"); #ifdef ICL_KERNEL_PROXY cv_init(&softc->accept_cv, "cfiscsi_accept"); #endif TAILQ_INIT(&softc->sessions); TAILQ_INIT(&softc->targets); cfiscsi_data_wait_zone = uma_zcreate("cfiscsi_data_wait", sizeof(struct cfiscsi_data_wait), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); return (0); } static int cfiscsi_shutdown(void) { struct cfiscsi_softc *softc = &cfiscsi_softc; if (!TAILQ_EMPTY(&softc->sessions) || !TAILQ_EMPTY(&softc->targets)) return (EBUSY); uma_zdestroy(cfiscsi_data_wait_zone); #ifdef ICL_KERNEL_PROXY cv_destroy(&softc->accept_cv); #endif cv_destroy(&softc->sessions_cv); mtx_destroy(&softc->lock); return (0); } #ifdef ICL_KERNEL_PROXY static void cfiscsi_accept(struct socket *so, struct sockaddr *sa, int portal_id) { struct cfiscsi_session *cs; cs = cfiscsi_session_new(&cfiscsi_softc, NULL); if (cs == NULL) { CFISCSI_WARN("failed to create session"); return; } icl_conn_handoff_sock(cs->cs_conn, so); cs->cs_initiator_sa = sa; cs->cs_portal_id = portal_id; cs->cs_handoff_in_progress = false; cs->cs_waiting_for_ctld = true; cv_signal(&cfiscsi_softc.accept_cv); CFISCSI_SESSION_LOCK(cs); /* * Wake up the maintenance thread if we got scheduled for termination * somewhere between cfiscsi_session_new() and icl_conn_handoff_sock(). 
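 * cfiscsi_session_terminate() just sets cs_terminating and signals the
 * waiting threads, so calling it a second time here is harmless.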
*/ if (cs->cs_terminating) cfiscsi_session_terminate(cs); CFISCSI_SESSION_UNLOCK(cs); } #endif static void cfiscsi_online(void *arg) { struct cfiscsi_softc *softc; struct cfiscsi_target *ct; int online; ct = (struct cfiscsi_target *)arg; softc = ct->ct_softc; mtx_lock(&softc->lock); if (ct->ct_online) { mtx_unlock(&softc->lock); return; } ct->ct_online = 1; online = softc->online++; mtx_unlock(&softc->lock); if (online > 0) return; #ifdef ICL_KERNEL_PROXY if (softc->listener != NULL) icl_listen_free(softc->listener); softc->listener = icl_listen_new(cfiscsi_accept); #endif } static void cfiscsi_offline(void *arg) { struct cfiscsi_softc *softc; struct cfiscsi_target *ct; struct cfiscsi_session *cs; int error, online; ct = (struct cfiscsi_target *)arg; softc = ct->ct_softc; mtx_lock(&softc->lock); if (!ct->ct_online) { mtx_unlock(&softc->lock); return; } ct->ct_online = 0; online = --softc->online; do { TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cs->cs_target == ct) cfiscsi_session_terminate(cs); } TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cs->cs_target == ct) break; } if (cs != NULL) { error = cv_wait_sig(&softc->sessions_cv, &softc->lock); if (error != 0) { CFISCSI_SESSION_DEBUG(cs, "cv_wait failed with error %d\n", error); break; } } } while (cs != NULL && ct->ct_online == 0); mtx_unlock(&softc->lock); if (online > 0) return; #ifdef ICL_KERNEL_PROXY icl_listen_free(softc->listener); softc->listener = NULL; #endif } static int cfiscsi_info(void *arg, struct sbuf *sb) { struct cfiscsi_target *ct = (struct cfiscsi_target *)arg; int retval; retval = sbuf_printf(sb, "\t%d\n", ct->ct_state); return (retval); } static void cfiscsi_ioctl_handoff(struct ctl_iscsi *ci) { struct cfiscsi_softc *softc; struct cfiscsi_session *cs, *cs2; struct cfiscsi_target *ct; struct ctl_iscsi_handoff_params *cihp; int error; cihp = (struct ctl_iscsi_handoff_params *)&(ci->data); softc = &cfiscsi_softc; CFISCSI_DEBUG("new connection from %s (%s) to %s", cihp->initiator_name, cihp->initiator_addr, cihp->target_name); ct = cfiscsi_target_find(softc, cihp->target_name, cihp->portal_group_tag); if (ct == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: target not found", __func__); return; } #ifdef ICL_KERNEL_PROXY if (cihp->socket > 0 && cihp->connection_id > 0) { snprintf(ci->error_str, sizeof(ci->error_str), "both socket and connection_id set"); ci->status = CTL_ISCSI_ERROR; cfiscsi_target_release(ct); return; } if (cihp->socket == 0) { mtx_lock(&cfiscsi_softc.lock); TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_id == cihp->connection_id) break; } if (cs == NULL) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "connection not found"); ci->status = CTL_ISCSI_ERROR; cfiscsi_target_release(ct); return; } mtx_unlock(&cfiscsi_softc.lock); } else { #endif cs = cfiscsi_session_new(softc, cihp->offload); if (cs == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: cfiscsi_session_new failed", __func__); cfiscsi_target_release(ct); return; } #ifdef ICL_KERNEL_PROXY } #endif /* * First PDU of Full Feature phase has the same CmdSN as the last * PDU from the Login Phase received from the initiator. Thus, * the -1 below. 
*/ cs->cs_cmdsn = cihp->cmdsn; cs->cs_statsn = cihp->statsn; cs->cs_max_recv_data_segment_length = cihp->max_recv_data_segment_length; cs->cs_max_send_data_segment_length = cihp->max_send_data_segment_length; cs->cs_max_burst_length = cihp->max_burst_length; cs->cs_first_burst_length = cihp->first_burst_length; cs->cs_immediate_data = !!cihp->immediate_data; if (cihp->header_digest == CTL_ISCSI_DIGEST_CRC32C) cs->cs_conn->ic_header_crc32c = true; if (cihp->data_digest == CTL_ISCSI_DIGEST_CRC32C) cs->cs_conn->ic_data_crc32c = true; strlcpy(cs->cs_initiator_name, cihp->initiator_name, sizeof(cs->cs_initiator_name)); strlcpy(cs->cs_initiator_addr, cihp->initiator_addr, sizeof(cs->cs_initiator_addr)); strlcpy(cs->cs_initiator_alias, cihp->initiator_alias, sizeof(cs->cs_initiator_alias)); memcpy(cs->cs_initiator_isid, cihp->initiator_isid, sizeof(cs->cs_initiator_isid)); snprintf(cs->cs_initiator_id, sizeof(cs->cs_initiator_id), "%s,i,0x%02x%02x%02x%02x%02x%02x", cs->cs_initiator_name, cihp->initiator_isid[0], cihp->initiator_isid[1], cihp->initiator_isid[2], cihp->initiator_isid[3], cihp->initiator_isid[4], cihp->initiator_isid[5]); mtx_lock(&softc->lock); if (ct->ct_online == 0) { mtx_unlock(&softc->lock); CFISCSI_SESSION_LOCK(cs); cs->cs_handoff_in_progress = false; cfiscsi_session_terminate(cs); CFISCSI_SESSION_UNLOCK(cs); cfiscsi_target_release(ct); ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: port offline", __func__); return; } cs->cs_target = ct; mtx_unlock(&softc->lock); restart: if (!cs->cs_terminating) { mtx_lock(&softc->lock); TAILQ_FOREACH(cs2, &softc->sessions, cs_next) { if (cs2 != cs && cs2->cs_tasks_aborted == false && cs->cs_target == cs2->cs_target && strcmp(cs->cs_initiator_id, cs2->cs_initiator_id) == 0) { if (strcmp(cs->cs_initiator_addr, cs2->cs_initiator_addr) != 0) { CFISCSI_SESSION_WARN(cs2, "session reinstatement from " "different address %s", cs->cs_initiator_addr); } else { CFISCSI_SESSION_DEBUG(cs2, "session reinstatement"); } cfiscsi_session_terminate(cs2); mtx_unlock(&softc->lock); pause("cfiscsi_reinstate", 1); goto restart; } } mtx_unlock(&softc->lock); } /* * Register initiator with CTL. */ cfiscsi_session_register_initiator(cs); #ifdef ICL_KERNEL_PROXY if (cihp->socket > 0) { #endif error = icl_conn_handoff(cs->cs_conn, cihp->socket); if (error != 0) { CFISCSI_SESSION_LOCK(cs); cs->cs_handoff_in_progress = false; cfiscsi_session_terminate(cs); CFISCSI_SESSION_UNLOCK(cs); ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: icl_conn_handoff failed with error %d", __func__, error); return; } #ifdef ICL_KERNEL_PROXY } #endif #ifdef ICL_KERNEL_PROXY cs->cs_login_phase = false; /* * First PDU of the Full Feature phase has likely already arrived. * We have to pick it up and execute properly. */ if (cs->cs_login_pdu != NULL) { CFISCSI_SESSION_DEBUG(cs, "picking up first PDU"); cfiscsi_pdu_handle(cs->cs_login_pdu); cs->cs_login_pdu = NULL; } #endif CFISCSI_SESSION_LOCK(cs); cs->cs_handoff_in_progress = false; /* * Wake up the maintenance thread if we got scheduled for termination. 
*/ if (cs->cs_terminating) cfiscsi_session_terminate(cs); CFISCSI_SESSION_UNLOCK(cs); ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_list(struct ctl_iscsi *ci) { struct ctl_iscsi_list_params *cilp; struct cfiscsi_session *cs; struct cfiscsi_softc *softc; struct sbuf *sb; int error; cilp = (struct ctl_iscsi_list_params *)&(ci->data); softc = &cfiscsi_softc; sb = sbuf_new(NULL, NULL, cilp->alloc_len, SBUF_FIXEDLEN); if (sb == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "Unable to allocate %d bytes for iSCSI session list", cilp->alloc_len); return; } sbuf_printf(sb, "\n"); mtx_lock(&softc->lock); TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cs->cs_target == NULL) continue; error = sbuf_printf(sb, "" "%s" "%s" "%s" "%s" "%s" "%u" "%s" "%s" "%d" "%d" "%d" "%d" "%d" "%d" "%s" "\n", cs->cs_id, cs->cs_initiator_name, cs->cs_initiator_addr, cs->cs_initiator_alias, cs->cs_target->ct_name, cs->cs_target->ct_alias, cs->cs_target->ct_tag, cs->cs_conn->ic_header_crc32c ? "CRC32C" : "None", cs->cs_conn->ic_data_crc32c ? "CRC32C" : "None", cs->cs_max_recv_data_segment_length, cs->cs_max_send_data_segment_length, cs->cs_max_burst_length, cs->cs_first_burst_length, cs->cs_immediate_data, cs->cs_conn->ic_iser, cs->cs_conn->ic_offload); if (error != 0) break; } mtx_unlock(&softc->lock); error = sbuf_printf(sb, "\n"); if (error != 0) { sbuf_delete(sb); ci->status = CTL_ISCSI_LIST_NEED_MORE_SPACE; snprintf(ci->error_str, sizeof(ci->error_str), "Out of space, %d bytes is too small", cilp->alloc_len); return; } sbuf_finish(sb); error = copyout(sbuf_data(sb), cilp->conn_xml, sbuf_len(sb) + 1); if (error != 0) { sbuf_delete(sb); snprintf(ci->error_str, sizeof(ci->error_str), "copyout failed with error %d", error); ci->status = CTL_ISCSI_ERROR; return; } cilp->fill_len = sbuf_len(sb) + 1; ci->status = CTL_ISCSI_OK; sbuf_delete(sb); } static void cfiscsi_ioctl_logout(struct ctl_iscsi *ci) { struct icl_pdu *response; struct iscsi_bhs_asynchronous_message *bhsam; struct ctl_iscsi_logout_params *cilp; struct cfiscsi_session *cs; struct cfiscsi_softc *softc; int found = 0; cilp = (struct ctl_iscsi_logout_params *)&(ci->data); softc = &cfiscsi_softc; mtx_lock(&softc->lock); TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cilp->all == 0 && cs->cs_id != cilp->connection_id && strcmp(cs->cs_initiator_name, cilp->initiator_name) != 0 && strcmp(cs->cs_initiator_addr, cilp->initiator_addr) != 0) continue; response = icl_pdu_new(cs->cs_conn, M_NOWAIT); if (response == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "Unable to allocate memory"); mtx_unlock(&softc->lock); return; } bhsam = (struct iscsi_bhs_asynchronous_message *)response->ip_bhs; bhsam->bhsam_opcode = ISCSI_BHS_OPCODE_ASYNC_MESSAGE; bhsam->bhsam_flags = 0x80; bhsam->bhsam_async_event = BHSAM_EVENT_TARGET_REQUESTS_LOGOUT; bhsam->bhsam_parameter3 = htons(10); cfiscsi_pdu_queue(response); found++; } mtx_unlock(&softc->lock); if (found == 0) { ci->status = CTL_ISCSI_SESSION_NOT_FOUND; snprintf(ci->error_str, sizeof(ci->error_str), "No matching connections found"); return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_terminate(struct ctl_iscsi *ci) { struct icl_pdu *response; struct iscsi_bhs_asynchronous_message *bhsam; struct ctl_iscsi_terminate_params *citp; struct cfiscsi_session *cs; struct cfiscsi_softc *softc; int found = 0; citp = (struct ctl_iscsi_terminate_params *)&(ci->data); softc = &cfiscsi_softc; mtx_lock(&softc->lock); TAILQ_FOREACH(cs, 
&softc->sessions, cs_next) { if (citp->all == 0 && cs->cs_id != citp->connection_id && strcmp(cs->cs_initiator_name, citp->initiator_name) != 0 && strcmp(cs->cs_initiator_addr, citp->initiator_addr) != 0) continue; response = icl_pdu_new(cs->cs_conn, M_NOWAIT); if (response == NULL) { /* * Oh well. Just terminate the connection. */ } else { bhsam = (struct iscsi_bhs_asynchronous_message *) response->ip_bhs; bhsam->bhsam_opcode = ISCSI_BHS_OPCODE_ASYNC_MESSAGE; bhsam->bhsam_flags = 0x80; bhsam->bhsam_0xffffffff = 0xffffffff; bhsam->bhsam_async_event = BHSAM_EVENT_TARGET_TERMINATES_SESSION; cfiscsi_pdu_queue(response); } cfiscsi_session_terminate(cs); found++; } mtx_unlock(&softc->lock); if (found == 0) { ci->status = CTL_ISCSI_SESSION_NOT_FOUND; snprintf(ci->error_str, sizeof(ci->error_str), "No matching connections found"); return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_limits(struct ctl_iscsi *ci) { struct ctl_iscsi_limits_params *cilp; struct icl_drv_limits idl; int error; cilp = (struct ctl_iscsi_limits_params *)&(ci->data); error = icl_limits(cilp->offload, false, &idl); if (error != 0) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: icl_limits failed with error %d", __func__, error); return; } cilp->max_recv_data_segment_length = idl.idl_max_recv_data_segment_length; cilp->max_send_data_segment_length = idl.idl_max_send_data_segment_length; cilp->max_burst_length = idl.idl_max_burst_length; cilp->first_burst_length = idl.idl_first_burst_length; ci->status = CTL_ISCSI_OK; } #ifdef ICL_KERNEL_PROXY static void cfiscsi_ioctl_listen(struct ctl_iscsi *ci) { struct ctl_iscsi_listen_params *cilp; struct sockaddr *sa; int error; cilp = (struct ctl_iscsi_listen_params *)&(ci->data); if (cfiscsi_softc.listener == NULL) { CFISCSI_DEBUG("no listener"); snprintf(ci->error_str, sizeof(ci->error_str), "no listener"); ci->status = CTL_ISCSI_ERROR; return; } error = getsockaddr(&sa, (void *)cilp->addr, cilp->addrlen); if (error != 0) { CFISCSI_DEBUG("getsockaddr, error %d", error); snprintf(ci->error_str, sizeof(ci->error_str), "getsockaddr failed"); ci->status = CTL_ISCSI_ERROR; return; } error = icl_listen_add(cfiscsi_softc.listener, cilp->iser, cilp->domain, cilp->socktype, cilp->protocol, sa, cilp->portal_id); if (error != 0) { free(sa, M_SONAME); CFISCSI_DEBUG("icl_listen_add, error %d", error); snprintf(ci->error_str, sizeof(ci->error_str), "icl_listen_add failed, error %d", error); ci->status = CTL_ISCSI_ERROR; return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_accept(struct ctl_iscsi *ci) { struct ctl_iscsi_accept_params *ciap; struct cfiscsi_session *cs; int error; ciap = (struct ctl_iscsi_accept_params *)&(ci->data); mtx_lock(&cfiscsi_softc.lock); for (;;) { TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_waiting_for_ctld) break; } if (cs != NULL) break; error = cv_wait_sig(&cfiscsi_softc.accept_cv, &cfiscsi_softc.lock); if (error != 0) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "interrupted"); ci->status = CTL_ISCSI_ERROR; return; } } mtx_unlock(&cfiscsi_softc.lock); cs->cs_waiting_for_ctld = false; cs->cs_login_phase = true; ciap->connection_id = cs->cs_id; ciap->portal_id = cs->cs_portal_id; ciap->initiator_addrlen = cs->cs_initiator_sa->sa_len; error = copyout(cs->cs_initiator_sa, ciap->initiator_addr, cs->cs_initiator_sa->sa_len); if (error != 0) { snprintf(ci->error_str, sizeof(ci->error_str), "copyout failed with error %d", error); ci->status = CTL_ISCSI_ERROR; 
return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_send(struct ctl_iscsi *ci) { struct ctl_iscsi_send_params *cisp; struct cfiscsi_session *cs; struct icl_pdu *ip; size_t datalen; void *data; int error; cisp = (struct ctl_iscsi_send_params *)&(ci->data); mtx_lock(&cfiscsi_softc.lock); TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_id == cisp->connection_id) break; } if (cs == NULL) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "connection not found"); ci->status = CTL_ISCSI_ERROR; return; } mtx_unlock(&cfiscsi_softc.lock); #if 0 if (cs->cs_login_phase == false) return (EBUSY); #endif if (cs->cs_terminating) { snprintf(ci->error_str, sizeof(ci->error_str), "connection is terminating"); ci->status = CTL_ISCSI_ERROR; return; } datalen = cisp->data_segment_len; /* * XXX */ //if (datalen > CFISCSI_MAX_DATA_SEGMENT_LENGTH) { if (datalen > 65535) { snprintf(ci->error_str, sizeof(ci->error_str), "data segment too big"); ci->status = CTL_ISCSI_ERROR; return; } if (datalen > 0) { data = malloc(datalen, M_CFISCSI, M_WAITOK); error = copyin(cisp->data_segment, data, datalen); if (error != 0) { free(data, M_CFISCSI); snprintf(ci->error_str, sizeof(ci->error_str), "copyin error %d", error); ci->status = CTL_ISCSI_ERROR; return; } } ip = icl_pdu_new(cs->cs_conn, M_WAITOK); memcpy(ip->ip_bhs, cisp->bhs, sizeof(*ip->ip_bhs)); if (datalen > 0) { icl_pdu_append_data(ip, data, datalen, M_WAITOK); free(data, M_CFISCSI); } CFISCSI_SESSION_LOCK(cs); icl_pdu_queue(ip); CFISCSI_SESSION_UNLOCK(cs); ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_receive(struct ctl_iscsi *ci) { struct ctl_iscsi_receive_params *cirp; struct cfiscsi_session *cs; struct icl_pdu *ip; void *data; int error; cirp = (struct ctl_iscsi_receive_params *)&(ci->data); mtx_lock(&cfiscsi_softc.lock); TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_id == cirp->connection_id) break; } if (cs == NULL) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "connection not found"); ci->status = CTL_ISCSI_ERROR; return; } mtx_unlock(&cfiscsi_softc.lock); #if 0 if (is->is_login_phase == false) return (EBUSY); #endif CFISCSI_SESSION_LOCK(cs); while (cs->cs_login_pdu == NULL && cs->cs_terminating == false) { error = cv_wait_sig(&cs->cs_login_cv, &cs->cs_lock); if (error != 0) { CFISCSI_SESSION_UNLOCK(cs); snprintf(ci->error_str, sizeof(ci->error_str), "interrupted by signal"); ci->status = CTL_ISCSI_ERROR; return; } } if (cs->cs_terminating) { CFISCSI_SESSION_UNLOCK(cs); snprintf(ci->error_str, sizeof(ci->error_str), "connection terminating"); ci->status = CTL_ISCSI_ERROR; return; } ip = cs->cs_login_pdu; cs->cs_login_pdu = NULL; CFISCSI_SESSION_UNLOCK(cs); if (ip->ip_data_len > cirp->data_segment_len) { icl_pdu_free(ip); snprintf(ci->error_str, sizeof(ci->error_str), "data segment too big"); ci->status = CTL_ISCSI_ERROR; return; } copyout(ip->ip_bhs, cirp->bhs, sizeof(*ip->ip_bhs)); if (ip->ip_data_len > 0) { data = malloc(ip->ip_data_len, M_CFISCSI, M_WAITOK); icl_pdu_get_data(ip, 0, data, ip->ip_data_len); copyout(data, cirp->data_segment, ip->ip_data_len); free(data, M_CFISCSI); } icl_pdu_free(ip); ci->status = CTL_ISCSI_OK; } #endif /* !ICL_KERNEL_PROXY */ static void cfiscsi_ioctl_port_create(struct ctl_req *req) { struct cfiscsi_target *ct; struct ctl_port *port; const char *target, *alias, *val; struct scsi_vpd_id_descriptor *desc; int retval, len, idlen; uint16_t tag; target = dnvlist_get_string(req->args_nvl, "cfiscsi_target", 
NULL); alias = dnvlist_get_string(req->args_nvl, "cfiscsi_target_alias", NULL); val = dnvlist_get_string(req->args_nvl, "cfiscsi_portal_group_tag", NULL); if (target == NULL || val == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Missing required argument"); return; } tag = strtoul(val, NULL, 0); ct = cfiscsi_target_find_or_create(&cfiscsi_softc, target, alias, tag); if (ct == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "failed to create target \"%s\"", target); return; } if (ct->ct_state == CFISCSI_TARGET_STATE_ACTIVE) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "target \"%s\" for portal group tag %u already exists", target, tag); cfiscsi_target_release(ct); return; } port = &ct->ct_port; // WAT if (ct->ct_state == CFISCSI_TARGET_STATE_DYING) goto done; port->frontend = &cfiscsi_frontend; port->port_type = CTL_PORT_ISCSI; /* XXX KDM what should the real number be here? */ port->num_requested_ctl_io = 4096; port->port_name = "iscsi"; port->physical_port = (int)tag; port->virtual_port = ct->ct_target_id; port->port_online = cfiscsi_online; port->port_offline = cfiscsi_offline; port->port_info = cfiscsi_info; port->onoff_arg = ct; port->fe_datamove = cfiscsi_datamove; port->fe_done = cfiscsi_done; port->targ_port = -1; port->options = nvlist_clone(req->args_nvl); /* Generate Port ID. */ idlen = strlen(target) + strlen(",t,0x0001") + 1; idlen = roundup2(idlen, 4); len = sizeof(struct scsi_vpd_device_id) + idlen; port->port_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->port_devid->len = len; desc = (struct scsi_vpd_id_descriptor *)port->port_devid->data; desc->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_UTF8; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_SCSI_NAME; desc->length = idlen; snprintf(desc->identifier, idlen, "%s,t,0x%4.4x", target, tag); /* Generate Target ID. 
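* As with the Port ID generated above, this is a SCSI name string
* designator (UTF-8 code set, PIV set, but with a target association
* rather than a port association), carrying the bare iSCSI target name
* padded to a multiple of four bytes.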
*/ idlen = strlen(target) + 1; idlen = roundup2(idlen, 4); len = sizeof(struct scsi_vpd_device_id) + idlen; port->target_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->target_devid->len = len; desc = (struct scsi_vpd_id_descriptor *)port->target_devid->data; desc->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_UTF8; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_TARGET | SVPD_ID_TYPE_SCSI_NAME; desc->length = idlen; strlcpy(desc->identifier, target, idlen); retval = ctl_port_register(port); if (retval != 0) { free(port->port_devid, M_CFISCSI); free(port->target_devid, M_CFISCSI); cfiscsi_target_release(ct); req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "ctl_port_register() failed with error %d", retval); return; } done: ct->ct_state = CFISCSI_TARGET_STATE_ACTIVE; req->status = CTL_LUN_OK; req->result_nvl = nvlist_create(0); nvlist_add_number(req->result_nvl, "port_id", port->targ_port); } static void cfiscsi_ioctl_port_remove(struct ctl_req *req) { struct cfiscsi_target *ct; const char *target, *val; uint16_t tag; target = dnvlist_get_string(req->args_nvl, "cfiscsi_target", NULL); val = dnvlist_get_string(req->args_nvl, "cfiscsi_portal_group_tag", NULL); if (target == NULL || val == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Missing required argument"); return; } tag = strtoul(val, NULL, 0); ct = cfiscsi_target_find(&cfiscsi_softc, target, tag); if (ct == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "can't find target \"%s\"", target); return; } ct->ct_state = CFISCSI_TARGET_STATE_DYING; ctl_port_offline(&ct->ct_port); cfiscsi_target_release(ct); cfiscsi_target_release(ct); req->status = CTL_LUN_OK; } static int cfiscsi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_iscsi *ci; struct ctl_req *req; if (cmd == CTL_PORT_REQ) { req = (struct ctl_req *)addr; switch (req->reqtype) { case CTL_REQ_CREATE: cfiscsi_ioctl_port_create(req); break; case CTL_REQ_REMOVE: cfiscsi_ioctl_port_remove(req); break; default: req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Unsupported request type %d", req->reqtype); } return (0); } if (cmd != CTL_ISCSI) return (ENOTTY); ci = (struct ctl_iscsi *)addr; switch (ci->type) { case CTL_ISCSI_HANDOFF: cfiscsi_ioctl_handoff(ci); break; case CTL_ISCSI_LIST: cfiscsi_ioctl_list(ci); break; case CTL_ISCSI_LOGOUT: cfiscsi_ioctl_logout(ci); break; case CTL_ISCSI_TERMINATE: cfiscsi_ioctl_terminate(ci); break; case CTL_ISCSI_LIMITS: cfiscsi_ioctl_limits(ci); break; #ifdef ICL_KERNEL_PROXY case CTL_ISCSI_LISTEN: cfiscsi_ioctl_listen(ci); break; case CTL_ISCSI_ACCEPT: cfiscsi_ioctl_accept(ci); break; case CTL_ISCSI_SEND: cfiscsi_ioctl_send(ci); break; case CTL_ISCSI_RECEIVE: cfiscsi_ioctl_receive(ci); break; #else case CTL_ISCSI_LISTEN: case CTL_ISCSI_ACCEPT: case CTL_ISCSI_SEND: case CTL_ISCSI_RECEIVE: ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: CTL compiled without ICL_KERNEL_PROXY", __func__); break; #endif /* !ICL_KERNEL_PROXY */ default: ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: invalid iSCSI request type %d", __func__, ci->type); break; } return (0); } static void cfiscsi_target_hold(struct cfiscsi_target *ct) { refcount_acquire(&ct->ct_refcount); } static void cfiscsi_target_release(struct cfiscsi_target *ct) { struct cfiscsi_softc *softc; softc = ct->ct_softc; 
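/*
 * Drop the reference under the softc lock: if this turns out to be the
 * last reference, the target must disappear from the softc->targets
 * list atomically, so that cfiscsi_target_find() cannot look it up and
 * take a new reference on a target that is already being torn down.
 */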
mtx_lock(&softc->lock); if (refcount_release(&ct->ct_refcount)) { TAILQ_REMOVE(&softc->targets, ct, ct_next); mtx_unlock(&softc->lock); if (ct->ct_state != CFISCSI_TARGET_STATE_INVALID) { ct->ct_state = CFISCSI_TARGET_STATE_INVALID; if (ctl_port_deregister(&ct->ct_port) != 0) printf("%s: ctl_port_deregister() failed\n", __func__); } free(ct, M_CFISCSI); return; } mtx_unlock(&softc->lock); } static struct cfiscsi_target * cfiscsi_target_find(struct cfiscsi_softc *softc, const char *name, uint16_t tag) { struct cfiscsi_target *ct; mtx_lock(&softc->lock); TAILQ_FOREACH(ct, &softc->targets, ct_next) { if (ct->ct_tag != tag || strcmp(name, ct->ct_name) != 0 || ct->ct_state != CFISCSI_TARGET_STATE_ACTIVE) continue; cfiscsi_target_hold(ct); mtx_unlock(&softc->lock); return (ct); } mtx_unlock(&softc->lock); return (NULL); } static struct cfiscsi_target * cfiscsi_target_find_or_create(struct cfiscsi_softc *softc, const char *name, const char *alias, uint16_t tag) { struct cfiscsi_target *ct, *newct; if (name[0] == '\0' || strlen(name) >= CTL_ISCSI_NAME_LEN) return (NULL); newct = malloc(sizeof(*newct), M_CFISCSI, M_WAITOK | M_ZERO); mtx_lock(&softc->lock); TAILQ_FOREACH(ct, &softc->targets, ct_next) { if (ct->ct_tag != tag || strcmp(name, ct->ct_name) != 0 || ct->ct_state == CFISCSI_TARGET_STATE_INVALID) continue; cfiscsi_target_hold(ct); mtx_unlock(&softc->lock); free(newct, M_CFISCSI); return (ct); } strlcpy(newct->ct_name, name, sizeof(newct->ct_name)); if (alias != NULL) strlcpy(newct->ct_alias, alias, sizeof(newct->ct_alias)); newct->ct_tag = tag; refcount_init(&newct->ct_refcount, 1); newct->ct_softc = softc; if (TAILQ_EMPTY(&softc->targets)) softc->last_target_id = 0; newct->ct_target_id = ++softc->last_target_id; TAILQ_INSERT_TAIL(&softc->targets, newct, ct_next); mtx_unlock(&softc->lock); return (newct); } static void cfiscsi_pdu_done(struct icl_pdu *ip, int error) { if (error != 0) ; // XXX: Do something on error? ((ctl_ref)ip->ip_prv0)(ip->ip_prv1, -1); } static void cfiscsi_datamove_in(union ctl_io *io) { struct cfiscsi_session *cs; struct icl_pdu *request, *response; const struct iscsi_bhs_scsi_command *bhssc; struct iscsi_bhs_data_in *bhsdi; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; size_t len, expected_len, sg_len, buffer_offset; const char *sg_addr; icl_pdu_cb cb; int ctl_sg_count, error, i; request = PRIV_REQUEST(io); cs = PDU_SESSION(request); bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs; KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("bhssc->bhssc_opcode != ISCSI_BHS_OPCODE_SCSI_COMMAND")); if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; ctl_sg_count = io->scsiio.kern_sg_entries; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = io->scsiio.kern_data_len; ctl_sg_count = 1; } /* * This is the offset within the current SCSI command; for the first * call to cfiscsi_datamove() it will be 0, and for subsequent ones * it will be the sum of lengths of previous ones. */ buffer_offset = io->scsiio.kern_rel_offset; /* * This is the transfer length expected by the initiator. It can be * different from the amount of data from the SCSI point of view. */ expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length); /* * If the transfer is outside of expected length -- we are done. 
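* (That is, the initiator asked for fewer bytes than this command wants
* to move; anything past expected_len is simply not sent, and the
* difference is reported later as a residual overflow in the final
* Data-In PDU or in the SCSI Response.  For example, if the initiator
* expected 4096 bytes but the command produces 5120, only 4096 are
* sent and the residual count is 1024.)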
*/ if (buffer_offset >= expected_len) { #if 0 CFISCSI_SESSION_DEBUG(cs, "buffer_offset = %zd, " "already sent the expected len", buffer_offset); #endif io->scsiio.be_move_done(io); return; } if (io->scsiio.kern_data_ref != NULL) cb = cfiscsi_pdu_done; else cb = NULL; i = 0; sg_addr = NULL; sg_len = 0; response = NULL; bhsdi = NULL; for (;;) { if (response == NULL) { response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); ctl_set_busy(&io->scsiio); io->scsiio.be_move_done(io); cfiscsi_session_terminate(cs); return; } bhsdi = (struct iscsi_bhs_data_in *)response->ip_bhs; bhsdi->bhsdi_opcode = ISCSI_BHS_OPCODE_SCSI_DATA_IN; bhsdi->bhsdi_initiator_task_tag = bhssc->bhssc_initiator_task_tag; bhsdi->bhsdi_target_transfer_tag = 0xffffffff; bhsdi->bhsdi_datasn = htonl(PRIV_EXPDATASN(io)++); bhsdi->bhsdi_buffer_offset = htonl(buffer_offset); } KASSERT(i < ctl_sg_count, ("i >= ctl_sg_count")); if (sg_len == 0) { sg_addr = ctl_sglist[i].addr; sg_len = ctl_sglist[i].len; KASSERT(sg_len > 0, ("sg_len <= 0")); } len = sg_len; /* * Truncate to maximum data segment length. */ KASSERT(response->ip_data_len < cs->cs_max_send_data_segment_length, ("ip_data_len %zd >= max_send_data_segment_length %d", response->ip_data_len, cs->cs_max_send_data_segment_length)); if (response->ip_data_len + len > cs->cs_max_send_data_segment_length) { len = cs->cs_max_send_data_segment_length - response->ip_data_len; KASSERT(len <= sg_len, ("len %zd > sg_len %zd", len, sg_len)); } /* * Truncate to expected data transfer length. */ KASSERT(buffer_offset + response->ip_data_len < expected_len, ("buffer_offset %zd + ip_data_len %zd >= expected_len %zd", buffer_offset, response->ip_data_len, expected_len)); if (buffer_offset + response->ip_data_len + len > expected_len) { CFISCSI_SESSION_DEBUG(cs, "truncating from %zd " "to expected data transfer length %zd", buffer_offset + response->ip_data_len + len, expected_len); len = expected_len - (buffer_offset + response->ip_data_len); KASSERT(len <= sg_len, ("len %zd > sg_len %zd", len, sg_len)); } error = icl_pdu_append_data(response, sg_addr, len, M_NOWAIT | (cb ? ICL_NOCOPY : 0)); if (error != 0) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); icl_pdu_free(response); ctl_set_busy(&io->scsiio); io->scsiio.be_move_done(io); cfiscsi_session_terminate(cs); return; } sg_addr += len; sg_len -= len; io->scsiio.kern_data_resid -= len; KASSERT(buffer_offset + response->ip_data_len <= expected_len, ("buffer_offset %zd + ip_data_len %zd > expected_len %zd", buffer_offset, response->ip_data_len, expected_len)); if (buffer_offset + response->ip_data_len == expected_len) { /* * Already have the amount of data the initiator wanted. */ break; } if (sg_len == 0) { /* * End of scatter-gather segment; * proceed to the next one... */ if (i == ctl_sg_count - 1) { /* * ... unless this was the last one. */ break; } i++; } if (response->ip_data_len == cs->cs_max_send_data_segment_length) { /* * Can't stuff more data into the current PDU; * queue it. Note that's not enough to check * for kern_data_resid == 0 instead; there * may be several Data-In PDUs for the final * call to cfiscsi_datamove(), and we want * to set the F flag only on the last of them. 
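* The checks below therefore compare the running buffer_offset against
* both kern_total_len and expected_len, and only queue this PDU and
* start a fresh one when neither limit has been reached yet.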
*/ buffer_offset += response->ip_data_len; if (buffer_offset == io->scsiio.kern_total_len || buffer_offset == expected_len) { buffer_offset -= response->ip_data_len; break; } if (cb != NULL) { response->ip_prv0 = io->scsiio.kern_data_ref; response->ip_prv1 = io->scsiio.kern_data_arg; io->scsiio.kern_data_ref(io->scsiio.kern_data_arg, 1); } cfiscsi_pdu_queue_cb(response, cb); response = NULL; bhsdi = NULL; } } if (response != NULL) { buffer_offset += response->ip_data_len; if (buffer_offset == io->scsiio.kern_total_len || buffer_offset == expected_len) { bhsdi->bhsdi_flags |= BHSDI_FLAGS_F; if (io->io_hdr.status == CTL_SUCCESS) { bhsdi->bhsdi_flags |= BHSDI_FLAGS_S; if (io->scsiio.kern_total_len < ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhsdi->bhsdi_flags |= BHSSR_FLAGS_RESIDUAL_UNDERFLOW; bhsdi->bhsdi_residual_count = htonl(ntohl(bhssc->bhssc_expected_data_transfer_length) - io->scsiio.kern_total_len); } else if (io->scsiio.kern_total_len > ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhsdi->bhsdi_flags |= BHSSR_FLAGS_RESIDUAL_OVERFLOW; bhsdi->bhsdi_residual_count = htonl(io->scsiio.kern_total_len - ntohl(bhssc->bhssc_expected_data_transfer_length)); } bhsdi->bhsdi_status = io->scsiio.scsi_status; io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; } } KASSERT(response->ip_data_len > 0, ("sending empty Data-In")); if (cb != NULL) { response->ip_prv0 = io->scsiio.kern_data_ref; response->ip_prv1 = io->scsiio.kern_data_arg; io->scsiio.kern_data_ref(io->scsiio.kern_data_arg, 1); } cfiscsi_pdu_queue_cb(response, cb); } io->scsiio.be_move_done(io); } static void cfiscsi_datamove_out(union ctl_io *io) { struct cfiscsi_session *cs; struct icl_pdu *request, *response; const struct iscsi_bhs_scsi_command *bhssc; struct iscsi_bhs_r2t *bhsr2t; struct cfiscsi_data_wait *cdw; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; uint32_t expected_len, datamove_len, r2t_off, r2t_len; uint32_t target_transfer_tag; bool done; request = PRIV_REQUEST(io); cs = PDU_SESSION(request); bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs; KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("bhssc->bhssc_opcode != ISCSI_BHS_OPCODE_SCSI_COMMAND")); /* * Complete write underflow. Not a single byte to read. Return. */ expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length); if (io->scsiio.kern_rel_offset >= expected_len) { io->scsiio.be_move_done(io); return; } datamove_len = MIN(io->scsiio.kern_data_len, expected_len - io->scsiio.kern_rel_offset); target_transfer_tag = atomic_fetchadd_32(&cs->cs_target_transfer_tag, 1); cdw = cfiscsi_data_wait_new(cs, io, bhssc->bhssc_initiator_task_tag, &target_transfer_tag); if (cdw == NULL) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); ctl_set_busy(&io->scsiio); io->scsiio.be_move_done(io); cfiscsi_session_terminate(cs); return; } #if 0 CFISCSI_SESSION_DEBUG(cs, "expecting Data-Out with initiator " "task tag 0x%x, target transfer tag 0x%x", bhssc->bhssc_initiator_task_tag, target_transfer_tag); #endif cdw->cdw_ctl_io = io; cdw->cdw_target_transfer_tag = target_transfer_tag; cdw->cdw_initiator_task_tag = bhssc->bhssc_initiator_task_tag; cdw->cdw_r2t_end = datamove_len; cdw->cdw_datasn = 0; /* Set initial data pointer for the CDW respecting ext_data_filled. 
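* ext_data_filled accounts for unsolicited (immediate) data already
* received for this command, so the S/G cursor below is advanced past
* that amount before any R2T is issued.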
*/ if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = datamove_len; } cdw->cdw_sg_index = 0; cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr; cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len; r2t_off = io->scsiio.ext_data_filled; while (r2t_off > 0) { if (r2t_off >= cdw->cdw_sg_len) { r2t_off -= cdw->cdw_sg_len; cdw->cdw_sg_index++; cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr; cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len; continue; } cdw->cdw_sg_addr += r2t_off; cdw->cdw_sg_len -= r2t_off; r2t_off = 0; } if (cs->cs_immediate_data && io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled < icl_pdu_data_segment_length(request)) { done = cfiscsi_handle_data_segment(request, cdw); if (done) { cfiscsi_data_wait_free(cs, cdw); io->scsiio.be_move_done(io); return; } } r2t_off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled; r2t_len = MIN(datamove_len - io->scsiio.ext_data_filled, cs->cs_max_burst_length); cdw->cdw_r2t_end = io->scsiio.ext_data_filled + r2t_len; CFISCSI_SESSION_LOCK(cs); TAILQ_INSERT_TAIL(&cs->cs_waiting_for_data_out, cdw, cdw_next); CFISCSI_SESSION_UNLOCK(cs); /* * XXX: We should limit the number of outstanding R2T PDUs * per task to MaxOutstandingR2T. */ response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); ctl_set_busy(&io->scsiio); io->scsiio.be_move_done(io); cfiscsi_session_terminate(cs); return; } io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; bhsr2t = (struct iscsi_bhs_r2t *)response->ip_bhs; bhsr2t->bhsr2t_opcode = ISCSI_BHS_OPCODE_R2T; bhsr2t->bhsr2t_flags = 0x80; bhsr2t->bhsr2t_lun = bhssc->bhssc_lun; bhsr2t->bhsr2t_initiator_task_tag = bhssc->bhssc_initiator_task_tag; bhsr2t->bhsr2t_target_transfer_tag = target_transfer_tag; /* * XXX: Here we assume that cfiscsi_datamove() won't ever * be running concurrently on several CPUs for a given * command. */ bhsr2t->bhsr2t_r2tsn = htonl(PRIV_R2TSN(io)++); /* * This is the offset within the current SCSI command; * i.e. for the first call of datamove(), it will be 0, * and for subsequent ones it will be the sum of lengths * of previous ones. * * The ext_data_filled is to account for unsolicited * (immediate) data that might have already arrived. */ bhsr2t->bhsr2t_buffer_offset = htonl(r2t_off); /* * This is the total length (sum of S/G lengths) this call * to cfiscsi_datamove() is supposed to handle, limited by * MaxBurstLength. */ bhsr2t->bhsr2t_desired_data_transfer_length = htonl(r2t_len); cfiscsi_pdu_queue(response); } static void cfiscsi_datamove(union ctl_io *io) { if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) cfiscsi_datamove_in(io); else { /* We hadn't received anything during this datamove yet. 
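* Reset ext_data_filled before calling cfiscsi_datamove_out(), which
* uses it to track how much of this datamove has already been
* satisfied, e.g. by immediate data carried in the command PDU.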
*/ io->scsiio.ext_data_filled = 0; cfiscsi_datamove_out(io); } } static void cfiscsi_scsi_command_done(union ctl_io *io) { struct icl_pdu *request, *response; struct iscsi_bhs_scsi_command *bhssc; struct iscsi_bhs_scsi_response *bhssr; #ifdef DIAGNOSTIC struct cfiscsi_data_wait *cdw; #endif struct cfiscsi_session *cs; uint16_t sense_length; request = PRIV_REQUEST(io); cs = PDU_SESSION(request); bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs; KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("replying to wrong opcode 0x%x", bhssc->bhssc_opcode)); //CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x", // bhssc->bhssc_initiator_task_tag); #ifdef DIAGNOSTIC CFISCSI_SESSION_LOCK(cs); TAILQ_FOREACH(cdw, &cs->cs_waiting_for_data_out, cdw_next) KASSERT(bhssc->bhssc_initiator_task_tag != cdw->cdw_initiator_task_tag, ("dangling cdw")); CFISCSI_SESSION_UNLOCK(cs); #endif /* * Do not return status for aborted commands. * There are exceptions, but none supported by CTL yet. */ if (((io->io_hdr.flags & CTL_FLAG_ABORT) && (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) || (io->io_hdr.flags & CTL_FLAG_STATUS_SENT)) { ctl_free_io(io); icl_pdu_free(request); return; } response = cfiscsi_pdu_new_response(request, M_WAITOK); bhssr = (struct iscsi_bhs_scsi_response *)response->ip_bhs; bhssr->bhssr_opcode = ISCSI_BHS_OPCODE_SCSI_RESPONSE; bhssr->bhssr_flags = 0x80; /* * XXX: We don't deal with bidirectional under/overflows; * does anything actually support those? */ if (io->scsiio.kern_total_len < ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhssr->bhssr_flags |= BHSSR_FLAGS_RESIDUAL_UNDERFLOW; bhssr->bhssr_residual_count = htonl(ntohl(bhssc->bhssc_expected_data_transfer_length) - io->scsiio.kern_total_len); //CFISCSI_SESSION_DEBUG(cs, "underflow; residual count %d", // ntohl(bhssr->bhssr_residual_count)); } else if (io->scsiio.kern_total_len > ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhssr->bhssr_flags |= BHSSR_FLAGS_RESIDUAL_OVERFLOW; bhssr->bhssr_residual_count = htonl(io->scsiio.kern_total_len - ntohl(bhssc->bhssc_expected_data_transfer_length)); //CFISCSI_SESSION_DEBUG(cs, "overflow; residual count %d", // ntohl(bhssr->bhssr_residual_count)); } bhssr->bhssr_response = BHSSR_RESPONSE_COMMAND_COMPLETED; bhssr->bhssr_status = io->scsiio.scsi_status; bhssr->bhssr_initiator_task_tag = bhssc->bhssc_initiator_task_tag; bhssr->bhssr_expdatasn = htonl(PRIV_EXPDATASN(io)); if (io->scsiio.sense_len > 0) { #if 0 CFISCSI_SESSION_DEBUG(cs, "returning %d bytes of sense data", io->scsiio.sense_len); #endif sense_length = htons(io->scsiio.sense_len); icl_pdu_append_data(response, &sense_length, sizeof(sense_length), M_WAITOK); icl_pdu_append_data(response, &io->scsiio.sense_data, io->scsiio.sense_len, M_WAITOK); } ctl_free_io(io); icl_pdu_free(request); cfiscsi_pdu_queue(response); } static void cfiscsi_task_management_done(union ctl_io *io) { struct icl_pdu *request, *response; struct iscsi_bhs_task_management_request *bhstmr; struct iscsi_bhs_task_management_response *bhstmr2; struct cfiscsi_data_wait *cdw, *tmpcdw; struct cfiscsi_session *cs, *tcs; struct cfiscsi_softc *softc; int cold_reset = 0; request = PRIV_REQUEST(io); cs = PDU_SESSION(request); bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs; KASSERT((bhstmr->bhstmr_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_TASK_REQUEST, ("replying to wrong opcode 0x%x", bhstmr->bhstmr_opcode)); #if 0 CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x; referenced task 
tag 0x%x", bhstmr->bhstmr_initiator_task_tag, bhstmr->bhstmr_referenced_task_tag); #endif if ((bhstmr->bhstmr_function & ~0x80) == BHSTMR_FUNCTION_ABORT_TASK) { /* * Make sure we no longer wait for Data-Out for this command. */ CFISCSI_SESSION_LOCK(cs); TAILQ_FOREACH_SAFE(cdw, &cs->cs_waiting_for_data_out, cdw_next, tmpcdw) { if (bhstmr->bhstmr_referenced_task_tag != cdw->cdw_initiator_task_tag) continue; #if 0 CFISCSI_SESSION_DEBUG(cs, "removing csw for initiator task " "tag 0x%x", bhstmr->bhstmr_initiator_task_tag); #endif TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next); io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; cdw->cdw_ctl_io->scsiio.io_hdr.port_status = 43; cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io); cfiscsi_data_wait_free(cs, cdw); } CFISCSI_SESSION_UNLOCK(cs); } if ((bhstmr->bhstmr_function & ~0x80) == BHSTMR_FUNCTION_TARGET_COLD_RESET && io->io_hdr.status == CTL_SUCCESS) cold_reset = 1; response = cfiscsi_pdu_new_response(request, M_WAITOK); bhstmr2 = (struct iscsi_bhs_task_management_response *) response->ip_bhs; bhstmr2->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_RESPONSE; bhstmr2->bhstmr_flags = 0x80; switch (io->taskio.task_status) { case CTL_TASK_FUNCTION_COMPLETE: bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_COMPLETE; break; case CTL_TASK_FUNCTION_SUCCEEDED: bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_SUCCEEDED; break; case CTL_TASK_LUN_DOES_NOT_EXIST: bhstmr2->bhstmr_response = BHSTMR_RESPONSE_LUN_DOES_NOT_EXIST; break; case CTL_TASK_FUNCTION_NOT_SUPPORTED: default: bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED; break; } memcpy(bhstmr2->bhstmr_additional_reponse_information, io->taskio.task_resp, sizeof(io->taskio.task_resp)); bhstmr2->bhstmr_initiator_task_tag = bhstmr->bhstmr_initiator_task_tag; ctl_free_io(io); icl_pdu_free(request); cfiscsi_pdu_queue(response); if (cold_reset) { softc = cs->cs_target->ct_softc; mtx_lock(&softc->lock); TAILQ_FOREACH(tcs, &softc->sessions, cs_next) { if (tcs->cs_target == cs->cs_target) cfiscsi_session_terminate(tcs); } mtx_unlock(&softc->lock); } } static void cfiscsi_done(union ctl_io *io) { struct icl_pdu *request; struct cfiscsi_session *cs; KASSERT(((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE), ("invalid CTL status %#x", io->io_hdr.status)); if (io->io_hdr.io_type == CTL_IO_TASK && io->taskio.task_action == CTL_TASK_I_T_NEXUS_RESET) { /* * Implicit task termination has just completed; nothing to do. */ cs = PRIV_REQUEST(io); cs->cs_tasks_aborted = true; refcount_release(&cs->cs_outstanding_ctl_pdus); wakeup(__DEVOLATILE(void *, &cs->cs_outstanding_ctl_pdus)); ctl_free_io(io); return; } request = PRIV_REQUEST(io); cs = PDU_SESSION(request); switch (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) { case ISCSI_BHS_OPCODE_SCSI_COMMAND: cfiscsi_scsi_command_done(io); break; case ISCSI_BHS_OPCODE_TASK_REQUEST: cfiscsi_task_management_done(io); break; default: panic("cfiscsi_done called with wrong opcode 0x%x", request->ip_bhs->bhs_opcode); } refcount_release(&cs->cs_outstanding_ctl_pdus); } Index: head/sys/cam/ctl/ctl_io.h =================================================================== --- head/sys/cam/ctl/ctl_io.h (revision 367043) +++ head/sys/cam/ctl/ctl_io.h (revision 367044) @@ -1,600 +1,602 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 Silicon Graphics International Corp. * Copyright (c) 2014-2015 Alexander Motin * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_io.h#5 $ * $FreeBSD$ */ /* * CAM Target Layer data movement structures/interface. * * Author: Ken Merry */ #ifndef _CTL_IO_H_ #define _CTL_IO_H_ #define CTL_MAX_CDBLEN 32 /* * Uncomment this next line to enable printing out times for I/Os * that take longer than CTL_TIME_IO_SECS seconds to get to the datamove * and/or done stage. */ #define CTL_TIME_IO #ifdef CTL_TIME_IO #define CTL_TIME_IO_DEFAULT_SECS 90 #endif /* * Uncomment this next line to enable the CTL I/O delay feature. You * can delay I/O at two different points -- datamove and done. This is * useful for diagnosing abort conditions (for hosts that send an abort on a * timeout), and for determining how long a host's timeout is. */ //#define CTL_IO_DELAY typedef enum { CTL_STATUS_NONE, /* No status */ CTL_SUCCESS, /* Transaction completed successfully */ CTL_CMD_TIMEOUT, /* Command timed out, shouldn't happen here */ CTL_SEL_TIMEOUT, /* Selection timeout, shouldn't happen here */ CTL_ERROR, /* General CTL error XXX expand on this? */ CTL_SCSI_ERROR, /* SCSI error, look at status byte/sense data */ CTL_CMD_ABORTED, /* Command aborted, don't return status */ CTL_STATUS_MASK = 0xfff,/* Mask off any status flags */ CTL_AUTOSENSE = 0x1000 /* Autosense performed */ } ctl_io_status; /* * WARNING: Keep the data in/out/none flags where they are. They're used * in conjunction with ctl_cmd_flags. See comment above ctl_cmd_flags * definition in ctl_private.h. 
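* (Frontends also rely on these values directly; e.g. cfiscsi_datamove()
* masks io_hdr.flags with CTL_FLAG_DATA_MASK to pick the transfer
* direction.)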
*/ typedef enum { CTL_FLAG_NONE = 0x00000000, /* no flags */ CTL_FLAG_DATA_IN = 0x00000001, /* DATA IN */ CTL_FLAG_DATA_OUT = 0x00000002, /* DATA OUT */ CTL_FLAG_DATA_NONE = 0x00000003, /* no data */ CTL_FLAG_DATA_MASK = 0x00000003, CTL_FLAG_DO_AUTOSENSE = 0x00000020, /* grab sense info */ CTL_FLAG_USER_REQ = 0x00000040, /* request came from userland */ CTL_FLAG_ALLOCATED = 0x00000100, /* data space allocated */ CTL_FLAG_ABORT_STATUS = 0x00000400, /* return TASK ABORTED status */ CTL_FLAG_ABORT = 0x00000800, /* this I/O should be aborted */ CTL_FLAG_DMA_INPROG = 0x00001000, /* DMA in progress */ CTL_FLAG_DELAY_DONE = 0x00004000, /* delay injection done */ CTL_FLAG_INT_COPY = 0x00008000, /* internal copy, no done call*/ CTL_FLAG_SENT_2OTHER_SC = 0x00010000, CTL_FLAG_FROM_OTHER_SC = 0x00020000, CTL_FLAG_IS_WAS_ON_RTR = 0x00040000, /* Don't rerun cmd on failover*/ CTL_FLAG_BUS_ADDR = 0x00080000, /* ctl_sglist contains BUS addresses, not virtual ones*/ CTL_FLAG_IO_CONT = 0x00100000, /* Continue I/O instead of completing */ #if 0 CTL_FLAG_ALREADY_DONE = 0x00200000 /* I/O already completed */ #endif CTL_FLAG_NO_DATAMOVE = 0x00400000, CTL_FLAG_DMA_QUEUED = 0x00800000, /* DMA queued but not started*/ CTL_FLAG_STATUS_QUEUED = 0x01000000, /* Status queued but not sent*/ CTL_FLAG_FAILOVER = 0x04000000, /* Killed by a failover */ CTL_FLAG_IO_ACTIVE = 0x08000000, /* I/O active on this SC */ CTL_FLAG_STATUS_SENT = 0x10000000, /* Status sent by datamove */ CTL_FLAG_SERSEQ_DONE = 0x20000000 /* All storage I/O started */ } ctl_io_flags; struct ctl_lba_len { uint64_t lba; uint32_t len; }; struct ctl_lba_len_flags { uint64_t lba; uint32_t len; uint32_t flags; #define CTL_LLF_FUA 0x04000000 #define CTL_LLF_DPO 0x08000000 #define CTL_LLF_READ 0x10000000 #define CTL_LLF_WRITE 0x20000000 #define CTL_LLF_VERIFY 0x40000000 #define CTL_LLF_COMPARE 0x80000000 }; struct ctl_ptr_len_flags { uint8_t *ptr; uint32_t len; uint32_t flags; }; union ctl_priv { uint8_t bytes[sizeof(uint64_t) * 2]; uint64_t integer; uint64_t integers[2]; void *ptr; void *ptrs[2]; }; /* * Number of CTL private areas. */ #define CTL_NUM_PRIV 6 /* * Which private area are we using for a particular piece of data? */ #define CTL_PRIV_LUN 0 /* CTL LUN pointer goes here */ #define CTL_PRIV_LBA_LEN 1 /* Decoded LBA/len for read/write*/ #define CTL_PRIV_MODEPAGE 1 /* Modepage info for config write */ #define CTL_PRIV_BACKEND 2 /* Reserved for block, RAIDCore */ #define CTL_PRIV_BACKEND_LUN 3 /* Backend LUN pointer */ #define CTL_PRIV_FRONTEND 4 /* Frontend storage */ #define CTL_PRIV_FRONTEND2 5 /* Another frontend storage */ #define CTL_LUN(io) ((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[0]) #define CTL_SOFTC(io) ((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[1]) #define CTL_BACKEND_LUN(io) ((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptrs[0]) #define CTL_PORT(io) (((struct ctl_softc *)CTL_SOFTC(io))-> \ ctl_ports[(io)->io_hdr.nexus.targ_port]) /* * These are used only on Originating SC in XFER mode, where requests don't * ever reach backends, so we can reuse backend's private storage. 
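* CTL_RSGL/CTL_LSGL below simply alias the two pointer slots of the
* CTL_PRIV_BACKEND private area; CTL_RSGLT/CTL_LSGLT are the same
* pointers cast to struct ctl_sg_entry arrays.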
*/ #define CTL_RSGL(io) ((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptrs[0]) #define CTL_LSGL(io) ((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptrs[1]) #define CTL_RSGLT(io) ((struct ctl_sg_entry *)CTL_RSGL(io)) #define CTL_LSGLT(io) ((struct ctl_sg_entry *)CTL_LSGL(io)) #define CTL_INVALID_PORTNAME 0xFF #define CTL_UNMAPPED_IID 0xFF struct ctl_sg_entry { void *addr; size_t len; }; typedef enum { CTL_IO_NONE, CTL_IO_SCSI, CTL_IO_TASK, } ctl_io_type; struct ctl_nexus { uint32_t initid; /* Initiator ID */ uint32_t targ_port; /* Target port, filled in by PORT */ uint32_t targ_lun; /* Destination lun */ uint32_t targ_mapped_lun; /* Destination lun CTL-wide */ }; typedef enum { CTL_MSG_SERIALIZE, CTL_MSG_R2R, CTL_MSG_FINISH_IO, CTL_MSG_BAD_JUJU, CTL_MSG_MANAGE_TASKS, CTL_MSG_PERS_ACTION, CTL_MSG_DATAMOVE, CTL_MSG_DATAMOVE_DONE, CTL_MSG_UA, /* Set/clear UA on secondary. */ CTL_MSG_PORT_SYNC, /* Information about port. */ CTL_MSG_LUN_SYNC, /* Information about LUN. */ CTL_MSG_IID_SYNC, /* Information about initiator. */ CTL_MSG_LOGIN, /* Information about HA peer. */ CTL_MSG_MODE_SYNC, /* Mode page current content. */ CTL_MSG_FAILOVER /* Fake, never sent though the wire */ } ctl_msg_type; struct ctl_scsiio; struct ctl_io_hdr { uint32_t version; /* interface version XXX */ ctl_io_type io_type; /* task I/O, SCSI I/O, etc. */ ctl_msg_type msg_type; struct ctl_nexus nexus; /* Initiator, port, target, lun */ uint32_t iid_indx; /* the index into the iid mapping */ uint32_t flags; /* transaction flags */ uint32_t status; /* transaction status */ uint32_t port_status; /* trans status, set by PORT, 0 = good*/ uint32_t timeout; /* timeout in ms */ uint32_t retries; /* retry count */ #ifdef CTL_IO_DELAY struct callout delay_callout; #endif /* CTL_IO_DELAY */ #ifdef CTL_TIME_IO time_t start_time; /* I/O start time */ struct bintime start_bt; /* Timer start ticks */ struct bintime dma_start_bt; /* DMA start ticks */ struct bintime dma_bt; /* DMA total ticks */ #endif /* CTL_TIME_IO */ uint32_t num_dmas; /* Number of DMAs */ union ctl_io *remote_io; /* I/O counterpart on remote HA side */ union ctl_io *blocker; /* I/O blocking this one */ void *pool; /* I/O pool */ union ctl_priv ctl_private[CTL_NUM_PRIV];/* CTL private area */ TAILQ_HEAD(, ctl_io_hdr) blocked_queue; /* I/Os blocked by this one */ STAILQ_ENTRY(ctl_io_hdr) links; /* linked list pointer */ TAILQ_ENTRY(ctl_io_hdr) ooa_links; /* ooa_queue links */ TAILQ_ENTRY(ctl_io_hdr) blocked_links; /* blocked_queue links */ }; typedef enum { CTL_TAG_UNTAGGED, CTL_TAG_SIMPLE, CTL_TAG_ORDERED, CTL_TAG_HEAD_OF_QUEUE, CTL_TAG_ACA } ctl_tag_type; union ctl_io; typedef void (*ctl_ref)(void *arg, int diff); /* * SCSI passthrough I/O structure for the CAM Target Layer. Note * that some of these fields are here for completeness, but they aren't * used in the CTL implementation. e.g., timeout and retries won't be * used. * * Note: Make sure the io_hdr is *always* the first element in this * structure. */ struct ctl_scsiio { struct ctl_io_hdr io_hdr; /* common to all I/O types */ /* * The ext_* fields are generally intended for frontend use; CTL itself * doesn't modify or use them. */ uint32_t ext_sg_entries; /* 0 = no S/G list, > 0 = num entries */ uint8_t *ext_data_ptr; /* data buffer or S/G list */ uint32_t ext_data_len; /* Data transfer length */ uint32_t ext_data_filled; /* Amount of data filled so far */ /* * The number of scatter/gather entries in the list pointed to * by kern_data_ptr. 0 means there is no list, just a data pointer. 
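* When it is non-zero, kern_data_ptr points to an array of
* struct ctl_sg_entry rather than to the data itself.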
*/ uint32_t kern_sg_entries; uint32_t rem_sg_entries; /* Unused. */ /* * The data pointer or a pointer to the scatter/gather list. */ uint8_t *kern_data_ptr; /* * Length of the data buffer or scatter/gather list. It's also * the length of this particular piece of the data transfer, * ie. number of bytes expected to be transferred by the current * invocation of frontend's datamove() callback. It's always * less than or equal to kern_total_len. */ uint32_t kern_data_len; /* * Total length of data to be transferred during this particular * SCSI command, as decoded from SCSI CDB. */ uint32_t kern_total_len; /* * Amount of data left after the current data transfer. */ uint32_t kern_data_resid; /* * Byte offset of this transfer, equal to the amount of data * already transferred for this SCSI command during previous * datamove() invocations. */ uint32_t kern_rel_offset; struct scsi_sense_data sense_data; /* sense data */ uint8_t sense_len; /* Returned sense length */ uint8_t scsi_status; /* SCSI status byte */ uint8_t sense_residual; /* Unused. */ + uint8_t priority; /* Command priority */ uint32_t residual; /* Unused */ uint32_t tag_num; /* tag number */ ctl_tag_type tag_type; /* simple, ordered, head of queue,etc.*/ uint8_t cdb_len; /* CDB length */ uint8_t cdb[CTL_MAX_CDBLEN]; /* CDB */ int (*be_move_done)(union ctl_io *io); /* called by fe */ int (*io_cont)(union ctl_io *io); /* to continue processing */ ctl_ref kern_data_ref; /* Method to reference/release data */ void *kern_data_arg; /* Opaque argument for kern_data_ref() */ }; typedef enum { CTL_TASK_ABORT_TASK, CTL_TASK_ABORT_TASK_SET, CTL_TASK_CLEAR_ACA, CTL_TASK_CLEAR_TASK_SET, CTL_TASK_I_T_NEXUS_RESET, CTL_TASK_LUN_RESET, CTL_TASK_TARGET_RESET, CTL_TASK_BUS_RESET, CTL_TASK_PORT_LOGIN, CTL_TASK_PORT_LOGOUT, CTL_TASK_QUERY_TASK, CTL_TASK_QUERY_TASK_SET, CTL_TASK_QUERY_ASYNC_EVENT } ctl_task_type; typedef enum { CTL_TASK_FUNCTION_COMPLETE, CTL_TASK_FUNCTION_SUCCEEDED, CTL_TASK_FUNCTION_REJECTED, CTL_TASK_LUN_DOES_NOT_EXIST, CTL_TASK_FUNCTION_NOT_SUPPORTED } ctl_task_status; /* * Task management I/O structure. Aborts, bus resets, etc., are sent using * this structure. * * Note: Make sure the io_hdr is *always* the first element in this * structure. */ struct ctl_taskio { struct ctl_io_hdr io_hdr; /* common to all I/O types */ ctl_task_type task_action; /* Target Reset, Abort, etc. */ uint32_t tag_num; /* tag number */ ctl_tag_type tag_type; /* simple, ordered, etc. */ uint8_t task_status; /* Complete, Succeeded, etc. */ uint8_t task_resp[3];/* Response information */ }; /* * HA link messages. */ #define CTL_HA_VERSION 3 /* * Used for CTL_MSG_LOGIN. */ struct ctl_ha_msg_login { ctl_msg_type msg_type; int version; int ha_mode; int ha_id; int max_luns; int max_ports; int max_init_per_port; }; typedef enum { CTL_PR_REG_KEY, CTL_PR_UNREG_KEY, CTL_PR_PREEMPT, CTL_PR_CLEAR, CTL_PR_RESERVE, CTL_PR_RELEASE } ctl_pr_action; /* * The PR info is specifically for sending Persistent Reserve actions * to the other SC which it must also act on. * * Note: Make sure the io_hdr is *always* the first element in this * structure. */ struct ctl_pr_info { ctl_pr_action action; uint8_t sa_res_key[8]; uint8_t res_type; uint32_t residx; }; struct ctl_ha_msg_hdr { ctl_msg_type msg_type; uint32_t status; /* transaction status */ union ctl_io *original_sc; union ctl_io *serializing_sc; struct ctl_nexus nexus; /* Initiator, port, target, lun */ }; #define CTL_HA_MAX_SG_ENTRIES 16 #define CTL_HA_DATAMOVE_SEGMENT 131072 /* * Used for CTL_MSG_PERS_ACTION. 
*/ struct ctl_ha_msg_pr { struct ctl_ha_msg_hdr hdr; struct ctl_pr_info pr_info; }; /* * Used for CTL_MSG_UA. */ struct ctl_ha_msg_ua { struct ctl_ha_msg_hdr hdr; int ua_all; int ua_set; int ua_type; uint8_t ua_info[8]; }; /* * The S/G handling here is a little different than the standard ctl_scsiio * structure, because we can't pass data by reference in between controllers. * The S/G list in the ctl_scsiio struct is normally passed in the * kern_data_ptr field. So kern_sg_entries here will always be non-zero, * even if there is only one entry. * * Used for CTL_MSG_DATAMOVE. */ struct ctl_ha_msg_dt { struct ctl_ha_msg_hdr hdr; ctl_io_flags flags; /* Only I/O flags are used here */ uint32_t sg_sequence; /* S/G portion number */ uint8_t sg_last; /* last S/G batch = 1 */ uint32_t sent_sg_entries; /* previous S/G count */ uint32_t cur_sg_entries; /* current S/G entries */ uint32_t kern_sg_entries; /* total S/G entries */ uint32_t kern_data_len; /* Length of this S/G list */ uint32_t kern_total_len; /* Total length of this transaction */ uint32_t kern_data_resid; /* Length left to transfer after this*/ uint32_t kern_rel_offset; /* Byte Offset of this transfer */ struct ctl_sg_entry sg_list[CTL_HA_MAX_SG_ENTRIES]; }; /* * Used for CTL_MSG_SERIALIZE, CTL_MSG_FINISH_IO, CTL_MSG_BAD_JUJU, * and CTL_MSG_DATAMOVE_DONE. */ struct ctl_ha_msg_scsi { struct ctl_ha_msg_hdr hdr; uint32_t tag_num; /* tag number */ ctl_tag_type tag_type; /* simple, ordered, etc. */ uint8_t cdb[CTL_MAX_CDBLEN]; /* CDB */ uint8_t cdb_len; /* CDB length */ uint8_t scsi_status; /* SCSI status byte */ uint8_t sense_len; /* Returned sense length */ + uint8_t priority; /* Command priority */ uint32_t port_status; /* trans status, set by FETD, 0 = good*/ uint32_t kern_data_resid; /* for DATAMOVE_DONE */ struct scsi_sense_data sense_data; /* sense data */ }; /* * Used for CTL_MSG_MANAGE_TASKS. */ struct ctl_ha_msg_task { struct ctl_ha_msg_hdr hdr; ctl_task_type task_action; /* Target Reset, Abort, etc. */ uint32_t tag_num; /* tag number */ ctl_tag_type tag_type; /* simple, ordered, etc. */ }; /* * Used for CTL_MSG_PORT_SYNC. */ struct ctl_ha_msg_port { struct ctl_ha_msg_hdr hdr; int port_type; int physical_port; int virtual_port; int status; int name_len; int lun_map_len; int port_devid_len; int target_devid_len; int init_devid_len; uint8_t data[]; }; /* * Used for CTL_MSG_LUN_SYNC. */ struct ctl_ha_msg_lun { struct ctl_ha_msg_hdr hdr; int flags; unsigned int pr_generation; uint32_t pr_res_idx; uint8_t pr_res_type; int lun_devid_len; int pr_key_count; uint8_t data[]; }; struct ctl_ha_msg_lun_pr_key { uint32_t pr_iid; uint64_t pr_key; }; /* * Used for CTL_MSG_IID_SYNC. */ struct ctl_ha_msg_iid { struct ctl_ha_msg_hdr hdr; int in_use; int name_len; uint64_t wwpn; uint8_t data[]; }; /* * Used for CTL_MSG_MODE_SYNC. 
*/ struct ctl_ha_msg_mode { struct ctl_ha_msg_hdr hdr; uint8_t page_code; uint8_t subpage; uint16_t page_len; uint8_t data[]; }; union ctl_ha_msg { struct ctl_ha_msg_hdr hdr; struct ctl_ha_msg_task task; struct ctl_ha_msg_scsi scsi; struct ctl_ha_msg_dt dt; struct ctl_ha_msg_pr pr; struct ctl_ha_msg_ua ua; struct ctl_ha_msg_port port; struct ctl_ha_msg_lun lun; struct ctl_ha_msg_iid iid; struct ctl_ha_msg_login login; struct ctl_ha_msg_mode mode; }; struct ctl_prio { struct ctl_io_hdr io_hdr; struct ctl_ha_msg_pr pr_msg; }; union ctl_io { struct ctl_io_hdr io_hdr; /* common to all I/O types */ struct ctl_scsiio scsiio; /* Normal SCSI commands */ struct ctl_taskio taskio; /* SCSI task management/reset */ struct ctl_prio presio; /* update per. res info on other SC */ }; #ifdef _KERNEL union ctl_io *ctl_alloc_io(void *pool_ref); union ctl_io *ctl_alloc_io_nowait(void *pool_ref); void ctl_free_io(union ctl_io *io); void ctl_zero_io(union ctl_io *io); #endif /* _KERNEL */ #endif /* _CTL_IO_H_ */ /* * vim: ts=8 */ Index: head/sys/cam/ctl/ctl_util.c =================================================================== --- head/sys/cam/ctl/ctl_util.c (revision 367043) +++ head/sys/cam/ctl/ctl_util.c (revision 367044) @@ -1,899 +1,900 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_util.c#2 $ */ /* * CAM Target Layer SCSI library * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #ifdef _KERNEL #include #include #include #include #include #else /* __KERNEL__ */ #include #include #include #include #include #include #endif /* __KERNEL__ */ #include #include #include #include #include #include #include struct ctl_status_desc { ctl_io_status status; const char *description; }; struct ctl_task_desc { ctl_task_type task_action; const char *description; }; static struct ctl_status_desc ctl_status_table[] = { {CTL_STATUS_NONE, "No Status"}, {CTL_SUCCESS, "Command Completed Successfully"}, {CTL_CMD_TIMEOUT, "Command Timed Out"}, {CTL_SEL_TIMEOUT, "Selection Timeout"}, {CTL_ERROR, "Command Failed"}, {CTL_SCSI_ERROR, "SCSI Error"}, {CTL_CMD_ABORTED, "Command Aborted"}, }; static struct ctl_task_desc ctl_task_table[] = { {CTL_TASK_ABORT_TASK, "Abort Task"}, {CTL_TASK_ABORT_TASK_SET, "Abort Task Set"}, {CTL_TASK_CLEAR_ACA, "Clear ACA"}, {CTL_TASK_CLEAR_TASK_SET, "Clear Task Set"}, {CTL_TASK_I_T_NEXUS_RESET, "I_T Nexus Reset"}, {CTL_TASK_LUN_RESET, "LUN Reset"}, {CTL_TASK_TARGET_RESET, "Target Reset"}, {CTL_TASK_BUS_RESET, "Bus Reset"}, {CTL_TASK_PORT_LOGIN, "Port Login"}, {CTL_TASK_PORT_LOGOUT, "Port Logout"}, {CTL_TASK_QUERY_TASK, "Query Task"}, {CTL_TASK_QUERY_TASK_SET, "Query Task Set"}, {CTL_TASK_QUERY_ASYNC_EVENT, "Query Async Event"} }; void ctl_scsi_tur(union ctl_io *io, ctl_tag_type tag_type, uint8_t control) { struct ctl_scsiio *ctsio; struct scsi_test_unit_ready *cdb; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; ctsio = &io->scsiio; cdb = (struct scsi_test_unit_ready *)ctsio->cdb; cdb->opcode = TEST_UNIT_READY; cdb->control = control; io->io_hdr.flags = CTL_FLAG_DATA_NONE; ctsio->tag_type = tag_type; ctsio->cdb_len = sizeof(*cdb); ctsio->ext_data_len = 0; ctsio->ext_data_ptr = NULL; ctsio->ext_sg_entries = 0; ctsio->ext_data_filled = 0; ctsio->sense_len = SSD_FULL_SIZE; } void ctl_scsi_inquiry(union ctl_io *io, uint8_t *data_ptr, int32_t data_len, uint8_t byte2, uint8_t page_code, ctl_tag_type tag_type, uint8_t control) { struct ctl_scsiio *ctsio; struct scsi_inquiry *cdb; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; ctsio = &io->scsiio; cdb = (struct scsi_inquiry *)ctsio->cdb; cdb->opcode = INQUIRY; cdb->byte2 = byte2; cdb->page_code = page_code; cdb->control = control; scsi_ulto2b(data_len, cdb->length); io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; ctsio->tag_type = tag_type; ctsio->cdb_len = sizeof(*cdb); ctsio->ext_data_len = data_len; ctsio->ext_data_ptr = data_ptr; ctsio->ext_sg_entries = 0; ctsio->ext_data_filled = 0; ctsio->sense_len = SSD_FULL_SIZE; } void ctl_scsi_request_sense(union ctl_io *io, uint8_t *data_ptr, int32_t data_len, uint8_t byte2, ctl_tag_type tag_type, uint8_t control) { struct ctl_scsiio *ctsio; struct scsi_request_sense *cdb; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; ctsio = &io->scsiio; cdb = (struct scsi_request_sense *)ctsio->cdb; cdb->opcode = REQUEST_SENSE; cdb->byte2 = byte2; cdb->control = control; cdb->length = data_len; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; ctsio->tag_type = tag_type; ctsio->cdb_len = sizeof(*cdb); ctsio->ext_data_ptr = data_ptr; ctsio->ext_data_len = data_len; ctsio->ext_sg_entries = 0; ctsio->ext_data_filled = 0; ctsio->sense_len = SSD_FULL_SIZE; } void ctl_scsi_report_luns(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, uint8_t select_report, 
ctl_tag_type tag_type, uint8_t control) { struct ctl_scsiio *ctsio; struct scsi_report_luns *cdb; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; ctsio = &io->scsiio; cdb = (struct scsi_report_luns *)ctsio->cdb; cdb->opcode = REPORT_LUNS; cdb->select_report = select_report; scsi_ulto4b(data_len, cdb->length); cdb->control = control; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; ctsio->tag_type = tag_type; ctsio->cdb_len = sizeof(*cdb); ctsio->ext_data_ptr = data_ptr; ctsio->ext_data_len = data_len; ctsio->ext_sg_entries = 0; ctsio->ext_data_filled = 0; ctsio->sense_len = SSD_FULL_SIZE; } void ctl_scsi_read_write_buffer(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int read_buffer, uint8_t mode, uint8_t buffer_id, uint32_t buffer_offset, ctl_tag_type tag_type, uint8_t control) { struct ctl_scsiio *ctsio; struct scsi_write_buffer *cdb; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; ctsio = &io->scsiio; cdb = (struct scsi_write_buffer *)ctsio->cdb; if (read_buffer != 0) cdb->opcode = READ_BUFFER; else cdb->opcode = WRITE_BUFFER; cdb->byte2 = mode & RWB_MODE; cdb->buffer_id = buffer_id; scsi_ulto3b(buffer_offset, cdb->offset); scsi_ulto3b(data_len, cdb->length); cdb->control = control; io->io_hdr.io_type = CTL_IO_SCSI; if (read_buffer != 0) io->io_hdr.flags = CTL_FLAG_DATA_IN; else io->io_hdr.flags = CTL_FLAG_DATA_OUT; ctsio->tag_type = tag_type; ctsio->cdb_len = sizeof(*cdb); ctsio->ext_data_ptr = data_ptr; ctsio->ext_data_len = data_len; ctsio->ext_sg_entries = 0; ctsio->ext_data_filled = 0; ctsio->sense_len = SSD_FULL_SIZE; } void ctl_scsi_read_write(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int read_op, uint8_t byte2, int minimum_cdb_size, uint64_t lba, uint32_t num_blocks, ctl_tag_type tag_type, uint8_t control) { struct ctl_scsiio *ctsio; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; ctsio = &io->scsiio; /* * Pick out the smallest CDB that will hold the user's request. * minimum_cdb_size allows cranking the CDB size up, even for * requests that would not normally need a large CDB. This can be * useful for testing (e.g. to make sure READ_16 support works without * having an array larger than 2TB) and for compatibility -- e.g. * if your device doesn't support READ_6. (ATAPI drives don't.) */ if ((minimum_cdb_size < 10) && ((lba & 0x1fffff) == lba) && ((num_blocks & 0xff) == num_blocks) && (byte2 == 0)) { struct scsi_rw_6 *cdb; /* * Note that according to SBC-2, the target should return 256 * blocks if the transfer length in a READ(6) or WRITE(6) CDB * is set to 0. Since it's possible that some targets * won't do the right thing, we only send a READ(6) or * WRITE(6) for transfer sizes up to and including 255 blocks. */ cdb = (struct scsi_rw_6 *)ctsio->cdb; cdb->opcode = (read_op) ? READ_6 : WRITE_6; scsi_ulto3b(lba, cdb->addr); cdb->length = num_blocks & 0xff; cdb->control = control; ctsio->cdb_len = sizeof(*cdb); } else if ((minimum_cdb_size < 12) && ((num_blocks & 0xffff) == num_blocks) && ((lba & 0xffffffff) == lba)) { struct scsi_rw_10 *cdb; cdb = (struct scsi_rw_10 *)ctsio->cdb; cdb->opcode = (read_op) ? READ_10 : WRITE_10; cdb->byte2 = byte2; scsi_ulto4b(lba, cdb->addr); cdb->reserved = 0; scsi_ulto2b(num_blocks, cdb->length); cdb->control = control; ctsio->cdb_len = sizeof(*cdb); } else if ((minimum_cdb_size < 16) && ((num_blocks & 0xffffffff) == num_blocks) && ((lba & 0xffffffff) == lba)) { struct scsi_rw_12 *cdb; cdb = (struct scsi_rw_12 *)ctsio->cdb; cdb->opcode = (read_op) ? 
READ_12 : WRITE_12; cdb->byte2 = byte2; scsi_ulto4b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); cdb->reserved = 0; cdb->control = control; ctsio->cdb_len = sizeof(*cdb); } else { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)ctsio->cdb; cdb->opcode = (read_op) ? READ_16 : WRITE_16; cdb->byte2 = byte2; scsi_u64to8b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); cdb->reserved = 0; cdb->control = control; ctsio->cdb_len = sizeof(*cdb); } io->io_hdr.io_type = CTL_IO_SCSI; if (read_op != 0) io->io_hdr.flags = CTL_FLAG_DATA_IN; else io->io_hdr.flags = CTL_FLAG_DATA_OUT; ctsio->tag_type = tag_type; ctsio->ext_data_ptr = data_ptr; ctsio->ext_data_len = data_len; ctsio->ext_sg_entries = 0; ctsio->ext_data_filled = 0; ctsio->sense_len = SSD_FULL_SIZE; } void ctl_scsi_write_same(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, uint8_t byte2, uint64_t lba, uint32_t num_blocks, ctl_tag_type tag_type, uint8_t control) { struct ctl_scsiio *ctsio; struct scsi_write_same_16 *cdb; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; ctsio = &io->scsiio; ctsio->cdb_len = sizeof(*cdb); cdb = (struct scsi_write_same_16 *)ctsio->cdb; cdb->opcode = WRITE_SAME_16; cdb->byte2 = byte2; scsi_u64to8b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); cdb->group = 0; cdb->control = control; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_OUT; ctsio->tag_type = tag_type; ctsio->ext_data_ptr = data_ptr; ctsio->ext_data_len = data_len; ctsio->ext_sg_entries = 0; ctsio->ext_data_filled = 0; ctsio->sense_len = SSD_FULL_SIZE; } void ctl_scsi_read_capacity(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, uint32_t addr, int reladr, int pmi, ctl_tag_type tag_type, uint8_t control) { struct scsi_read_capacity *cdb; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; cdb = (struct scsi_read_capacity *)io->scsiio.cdb; cdb->opcode = READ_CAPACITY; if (reladr) cdb->byte2 = SRC_RELADR; if (pmi) cdb->pmi = SRC_PMI; scsi_ulto4b(addr, cdb->addr); cdb->control = control; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = data_ptr; io->scsiio.ext_data_len = data_len; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; } void ctl_scsi_read_capacity_16(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, uint64_t addr, int reladr, int pmi, ctl_tag_type tag_type, uint8_t control) { struct scsi_read_capacity_16 *cdb; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; cdb = (struct scsi_read_capacity_16 *)io->scsiio.cdb; cdb->opcode = SERVICE_ACTION_IN; cdb->service_action = SRC16_SERVICE_ACTION; if (reladr) cdb->reladr |= SRC16_RELADR; if (pmi) cdb->reladr |= SRC16_PMI; scsi_u64to8b(addr, cdb->addr); scsi_ulto4b(data_len, cdb->alloc_len); cdb->control = control; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = data_ptr; io->scsiio.ext_data_len = data_len; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; } void ctl_scsi_mode_sense(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int dbd, int llbaa, uint8_t page_code, uint8_t pc, uint8_t subpage, int minimum_cdb_size, ctl_tag_type tag_type, uint8_t control) { ctl_scsi_zero_io(io); if ((minimum_cdb_size < 10) && (llbaa == 0) && (data_len < 256)) { struct scsi_mode_sense_6 *cdb; cdb = (struct scsi_mode_sense_6 *)io->scsiio.cdb; cdb->opcode = MODE_SENSE_6; if 
(dbd) cdb->byte2 |= SMS_DBD; cdb->page = page_code | pc; cdb->subpage = subpage; cdb->length = data_len; cdb->control = control; } else { struct scsi_mode_sense_10 *cdb; cdb = (struct scsi_mode_sense_10 *)io->scsiio.cdb; cdb->opcode = MODE_SENSE_10; if (dbd) cdb->byte2 |= SMS_DBD; if (llbaa) cdb->byte2 |= SMS10_LLBAA; cdb->page = page_code | pc; cdb->subpage = subpage; scsi_ulto2b(data_len, cdb->length); cdb->control = control; } io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = data_ptr; io->scsiio.ext_data_len = data_len; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; } void ctl_scsi_start_stop(union ctl_io *io, int start, int load_eject, int immediate, int power_conditions, ctl_tag_type tag_type, uint8_t control) { struct scsi_start_stop_unit *cdb; cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; ctl_scsi_zero_io(io); cdb->opcode = START_STOP_UNIT; if (immediate) cdb->byte2 |= SSS_IMMED; cdb->how = power_conditions; if (load_eject) cdb->how |= SSS_LOEJ; if (start) cdb->how |= SSS_START; cdb->control = control; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_NONE; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = NULL; io->scsiio.ext_data_len = 0; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; } void ctl_scsi_sync_cache(union ctl_io *io, int immed, int reladr, int minimum_cdb_size, uint64_t starting_lba, uint32_t block_count, ctl_tag_type tag_type, uint8_t control) { ctl_scsi_zero_io(io); if ((minimum_cdb_size < 16) && ((block_count & 0xffff) == block_count) && ((starting_lba & 0xffffffff) == starting_lba)) { struct scsi_sync_cache *cdb; cdb = (struct scsi_sync_cache *)io->scsiio.cdb; cdb->opcode = SYNCHRONIZE_CACHE; if (reladr) cdb->byte2 |= SSC_RELADR; if (immed) cdb->byte2 |= SSC_IMMED; scsi_ulto4b(starting_lba, cdb->begin_lba); scsi_ulto2b(block_count, cdb->lb_count); cdb->control = control; } else { struct scsi_sync_cache_16 *cdb; cdb = (struct scsi_sync_cache_16 *)io->scsiio.cdb; cdb->opcode = SYNCHRONIZE_CACHE_16; if (reladr) cdb->byte2 |= SSC_RELADR; if (immed) cdb->byte2 |= SSC_IMMED; scsi_u64to8b(starting_lba, cdb->begin_lba); scsi_ulto4b(block_count, cdb->lb_count); cdb->control = control; } io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_NONE; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = NULL; io->scsiio.ext_data_len = 0; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; } void ctl_scsi_persistent_res_in(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int action, ctl_tag_type tag_type, uint8_t control) { struct scsi_per_res_in *cdb; ctl_scsi_zero_io(io); cdb = (struct scsi_per_res_in *)io->scsiio.cdb; cdb->opcode = PERSISTENT_RES_IN; cdb->action = action; scsi_ulto2b(data_len, cdb->length); cdb->control = control; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = data_ptr; io->scsiio.ext_data_len = data_len; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; } void ctl_scsi_persistent_res_out(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int action, int type, uint64_t key, uint64_t sa_key, ctl_tag_type tag_type, uint8_t control) { struct scsi_per_res_out *cdb; struct scsi_per_res_out_parms *params; ctl_scsi_zero_io(io); cdb = (struct 
scsi_per_res_out *)io->scsiio.cdb; params = (struct scsi_per_res_out_parms *)data_ptr; cdb->opcode = PERSISTENT_RES_OUT; if (action == 5) cdb->action = 6; else cdb->action = action; switch(type) { case 0: cdb->scope_type = 1; break; case 1: cdb->scope_type = 3; break; case 2: cdb->scope_type = 5; break; case 3: cdb->scope_type = 6; break; case 4: cdb->scope_type = 7; break; case 5: cdb->scope_type = 8; break; } scsi_ulto4b(data_len, cdb->length); cdb->control = control; scsi_u64to8b(key, params->res_key.key); scsi_u64to8b(sa_key, params->serv_act_res_key); io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_OUT; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = data_ptr; io->scsiio.ext_data_len = data_len; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; } void ctl_scsi_maintenance_in(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, uint8_t action, ctl_tag_type tag_type, uint8_t control) { struct scsi_maintenance_in *cdb; ctl_scsi_zero_io(io); cdb = (struct scsi_maintenance_in *)io->scsiio.cdb; cdb->opcode = MAINTENANCE_IN; cdb->byte2 = action; scsi_ulto4b(data_len, cdb->length); cdb->control = control; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = data_ptr; io->scsiio.ext_data_len = data_len; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; } #ifndef _KERNEL union ctl_io * ctl_scsi_alloc_io(uint32_t initid) { union ctl_io *io; io = (union ctl_io *)malloc(sizeof(*io)); if (io == NULL) goto bailout; io->io_hdr.nexus.initid = initid; bailout: return (io); } void ctl_scsi_free_io(union ctl_io *io) { free(io); } void ctl_scsi_zero_io(union ctl_io *io) { void *pool_ref; if (io == NULL) return; pool_ref = io->io_hdr.pool; memset(io, 0, sizeof(*io)); io->io_hdr.pool = pool_ref; } #endif /* !_KERNEL */ const char * ctl_scsi_task_string(struct ctl_taskio *taskio) { unsigned int i; for (i = 0; i < (sizeof(ctl_task_table)/sizeof(ctl_task_table[0])); i++) { if (taskio->task_action == ctl_task_table[i].task_action) { return (ctl_task_table[i].description); } } return (NULL); } void ctl_io_sbuf(union ctl_io *io, struct sbuf *sb) { const char *task_desc; char path_str[64]; ctl_scsi_path_string(io, path_str, sizeof(path_str)); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: sbuf_cat(sb, path_str); ctl_scsi_command_string(&io->scsiio, NULL, sb); - sbuf_printf(sb, " Tag: %#x/%d\n", - io->scsiio.tag_num, io->scsiio.tag_type); + sbuf_printf(sb, " Tag: %#x/%d, Prio: %d\n", + io->scsiio.tag_num, io->scsiio.tag_type, + io->scsiio.priority); break; case CTL_IO_TASK: sbuf_cat(sb, path_str); task_desc = ctl_scsi_task_string(&io->taskio); if (task_desc == NULL) sbuf_printf(sb, "Unknown Task Action %d (%#x)", io->taskio.task_action, io->taskio.task_action); else sbuf_printf(sb, "Task Action: %s", task_desc); switch (io->taskio.task_action) { case CTL_TASK_ABORT_TASK: sbuf_printf(sb, " Tag: %#x/%d\n", io->taskio.tag_num, io->taskio.tag_type); break; default: sbuf_printf(sb, "\n"); break; } break; default: break; } } void ctl_io_error_sbuf(union ctl_io *io, struct scsi_inquiry_data *inq_data, struct sbuf *sb) { struct ctl_status_desc *status_desc; char path_str[64]; unsigned int i; ctl_io_sbuf(io, sb); status_desc = NULL; for (i = 0; i < (sizeof(ctl_status_table)/sizeof(ctl_status_table[0])); i++) { if ((io->io_hdr.status & CTL_STATUS_MASK) == ctl_status_table[i].status) { status_desc = 
&ctl_status_table[i]; break; } } ctl_scsi_path_string(io, path_str, sizeof(path_str)); sbuf_cat(sb, path_str); if (status_desc == NULL) sbuf_printf(sb, "CTL Status: Unknown status %#x\n", io->io_hdr.status); else sbuf_printf(sb, "CTL Status: %s\n", status_desc->description); if ((io->io_hdr.io_type == CTL_IO_SCSI) && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR)) { sbuf_cat(sb, path_str); sbuf_printf(sb, "SCSI Status: %s\n", ctl_scsi_status_string(&io->scsiio)); if (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND) ctl_scsi_sense_sbuf(&io->scsiio, inq_data, sb, SSS_FLAG_NONE); } } char * ctl_io_string(union ctl_io *io, char *str, int str_len) { struct sbuf sb; sbuf_new(&sb, str, str_len, SBUF_FIXEDLEN); ctl_io_sbuf(io, &sb); sbuf_finish(&sb); return (sbuf_data(&sb)); } char * ctl_io_error_string(union ctl_io *io, struct scsi_inquiry_data *inq_data, char *str, int str_len) { struct sbuf sb; sbuf_new(&sb, str, str_len, SBUF_FIXEDLEN); ctl_io_error_sbuf(io, inq_data, &sb); sbuf_finish(&sb); return (sbuf_data(&sb)); } #ifdef _KERNEL void ctl_io_print(union ctl_io *io) { char str[512]; printf("%s", ctl_io_string(io, str, sizeof(str))); } void ctl_io_error_print(union ctl_io *io, struct scsi_inquiry_data *inq_data) { char str[512]; printf("%s", ctl_io_error_string(io, inq_data, str, sizeof(str))); } void ctl_data_print(union ctl_io *io) { char str[128]; char path_str[64]; struct sbuf sb; int i, j, len; if (io->io_hdr.io_type != CTL_IO_SCSI) return; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) return; if (io->scsiio.kern_sg_entries > 0) /* XXX: Implement */ return; ctl_scsi_path_string(io, path_str, sizeof(path_str)); len = min(io->scsiio.kern_data_len, 4096); for (i = 0; i < len; ) { sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); sbuf_cat(&sb, path_str); sbuf_printf(&sb, " %#6x:%04x:", io->scsiio.tag_num, i); for (j = 0; j < 16 && i < len; i++, j++) { if (j == 8) sbuf_cat(&sb, " "); sbuf_printf(&sb, " %02x", io->scsiio.kern_data_ptr[i]); } sbuf_cat(&sb, "\n"); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); } } #else /* _KERNEL */ void ctl_io_error_print(union ctl_io *io, struct scsi_inquiry_data *inq_data, FILE *ofile) { char str[512]; fprintf(ofile, "%s", ctl_io_error_string(io, inq_data, str, sizeof(str))); } #endif /* _KERNEL */ /* * vim: ts=8 */ Index: head/sys/cam/ctl/scsi_ctl.c =================================================================== --- head/sys/cam/ctl/scsi_ctl.c (revision 367043) +++ head/sys/cam/ctl/scsi_ctl.c (revision 367044) @@ -1,1993 +1,1994 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008, 2009 Silicon Graphics International Corp. * Copyright (c) 2014-2015 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. 
* * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $ */ /* * Peripheral driver interface between CAM and CTL (CAM Target Layer). * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct ctlfe_softc { struct ctl_port port; path_id_t path_id; target_id_t target_id; uint32_t hba_misc; u_int maxio; struct cam_sim *sim; char port_name[DEV_IDLEN]; struct mtx lun_softc_mtx; STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list; STAILQ_ENTRY(ctlfe_softc) links; }; STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list; struct mtx ctlfe_list_mtx; static char ctlfe_mtx_desc[] = "ctlfelist"; typedef enum { CTLFE_LUN_NONE = 0x00, CTLFE_LUN_WILDCARD = 0x01 } ctlfe_lun_flags; struct ctlfe_lun_softc { struct ctlfe_softc *parent_softc; struct cam_periph *periph; ctlfe_lun_flags flags; int ctios_sent; /* Number of active CTIOs */ int refcount; /* Number of active xpt_action() */ int atios_alloced; /* Number of ATIOs not freed */ int inots_alloced; /* Number of INOTs not freed */ struct task refdrain_task; STAILQ_HEAD(, ccb_hdr) work_queue; LIST_HEAD(, ccb_hdr) atio_list; /* List of ATIOs queued to SIM. */ LIST_HEAD(, ccb_hdr) inot_list; /* List of INOTs queued to SIM. */ STAILQ_ENTRY(ctlfe_lun_softc) links; }; typedef enum { CTLFE_CMD_NONE = 0x00, CTLFE_CMD_PIECEWISE = 0x01 } ctlfe_cmd_flags; struct ctlfe_cmd_info { int cur_transfer_index; size_t cur_transfer_off; ctlfe_cmd_flags flags; /* * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16 * bytes on amd64. So with 32 elements, this is 256 bytes on * i386 and 512 bytes on amd64. */ #define CTLFE_MAX_SEGS 32 bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS]; }; /* * When we register the adapter/bus, request that this many ctl_ios be * allocated. This should be the maximum supported by the adapter, but we * currently don't have a way to get that back from the path inquiry. * XXX KDM add that to the path inquiry. */ #define CTLFE_REQ_CTL_IO 4096 /* * Number of Accept Target I/O CCBs to allocate and queue down to the * adapter per LUN. * XXX KDM should this be controlled by CTL? */ #define CTLFE_ATIO_PER_LUN 1024 /* * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to * allocate and queue down to the adapter per LUN. * XXX KDM should this be controlled by CTL? */ #define CTLFE_IN_PER_LUN 1024 /* * Timeout (in seconds) on CTIO CCB doing DMA or sending status */ #define CTLFE_TIMEOUT 5 /* * Turn this on to enable extra debugging prints. 
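 * (Flip the "#if 0" below to "#if 1" so that CTLFE_DEBUG is defined and
 * the extra printf()s guarded by it are compiled in.)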
*/ #if 0 #define CTLFE_DEBUG #endif MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface"); #define io_ptr ppriv_ptr0 /* This is only used in the CTIO */ #define ccb_atio ppriv_ptr1 #define PRIV_CCB(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[0]) #define PRIV_INFO(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[1]) static int ctlfeinitialize(void); static int ctlfeshutdown(void); static periph_init_t ctlfeperiphinit; static periph_deinit_t ctlfeperiphdeinit; static void ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); static periph_ctor_t ctlferegister; static periph_oninv_t ctlfeoninvalidate; static periph_dtor_t ctlfecleanup; static periph_start_t ctlfestart; static void ctlfedone(struct cam_periph *periph, union ccb *done_ccb); static void ctlfe_onoffline(void *arg, int online); static void ctlfe_online(void *arg); static void ctlfe_offline(void *arg); static int ctlfe_lun_enable(void *arg, int lun_id); static int ctlfe_lun_disable(void *arg, int lun_id); static void ctlfe_dump_sim(struct cam_sim *sim); static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc); static void ctlfe_datamove(union ctl_io *io); static void ctlfe_done(union ctl_io *io); static void ctlfe_dump(void); static void ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb); static void ctlfe_requeue_ccb(struct cam_periph *periph, union ccb *ccb, int unlock); static struct periph_driver ctlfe_driver = { ctlfeperiphinit, "ctl", TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0, CAM_PERIPH_DRV_EARLY, ctlfeperiphdeinit }; static struct ctl_frontend ctlfe_frontend = { .name = "camtgt", .init = ctlfeinitialize, .fe_dump = ctlfe_dump, .shutdown = ctlfeshutdown, }; CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend); static int ctlfeinitialize(void) { STAILQ_INIT(&ctlfe_softc_list); mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF); periphdriver_register(&ctlfe_driver); return (0); } static int ctlfeshutdown(void) { int error; error = periphdriver_unregister(&ctlfe_driver); if (error != 0) return (error); mtx_destroy(&ctlfe_list_mtx); return (0); } static void ctlfeperiphinit(void) { cam_status status; status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED | AC_CONTRACT, ctlfeasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("ctl: Failed to attach async callback due to CAM " "status 0x%x!\n", status); } } static int ctlfeperiphdeinit(void) { /* XXX: It would be good to tear down active ports here. */ if (!TAILQ_EMPTY(&ctlfe_driver.units)) return (EBUSY); xpt_register_async(0, ctlfeasync, NULL, NULL); return (0); } static void ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct ctlfe_softc *softc; #ifdef CTLFEDEBUG printf("%s: entered\n", __func__); #endif mtx_lock(&ctlfe_list_mtx); STAILQ_FOREACH(softc, &ctlfe_softc_list, links) { if (softc->path_id == xpt_path_path_id(path)) break; } mtx_unlock(&ctlfe_list_mtx); /* * When a new path gets registered, and it is capable of target * mode, go ahead and attach. Later on, we may need to be more * selective, but for now this will be sufficient. 
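 * The switch below handles three events: AC_PATH_REGISTERED allocates a
 * per-bus softc and registers a CTL port for the new SIM,
 * AC_PATH_DEREGISTERED tears that port back down, and AC_CONTRACT reports
 * initiator arrival or departure to CTL via
 * ctl_add_initiator()/ctl_remove_initiator().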
*/ switch (code) { case AC_PATH_REGISTERED: { struct ctl_port *port; struct ccb_pathinq *cpi; int retval; cpi = (struct ccb_pathinq *)arg; /* Don't attach if it doesn't support target mode */ if ((cpi->target_sprt & PIT_PROCESSOR) == 0) { #ifdef CTLFEDEBUG printf("%s: SIM %s%d doesn't support target mode\n", __func__, cpi->dev_name, cpi->unit_number); #endif break; } if (softc != NULL) { #ifdef CTLFEDEBUG printf("%s: CTL port for CAM path %u already exists\n", __func__, xpt_path_path_id(path)); #endif break; } /* * We're in an interrupt context here, so we have to * use M_NOWAIT. Of course this means trouble if we * can't allocate memory. */ softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO); if (softc == NULL) { printf("%s: unable to malloc %zd bytes for softc\n", __func__, sizeof(*softc)); return; } softc->path_id = cpi->ccb_h.path_id; softc->target_id = cpi->initiator_id; softc->sim = xpt_path_sim(path); softc->hba_misc = cpi->hba_misc; if (cpi->maxio != 0) softc->maxio = cpi->maxio; else softc->maxio = DFLTPHYS; mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF); STAILQ_INIT(&softc->lun_softc_list); port = &softc->port; port->frontend = &ctlfe_frontend; /* * XXX KDM should we be more accurate here ? */ if (cpi->transport == XPORT_FC) port->port_type = CTL_PORT_FC; else if (cpi->transport == XPORT_SAS) port->port_type = CTL_PORT_SAS; else port->port_type = CTL_PORT_SCSI; /* XXX KDM what should the real number be here? */ port->num_requested_ctl_io = CTLFE_REQ_CTL_IO; snprintf(softc->port_name, sizeof(softc->port_name), "%s%d", cpi->dev_name, cpi->unit_number); /* * XXX KDM it would be nice to allocate storage in the * frontend structure itself. */ port->port_name = softc->port_name; port->physical_port = cpi->bus_id; port->virtual_port = 0; port->port_online = ctlfe_online; port->port_offline = ctlfe_offline; port->onoff_arg = softc; port->lun_enable = ctlfe_lun_enable; port->lun_disable = ctlfe_lun_disable; port->targ_lun_arg = softc; port->fe_datamove = ctlfe_datamove; port->fe_done = ctlfe_done; port->targ_port = -1; retval = ctl_port_register(port); if (retval != 0) { printf("%s: ctl_port_register() failed with " "error %d!\n", __func__, retval); mtx_destroy(&softc->lun_softc_mtx); free(softc, M_CTLFE); break; } else { mtx_lock(&ctlfe_list_mtx); STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links); mtx_unlock(&ctlfe_list_mtx); } break; } case AC_PATH_DEREGISTERED: { if (softc != NULL) { /* * XXX KDM are we certain at this point that there * are no outstanding commands for this frontend? */ mtx_lock(&ctlfe_list_mtx); STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc, links); mtx_unlock(&ctlfe_list_mtx); ctl_port_deregister(&softc->port); mtx_destroy(&softc->lun_softc_mtx); free(softc, M_CTLFE); } break; } case AC_CONTRACT: { struct ac_contract *ac; ac = (struct ac_contract *)arg; switch (ac->contract_number) { case AC_CONTRACT_DEV_CHG: { struct ac_device_changed *dev_chg; int retval; dev_chg = (struct ac_device_changed *)ac->contract_data; printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n", __func__, dev_chg->wwpn, dev_chg->port, xpt_path_path_id(path), dev_chg->target, (dev_chg->arrived == 0) ? 
"left" : "arrived"); if (softc == NULL) { printf("%s: CTL port for CAM path %u not " "found!\n", __func__, xpt_path_path_id(path)); break; } if (dev_chg->arrived != 0) { retval = ctl_add_initiator(&softc->port, dev_chg->target, dev_chg->wwpn, NULL); } else { retval = ctl_remove_initiator(&softc->port, dev_chg->target); } if (retval < 0) { printf("%s: could not %s port %d iid %u " "WWPN %#jx!\n", __func__, (dev_chg->arrived != 0) ? "add" : "remove", softc->port.targ_port, dev_chg->target, (uintmax_t)dev_chg->wwpn); } break; } default: printf("%s: unsupported contract number %ju\n", __func__, (uintmax_t)ac->contract_number); break; } break; } default: break; } } static cam_status ctlferegister(struct cam_periph *periph, void *arg) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; union ccb ccb; cam_status status; int i, acstatus; softc = (struct ctlfe_lun_softc *)arg; bus_softc = softc->parent_softc; STAILQ_INIT(&softc->work_queue); LIST_INIT(&softc->atio_list); LIST_INIT(&softc->inot_list); softc->periph = periph; periph->softc = softc; /* Increase device openings to maximum for the SIM. */ if (bus_softc->sim->max_tagged_dev_openings > bus_softc->sim->max_dev_openings) { cam_release_devq(periph->path, /*relsim_flags*/RELSIM_ADJUST_OPENINGS, /*openings*/bus_softc->sim->max_tagged_dev_openings, /*timeout*/0, /*getcount_only*/1); } xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); ccb.ccb_h.func_code = XPT_EN_LUN; ccb.cel.grp6_len = 0; ccb.cel.grp7_len = 0; ccb.cel.enable = 1; xpt_action(&ccb); status = (ccb.ccb_h.status & CAM_STATUS_MASK); if (status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n", __func__, ccb.ccb_h.status); return (status); } status = CAM_REQ_CMP; for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) { union ccb *new_ccb; union ctl_io *new_io; struct ctlfe_cmd_info *cmd_info; new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, M_ZERO|M_NOWAIT); if (new_ccb == NULL) { status = CAM_RESRC_UNAVAIL; break; } new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); if (new_io == NULL) { free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } cmd_info = malloc(sizeof(*cmd_info), M_CTLFE, M_ZERO | M_NOWAIT); if (cmd_info == NULL) { ctl_free_io(new_io); free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } PRIV_INFO(new_io) = cmd_info; softc->atios_alloced++; new_ccb->ccb_h.io_ptr = new_io; LIST_INSERT_HEAD(&softc->atio_list, &new_ccb->ccb_h, periph_links.le); xpt_setup_ccb(&new_ccb->ccb_h, periph->path, CAM_PRIORITY_NONE); new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; new_ccb->ccb_h.cbfcnp = ctlfedone; new_ccb->ccb_h.flags |= CAM_UNLOCKED; xpt_action(new_ccb); status = new_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { free(cmd_info, M_CTLFE); ctl_free_io(new_io); free(new_ccb, M_CTLFE); break; } } acstatus = cam_periph_acquire(periph); if (acstatus != 0) { xpt_print(periph->path, "%s: could not acquire reference " "count, status = %#x\n", __func__, acstatus); return (CAM_REQ_CMP_ERR); } if (i == 0) { xpt_print(periph->path, "%s: could not allocate ATIO CCBs, " "status 0x%x\n", __func__, status); return (CAM_REQ_CMP_ERR); } for (i = 0; i < CTLFE_IN_PER_LUN; i++) { union ccb *new_ccb; union ctl_io *new_io; new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, M_ZERO|M_NOWAIT); if (new_ccb == NULL) { status = CAM_RESRC_UNAVAIL; break; } new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); if (new_io == NULL) { free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } 
softc->inots_alloced++; new_ccb->ccb_h.io_ptr = new_io; LIST_INSERT_HEAD(&softc->inot_list, &new_ccb->ccb_h, periph_links.le); xpt_setup_ccb(&new_ccb->ccb_h, periph->path, CAM_PRIORITY_NONE); new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; new_ccb->ccb_h.cbfcnp = ctlfedone; new_ccb->ccb_h.flags |= CAM_UNLOCKED; xpt_action(new_ccb); status = new_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { /* * Note that we don't free the CCB here. If the * status is not CAM_REQ_INPROG, then we're * probably talking to a SIM that says it is * target-capable but doesn't support the * XPT_IMMEDIATE_NOTIFY CCB. i.e. it supports the * older API. In that case, it'll call xpt_done() * on the CCB, and we need to free it in our done * routine as a result. */ break; } } if ((i == 0) || (status != CAM_REQ_INPROG)) { xpt_print(periph->path, "%s: could not allocate immediate " "notify CCBs, status 0x%x\n", __func__, status); return (CAM_REQ_CMP_ERR); } mtx_lock(&bus_softc->lun_softc_mtx); STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links); mtx_unlock(&bus_softc->lun_softc_mtx); return (CAM_REQ_CMP); } static void ctlfeoninvalidate(struct cam_periph *periph) { struct ctlfe_lun_softc *softc = (struct ctlfe_lun_softc *)periph->softc; struct ctlfe_softc *bus_softc; union ccb ccb; struct ccb_hdr *hdr; cam_status status; /* Abort all ATIOs and INOTs queued to SIM. */ xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); ccb.ccb_h.func_code = XPT_ABORT; LIST_FOREACH(hdr, &softc->atio_list, periph_links.le) { ccb.cab.abort_ccb = (union ccb *)hdr; xpt_action(&ccb); } LIST_FOREACH(hdr, &softc->inot_list, periph_links.le) { ccb.cab.abort_ccb = (union ccb *)hdr; xpt_action(&ccb); } /* Disable the LUN in SIM. */ ccb.ccb_h.func_code = XPT_EN_LUN; ccb.cel.grp6_len = 0; ccb.cel.grp7_len = 0; ccb.cel.enable = 0; xpt_action(&ccb); status = (ccb.ccb_h.status & CAM_STATUS_MASK); if (status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n", __func__, ccb.ccb_h.status); /* * XXX KDM what do we do now? */ } bus_softc = softc->parent_softc; mtx_lock(&bus_softc->lun_softc_mtx); STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links); mtx_unlock(&bus_softc->lun_softc_mtx); } static void ctlfecleanup(struct cam_periph *periph) { struct ctlfe_lun_softc *softc; softc = (struct ctlfe_lun_softc *)periph->softc; KASSERT(softc->ctios_sent == 0, ("%s: ctios_sent %d != 0", __func__, softc->ctios_sent)); KASSERT(softc->refcount == 0, ("%s: refcount %d != 0", __func__, softc->refcount)); KASSERT(softc->atios_alloced == 0, ("%s: atios_alloced %d != 0", __func__, softc->atios_alloced)); KASSERT(softc->inots_alloced == 0, ("%s: inots_alloced %d != 0", __func__, softc->inots_alloced)); free(softc, M_CTLFE); } static void ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io, ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len, u_int16_t *sglist_cnt) { struct ctlfe_softc *bus_softc; struct ctlfe_cmd_info *cmd_info; struct ctl_sg_entry *ctl_sglist; bus_dma_segment_t *cam_sglist; size_t off; int i, idx; cmd_info = PRIV_INFO(io); bus_softc = softc->parent_softc; /* * Set the direction, relative to the initiator. */ *flags &= ~CAM_DIR_MASK; if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) *flags |= CAM_DIR_IN; else *flags |= CAM_DIR_OUT; *flags &= ~CAM_DATA_MASK; idx = cmd_info->cur_transfer_index; off = cmd_info->cur_transfer_off; cmd_info->flags &= ~CTLFE_CMD_PIECEWISE; if (io->scsiio.kern_sg_entries == 0) { /* No S/G list. 
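 * Single contiguous buffer: shift the start by any ext_data_filled left
 * over from an SRR, then clamp the transfer to the SIM's maxio.  If the
 * remaining data does not fit, remember where we stopped in
 * cur_transfer_off and set CTLFE_CMD_PIECEWISE so that ctlfedone() sends
 * the rest in follow-up CTIOs.  E.g. (illustrative numbers) a 1 MB buffer
 * on a SIM with a 256 kB maxio goes out as four 256 kB CTIOs.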
*/ /* One time shift for SRR offset. */ off += io->scsiio.ext_data_filled; io->scsiio.ext_data_filled = 0; *data_ptr = io->scsiio.kern_data_ptr + off; if (io->scsiio.kern_data_len - off <= bus_softc->maxio) { *dxfer_len = io->scsiio.kern_data_len - off; } else { *dxfer_len = bus_softc->maxio; cmd_info->cur_transfer_off += bus_softc->maxio; cmd_info->flags |= CTLFE_CMD_PIECEWISE; } *sglist_cnt = 0; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) *flags |= CAM_DATA_PADDR; else *flags |= CAM_DATA_VADDR; } else { /* S/G list with physical or virtual pointers. */ ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; /* One time shift for SRR offset. */ while (io->scsiio.ext_data_filled >= ctl_sglist[idx].len - off) { io->scsiio.ext_data_filled -= ctl_sglist[idx].len - off; idx++; off = 0; } off += io->scsiio.ext_data_filled; io->scsiio.ext_data_filled = 0; cam_sglist = cmd_info->cam_sglist; *dxfer_len = 0; for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) { cam_sglist[i].ds_addr = (bus_addr_t)(uintptr_t)ctl_sglist[i + idx].addr + off; if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) { cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off; *dxfer_len += cam_sglist[i].ds_len; } else { cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len; cmd_info->cur_transfer_index = idx + i; cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off; cmd_info->flags |= CTLFE_CMD_PIECEWISE; *dxfer_len += cam_sglist[i].ds_len; if (ctl_sglist[i].len != 0) i++; break; } if (i == (CTLFE_MAX_SEGS - 1) && idx + i < (io->scsiio.kern_sg_entries - 1)) { cmd_info->cur_transfer_index = idx + i + 1; cmd_info->cur_transfer_off = 0; cmd_info->flags |= CTLFE_CMD_PIECEWISE; i++; break; } off = 0; } *sglist_cnt = i; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) *flags |= CAM_DATA_SG_PADDR; else *flags |= CAM_DATA_SG; *data_ptr = (uint8_t *)cam_sglist; } } static void ctlfestart(struct cam_periph *periph, union ccb *start_ccb) { struct ctlfe_lun_softc *softc; struct ctlfe_cmd_info *cmd_info; struct ccb_hdr *ccb_h; struct ccb_accept_tio *atio; struct ccb_scsiio *csio; uint8_t *data_ptr; uint32_t dxfer_len; ccb_flags flags; union ctl_io *io; uint8_t scsi_status; softc = (struct ctlfe_lun_softc *)periph->softc; next: /* Take the ATIO off the work queue */ ccb_h = STAILQ_FIRST(&softc->work_queue); if (ccb_h == NULL) { xpt_release_ccb(start_ccb); return; } STAILQ_REMOVE_HEAD(&softc->work_queue, periph_links.stqe); atio = (struct ccb_accept_tio *)ccb_h; io = (union ctl_io *)ccb_h->io_ptr; csio = &start_ccb->csio; flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK); cmd_info = PRIV_INFO(io); cmd_info->cur_transfer_index = 0; cmd_info->cur_transfer_off = 0; cmd_info->flags = 0; if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) { /* * Datamove call, we need to setup the S/G list. */ ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len, &csio->sglist_cnt); } else { /* * We're done, send status back. */ if ((io->io_hdr.flags & CTL_FLAG_ABORT) && (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) { io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED; /* Tell the SIM that we've aborted this ATIO */ #ifdef CTLFEDEBUG printf("%s: tag %04x abort\n", __func__, atio->tag_id); #endif KASSERT(atio->ccb_h.func_code == XPT_ACCEPT_TARGET_IO, ("func_code %#x is not ATIO", atio->ccb_h.func_code)); start_ccb->ccb_h.func_code = XPT_ABORT; start_ccb->cab.abort_ccb = (union ccb *)atio; xpt_action(start_ccb); ctlfe_requeue_ccb(periph, (union ccb *)atio, /* unlock */0); /* XPT_ABORT is not queued, so we can take next I/O. 
*/ goto next; } data_ptr = NULL; dxfer_len = 0; csio->sglist_cnt = 0; } scsi_status = 0; if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) && (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 && ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 || io->io_hdr.status == CTL_SUCCESS)) { flags |= CAM_SEND_STATUS; scsi_status = io->scsiio.scsi_status; csio->sense_len = io->scsiio.sense_len; #ifdef CTLFEDEBUG printf("%s: tag %04x status %x\n", __func__, atio->tag_id, io->io_hdr.status); #endif if (csio->sense_len != 0) { csio->sense_data = io->scsiio.sense_data; flags |= CAM_SEND_SENSE; } } #ifdef CTLFEDEBUG printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__, (flags & CAM_SEND_STATUS) ? "done" : "datamove", atio->tag_id, flags, data_ptr, dxfer_len); #endif /* * Valid combinations: * - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0, * sglist_cnt = 0 * - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0, * sglist_cnt = 0 * - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0, * sglist_cnt != 0 */ #ifdef CTLFEDEBUG if (((flags & CAM_SEND_STATUS) && (((flags & CAM_DATA_SG) != 0) || (dxfer_len != 0) || (csio->sglist_cnt != 0))) || (((flags & CAM_SEND_STATUS) == 0) && (dxfer_len == 0)) || ((flags & CAM_DATA_SG) && (csio->sglist_cnt == 0)) || (((flags & CAM_DATA_SG) == 0) && (csio->sglist_cnt != 0))) { printf("%s: tag %04x cdb %02x flags %#x dxfer_len " "%d sg %u\n", __func__, atio->tag_id, atio_cdb_ptr(atio)[0], flags, dxfer_len, csio->sglist_cnt); printf("%s: tag %04x io status %#x\n", __func__, atio->tag_id, io->io_hdr.status); } #endif cam_fill_ctio(csio, /*retries*/ 2, ctlfedone, flags, (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, atio->tag_id, atio->init_id, scsi_status, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*timeout*/ CTLFE_TIMEOUT * 1000); start_ccb->ccb_h.flags |= CAM_UNLOCKED; start_ccb->ccb_h.ccb_atio = atio; if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; io->io_hdr.flags &= ~(CTL_FLAG_DMA_QUEUED | CTL_FLAG_STATUS_QUEUED); softc->ctios_sent++; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); softc->refcount--; /* * If we still have work to do, ask for another CCB. */ if (!STAILQ_EMPTY(&softc->work_queue)) xpt_schedule(periph, CAM_PRIORITY_NORMAL); } static void ctlfe_drain(void *context, int pending) { struct cam_periph *periph = context; struct ctlfe_lun_softc *softc = periph->softc; cam_periph_lock(periph); while (softc->refcount != 0) { cam_periph_sleep(periph, &softc->refcount, PRIBIO, "ctlfe_drain", 1); } cam_periph_unlock(periph); cam_periph_release(periph); } static void ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb) { struct ctlfe_lun_softc *softc; union ctl_io *io; struct ctlfe_cmd_info *cmd_info; softc = (struct ctlfe_lun_softc *)periph->softc; io = ccb->ccb_h.io_ptr; switch (ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: softc->atios_alloced--; cmd_info = PRIV_INFO(io); free(cmd_info, M_CTLFE); break; case XPT_IMMEDIATE_NOTIFY: case XPT_NOTIFY_ACKNOWLEDGE: softc->inots_alloced--; break; default: break; } ctl_free_io(io); free(ccb, M_CTLFE); KASSERT(softc->atios_alloced >= 0, ("%s: atios_alloced %d < 0", __func__, softc->atios_alloced)); KASSERT(softc->inots_alloced >= 0, ("%s: inots_alloced %d < 0", __func__, softc->inots_alloced)); /* * If we have received all of our CCBs, we can release our * reference on the peripheral driver. It will probably go away * now. 
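 * If some thread is still inside xpt_action() for this periph
 * (refcount != 0), the release is deferred to the ctlfe_drain() task,
 * which waits for the refcount to reach zero before dropping the
 * reference.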
*/ if (softc->atios_alloced == 0 && softc->inots_alloced == 0) { if (softc->refcount == 0) { cam_periph_release_locked(periph); } else { TASK_INIT(&softc->refdrain_task, 0, ctlfe_drain, periph); taskqueue_enqueue(taskqueue_thread, &softc->refdrain_task); } } } /* * Send the ATIO/INOT back to the SIM, or free it if periph was invalidated. */ static void ctlfe_requeue_ccb(struct cam_periph *periph, union ccb *ccb, int unlock) { struct ctlfe_lun_softc *softc; struct mtx *mtx; if (periph->flags & CAM_PERIPH_INVALID) { mtx = cam_periph_mtx(periph); ctlfe_free_ccb(periph, ccb); if (unlock) mtx_unlock(mtx); return; } softc = (struct ctlfe_lun_softc *)periph->softc; if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) LIST_INSERT_HEAD(&softc->atio_list, &ccb->ccb_h, periph_links.le); else LIST_INSERT_HEAD(&softc->inot_list, &ccb->ccb_h, periph_links.le); if (unlock) cam_periph_unlock(periph); /* * For a wildcard attachment, commands can come in with a specific * target/lun. Reset the target and LUN fields back to the wildcard * values before we send them back down to the SIM. */ xpt_setup_ccb_flags(&ccb->ccb_h, periph->path, CAM_PRIORITY_NONE, ccb->ccb_h.flags); xpt_action(ccb); } static int ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset) { uint64_t lba; uint32_t num_blocks, nbc; uint8_t *cmdbyt = atio_cdb_ptr(atio); nbc = offset >> 9; /* ASSUMING 512 BYTE BLOCKS */ switch (cmdbyt[0]) { case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt; lba = scsi_3btoul(cdb->addr); lba &= 0x1fffff; num_blocks = cdb->length; if (num_blocks == 0) num_blocks = 256; lba += nbc; num_blocks -= nbc; scsi_ulto3b(lba, cdb->addr); cdb->length = num_blocks; break; } case READ_10: case WRITE_10: { struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_ulto4b(lba, cdb->addr); scsi_ulto2b(num_blocks, cdb->length); break; } case READ_12: case WRITE_12: { struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_ulto4b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); break; } case READ_16: case WRITE_16: { struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_u64to8b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); break; } default: return -1; } return (0); } static void ctlfedone(struct cam_periph *periph, union ccb *done_ccb) { struct ctlfe_lun_softc *softc; struct ctlfe_softc *bus_softc; struct ctlfe_cmd_info *cmd_info; struct ccb_accept_tio *atio = NULL; union ctl_io *io = NULL; struct mtx *mtx; cam_status status; KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0, ("CCB in ctlfedone() without CAM_UNLOCKED flag")); #ifdef CTLFE_DEBUG printf("%s: entered, func_code = %#x\n", __func__, done_ccb->ccb_h.func_code); #endif /* * At this point CTL has no known use case for device queue freezes. * In case some SIM think different -- drop its freeze right here. 
*/ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(periph->path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; } softc = (struct ctlfe_lun_softc *)periph->softc; bus_softc = softc->parent_softc; mtx = cam_periph_mtx(periph); mtx_lock(mtx); switch (done_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: { LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); atio = &done_ccb->atio; status = atio->ccb_h.status & CAM_STATUS_MASK; if (status != CAM_CDB_RECVD) { ctlfe_free_ccb(periph, done_ccb); goto out; } resubmit: /* * Allocate a ctl_io, pass it to CTL, and wait for the * datamove or done. */ mtx_unlock(mtx); io = done_ccb->ccb_h.io_ptr; cmd_info = PRIV_INFO(io); ctl_zero_io(io); /* Save pointers on both sides */ PRIV_CCB(io) = done_ccb; PRIV_INFO(io) = cmd_info; done_ccb->ccb_h.io_ptr = io; /* * Only SCSI I/O comes down this path, resets, etc. come * down the immediate notify path below. */ io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid = atio->init_id; io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; if (bus_softc->hba_misc & PIM_EXTLUNS) { io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(atio->ccb_h.target_lun)); } else { io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun; } + io->scsiio.priority = atio->priority; io->scsiio.tag_num = atio->tag_id; switch (atio->tag_action) { case CAM_TAG_ACTION_NONE: io->scsiio.tag_type = CTL_TAG_UNTAGGED; break; case MSG_SIMPLE_TASK: io->scsiio.tag_type = CTL_TAG_SIMPLE; break; case MSG_HEAD_OF_QUEUE_TASK: io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case MSG_ORDERED_TASK: io->scsiio.tag_type = CTL_TAG_ORDERED; break; case MSG_ACA_TASK: io->scsiio.tag_type = CTL_TAG_ACA; break; default: io->scsiio.tag_type = CTL_TAG_UNTAGGED; printf("%s: unhandled tag type %#x!!\n", __func__, atio->tag_action); break; } if (atio->cdb_len > sizeof(io->scsiio.cdb)) { printf("%s: WARNING: CDB len %d > ctl_io space %zd\n", __func__, atio->cdb_len, sizeof(io->scsiio.cdb)); } io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb)); bcopy(atio_cdb_ptr(atio), io->scsiio.cdb, io->scsiio.cdb_len); #ifdef CTLFEDEBUG printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__, io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun, io->scsiio.tag_num, io->scsiio.cdb[0]); #endif ctl_queue(io); return; } case XPT_CONT_TARGET_IO: { int srr = 0; uint32_t srr_off = 0; atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio; io = (union ctl_io *)atio->ccb_h.io_ptr; softc->ctios_sent--; #ifdef CTLFEDEBUG printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n", __func__, atio->tag_id, done_ccb->ccb_h.flags); #endif /* * Handle SRR case were the data pointer is pushed back hack */ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV && done_ccb->csio.msg_ptr != NULL && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED && done_ccb->csio.msg_ptr[1] == 5 && done_ccb->csio.msg_ptr[2] == 0) { srr = 1; srr_off = (done_ccb->csio.msg_ptr[3] << 24) | (done_ccb->csio.msg_ptr[4] << 16) | (done_ccb->csio.msg_ptr[5] << 8) | (done_ccb->csio.msg_ptr[6]); } /* * If we have an SRR and we're still sending data, we * should be able to adjust offsets and cycle again. * It is possible only if offset is from this datamove. 
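 * E.g. (illustrative numbers) with kern_rel_offset = 64k and
 * kern_data_len = 128k, an SRR that arrives while this datamove is still
 * marked DMA_INPROG and asks to restart at srr_off = 96k falls inside the
 * datamove, so kern_data_resid becomes 64k + 128k - 96k = 96k,
 * ext_data_filled is set to 96k, and the I/O is pushed back onto the work
 * queue as a fresh datamove.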
*/ if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) && srr_off >= io->scsiio.kern_rel_offset && srr_off < io->scsiio.kern_rel_offset + io->scsiio.kern_data_len) { io->scsiio.kern_data_resid = io->scsiio.kern_rel_offset + io->scsiio.kern_data_len - srr_off; io->scsiio.ext_data_filled = srr_off; io->scsiio.io_hdr.status = CTL_STATUS_NONE; io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; xpt_release_ccb(done_ccb); STAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, periph_links.stqe); xpt_schedule(periph, CAM_PRIORITY_NORMAL); break; } /* * If status was being sent, the back end data is now history. * Hack it up and resubmit a new command with the CDB adjusted. * If the SIM does the right thing, all of the resid math * should work. */ if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { xpt_release_ccb(done_ccb); if (ctlfe_adjust_cdb(atio, srr_off) == 0) { done_ccb = (union ccb *)atio; goto resubmit; } /* * Fall through to doom.... */ } if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) && (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; /* * If we were sending status back to the initiator, free up * resources. If we were doing a datamove, call the * datamove done routine. */ if ((io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { /* * If we asked to send sense data but it wasn't sent, * queue the I/O back to CTL for later REQUEST SENSE. */ if ((done_ccb->ccb_h.flags & CAM_SEND_SENSE) != 0 && (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (done_ccb->ccb_h.status & CAM_SENT_SENSE) == 0 && (io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref)) != NULL) { PRIV_INFO(io) = PRIV_INFO( (union ctl_io *)atio->ccb_h.io_ptr); ctl_queue_sense(atio->ccb_h.io_ptr); atio->ccb_h.io_ptr = io; } /* Abort ATIO if CTIO sending status has failed. */ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { done_ccb->ccb_h.func_code = XPT_ABORT; done_ccb->cab.abort_ccb = (union ccb *)atio; xpt_action(done_ccb); } xpt_release_ccb(done_ccb); ctlfe_requeue_ccb(periph, (union ccb *)atio, /* unlock */1); return; } else { struct ctlfe_cmd_info *cmd_info; struct ccb_scsiio *csio; csio = &done_ccb->csio; cmd_info = PRIV_INFO(io); io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; /* * Translate CAM status to CTL status. Success * does not change the overall, ctl_io status. In * that case we just set port_status to 0. If we * have a failure, though, set a data phase error * for the overall ctl_io. */ switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_REQ_CMP: io->scsiio.kern_data_resid -= csio->dxfer_len - csio->resid; io->io_hdr.port_status = 0; break; default: /* * XXX KDM we probably need to figure out a * standard set of errors that the SIM * drivers should return in the event of a * data transfer failure. A data phase * error will at least point the user to a * data transfer error of some sort. * Hopefully the SIM printed out some * additional information to give the user * a clue what happened. */ io->io_hdr.port_status = 0xbad1; ctl_set_data_phase_error(&io->scsiio); /* * XXX KDM figure out residual. */ break; } /* * If we had to break this S/G list into multiple * pieces, figure out where we are in the list, and * continue sending pieces if necessary. 
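 * CTLFE_CMD_PIECEWISE was set by ctlfedata() when the request did not fit
 * in a single CTIO; as long as the previous piece completed cleanly
 * (port_status and resid both zero) we build and send the next piece
 * here, otherwise the I/O goes back to CTL through be_move_done().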
*/ if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) && io->io_hdr.port_status == 0 && csio->resid == 0) { ccb_flags flags; uint8_t *data_ptr; uint32_t dxfer_len; flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT| CAM_TAG_ACTION_VALID); ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len, &csio->sglist_cnt); if (((flags & CAM_SEND_STATUS) == 0) && (dxfer_len == 0)) { printf("%s: tag %04x no status or " "len cdb = %02x\n", __func__, atio->tag_id, atio_cdb_ptr(atio)[0]); printf("%s: tag %04x io status %#x\n", __func__, atio->tag_id, io->io_hdr.status); } cam_fill_ctio(csio, /*retries*/ 2, ctlfedone, flags, (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, atio->tag_id, atio->init_id, 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, CTLFE_TIMEOUT * 1000); csio->ccb_h.flags |= CAM_UNLOCKED; csio->resid = 0; csio->ccb_h.ccb_atio = atio; io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; softc->ctios_sent++; mtx_unlock(mtx); xpt_action((union ccb *)csio); } else { /* * Release the CTIO. The ATIO will be sent back * down to the SIM once we send status. */ xpt_release_ccb(done_ccb); mtx_unlock(mtx); /* Call the backend move done callback */ io->scsiio.be_move_done(io); } return; } break; } case XPT_IMMEDIATE_NOTIFY: { union ctl_io *io; struct ccb_immediate_notify *inot; int send_ctl_io; LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); inot = &done_ccb->cin1; io = done_ccb->ccb_h.io_ptr; ctl_zero_io(io); send_ctl_io = 1; io->io_hdr.io_type = CTL_IO_TASK; PRIV_CCB(io) = done_ccb; inot->ccb_h.io_ptr = io; io->io_hdr.nexus.initid = inot->initiator_id; io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; if (bus_softc->hba_misc & PIM_EXTLUNS) { io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(inot->ccb_h.target_lun)); } else { io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun; } /* XXX KDM should this be the tag_id? */ io->taskio.tag_num = inot->seq_id; status = inot->ccb_h.status & CAM_STATUS_MASK; switch (status) { case CAM_SCSI_BUS_RESET: io->taskio.task_action = CTL_TASK_BUS_RESET; break; case CAM_BDR_SENT: io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case CAM_MESSAGE_RECV: switch (inot->arg) { case MSG_ABORT_TASK_SET: io->taskio.task_action = CTL_TASK_ABORT_TASK_SET; break; case MSG_TARGET_RESET: io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case MSG_ABORT_TASK: io->taskio.task_action = CTL_TASK_ABORT_TASK; break; case MSG_LOGICAL_UNIT_RESET: io->taskio.task_action = CTL_TASK_LUN_RESET; break; case MSG_CLEAR_TASK_SET: io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET; break; case MSG_CLEAR_ACA: io->taskio.task_action = CTL_TASK_CLEAR_ACA; break; case MSG_QUERY_TASK: io->taskio.task_action = CTL_TASK_QUERY_TASK; break; case MSG_QUERY_TASK_SET: io->taskio.task_action = CTL_TASK_QUERY_TASK_SET; break; case MSG_QUERY_ASYNC_EVENT: io->taskio.task_action = CTL_TASK_QUERY_ASYNC_EVENT; break; case MSG_NOOP: send_ctl_io = 0; break; default: xpt_print(periph->path, "%s: unsupported INOT message 0x%x\n", __func__, inot->arg); send_ctl_io = 0; break; } break; default: xpt_print(periph->path, "%s: unsupported INOT status 0x%x\n", __func__, status); /* FALLTHROUGH */ case CAM_REQ_ABORTED: case CAM_REQ_INVALID: case CAM_DEV_NOT_THERE: case CAM_PROVIDE_FAIL: ctlfe_free_ccb(periph, done_ccb); goto out; } if (send_ctl_io != 0) { ctl_queue(io); } else { done_ccb->ccb_h.status = CAM_REQ_INPROG; done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; xpt_action(done_ccb); } break; } case XPT_NOTIFY_ACKNOWLEDGE: /* Queue this back down to the SIM as an immediate notify. 
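 * (The same CCB is reused: its func_code is flipped back to
 * XPT_IMMEDIATE_NOTIFY and ctlfe_requeue_ccb() resubmits it, or frees it
 * if the peripheral has been invalidated in the meantime.)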
*/ done_ccb->ccb_h.status = CAM_REQ_INPROG; done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; ctlfe_requeue_ccb(periph, done_ccb, /* unlock */1); return; case XPT_SET_SIM_KNOB: case XPT_GET_SIM_KNOB: case XPT_GET_SIM_KNOB_OLD: break; default: panic("%s: unexpected CCB type %#x", __func__, done_ccb->ccb_h.func_code); break; } out: mtx_unlock(mtx); } static void ctlfe_onoffline(void *arg, int online) { struct ctlfe_softc *bus_softc = arg; union ccb *ccb; cam_status status; struct cam_path *path; int set_wwnn = 0; status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path!\n", __func__); return; } ccb = xpt_alloc_ccb(); xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); ccb->ccb_h.func_code = XPT_GET_SIM_KNOB; xpt_action(ccb); /* Check whether we should change WWNs. */ if (online != 0) { if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){ printf("%s: %s current WWNN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwnn); printf("%s: %s current WWPN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwpn); /* * If the user has specified a WWNN/WWPN, send them * down to the SIM. Otherwise, record what the SIM * has reported. */ if (bus_softc->port.wwnn != 0 && bus_softc->port.wwnn != ccb->knob.xport_specific.fc.wwnn) { ccb->knob.xport_specific.fc.wwnn = bus_softc->port.wwnn; set_wwnn = 1; } else { ctl_port_set_wwns(&bus_softc->port, true, ccb->knob.xport_specific.fc.wwnn, false, 0); } if (bus_softc->port.wwpn != 0 && bus_softc->port.wwpn != ccb->knob.xport_specific.fc.wwpn) { ccb->knob.xport_specific.fc.wwpn = bus_softc->port.wwpn; set_wwnn = 1; } else { ctl_port_set_wwns(&bus_softc->port, false, 0, true, ccb->knob.xport_specific.fc.wwpn); } } else { printf("%s: %s has no valid WWNN/WWPN\n", __func__, bus_softc->port_name); if (bus_softc->port.wwnn != 0) { ccb->knob.xport_specific.fc.wwnn = bus_softc->port.wwnn; set_wwnn = 1; } if (bus_softc->port.wwpn != 0) { ccb->knob.xport_specific.fc.wwpn = bus_softc->port.wwpn; set_wwnn = 1; } } } if (set_wwnn) { ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; ccb->knob.xport_specific.valid = KNOB_VALID_ADDRESS; xpt_action(ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { printf("%s: %s (path id %d) failed set WWNs: %#x\n", __func__, bus_softc->port_name, bus_softc->path_id, ccb->ccb_h.status); } else { printf("%s: %s new WWNN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwnn); printf("%s: %s new WWPN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwpn); } } /* Check whether we should change role. */ if ((ccb->knob.xport_specific.valid & KNOB_VALID_ROLE) == 0 || ((online != 0) ^ ((ccb->knob.xport_specific.fc.role & KNOB_ROLE_TARGET) != 0)) != 0) { ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; if (online) ccb->knob.xport_specific.fc.role |= KNOB_ROLE_TARGET; else ccb->knob.xport_specific.fc.role &= ~KNOB_ROLE_TARGET; xpt_action(ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { printf("%s: %s (path id %d) failed %s target role: %#x\n", __func__, bus_softc->port_name, bus_softc->path_id, online ? "enable" : "disable", ccb->ccb_h.status); } else { printf("%s: %s (path id %d) target role %s succeeded\n", __func__, bus_softc->port_name, bus_softc->path_id, online ? 
"enable" : "disable"); } } xpt_free_path(path); xpt_free_ccb(ccb); } static void ctlfe_online(void *arg) { struct ctlfe_softc *bus_softc; struct cam_path *path; cam_status status; struct ctlfe_lun_softc *lun_softc; struct cam_periph *periph; bus_softc = (struct ctlfe_softc *)arg; /* * Create the wildcard LUN before bringing the port online. */ status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path for wildcard periph\n", __func__); return; } lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, M_WAITOK | M_ZERO); xpt_path_lock(path); periph = cam_periph_find(path, "ctl"); if (periph != NULL) { /* We've already got a periph, no need to alloc a new one. */ xpt_path_unlock(path); xpt_free_path(path); free(lun_softc, M_CTLFE); return; } lun_softc->parent_softc = bus_softc; lun_softc->flags |= CTLFE_LUN_WILDCARD; status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate, ctlfecleanup, ctlfestart, "ctl", CAM_PERIPH_BIO, path, ctlfeasync, 0, lun_softc); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("%s: CAM error %s (%#x) returned from " "cam_periph_alloc()\n", __func__, (entry != NULL) ? entry->status_text : "Unknown", status); free(lun_softc, M_CTLFE); } xpt_path_unlock(path); ctlfe_onoffline(arg, /*online*/ 1); xpt_free_path(path); } static void ctlfe_offline(void *arg) { struct ctlfe_softc *bus_softc; struct cam_path *path; cam_status status; struct cam_periph *periph; bus_softc = (struct ctlfe_softc *)arg; ctlfe_onoffline(arg, /*online*/ 0); /* * Disable the wildcard LUN for this port now that we have taken * the port offline. */ status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path for wildcard periph\n", __func__); return; } xpt_path_lock(path); if ((periph = cam_periph_find(path, "ctl")) != NULL) cam_periph_invalidate(periph); xpt_path_unlock(path); xpt_free_path(path); } /* * This will get called to enable a LUN on every bus that is attached to * CTL. So we only need to create a path/periph for this particular bus. */ static int ctlfe_lun_enable(void *arg, int lun_id) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; struct cam_path *path; struct cam_periph *periph; cam_status status; bus_softc = (struct ctlfe_softc *)arg; if (bus_softc->hba_misc & PIM_EXTLUNS) lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id)); status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, bus_softc->target_id, lun_id); /* XXX KDM need some way to return status to CTL here? */ if (status != CAM_REQ_CMP) { printf("%s: could not create path, status %#x\n", __func__, status); return (1); } softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO); xpt_path_lock(path); periph = cam_periph_find(path, "ctl"); if (periph != NULL) { /* We've already got a periph, no need to alloc a new one. */ xpt_path_unlock(path); xpt_free_path(path); free(softc, M_CTLFE); return (0); } softc->parent_softc = bus_softc; status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate, ctlfecleanup, ctlfestart, "ctl", CAM_PERIPH_BIO, path, ctlfeasync, 0, softc); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("%s: CAM error %s (%#x) returned from " "cam_periph_alloc()\n", __func__, (entry != NULL) ? 
entry->status_text : "Unknown", status); free(softc, M_CTLFE); } xpt_path_unlock(path); xpt_free_path(path); return (0); } /* * This will get called when the user removes a LUN to disable that LUN * on every bus that is attached to CTL. */ static int ctlfe_lun_disable(void *arg, int lun_id) { struct ctlfe_softc *softc; struct ctlfe_lun_softc *lun_softc; softc = (struct ctlfe_softc *)arg; if (softc->hba_misc & PIM_EXTLUNS) lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id)); mtx_lock(&softc->lun_softc_mtx); STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) { struct cam_path *path; path = lun_softc->periph->path; if ((xpt_path_target_id(path) == softc->target_id) && (xpt_path_lun_id(path) == lun_id)) { break; } } if (lun_softc == NULL) { mtx_unlock(&softc->lun_softc_mtx); printf("%s: can't find lun %d\n", __func__, lun_id); return (1); } cam_periph_acquire(lun_softc->periph); mtx_unlock(&softc->lun_softc_mtx); cam_periph_lock(lun_softc->periph); cam_periph_invalidate(lun_softc->periph); cam_periph_unlock(lun_softc->periph); cam_periph_release(lun_softc->periph); return (0); } static void ctlfe_dump_sim(struct cam_sim *sim) { printf("%s%d: max dev openings: %d, max tagged dev openings: %d\n", sim->sim_name, sim->unit_number, sim->max_dev_openings, sim->max_tagged_dev_openings); } /* * Assumes that the SIM lock is held. */ static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc) { struct cam_periph *periph = softc->periph; struct ccb_hdr *hdr; struct ccb_getdevstats cgds; int num_items; xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgds.ccb_h.func_code = XPT_GDEV_STATS; xpt_action((union ccb *)&cgds); if ((cgds.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { xpt_print(periph->path, "devq: openings %d, active %d, " "allocated %d, queued %d, held %d\n", cgds.dev_openings, cgds.dev_active, cgds.allocated, cgds.queued, cgds.held); } num_items = 0; STAILQ_FOREACH(hdr, &softc->work_queue, periph_links.stqe) { union ctl_io *io = hdr->io_ptr; num_items++; /* * Only regular SCSI I/O is put on the work * queue, so we can print sense here. There may be no * sense if it's no the queue for a DMA, but this serves to * print out the CCB as well. * * XXX KDM switch this over to scsi_sense_print() when * CTL is merged in with CAM. */ ctl_io_error_print(io, NULL); /* * Print DMA status if we are DMA_QUEUED. */ if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) { xpt_print(periph->path, "Total %u, Current %u, Resid %u\n", io->scsiio.kern_total_len, io->scsiio.kern_data_len, io->scsiio.kern_data_resid); } } xpt_print(periph->path, "%d requests waiting for CCBs\n", num_items); xpt_print(periph->path, "%d CTIOs outstanding\n", softc->ctios_sent); } /* * Datamove/done routine called by CTL. Put ourselves on the queue to * receive a CCB from CAM so we can queue the continue I/O request down * to the adapter. 
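 * For normal SCSI I/O neither routine issues the CTIO itself: the I/O is
 * flagged (CTL_FLAG_DMA_QUEUED and/or CTL_FLAG_STATUS_QUEUED), its saved
 * CCB is appended to the LUN's work_queue and xpt_schedule() is called,
 * after which ctlfestart() builds and sends the CTIO.  Task management
 * completions are acknowledged directly via XPT_NOTIFY_ACKNOWLEDGE.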
*/ static void ctlfe_datamove(union ctl_io *io) { union ccb *ccb; struct cam_periph *periph; struct ctlfe_lun_softc *softc; KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type)); io->scsiio.ext_data_filled = 0; ccb = PRIV_CCB(io); periph = xpt_path_periph(ccb->ccb_h.path); cam_periph_lock(periph); softc = (struct ctlfe_lun_softc *)periph->softc; io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; STAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, periph_links.stqe); xpt_schedule(periph, CAM_PRIORITY_NORMAL); cam_periph_unlock(periph); } static void ctlfe_done(union ctl_io *io) { union ccb *ccb; struct cam_periph *periph; struct ctlfe_lun_softc *softc; ccb = PRIV_CCB(io); periph = xpt_path_periph(ccb->ccb_h.path); cam_periph_lock(periph); softc = (struct ctlfe_lun_softc *)periph->softc; if (io->io_hdr.io_type == CTL_IO_TASK) { /* * Send the notify acknowledge down to the SIM, to let it * know we processed the task management command. */ ccb->ccb_h.status = CAM_REQ_INPROG; ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; switch (io->taskio.task_status) { case CTL_TASK_FUNCTION_COMPLETE: ccb->cna2.arg = CAM_RSP_TMF_COMPLETE; break; case CTL_TASK_FUNCTION_SUCCEEDED: ccb->cna2.arg = CAM_RSP_TMF_SUCCEEDED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_FUNCTION_REJECTED: ccb->cna2.arg = CAM_RSP_TMF_REJECTED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_LUN_DOES_NOT_EXIST: ccb->cna2.arg = CAM_RSP_TMF_INCORRECT_LUN; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_FUNCTION_NOT_SUPPORTED: ccb->cna2.arg = CAM_RSP_TMF_FAILED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; } ccb->cna2.arg |= scsi_3btoul(io->taskio.task_resp) << 8; xpt_action(ccb); } else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) { ctlfe_requeue_ccb(periph, ccb, /* unlock */1); return; } else { io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; STAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, periph_links.stqe); xpt_schedule(periph, CAM_PRIORITY_NORMAL); } cam_periph_unlock(periph); } static void ctlfe_dump(void) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *lun_softc; STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) { ctlfe_dump_sim(bus_softc->sim); STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) ctlfe_dump_queue(lun_softc); } } Index: head/sys/dev/iscsi/iscsi.c =================================================================== --- head/sys/dev/iscsi/iscsi.c (revision 367043) +++ head/sys/dev/iscsi/iscsi.c (revision 367044) @@ -1,2625 +1,2630 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2012 The FreeBSD Foundation * All rights reserved. * * This software was developed by Edward Tomasz Napierala under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ICL_KERNEL_PROXY #include #endif #ifdef ICL_KERNEL_PROXY FEATURE(iscsi_kernel_proxy, "iSCSI initiator built with ICL_KERNEL_PROXY"); #endif /* * XXX: This is global so the iscsi_unload() can access it. * Think about how to do this properly. */ static struct iscsi_softc *sc; SYSCTL_NODE(_kern, OID_AUTO, iscsi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "iSCSI initiator"); static int debug = 1; SYSCTL_INT(_kern_iscsi, OID_AUTO, debug, CTLFLAG_RWTUN, &debug, 0, "Enable debug messages"); static int ping_timeout = 5; SYSCTL_INT(_kern_iscsi, OID_AUTO, ping_timeout, CTLFLAG_RWTUN, &ping_timeout, 0, "Timeout for ping (NOP-Out) requests, in seconds"); static int iscsid_timeout = 60; SYSCTL_INT(_kern_iscsi, OID_AUTO, iscsid_timeout, CTLFLAG_RWTUN, &iscsid_timeout, 0, "Time to wait for iscsid(8) to handle reconnection, in seconds"); static int login_timeout = 60; SYSCTL_INT(_kern_iscsi, OID_AUTO, login_timeout, CTLFLAG_RWTUN, &login_timeout, 0, "Time to wait for iscsid(8) to finish Login Phase, in seconds"); static int maxtags = 255; SYSCTL_INT(_kern_iscsi, OID_AUTO, maxtags, CTLFLAG_RWTUN, &maxtags, 0, "Max number of IO requests queued"); static int fail_on_disconnection = 0; SYSCTL_INT(_kern_iscsi, OID_AUTO, fail_on_disconnection, CTLFLAG_RWTUN, &fail_on_disconnection, 0, "Destroy CAM SIM on connection failure"); static int fail_on_shutdown = 1; SYSCTL_INT(_kern_iscsi, OID_AUTO, fail_on_shutdown, CTLFLAG_RWTUN, &fail_on_shutdown, 0, "Fail disconnected sessions on shutdown"); static MALLOC_DEFINE(M_ISCSI, "iSCSI", "iSCSI initiator"); static uma_zone_t iscsi_outstanding_zone; #define CONN_SESSION(X) ((struct iscsi_session *)X->ic_prv0) #define PDU_SESSION(X) (CONN_SESSION(X->ip_conn)) #define ISCSI_DEBUG(X, ...) \ do { \ if (debug > 1) \ printf("%s: " X "\n", __func__, ## __VA_ARGS__);\ } while (0) #define ISCSI_WARN(X, ...) \ do { \ if (debug > 0) { \ printf("WARNING: %s: " X "\n", \ __func__, ## __VA_ARGS__); \ } \ } while (0) #define ISCSI_SESSION_DEBUG(S, X, ...) \ do { \ if (debug > 1) { \ printf("%s: %s (%s): " X "\n", \ __func__, S->is_conf.isc_target_addr, \ S->is_conf.isc_target, ## __VA_ARGS__); \ } \ } while (0) #define ISCSI_SESSION_WARN(S, X, ...) 
\ do { \ if (debug > 0) { \ printf("WARNING: %s (%s): " X "\n", \ S->is_conf.isc_target_addr, \ S->is_conf.isc_target, ## __VA_ARGS__); \ } \ } while (0) #define ISCSI_SESSION_LOCK(X) mtx_lock(&X->is_lock) #define ISCSI_SESSION_UNLOCK(X) mtx_unlock(&X->is_lock) #define ISCSI_SESSION_LOCK_ASSERT(X) mtx_assert(&X->is_lock, MA_OWNED) #define ISCSI_SESSION_LOCK_ASSERT_NOT(X) mtx_assert(&X->is_lock, MA_NOTOWNED) static int iscsi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int mode, struct thread *td); static struct cdevsw iscsi_cdevsw = { .d_version = D_VERSION, .d_ioctl = iscsi_ioctl, .d_name = "iscsi", }; static void iscsi_pdu_queue_locked(struct icl_pdu *request); static void iscsi_pdu_queue(struct icl_pdu *request); static void iscsi_pdu_update_statsn(const struct icl_pdu *response); static void iscsi_pdu_handle_nop_in(struct icl_pdu *response); static void iscsi_pdu_handle_scsi_response(struct icl_pdu *response); static void iscsi_pdu_handle_task_response(struct icl_pdu *response); static void iscsi_pdu_handle_data_in(struct icl_pdu *response); static void iscsi_pdu_handle_logout_response(struct icl_pdu *response); static void iscsi_pdu_handle_r2t(struct icl_pdu *response); static void iscsi_pdu_handle_async_message(struct icl_pdu *response); static void iscsi_pdu_handle_reject(struct icl_pdu *response); static void iscsi_session_reconnect(struct iscsi_session *is); static void iscsi_session_terminate(struct iscsi_session *is); static void iscsi_action(struct cam_sim *sim, union ccb *ccb); static void iscsi_poll(struct cam_sim *sim); static struct iscsi_outstanding *iscsi_outstanding_find(struct iscsi_session *is, uint32_t initiator_task_tag); static struct iscsi_outstanding *iscsi_outstanding_add(struct iscsi_session *is, struct icl_pdu *request, union ccb *ccb, uint32_t *initiator_task_tagp); static void iscsi_outstanding_remove(struct iscsi_session *is, struct iscsi_outstanding *io); static bool iscsi_pdu_prepare(struct icl_pdu *request) { struct iscsi_session *is; struct iscsi_bhs_scsi_command *bhssc; is = PDU_SESSION(request); ISCSI_SESSION_LOCK_ASSERT(is); /* * We're only using fields common for all the request * (initiator -> target) PDUs. */ bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs; /* * Data-Out PDU does not contain CmdSN. */ if (bhssc->bhssc_opcode != ISCSI_BHS_OPCODE_SCSI_DATA_OUT) { if (ISCSI_SNGT(is->is_cmdsn, is->is_maxcmdsn) && (bhssc->bhssc_opcode & ISCSI_BHS_OPCODE_IMMEDIATE) == 0) { /* * Current MaxCmdSN prevents us from sending any more * SCSI Command PDUs to the target; postpone the PDU. * It will get resent by either iscsi_pdu_queue(), * or by maintenance thread. 
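* In other words, a non-immediate command may only be sent while
* CmdSN <= MaxCmdSN; the ISCSI_SNGT() check above is the wraparound-safe
* serial number comparison.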
*/ #if 0 ISCSI_SESSION_DEBUG(is, "postponing send, CmdSN %u, " "ExpCmdSN %u, MaxCmdSN %u, opcode 0x%x", is->is_cmdsn, is->is_expcmdsn, is->is_maxcmdsn, bhssc->bhssc_opcode); #endif return (true); } bhssc->bhssc_cmdsn = htonl(is->is_cmdsn); if ((bhssc->bhssc_opcode & ISCSI_BHS_OPCODE_IMMEDIATE) == 0) is->is_cmdsn++; } bhssc->bhssc_expstatsn = htonl(is->is_statsn + 1); return (false); } static void iscsi_session_send_postponed(struct iscsi_session *is) { struct icl_pdu *request; bool postpone; ISCSI_SESSION_LOCK_ASSERT(is); if (STAILQ_EMPTY(&is->is_postponed)) return; while ((request = STAILQ_FIRST(&is->is_postponed)) != NULL) { postpone = iscsi_pdu_prepare(request); if (postpone) return; STAILQ_REMOVE_HEAD(&is->is_postponed, ip_next); icl_pdu_queue(request); } xpt_release_simq(is->is_sim, 1); } static void iscsi_pdu_queue_locked(struct icl_pdu *request) { struct iscsi_session *is; bool postpone; is = PDU_SESSION(request); ISCSI_SESSION_LOCK_ASSERT(is); iscsi_session_send_postponed(is); postpone = iscsi_pdu_prepare(request); if (postpone) { if (STAILQ_EMPTY(&is->is_postponed)) xpt_freeze_simq(is->is_sim, 1); STAILQ_INSERT_TAIL(&is->is_postponed, request, ip_next); return; } icl_pdu_queue(request); } static void iscsi_pdu_queue(struct icl_pdu *request) { struct iscsi_session *is; is = PDU_SESSION(request); ISCSI_SESSION_LOCK(is); iscsi_pdu_queue_locked(request); ISCSI_SESSION_UNLOCK(is); } static void iscsi_session_logout(struct iscsi_session *is) { struct icl_pdu *request; struct iscsi_bhs_logout_request *bhslr; request = icl_pdu_new(is->is_conn, M_NOWAIT); if (request == NULL) return; bhslr = (struct iscsi_bhs_logout_request *)request->ip_bhs; bhslr->bhslr_opcode = ISCSI_BHS_OPCODE_LOGOUT_REQUEST; bhslr->bhslr_reason = BHSLR_REASON_CLOSE_SESSION; iscsi_pdu_queue_locked(request); } static void iscsi_session_terminate_task(struct iscsi_session *is, struct iscsi_outstanding *io, cam_status status) { ISCSI_SESSION_LOCK_ASSERT(is); if (io->io_ccb != NULL) { io->io_ccb->ccb_h.status &= ~(CAM_SIM_QUEUED | CAM_STATUS_MASK); io->io_ccb->ccb_h.status |= status; if ((io->io_ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { io->io_ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(io->io_ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } xpt_done(io->io_ccb); } iscsi_outstanding_remove(is, io); } static void iscsi_session_terminate_tasks(struct iscsi_session *is, cam_status status) { struct iscsi_outstanding *io, *tmp; ISCSI_SESSION_LOCK_ASSERT(is); TAILQ_FOREACH_SAFE(io, &is->is_outstanding, io_next, tmp) { iscsi_session_terminate_task(is, io, status); } } static void iscsi_session_cleanup(struct iscsi_session *is, bool destroy_sim) { struct icl_pdu *pdu; ISCSI_SESSION_LOCK_ASSERT(is); /* * Don't queue any new PDUs. */ if (is->is_sim != NULL && is->is_simq_frozen == false) { ISCSI_SESSION_DEBUG(is, "freezing"); xpt_freeze_simq(is->is_sim, 1); is->is_simq_frozen = true; } /* * Remove postponed PDUs. */ if (!STAILQ_EMPTY(&is->is_postponed)) xpt_release_simq(is->is_sim, 1); while ((pdu = STAILQ_FIRST(&is->is_postponed)) != NULL) { STAILQ_REMOVE_HEAD(&is->is_postponed, ip_next); icl_pdu_free(pdu); } if (destroy_sim == false) { /* * Terminate SCSI tasks, asking CAM to requeue them. 
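* CAM_REQUEUE_REQ makes the upper layers retry these commands once the
* device queue is released, instead of failing them back to the caller.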
*/ iscsi_session_terminate_tasks(is, CAM_REQUEUE_REQ); return; } iscsi_session_terminate_tasks(is, CAM_DEV_NOT_THERE); if (is->is_sim == NULL) return; ISCSI_SESSION_DEBUG(is, "deregistering SIM"); xpt_async(AC_LOST_DEVICE, is->is_path, NULL); if (is->is_simq_frozen) { is->is_simq_frozen = false; xpt_release_simq(is->is_sim, 1); } xpt_free_path(is->is_path); is->is_path = NULL; xpt_bus_deregister(cam_sim_path(is->is_sim)); cam_sim_free(is->is_sim, TRUE /*free_devq*/); is->is_sim = NULL; is->is_devq = NULL; } static void iscsi_maintenance_thread_reconnect(struct iscsi_session *is) { icl_conn_close(is->is_conn); ISCSI_SESSION_LOCK(is); is->is_connected = false; is->is_reconnecting = false; is->is_login_phase = false; #ifdef ICL_KERNEL_PROXY if (is->is_login_pdu != NULL) { icl_pdu_free(is->is_login_pdu); is->is_login_pdu = NULL; } cv_signal(&is->is_login_cv); #endif if (fail_on_disconnection) { ISCSI_SESSION_DEBUG(is, "connection failed, destroying devices"); iscsi_session_cleanup(is, true); } else { iscsi_session_cleanup(is, false); } KASSERT(TAILQ_EMPTY(&is->is_outstanding), ("destroying session with active tasks")); KASSERT(STAILQ_EMPTY(&is->is_postponed), ("destroying session with postponed PDUs")); if (is->is_conf.isc_enable == 0 && is->is_conf.isc_discovery == 0) { ISCSI_SESSION_UNLOCK(is); return; } /* * Request immediate reconnection from iscsid(8). */ //ISCSI_SESSION_DEBUG(is, "waking up iscsid(8)"); is->is_waiting_for_iscsid = true; strlcpy(is->is_reason, "Waiting for iscsid(8)", sizeof(is->is_reason)); is->is_timeout = 0; ISCSI_SESSION_UNLOCK(is); cv_signal(&is->is_softc->sc_cv); } static void iscsi_maintenance_thread_terminate(struct iscsi_session *is) { struct iscsi_softc *sc; sc = is->is_softc; sx_xlock(&sc->sc_lock); TAILQ_REMOVE(&sc->sc_sessions, is, is_next); sx_xunlock(&sc->sc_lock); icl_conn_close(is->is_conn); callout_drain(&is->is_callout); ISCSI_SESSION_LOCK(is); KASSERT(is->is_terminating, ("is_terminating == false")); #ifdef ICL_KERNEL_PROXY if (is->is_login_pdu != NULL) { icl_pdu_free(is->is_login_pdu); is->is_login_pdu = NULL; } cv_signal(&is->is_login_cv); #endif iscsi_session_cleanup(is, true); KASSERT(TAILQ_EMPTY(&is->is_outstanding), ("destroying session with active tasks")); KASSERT(STAILQ_EMPTY(&is->is_postponed), ("destroying session with postponed PDUs")); ISCSI_SESSION_UNLOCK(is); icl_conn_free(is->is_conn); mtx_destroy(&is->is_lock); cv_destroy(&is->is_maintenance_cv); #ifdef ICL_KERNEL_PROXY cv_destroy(&is->is_login_cv); #endif ISCSI_SESSION_DEBUG(is, "terminated"); free(is, M_ISCSI); /* * The iscsi_unload() routine might be waiting. */ cv_signal(&sc->sc_cv); } static void iscsi_maintenance_thread(void *arg) { struct iscsi_session *is = arg; ISCSI_SESSION_LOCK(is); for (;;) { if (is->is_reconnecting == false && is->is_terminating == false && (STAILQ_EMPTY(&is->is_postponed) || ISCSI_SNGT(is->is_cmdsn, is->is_maxcmdsn))) cv_wait(&is->is_maintenance_cv, &is->is_lock); /* Terminate supersedes reconnect. */ if (is->is_terminating) { ISCSI_SESSION_UNLOCK(is); iscsi_maintenance_thread_terminate(is); kthread_exit(); return; } if (is->is_reconnecting) { ISCSI_SESSION_UNLOCK(is); iscsi_maintenance_thread_reconnect(is); ISCSI_SESSION_LOCK(is); continue; } iscsi_session_send_postponed(is); } ISCSI_SESSION_UNLOCK(is); } static void iscsi_session_reconnect(struct iscsi_session *is) { /* * XXX: We can't use locking here, because * it's being called from various contexts. * Hope it doesn't break anything. 
*/ if (is->is_reconnecting) return; is->is_reconnecting = true; cv_signal(&is->is_maintenance_cv); } static void iscsi_session_terminate(struct iscsi_session *is) { if (is->is_terminating) return; is->is_terminating = true; #if 0 iscsi_session_logout(is); #endif cv_signal(&is->is_maintenance_cv); } static void iscsi_callout(void *context) { struct icl_pdu *request; struct iscsi_bhs_nop_out *bhsno; struct iscsi_session *is; bool reconnect_needed = false; is = context; ISCSI_SESSION_LOCK(is); if (is->is_terminating) { ISCSI_SESSION_UNLOCK(is); return; } callout_schedule(&is->is_callout, 1 * hz); if (is->is_conf.isc_enable == 0) goto out; is->is_timeout++; if (is->is_waiting_for_iscsid) { if (iscsid_timeout > 0 && is->is_timeout > iscsid_timeout) { ISCSI_SESSION_WARN(is, "timed out waiting for iscsid(8) " "for %d seconds; reconnecting", is->is_timeout); reconnect_needed = true; } goto out; } if (is->is_login_phase) { if (login_timeout > 0 && is->is_timeout > login_timeout) { ISCSI_SESSION_WARN(is, "login timed out after %d seconds; " "reconnecting", is->is_timeout); reconnect_needed = true; } goto out; } if (ping_timeout <= 0) { /* * Pings are disabled. Don't send NOP-Out in this case. * Reset the timeout, to avoid triggering reconnection, * should the user decide to reenable them. */ is->is_timeout = 0; goto out; } if (is->is_timeout >= ping_timeout) { ISCSI_SESSION_WARN(is, "no ping reply (NOP-In) after %d seconds; " "reconnecting", ping_timeout); reconnect_needed = true; goto out; } ISCSI_SESSION_UNLOCK(is); /* * If the ping was reset less than one second ago - which means * that we've received some PDU during the last second - assume * the traffic flows correctly and don't bother sending a NOP-Out. * * (It's 2 - one for one second, and one for incrementing is_timeout * earlier in this routine.) */ if (is->is_timeout < 2) return; request = icl_pdu_new(is->is_conn, M_NOWAIT); if (request == NULL) { ISCSI_SESSION_WARN(is, "failed to allocate PDU"); return; } bhsno = (struct iscsi_bhs_nop_out *)request->ip_bhs; bhsno->bhsno_opcode = ISCSI_BHS_OPCODE_NOP_OUT | ISCSI_BHS_OPCODE_IMMEDIATE; bhsno->bhsno_flags = 0x80; bhsno->bhsno_target_transfer_tag = 0xffffffff; iscsi_pdu_queue(request); return; out: if (is->is_terminating) { ISCSI_SESSION_UNLOCK(is); return; } ISCSI_SESSION_UNLOCK(is); if (reconnect_needed) iscsi_session_reconnect(is); } static void iscsi_pdu_update_statsn(const struct icl_pdu *response) { const struct iscsi_bhs_data_in *bhsdi; struct iscsi_session *is; uint32_t expcmdsn, maxcmdsn, statsn; is = PDU_SESSION(response); ISCSI_SESSION_LOCK_ASSERT(is); /* * We're only using fields common for all the response * (target -> initiator) PDUs. */ bhsdi = (const struct iscsi_bhs_data_in *)response->ip_bhs; /* * Ok, I lied. In case of Data-In, "The fields StatSN, Status, * and Residual Count only have meaningful content if the S bit * is set to 1", so we also need to check the bit specific for * Data-In PDU. 
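* For Data-In that means checking BHSDI_FLAGS_S below; for every other
* response opcode the StatSN field is always meaningful.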
if (bhsdi->bhsdi_opcode != ISCSI_BHS_OPCODE_SCSI_DATA_IN || (bhsdi->bhsdi_flags & BHSDI_FLAGS_S) != 0) { statsn = ntohl(bhsdi->bhsdi_statsn); if (statsn != is->is_statsn && statsn != (is->is_statsn + 1)) { /* XXX: This is a normal situation for MCS */ ISCSI_SESSION_WARN(is, "PDU 0x%x StatSN %u != " "session ExpStatSN %u (or + 1); reconnecting", bhsdi->bhsdi_opcode, statsn, is->is_statsn); iscsi_session_reconnect(is); } if (ISCSI_SNGT(statsn, is->is_statsn)) is->is_statsn = statsn; } expcmdsn = ntohl(bhsdi->bhsdi_expcmdsn); maxcmdsn = ntohl(bhsdi->bhsdi_maxcmdsn); if (ISCSI_SNLT(maxcmdsn + 1, expcmdsn)) { ISCSI_SESSION_DEBUG(is, "PDU MaxCmdSN %u + 1 < PDU ExpCmdSN %u; ignoring", maxcmdsn, expcmdsn); } else { if (ISCSI_SNGT(maxcmdsn, is->is_maxcmdsn)) { is->is_maxcmdsn = maxcmdsn; /* * Command window increased; kick the maintenance thread * to send out postponed commands. */ if (!STAILQ_EMPTY(&is->is_postponed)) cv_signal(&is->is_maintenance_cv); } else if (ISCSI_SNLT(maxcmdsn, is->is_maxcmdsn)) { /* XXX: This is a normal situation for MCS */ ISCSI_SESSION_DEBUG(is, "PDU MaxCmdSN %u < session MaxCmdSN %u; ignoring", maxcmdsn, is->is_maxcmdsn); } if (ISCSI_SNGT(expcmdsn, is->is_expcmdsn)) { is->is_expcmdsn = expcmdsn; } else if (ISCSI_SNLT(expcmdsn, is->is_expcmdsn)) { /* XXX: This is a normal situation for MCS */ ISCSI_SESSION_DEBUG(is, "PDU ExpCmdSN %u < session ExpCmdSN %u; ignoring", expcmdsn, is->is_expcmdsn); } } /* * Every incoming PDU - not just NOP-In - resets the ping timer. * The purpose of the timeout is to reset the connection when it stalls; * we don't want this to happen when NOP-In or NOP-Out ends up delayed * in some queue. */ is->is_timeout = 0; } static void iscsi_receive_callback(struct icl_pdu *response) { struct iscsi_session *is; is = PDU_SESSION(response); ISCSI_SESSION_LOCK(is); iscsi_pdu_update_statsn(response); #ifdef ICL_KERNEL_PROXY if (is->is_login_phase) { if (is->is_login_pdu == NULL) is->is_login_pdu = response; else icl_pdu_free(response); ISCSI_SESSION_UNLOCK(is); cv_signal(&is->is_login_cv); return; } #endif /* * The handling routine is responsible for freeing the PDU * when it's no longer needed. */ switch (response->ip_bhs->bhs_opcode) { case ISCSI_BHS_OPCODE_NOP_IN: iscsi_pdu_handle_nop_in(response); ISCSI_SESSION_UNLOCK(is); break; case ISCSI_BHS_OPCODE_SCSI_RESPONSE: iscsi_pdu_handle_scsi_response(response); /* Session lock dropped inside. */ ISCSI_SESSION_LOCK_ASSERT_NOT(is); break; case ISCSI_BHS_OPCODE_TASK_RESPONSE: iscsi_pdu_handle_task_response(response); ISCSI_SESSION_UNLOCK(is); break; case ISCSI_BHS_OPCODE_SCSI_DATA_IN: iscsi_pdu_handle_data_in(response); /* Session lock dropped inside.
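* iscsi_pdu_handle_data_in() unlocks the session before copying the data
* segment and completing the CCB, just like the SCSI Response case above.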
*/ ISCSI_SESSION_LOCK_ASSERT_NOT(is); break; case ISCSI_BHS_OPCODE_LOGOUT_RESPONSE: iscsi_pdu_handle_logout_response(response); ISCSI_SESSION_UNLOCK(is); break; case ISCSI_BHS_OPCODE_R2T: iscsi_pdu_handle_r2t(response); ISCSI_SESSION_UNLOCK(is); break; case ISCSI_BHS_OPCODE_ASYNC_MESSAGE: iscsi_pdu_handle_async_message(response); ISCSI_SESSION_UNLOCK(is); break; case ISCSI_BHS_OPCODE_REJECT: iscsi_pdu_handle_reject(response); ISCSI_SESSION_UNLOCK(is); break; default: ISCSI_SESSION_WARN(is, "received PDU with unsupported " "opcode 0x%x; reconnecting", response->ip_bhs->bhs_opcode); iscsi_session_reconnect(is); ISCSI_SESSION_UNLOCK(is); icl_pdu_free(response); } } static void iscsi_error_callback(struct icl_conn *ic) { struct iscsi_session *is; is = CONN_SESSION(ic); ISCSI_SESSION_WARN(is, "connection error; reconnecting"); iscsi_session_reconnect(is); } static void iscsi_pdu_handle_nop_in(struct icl_pdu *response) { struct iscsi_session *is; struct iscsi_bhs_nop_out *bhsno; struct iscsi_bhs_nop_in *bhsni; struct icl_pdu *request; void *data = NULL; size_t datasize; int error; is = PDU_SESSION(response); bhsni = (struct iscsi_bhs_nop_in *)response->ip_bhs; if (bhsni->bhsni_target_transfer_tag == 0xffffffff) { /* * Nothing to do; iscsi_pdu_update_statsn() already * zeroed the timeout. */ icl_pdu_free(response); return; } datasize = icl_pdu_data_segment_length(response); if (datasize > 0) { data = malloc(datasize, M_ISCSI, M_NOWAIT | M_ZERO); if (data == NULL) { ISCSI_SESSION_WARN(is, "failed to allocate memory; " "reconnecting"); icl_pdu_free(response); iscsi_session_reconnect(is); return; } icl_pdu_get_data(response, 0, data, datasize); } request = icl_pdu_new(response->ip_conn, M_NOWAIT); if (request == NULL) { ISCSI_SESSION_WARN(is, "failed to allocate memory; " "reconnecting"); free(data, M_ISCSI); icl_pdu_free(response); iscsi_session_reconnect(is); return; } bhsno = (struct iscsi_bhs_nop_out *)request->ip_bhs; bhsno->bhsno_opcode = ISCSI_BHS_OPCODE_NOP_OUT | ISCSI_BHS_OPCODE_IMMEDIATE; bhsno->bhsno_flags = 0x80; bhsno->bhsno_initiator_task_tag = 0xffffffff; bhsno->bhsno_target_transfer_tag = bhsni->bhsni_target_transfer_tag; if (datasize > 0) { error = icl_pdu_append_data(request, data, datasize, M_NOWAIT); if (error != 0) { ISCSI_SESSION_WARN(is, "failed to allocate memory; " "reconnecting"); free(data, M_ISCSI); icl_pdu_free(request); icl_pdu_free(response); iscsi_session_reconnect(is); return; } free(data, M_ISCSI); } icl_pdu_free(response); iscsi_pdu_queue_locked(request); } static void iscsi_pdu_handle_scsi_response(struct icl_pdu *response) { struct iscsi_bhs_scsi_response *bhssr; struct iscsi_outstanding *io; struct iscsi_session *is; union ccb *ccb; struct ccb_scsiio *csio; size_t data_segment_len, received; uint16_t sense_len; uint32_t resid; is = PDU_SESSION(response); bhssr = (struct iscsi_bhs_scsi_response *)response->ip_bhs; io = iscsi_outstanding_find(is, bhssr->bhssr_initiator_task_tag); if (io == NULL || io->io_ccb == NULL) { ISCSI_SESSION_WARN(is, "bad itt 0x%x", bhssr->bhssr_initiator_task_tag); icl_pdu_free(response); iscsi_session_reconnect(is); ISCSI_SESSION_UNLOCK(is); return; } ccb = io->io_ccb; /* * With iSER, after getting good response we can be sure * that all the data has been successfully transferred. 
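* so io_received is reconstructed below from dxfer_len and the residual
* count reported in the response, rather than accumulated from Data-In PDUs.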
*/ if (is->is_conn->ic_iser) { resid = ntohl(bhssr->bhssr_residual_count); if (bhssr->bhssr_flags & BHSSR_FLAGS_RESIDUAL_UNDERFLOW) { io->io_received = ccb->csio.dxfer_len - resid; } else if (bhssr->bhssr_flags & BHSSR_FLAGS_RESIDUAL_OVERFLOW) { ISCSI_SESSION_WARN(is, "overflow: target indicates %d", resid); } else { io->io_received = ccb->csio.dxfer_len; } } received = io->io_received; iscsi_outstanding_remove(is, io); ISCSI_SESSION_UNLOCK(is); if (bhssr->bhssr_response != BHSSR_RESPONSE_COMMAND_COMPLETED) { ISCSI_SESSION_WARN(is, "service response 0x%x", bhssr->bhssr_response); if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_REQ_CMP_ERR | CAM_DEV_QFRZN; } else if (bhssr->bhssr_status == 0) { ccb->ccb_h.status = CAM_REQ_CMP; } else { if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_DEV_QFRZN; ccb->csio.scsi_status = bhssr->bhssr_status; } csio = &ccb->csio; data_segment_len = icl_pdu_data_segment_length(response); if (data_segment_len > 0) { if (data_segment_len < sizeof(sense_len)) { ISCSI_SESSION_WARN(is, "truncated data segment (%zd bytes)", data_segment_len); if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_REQ_CMP_ERR | CAM_DEV_QFRZN; goto out; } icl_pdu_get_data(response, 0, &sense_len, sizeof(sense_len)); sense_len = ntohs(sense_len); #if 0 ISCSI_SESSION_DEBUG(is, "sense_len %d, data len %zd", sense_len, data_segment_len); #endif if (sizeof(sense_len) + sense_len > data_segment_len) { ISCSI_SESSION_WARN(is, "truncated data segment " "(%zd bytes, should be %zd)", data_segment_len, sizeof(sense_len) + sense_len); if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_REQ_CMP_ERR | CAM_DEV_QFRZN; goto out; } else if (sizeof(sense_len) + sense_len < data_segment_len) ISCSI_SESSION_WARN(is, "oversize data segment " "(%zd bytes, should be %zd)", data_segment_len, sizeof(sense_len) + sense_len); if (sense_len > csio->sense_len) { ISCSI_SESSION_DEBUG(is, "truncating sense from %d to %d", sense_len, csio->sense_len); sense_len = csio->sense_len; } icl_pdu_get_data(response, sizeof(sense_len), &csio->sense_data, sense_len); csio->sense_resid = csio->sense_len - sense_len; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; } out: if (bhssr->bhssr_flags & BHSSR_FLAGS_RESIDUAL_UNDERFLOW) csio->resid = ntohl(bhssr->bhssr_residual_count); if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { KASSERT(received <= csio->dxfer_len, ("received > csio->dxfer_len")); if (received < csio->dxfer_len) { if (csio->resid != csio->dxfer_len - received) { ISCSI_SESSION_WARN(is, "underflow mismatch: " "target indicates %d, we calculated %zd", csio->resid, csio->dxfer_len - received); } csio->resid = csio->dxfer_len - received; } } xpt_done(ccb); icl_pdu_free(response); } static void iscsi_pdu_handle_task_response(struct icl_pdu *response) { struct iscsi_bhs_task_management_response *bhstmr; struct iscsi_outstanding *io, *aio; struct iscsi_session *is; is = PDU_SESSION(response); bhstmr = (struct iscsi_bhs_task_management_response *)response->ip_bhs; io = iscsi_outstanding_find(is, bhstmr->bhstmr_initiator_task_tag); if (io == NULL || io->io_ccb != NULL) { ISCSI_SESSION_WARN(is, "bad itt 0x%x", 
bhstmr->bhstmr_initiator_task_tag); icl_pdu_free(response); iscsi_session_reconnect(is); return; } if (bhstmr->bhstmr_response != BHSTMR_RESPONSE_FUNCTION_COMPLETE) { ISCSI_SESSION_WARN(is, "task response 0x%x", bhstmr->bhstmr_response); } else { aio = iscsi_outstanding_find(is, io->io_datasn); if (aio != NULL && aio->io_ccb != NULL) iscsi_session_terminate_task(is, aio, CAM_REQ_ABORTED); } iscsi_outstanding_remove(is, io); icl_pdu_free(response); } static void iscsi_pdu_handle_data_in(struct icl_pdu *response) { struct iscsi_bhs_data_in *bhsdi; struct iscsi_outstanding *io; struct iscsi_session *is; union ccb *ccb; struct ccb_scsiio *csio; size_t data_segment_len, received, oreceived; is = PDU_SESSION(response); bhsdi = (struct iscsi_bhs_data_in *)response->ip_bhs; io = iscsi_outstanding_find(is, bhsdi->bhsdi_initiator_task_tag); if (io == NULL || io->io_ccb == NULL) { ISCSI_SESSION_WARN(is, "bad itt 0x%x", bhsdi->bhsdi_initiator_task_tag); icl_pdu_free(response); iscsi_session_reconnect(is); ISCSI_SESSION_UNLOCK(is); return; } data_segment_len = icl_pdu_data_segment_length(response); if (data_segment_len == 0) { /* * "The sending of 0 length data segments should be avoided, * but initiators and targets MUST be able to properly receive * 0 length data segments." */ ISCSI_SESSION_UNLOCK(is); icl_pdu_free(response); return; } /* * We need to track this for security reasons - without it, malicious target * could respond to SCSI READ without sending Data-In PDUs, which would result * in read operation on the initiator side returning random kernel data. */ if (ntohl(bhsdi->bhsdi_buffer_offset) != io->io_received) { ISCSI_SESSION_WARN(is, "data out of order; expected offset %zd, got %zd", io->io_received, (size_t)ntohl(bhsdi->bhsdi_buffer_offset)); icl_pdu_free(response); iscsi_session_reconnect(is); ISCSI_SESSION_UNLOCK(is); return; } ccb = io->io_ccb; csio = &ccb->csio; if (io->io_received + data_segment_len > csio->dxfer_len) { ISCSI_SESSION_WARN(is, "oversize data segment (%zd bytes " "at offset %zd, buffer is %d)", data_segment_len, io->io_received, csio->dxfer_len); icl_pdu_free(response); iscsi_session_reconnect(is); ISCSI_SESSION_UNLOCK(is); return; } oreceived = io->io_received; io->io_received += data_segment_len; received = io->io_received; if ((bhsdi->bhsdi_flags & BHSDI_FLAGS_S) != 0) iscsi_outstanding_remove(is, io); ISCSI_SESSION_UNLOCK(is); icl_pdu_get_data(response, 0, csio->data_ptr + oreceived, data_segment_len); /* * XXX: Check DataSN. * XXX: Check F. */ if ((bhsdi->bhsdi_flags & BHSDI_FLAGS_S) == 0) { /* * Nothing more to do. 
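* Status for this command will arrive later, either in a final Data-In PDU
* with the S bit set or in a separate SCSI Response PDU.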
*/ icl_pdu_free(response); return; } //ISCSI_SESSION_DEBUG(is, "got S flag; status 0x%x", bhsdi->bhsdi_status); if (bhsdi->bhsdi_status == 0) { ccb->ccb_h.status = CAM_REQ_CMP; } else { if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_DEV_QFRZN; csio->scsi_status = bhsdi->bhsdi_status; } if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { KASSERT(received <= csio->dxfer_len, ("received > csio->dxfer_len")); if (received < csio->dxfer_len) { csio->resid = ntohl(bhsdi->bhsdi_residual_count); if (csio->resid != csio->dxfer_len - received) { ISCSI_SESSION_WARN(is, "underflow mismatch: " "target indicates %d, we calculated %zd", csio->resid, csio->dxfer_len - received); } csio->resid = csio->dxfer_len - received; } } xpt_done(ccb); icl_pdu_free(response); } static void iscsi_pdu_handle_logout_response(struct icl_pdu *response) { ISCSI_SESSION_DEBUG(PDU_SESSION(response), "logout response"); icl_pdu_free(response); } static void iscsi_pdu_handle_r2t(struct icl_pdu *response) { struct icl_pdu *request; struct iscsi_session *is; struct iscsi_bhs_r2t *bhsr2t; struct iscsi_bhs_data_out *bhsdo; struct iscsi_outstanding *io; struct ccb_scsiio *csio; size_t off, len, total_len; int error; is = PDU_SESSION(response); bhsr2t = (struct iscsi_bhs_r2t *)response->ip_bhs; io = iscsi_outstanding_find(is, bhsr2t->bhsr2t_initiator_task_tag); if (io == NULL || io->io_ccb == NULL) { ISCSI_SESSION_WARN(is, "bad itt 0x%x; reconnecting", bhsr2t->bhsr2t_initiator_task_tag); icl_pdu_free(response); iscsi_session_reconnect(is); return; } csio = &io->io_ccb->csio; if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_OUT) { ISCSI_SESSION_WARN(is, "received R2T for read command; reconnecting"); icl_pdu_free(response); iscsi_session_reconnect(is); return; } /* * XXX: Verify R2TSN. 
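* io_datasn is reset below and reused as the DataSN counter for the
* Data-Out PDUs sent in reply to this R2T.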
*/ io->io_datasn = 0; off = ntohl(bhsr2t->bhsr2t_buffer_offset); if (off > csio->dxfer_len) { ISCSI_SESSION_WARN(is, "target requested invalid offset " "%zd, buffer is %d; reconnecting", off, csio->dxfer_len); icl_pdu_free(response); iscsi_session_reconnect(is); return; } total_len = ntohl(bhsr2t->bhsr2t_desired_data_transfer_length); if (total_len == 0 || total_len > csio->dxfer_len) { ISCSI_SESSION_WARN(is, "target requested invalid length " "%zd, buffer is %d; reconnecting", total_len, csio->dxfer_len); icl_pdu_free(response); iscsi_session_reconnect(is); return; } //ISCSI_SESSION_DEBUG(is, "r2t; off %zd, len %zd", off, total_len); for (;;) { len = total_len; if (len > is->is_max_send_data_segment_length) len = is->is_max_send_data_segment_length; if (off + len > csio->dxfer_len) { ISCSI_SESSION_WARN(is, "target requested invalid " "length/offset %zd, buffer is %d; reconnecting", off + len, csio->dxfer_len); icl_pdu_free(response); iscsi_session_reconnect(is); return; } request = icl_pdu_new(response->ip_conn, M_NOWAIT); if (request == NULL) { icl_pdu_free(response); iscsi_session_reconnect(is); return; } bhsdo = (struct iscsi_bhs_data_out *)request->ip_bhs; bhsdo->bhsdo_opcode = ISCSI_BHS_OPCODE_SCSI_DATA_OUT; bhsdo->bhsdo_lun = bhsr2t->bhsr2t_lun; bhsdo->bhsdo_initiator_task_tag = bhsr2t->bhsr2t_initiator_task_tag; bhsdo->bhsdo_target_transfer_tag = bhsr2t->bhsr2t_target_transfer_tag; bhsdo->bhsdo_datasn = htonl(io->io_datasn++); bhsdo->bhsdo_buffer_offset = htonl(off); error = icl_pdu_append_data(request, csio->data_ptr + off, len, M_NOWAIT); if (error != 0) { ISCSI_SESSION_WARN(is, "failed to allocate memory; " "reconnecting"); icl_pdu_free(request); icl_pdu_free(response); iscsi_session_reconnect(is); return; } off += len; total_len -= len; if (total_len == 0) { bhsdo->bhsdo_flags |= BHSDO_FLAGS_F; //ISCSI_SESSION_DEBUG(is, "setting F, off %zd", off); } else { //ISCSI_SESSION_DEBUG(is, "not finished, off %zd", off); } iscsi_pdu_queue_locked(request); if (total_len == 0) break; } icl_pdu_free(response); } static void iscsi_pdu_handle_async_message(struct icl_pdu *response) { struct iscsi_bhs_asynchronous_message *bhsam; struct iscsi_session *is; is = PDU_SESSION(response); bhsam = (struct iscsi_bhs_asynchronous_message *)response->ip_bhs; switch (bhsam->bhsam_async_event) { case BHSAM_EVENT_TARGET_REQUESTS_LOGOUT: ISCSI_SESSION_WARN(is, "target requests logout; removing session"); iscsi_session_logout(is); iscsi_session_terminate(is); break; case BHSAM_EVENT_TARGET_TERMINATES_CONNECTION: ISCSI_SESSION_WARN(is, "target indicates it will drop the connection"); break; case BHSAM_EVENT_TARGET_TERMINATES_SESSION: ISCSI_SESSION_WARN(is, "target indicates it will drop the session"); break; default: /* * XXX: Technically, we're obligated to also handle * parameter renegotiation.
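* (That is, an AsyncEvent asking the initiator to issue a Text Request
* and renegotiate, per RFC 3720.)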
*/ ISCSI_SESSION_WARN(is, "ignoring AsyncEvent %d", bhsam->bhsam_async_event); break; } icl_pdu_free(response); } static void iscsi_pdu_handle_reject(struct icl_pdu *response) { struct iscsi_bhs_reject *bhsr; struct iscsi_session *is; is = PDU_SESSION(response); bhsr = (struct iscsi_bhs_reject *)response->ip_bhs; ISCSI_SESSION_WARN(is, "received Reject PDU, reason 0x%x; protocol error?", bhsr->bhsr_reason); icl_pdu_free(response); } static int iscsi_ioctl_daemon_wait(struct iscsi_softc *sc, struct iscsi_daemon_request *request) { struct iscsi_session *is; struct icl_drv_limits idl; int error; sx_slock(&sc->sc_lock); for (;;) { TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { ISCSI_SESSION_LOCK(is); if (is->is_conf.isc_enable == 0 && is->is_conf.isc_discovery == 0) { ISCSI_SESSION_UNLOCK(is); continue; } if (is->is_waiting_for_iscsid) break; ISCSI_SESSION_UNLOCK(is); } if (is == NULL) { /* * No session requires attention from iscsid(8); wait. */ error = cv_wait_sig(&sc->sc_cv, &sc->sc_lock); if (error != 0) { sx_sunlock(&sc->sc_lock); return (error); } continue; } is->is_waiting_for_iscsid = false; is->is_login_phase = true; is->is_reason[0] = '\0'; ISCSI_SESSION_UNLOCK(is); request->idr_session_id = is->is_id; memcpy(&request->idr_isid, &is->is_isid, sizeof(request->idr_isid)); request->idr_tsih = 0; /* New or reinstated session. */ memcpy(&request->idr_conf, &is->is_conf, sizeof(request->idr_conf)); error = icl_limits(is->is_conf.isc_offload, is->is_conf.isc_iser, &idl); if (error != 0) { ISCSI_SESSION_WARN(is, "icl_limits for offload \"%s\" " "failed with error %d", is->is_conf.isc_offload, error); sx_sunlock(&sc->sc_lock); return (error); } request->idr_limits.isl_max_recv_data_segment_length = idl.idl_max_recv_data_segment_length; request->idr_limits.isl_max_send_data_segment_length = idl.idl_max_send_data_segment_length; request->idr_limits.isl_max_burst_length = idl.idl_max_burst_length; request->idr_limits.isl_first_burst_length = idl.idl_first_burst_length; sx_sunlock(&sc->sc_lock); return (0); } } static int iscsi_ioctl_daemon_handoff(struct iscsi_softc *sc, struct iscsi_daemon_handoff *handoff) { struct iscsi_session *is; struct icl_conn *ic; int error; sx_slock(&sc->sc_lock); /* * Find the session to hand off socket to. */ TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { if (is->is_id == handoff->idh_session_id) break; } if (is == NULL) { sx_sunlock(&sc->sc_lock); return (ESRCH); } ISCSI_SESSION_LOCK(is); ic = is->is_conn; if (is->is_conf.isc_discovery || is->is_terminating) { ISCSI_SESSION_UNLOCK(is); sx_sunlock(&sc->sc_lock); return (EINVAL); } if (is->is_connected) { /* * This might have happened because another iscsid(8) * instance handed off the connection in the meantime. * Just return. 
*/ ISCSI_SESSION_WARN(is, "handoff on already connected " "session"); ISCSI_SESSION_UNLOCK(is); sx_sunlock(&sc->sc_lock); return (EBUSY); } strlcpy(is->is_target_alias, handoff->idh_target_alias, sizeof(is->is_target_alias)); is->is_tsih = handoff->idh_tsih; is->is_statsn = handoff->idh_statsn; is->is_protocol_level = handoff->idh_protocol_level; is->is_initial_r2t = handoff->idh_initial_r2t; is->is_immediate_data = handoff->idh_immediate_data; is->is_max_recv_data_segment_length = handoff->idh_max_recv_data_segment_length; is->is_max_send_data_segment_length = handoff->idh_max_send_data_segment_length; is->is_max_burst_length = handoff->idh_max_burst_length; is->is_first_burst_length = handoff->idh_first_burst_length; if (handoff->idh_header_digest == ISCSI_DIGEST_CRC32C) ic->ic_header_crc32c = true; else ic->ic_header_crc32c = false; if (handoff->idh_data_digest == ISCSI_DIGEST_CRC32C) ic->ic_data_crc32c = true; else ic->ic_data_crc32c = false; ic->ic_maxtags = maxtags; is->is_cmdsn = 0; is->is_expcmdsn = 0; is->is_maxcmdsn = 0; is->is_waiting_for_iscsid = false; is->is_login_phase = false; is->is_timeout = 0; is->is_connected = true; is->is_reason[0] = '\0'; ISCSI_SESSION_UNLOCK(is); /* * If we're going through the proxy, the idh_socket will be 0, * and the ICL module can simply ignore this call. It can also * use it to determine it's no longer in the Login phase. */ error = icl_conn_handoff(ic, handoff->idh_socket); if (error != 0) { sx_sunlock(&sc->sc_lock); iscsi_session_terminate(is); return (error); } sx_sunlock(&sc->sc_lock); if (is->is_sim != NULL) { /* * When reconnecting, there already is SIM allocated for the session. */ KASSERT(is->is_simq_frozen, ("reconnect without frozen simq")); ISCSI_SESSION_LOCK(is); ISCSI_SESSION_DEBUG(is, "releasing"); is->is_simq_frozen = false; xpt_release_simq(is->is_sim, 1); ISCSI_SESSION_UNLOCK(is); } else { ISCSI_SESSION_LOCK(is); is->is_devq = cam_simq_alloc(ic->ic_maxtags); if (is->is_devq == NULL) { ISCSI_SESSION_WARN(is, "failed to allocate simq"); iscsi_session_terminate(is); return (ENOMEM); } is->is_sim = cam_sim_alloc(iscsi_action, iscsi_poll, "iscsi", is, is->is_id /* unit */, &is->is_lock, 1, ic->ic_maxtags, is->is_devq); if (is->is_sim == NULL) { ISCSI_SESSION_UNLOCK(is); ISCSI_SESSION_WARN(is, "failed to allocate SIM"); cam_simq_free(is->is_devq); iscsi_session_terminate(is); return (ENOMEM); } error = xpt_bus_register(is->is_sim, NULL, 0); if (error != 0) { ISCSI_SESSION_UNLOCK(is); ISCSI_SESSION_WARN(is, "failed to register bus"); iscsi_session_terminate(is); return (ENOMEM); } error = xpt_create_path(&is->is_path, /*periph*/NULL, cam_sim_path(is->is_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (error != CAM_REQ_CMP) { ISCSI_SESSION_UNLOCK(is); ISCSI_SESSION_WARN(is, "failed to create path"); iscsi_session_terminate(is); return (ENOMEM); } ISCSI_SESSION_UNLOCK(is); } return (0); } static int iscsi_ioctl_daemon_fail(struct iscsi_softc *sc, struct iscsi_daemon_fail *fail) { struct iscsi_session *is; sx_slock(&sc->sc_lock); TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { if (is->is_id == fail->idf_session_id) break; } if (is == NULL) { sx_sunlock(&sc->sc_lock); return (ESRCH); } ISCSI_SESSION_LOCK(is); ISCSI_SESSION_DEBUG(is, "iscsid(8) failed: %s", fail->idf_reason); strlcpy(is->is_reason, fail->idf_reason, sizeof(is->is_reason)); //is->is_waiting_for_iscsid = false; //is->is_login_phase = true; //iscsi_session_reconnect(is); ISCSI_SESSION_UNLOCK(is); sx_sunlock(&sc->sc_lock); return (0); } #ifdef ICL_KERNEL_PROXY static int 
iscsi_ioctl_daemon_connect(struct iscsi_softc *sc, struct iscsi_daemon_connect *idc) { struct iscsi_session *is; struct sockaddr *from_sa, *to_sa; int error; sx_slock(&sc->sc_lock); TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { if (is->is_id == idc->idc_session_id) break; } if (is == NULL) { sx_sunlock(&sc->sc_lock); return (ESRCH); } sx_sunlock(&sc->sc_lock); if (idc->idc_from_addrlen > 0) { error = getsockaddr(&from_sa, (void *)idc->idc_from_addr, idc->idc_from_addrlen); if (error != 0) { ISCSI_SESSION_WARN(is, "getsockaddr failed with error %d", error); return (error); } } else { from_sa = NULL; } error = getsockaddr(&to_sa, (void *)idc->idc_to_addr, idc->idc_to_addrlen); if (error != 0) { ISCSI_SESSION_WARN(is, "getsockaddr failed with error %d", error); free(from_sa, M_SONAME); return (error); } ISCSI_SESSION_LOCK(is); is->is_statsn = 0; is->is_cmdsn = 0; is->is_expcmdsn = 0; is->is_maxcmdsn = 0; is->is_waiting_for_iscsid = false; is->is_login_phase = true; is->is_timeout = 0; ISCSI_SESSION_UNLOCK(is); error = icl_conn_connect(is->is_conn, idc->idc_domain, idc->idc_socktype, idc->idc_protocol, from_sa, to_sa); free(from_sa, M_SONAME); free(to_sa, M_SONAME); /* * Digests are always disabled during login phase. */ is->is_conn->ic_header_crc32c = false; is->is_conn->ic_data_crc32c = false; return (error); } static int iscsi_ioctl_daemon_send(struct iscsi_softc *sc, struct iscsi_daemon_send *ids) { struct iscsi_session *is; struct icl_pdu *ip; size_t datalen; void *data; int error; sx_slock(&sc->sc_lock); TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { if (is->is_id == ids->ids_session_id) break; } if (is == NULL) { sx_sunlock(&sc->sc_lock); return (ESRCH); } sx_sunlock(&sc->sc_lock); if (is->is_login_phase == false) return (EBUSY); if (is->is_terminating || is->is_reconnecting) return (EIO); datalen = ids->ids_data_segment_len; if (datalen > is->is_max_send_data_segment_length) return (EINVAL); if (datalen > 0) { data = malloc(datalen, M_ISCSI, M_WAITOK); error = copyin(ids->ids_data_segment, data, datalen); if (error != 0) { free(data, M_ISCSI); return (error); } } ip = icl_pdu_new(is->is_conn, M_WAITOK); memcpy(ip->ip_bhs, ids->ids_bhs, sizeof(*ip->ip_bhs)); if (datalen > 0) { error = icl_pdu_append_data(ip, data, datalen, M_WAITOK); KASSERT(error == 0, ("icl_pdu_append_data(..., M_WAITOK) failed")); free(data, M_ISCSI); } iscsi_pdu_queue(ip); return (0); } static int iscsi_ioctl_daemon_receive(struct iscsi_softc *sc, struct iscsi_daemon_receive *idr) { struct iscsi_session *is; struct icl_pdu *ip; void *data; int error; sx_slock(&sc->sc_lock); TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { if (is->is_id == idr->idr_session_id) break; } if (is == NULL) { sx_sunlock(&sc->sc_lock); return (ESRCH); } sx_sunlock(&sc->sc_lock); if (is->is_login_phase == false) return (EBUSY); ISCSI_SESSION_LOCK(is); while (is->is_login_pdu == NULL && is->is_terminating == false && is->is_reconnecting == false) { error = cv_wait_sig(&is->is_login_cv, &is->is_lock); if (error != 0) { ISCSI_SESSION_UNLOCK(is); return (error); } } if (is->is_terminating || is->is_reconnecting) { ISCSI_SESSION_UNLOCK(is); return (EIO); } ip = is->is_login_pdu; is->is_login_pdu = NULL; ISCSI_SESSION_UNLOCK(is); if (ip->ip_data_len > idr->idr_data_segment_len) { icl_pdu_free(ip); return (EMSGSIZE); } copyout(ip->ip_bhs, idr->idr_bhs, sizeof(*ip->ip_bhs)); if (ip->ip_data_len > 0) { data = malloc(ip->ip_data_len, M_ISCSI, M_WAITOK); icl_pdu_get_data(ip, 0, data, ip->ip_data_len); copyout(data, idr->idr_data_segment, 
ip->ip_data_len); free(data, M_ISCSI); } icl_pdu_free(ip); return (0); } #endif /* ICL_KERNEL_PROXY */ static void iscsi_sanitize_session_conf(struct iscsi_session_conf *isc) { /* * Just make sure all the fields are null-terminated. * * XXX: This is not particularly secure. We should * create our own conf and then copy in relevant * fields. */ isc->isc_initiator[ISCSI_NAME_LEN - 1] = '\0'; isc->isc_initiator_addr[ISCSI_ADDR_LEN - 1] = '\0'; isc->isc_initiator_alias[ISCSI_ALIAS_LEN - 1] = '\0'; isc->isc_target[ISCSI_NAME_LEN - 1] = '\0'; isc->isc_target_addr[ISCSI_ADDR_LEN - 1] = '\0'; isc->isc_user[ISCSI_NAME_LEN - 1] = '\0'; isc->isc_secret[ISCSI_SECRET_LEN - 1] = '\0'; isc->isc_mutual_user[ISCSI_NAME_LEN - 1] = '\0'; isc->isc_mutual_secret[ISCSI_SECRET_LEN - 1] = '\0'; } static bool iscsi_valid_session_conf(const struct iscsi_session_conf *isc) { if (isc->isc_initiator[0] == '\0') { ISCSI_DEBUG("empty isc_initiator"); return (false); } if (isc->isc_target_addr[0] == '\0') { ISCSI_DEBUG("empty isc_target_addr"); return (false); } if (isc->isc_discovery != 0 && isc->isc_target[0] != 0) { ISCSI_DEBUG("non-empty isc_target for discovery session"); return (false); } if (isc->isc_discovery == 0 && isc->isc_target[0] == 0) { ISCSI_DEBUG("empty isc_target for non-discovery session"); return (false); } return (true); } static int iscsi_ioctl_session_add(struct iscsi_softc *sc, struct iscsi_session_add *isa) { struct iscsi_session *is; const struct iscsi_session *is2; int error; iscsi_sanitize_session_conf(&isa->isa_conf); if (iscsi_valid_session_conf(&isa->isa_conf) == false) return (EINVAL); is = malloc(sizeof(*is), M_ISCSI, M_ZERO | M_WAITOK); memcpy(&is->is_conf, &isa->isa_conf, sizeof(is->is_conf)); /* * Set some default values, from RFC 3720, section 12. * * These values are updated by the handoff IOCTL, but are * needed prior to the handoff to support sending the ISER * login PDU. */ is->is_max_recv_data_segment_length = 8192; is->is_max_send_data_segment_length = 8192; is->is_max_burst_length = 262144; is->is_first_burst_length = 65536; sx_xlock(&sc->sc_lock); /* * Prevent duplicates. */ TAILQ_FOREACH(is2, &sc->sc_sessions, is_next) { if (!!is->is_conf.isc_discovery != !!is2->is_conf.isc_discovery) continue; if (strcmp(is->is_conf.isc_target_addr, is2->is_conf.isc_target_addr) != 0) continue; if (is->is_conf.isc_discovery == 0 && strcmp(is->is_conf.isc_target, is2->is_conf.isc_target) != 0) continue; sx_xunlock(&sc->sc_lock); free(is, M_ISCSI); return (EBUSY); } is->is_conn = icl_new_conn(is->is_conf.isc_offload, is->is_conf.isc_iser, "iscsi", &is->is_lock); if (is->is_conn == NULL) { sx_xunlock(&sc->sc_lock); free(is, M_ISCSI); return (EINVAL); } is->is_conn->ic_receive = iscsi_receive_callback; is->is_conn->ic_error = iscsi_error_callback; is->is_conn->ic_prv0 = is; TAILQ_INIT(&is->is_outstanding); STAILQ_INIT(&is->is_postponed); mtx_init(&is->is_lock, "iscsi_lock", NULL, MTX_DEF); cv_init(&is->is_maintenance_cv, "iscsi_mt"); #ifdef ICL_KERNEL_PROXY cv_init(&is->is_login_cv, "iscsi_login"); #endif is->is_softc = sc; sc->sc_last_session_id++; is->is_id = sc->sc_last_session_id; is->is_isid[0] = 0x80; /* RFC 3720, 10.12.5: 10b, "Random" ISID. 
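* The remaining five bytes are filled with random data below, which yields
* the 48-bit "Random" format ISID (type bits 10b in the first byte).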
*/ arc4rand(&is->is_isid[1], 5, 0); is->is_tsih = 0; callout_init(&is->is_callout, 1); error = kthread_add(iscsi_maintenance_thread, is, NULL, NULL, 0, 0, "iscsimt"); if (error != 0) { ISCSI_SESSION_WARN(is, "kthread_add(9) failed with error %d", error); sx_xunlock(&sc->sc_lock); return (error); } callout_reset(&is->is_callout, 1 * hz, iscsi_callout, is); TAILQ_INSERT_TAIL(&sc->sc_sessions, is, is_next); ISCSI_SESSION_LOCK(is); /* * Don't notify iscsid(8) if the session is disabled and it's not * a discovery session, */ if (is->is_conf.isc_enable == 0 && is->is_conf.isc_discovery == 0) { ISCSI_SESSION_UNLOCK(is); sx_xunlock(&sc->sc_lock); return (0); } is->is_waiting_for_iscsid = true; strlcpy(is->is_reason, "Waiting for iscsid(8)", sizeof(is->is_reason)); ISCSI_SESSION_UNLOCK(is); cv_signal(&sc->sc_cv); sx_xunlock(&sc->sc_lock); return (0); } static bool iscsi_session_conf_matches(unsigned int id1, const struct iscsi_session_conf *c1, unsigned int id2, const struct iscsi_session_conf *c2) { if (id2 != 0 && id2 != id1) return (false); if (c2->isc_target[0] != '\0' && strcmp(c1->isc_target, c2->isc_target) != 0) return (false); if (c2->isc_target_addr[0] != '\0' && strcmp(c1->isc_target_addr, c2->isc_target_addr) != 0) return (false); return (true); } static int iscsi_ioctl_session_remove(struct iscsi_softc *sc, struct iscsi_session_remove *isr) { struct iscsi_session *is, *tmp; bool found = false; iscsi_sanitize_session_conf(&isr->isr_conf); sx_xlock(&sc->sc_lock); TAILQ_FOREACH_SAFE(is, &sc->sc_sessions, is_next, tmp) { ISCSI_SESSION_LOCK(is); if (iscsi_session_conf_matches(is->is_id, &is->is_conf, isr->isr_session_id, &isr->isr_conf)) { found = true; iscsi_session_logout(is); iscsi_session_terminate(is); } ISCSI_SESSION_UNLOCK(is); } sx_xunlock(&sc->sc_lock); if (!found) return (ESRCH); return (0); } static int iscsi_ioctl_session_list(struct iscsi_softc *sc, struct iscsi_session_list *isl) { int error; unsigned int i = 0; struct iscsi_session *is; struct iscsi_session_state iss; sx_slock(&sc->sc_lock); TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { if (i >= isl->isl_nentries) { sx_sunlock(&sc->sc_lock); return (EMSGSIZE); } memset(&iss, 0, sizeof(iss)); memcpy(&iss.iss_conf, &is->is_conf, sizeof(iss.iss_conf)); iss.iss_id = is->is_id; strlcpy(iss.iss_target_alias, is->is_target_alias, sizeof(iss.iss_target_alias)); strlcpy(iss.iss_reason, is->is_reason, sizeof(iss.iss_reason)); strlcpy(iss.iss_offload, is->is_conn->ic_offload, sizeof(iss.iss_offload)); if (is->is_conn->ic_header_crc32c) iss.iss_header_digest = ISCSI_DIGEST_CRC32C; else iss.iss_header_digest = ISCSI_DIGEST_NONE; if (is->is_conn->ic_data_crc32c) iss.iss_data_digest = ISCSI_DIGEST_CRC32C; else iss.iss_data_digest = ISCSI_DIGEST_NONE; iss.iss_max_send_data_segment_length = is->is_max_send_data_segment_length; iss.iss_max_recv_data_segment_length = is->is_max_recv_data_segment_length; iss.iss_max_burst_length = is->is_max_burst_length; iss.iss_first_burst_length = is->is_first_burst_length; iss.iss_immediate_data = is->is_immediate_data; iss.iss_connected = is->is_connected; error = copyout(&iss, isl->isl_pstates + i, sizeof(iss)); if (error != 0) { sx_sunlock(&sc->sc_lock); return (error); } i++; } sx_sunlock(&sc->sc_lock); isl->isl_nentries = i; return (0); } static int iscsi_ioctl_session_modify(struct iscsi_softc *sc, struct iscsi_session_modify *ism) { struct iscsi_session *is; const struct iscsi_session *is2; iscsi_sanitize_session_conf(&ism->ism_conf); if (iscsi_valid_session_conf(&ism->ism_conf) == false) return 
(EINVAL); sx_xlock(&sc->sc_lock); TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { ISCSI_SESSION_LOCK(is); if (is->is_id == ism->ism_session_id) { /* Note that the session remains locked. */ break; } ISCSI_SESSION_UNLOCK(is); } if (is == NULL) { sx_xunlock(&sc->sc_lock); return (ESRCH); } /* * Prevent duplicates. */ TAILQ_FOREACH(is2, &sc->sc_sessions, is_next) { if (is == is2) continue; if (!!ism->ism_conf.isc_discovery != !!is2->is_conf.isc_discovery) continue; if (strcmp(ism->ism_conf.isc_target_addr, is2->is_conf.isc_target_addr) != 0) continue; if (ism->ism_conf.isc_discovery == 0 && strcmp(ism->ism_conf.isc_target, is2->is_conf.isc_target) != 0) continue; ISCSI_SESSION_UNLOCK(is); sx_xunlock(&sc->sc_lock); return (EBUSY); } sx_xunlock(&sc->sc_lock); memcpy(&is->is_conf, &ism->ism_conf, sizeof(is->is_conf)); ISCSI_SESSION_UNLOCK(is); iscsi_session_reconnect(is); return (0); } static int iscsi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int mode, struct thread *td) { struct iscsi_softc *sc; sc = dev->si_drv1; switch (cmd) { case ISCSIDWAIT: return (iscsi_ioctl_daemon_wait(sc, (struct iscsi_daemon_request *)arg)); case ISCSIDHANDOFF: return (iscsi_ioctl_daemon_handoff(sc, (struct iscsi_daemon_handoff *)arg)); case ISCSIDFAIL: return (iscsi_ioctl_daemon_fail(sc, (struct iscsi_daemon_fail *)arg)); #ifdef ICL_KERNEL_PROXY case ISCSIDCONNECT: return (iscsi_ioctl_daemon_connect(sc, (struct iscsi_daemon_connect *)arg)); case ISCSIDSEND: return (iscsi_ioctl_daemon_send(sc, (struct iscsi_daemon_send *)arg)); case ISCSIDRECEIVE: return (iscsi_ioctl_daemon_receive(sc, (struct iscsi_daemon_receive *)arg)); #endif /* ICL_KERNEL_PROXY */ case ISCSISADD: return (iscsi_ioctl_session_add(sc, (struct iscsi_session_add *)arg)); case ISCSISREMOVE: return (iscsi_ioctl_session_remove(sc, (struct iscsi_session_remove *)arg)); case ISCSISLIST: return (iscsi_ioctl_session_list(sc, (struct iscsi_session_list *)arg)); case ISCSISMODIFY: return (iscsi_ioctl_session_modify(sc, (struct iscsi_session_modify *)arg)); default: return (EINVAL); } } static struct iscsi_outstanding * iscsi_outstanding_find(struct iscsi_session *is, uint32_t initiator_task_tag) { struct iscsi_outstanding *io; ISCSI_SESSION_LOCK_ASSERT(is); TAILQ_FOREACH(io, &is->is_outstanding, io_next) { if (io->io_initiator_task_tag == initiator_task_tag) return (io); } return (NULL); } static struct iscsi_outstanding * iscsi_outstanding_find_ccb(struct iscsi_session *is, union ccb *ccb) { struct iscsi_outstanding *io; ISCSI_SESSION_LOCK_ASSERT(is); TAILQ_FOREACH(io, &is->is_outstanding, io_next) { if (io->io_ccb == ccb) return (io); } return (NULL); } static struct iscsi_outstanding * iscsi_outstanding_add(struct iscsi_session *is, struct icl_pdu *request, union ccb *ccb, uint32_t *initiator_task_tagp) { struct iscsi_outstanding *io; int error; ISCSI_SESSION_LOCK_ASSERT(is); io = uma_zalloc(iscsi_outstanding_zone, M_NOWAIT | M_ZERO); if (io == NULL) { ISCSI_SESSION_WARN(is, "failed to allocate %zd bytes", sizeof(*io)); return (NULL); } error = icl_conn_task_setup(is->is_conn, request, &ccb->csio, initiator_task_tagp, &io->io_icl_prv); if (error != 0) { ISCSI_SESSION_WARN(is, "icl_conn_task_setup() failed with error %d", error); uma_zfree(iscsi_outstanding_zone, io); return (NULL); } KASSERT(iscsi_outstanding_find(is, *initiator_task_tagp) == NULL, ("initiator_task_tag 0x%x already added", *initiator_task_tagp)); io->io_initiator_task_tag = *initiator_task_tagp; io->io_ccb = ccb; TAILQ_INSERT_TAIL(&is->is_outstanding, io, io_next); return (io); } 
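/* * Drop an outstanding request: release the ICL task state, unlink the tracker from the session list and return it to the UMA zone. The session lock must be held. */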
static void iscsi_outstanding_remove(struct iscsi_session *is, struct iscsi_outstanding *io) { ISCSI_SESSION_LOCK_ASSERT(is); icl_conn_task_done(is->is_conn, io->io_icl_prv); TAILQ_REMOVE(&is->is_outstanding, io, io_next); uma_zfree(iscsi_outstanding_zone, io); } static void iscsi_action_abort(struct iscsi_session *is, union ccb *ccb) { struct icl_pdu *request; struct iscsi_bhs_task_management_request *bhstmr; struct ccb_abort *cab = &ccb->cab; struct iscsi_outstanding *io, *aio; uint32_t initiator_task_tag; ISCSI_SESSION_LOCK_ASSERT(is); #if 0 KASSERT(is->is_login_phase == false, ("%s called during Login Phase", __func__)); #else if (is->is_login_phase) { ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); return; } #endif aio = iscsi_outstanding_find_ccb(is, cab->abort_ccb); if (aio == NULL) { ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } request = icl_pdu_new(is->is_conn, M_NOWAIT); if (request == NULL) { ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; } initiator_task_tag = is->is_initiator_task_tag++; io = iscsi_outstanding_add(is, request, NULL, &initiator_task_tag); if (io == NULL) { icl_pdu_free(request); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; } io->io_datasn = aio->io_initiator_task_tag; bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs; bhstmr->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_REQUEST; bhstmr->bhstmr_function = 0x80 | BHSTMR_FUNCTION_ABORT_TASK; bhstmr->bhstmr_lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); bhstmr->bhstmr_initiator_task_tag = initiator_task_tag; bhstmr->bhstmr_referenced_task_tag = aio->io_initiator_task_tag; iscsi_pdu_queue_locked(request); } static void iscsi_action_scsiio(struct iscsi_session *is, union ccb *ccb) { struct icl_pdu *request; struct iscsi_bhs_scsi_command *bhssc; struct ccb_scsiio *csio; struct iscsi_outstanding *io; size_t len; uint32_t initiator_task_tag; int error; ISCSI_SESSION_LOCK_ASSERT(is); #if 0 KASSERT(is->is_login_phase == false, ("%s called during Login Phase", __func__)); #else if (is->is_login_phase) { ISCSI_SESSION_DEBUG(is, "called during login phase"); if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_DEV_QFRZN; xpt_done(ccb); return; } #endif request = icl_pdu_new(is->is_conn, M_NOWAIT); if (request == NULL) { if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_RESRC_UNAVAIL | CAM_DEV_QFRZN; xpt_done(ccb); return; } initiator_task_tag = is->is_initiator_task_tag++; io = iscsi_outstanding_add(is, request, ccb, &initiator_task_tag); if (io == NULL) { icl_pdu_free(request); if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_RESRC_UNAVAIL | CAM_DEV_QFRZN; xpt_done(ccb); return; } csio = &ccb->csio; bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs; bhssc->bhssc_opcode = ISCSI_BHS_OPCODE_SCSI_COMMAND; bhssc->bhssc_flags |= BHSSC_FLAGS_F; switch (csio->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: bhssc->bhssc_flags |= BHSSC_FLAGS_R; break; case CAM_DIR_OUT: bhssc->bhssc_flags |= BHSSC_FLAGS_W; break; } if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { switch (csio->tag_action) { case MSG_HEAD_OF_Q_TAG: bhssc->bhssc_flags |= BHSSC_FLAGS_ATTR_HOQ; break; case MSG_ORDERED_Q_TAG: bhssc->bhssc_flags |= 
			    BHSSC_FLAGS_ATTR_ORDERED;
			break;
		case MSG_ACA_TASK:
			bhssc->bhssc_flags |= BHSSC_FLAGS_ATTR_ACA;
			break;
		case MSG_SIMPLE_Q_TAG:
		default:
			bhssc->bhssc_flags |= BHSSC_FLAGS_ATTR_SIMPLE;
			break;
		}
	} else
		bhssc->bhssc_flags |= BHSSC_FLAGS_ATTR_UNTAGGED;
+	if (is->is_protocol_level >= 2) {
+		bhssc->bhssc_pri = (csio->priority << BHSSC_PRI_SHIFT) &
+		    BHSSC_PRI_MASK;
+	}
+
	bhssc->bhssc_lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
	bhssc->bhssc_initiator_task_tag = initiator_task_tag;
	bhssc->bhssc_expected_data_transfer_length = htonl(csio->dxfer_len);
	KASSERT(csio->cdb_len <= sizeof(bhssc->bhssc_cdb),
	    ("unsupported CDB size %zd", (size_t)csio->cdb_len));
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		memcpy(&bhssc->bhssc_cdb, csio->cdb_io.cdb_ptr, csio->cdb_len);
	else
		memcpy(&bhssc->bhssc_cdb, csio->cdb_io.cdb_bytes, csio->cdb_len);

	if (is->is_immediate_data &&
	    (csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		len = csio->dxfer_len;
		//ISCSI_SESSION_DEBUG(is, "adding %zd of immediate data", len);
		if (len > is->is_first_burst_length) {
			ISCSI_SESSION_DEBUG(is, "len %zd -> %d", len,
			    is->is_first_burst_length);
			len = is->is_first_burst_length;
		}
		if (len > is->is_max_send_data_segment_length) {
			ISCSI_SESSION_DEBUG(is, "len %zd -> %d", len,
			    is->is_max_send_data_segment_length);
			len = is->is_max_send_data_segment_length;
		}
		error = icl_pdu_append_data(request, csio->data_ptr, len, M_NOWAIT);
		if (error != 0) {
			iscsi_outstanding_remove(is, io);
			icl_pdu_free(request);
			if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
				xpt_freeze_devq(ccb->ccb_h.path, 1);
				ISCSI_SESSION_DEBUG(is, "freezing devq");
			}
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL | CAM_DEV_QFRZN;
			xpt_done(ccb);
			return;
		}
	}
	iscsi_pdu_queue_locked(request);
}

static void
iscsi_action(struct cam_sim *sim, union ccb *ccb)
{
	struct iscsi_session *is;

	is = cam_sim_softc(sim);
	ISCSI_SESSION_LOCK_ASSERT(is);

	if (is->is_terminating ||
	    (is->is_connected == false && fail_on_disconnection)) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	/*
	 * Make sure CAM doesn't sneak in a CCB just after freezing the queue.
	 */
	if (is->is_simq_frozen == true) {
		ccb->ccb_h.status &= ~(CAM_SIM_QUEUED | CAM_STATUS_MASK);
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		/* Don't freeze the devq - the SIM queue is already frozen. */
		xpt_done(ccb);
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_EXTLUNS;
		/*
		 * XXX: It shouldn't ever be NULL; this could be turned
		 * into a KASSERT eventually.
		 */
		if (is->is_conn == NULL)
			ISCSI_WARN("NULL conn");
		else if (is->is_conn->ic_unmapped)
			cpi->hba_misc |= PIM_UNMAPPED;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		/*
		 * Note that the variable below is only relevant for targets
		 * that don't claim compliance with anything above SPC2, which
		 * means they don't support REPORT_LUNS.
*/ cpi->max_lun = 255; cpi->initiator_id = ~0; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "iSCSI", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 150000; /* XXX */ cpi->transport = XPORT_ISCSI; cpi->transport_version = 0; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC3; cpi->maxio = MAXPHYS; cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; struct ccb_trans_settings_scsi *scsi; cts = &ccb->cts; scsi = &cts->proto_specific.scsi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC3; cts->transport = XPORT_ISCSI; cts->transport_version = 0; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; cts->ccb_h.status = CAM_REQ_CMP; break; } case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, /*extended*/1); ccb->ccb_h.status = CAM_REQ_CMP; break; #if 0 /* * XXX: What's the point? */ case XPT_RESET_BUS: case XPT_TERM_IO: ISCSI_SESSION_DEBUG(is, "faking success for reset, abort, or term_io"); ccb->ccb_h.status = CAM_REQ_CMP; break; #endif case XPT_ABORT: iscsi_action_abort(is, ccb); return; case XPT_SCSI_IO: iscsi_action_scsiio(is, ccb); return; default: #if 0 ISCSI_SESSION_DEBUG(is, "got unsupported code 0x%x", ccb->ccb_h.func_code); #endif ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; } xpt_done(ccb); } static void iscsi_poll(struct cam_sim *sim) { KASSERT(0, ("%s: you're not supposed to be here", __func__)); } static void iscsi_terminate_sessions(struct iscsi_softc *sc) { struct iscsi_session *is; sx_slock(&sc->sc_lock); TAILQ_FOREACH(is, &sc->sc_sessions, is_next) iscsi_session_terminate(is); while(!TAILQ_EMPTY(&sc->sc_sessions)) { ISCSI_DEBUG("waiting for sessions to terminate"); cv_wait(&sc->sc_cv, &sc->sc_lock); } ISCSI_DEBUG("all sessions terminated"); sx_sunlock(&sc->sc_lock); } static void iscsi_shutdown_pre(struct iscsi_softc *sc) { struct iscsi_session *is; if (!fail_on_shutdown) return; /* * If we have any sessions waiting for reconnection, request * maintenance thread to fail them immediately instead of waiting * for reconnect timeout. * * This prevents LUNs with mounted filesystems that are supported * by disconnected iSCSI sessions from hanging, however it will * fail all queued BIOs. 
*/ ISCSI_DEBUG("forcing failing all disconnected sessions due to shutdown"); fail_on_disconnection = 1; sx_slock(&sc->sc_lock); TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { ISCSI_SESSION_LOCK(is); if (!is->is_connected) { ISCSI_SESSION_DEBUG(is, "force failing disconnected session early"); iscsi_session_reconnect(is); } ISCSI_SESSION_UNLOCK(is); } sx_sunlock(&sc->sc_lock); } static void iscsi_shutdown_post(struct iscsi_softc *sc) { if (!KERNEL_PANICKED()) { ISCSI_DEBUG("removing all sessions due to shutdown"); iscsi_terminate_sessions(sc); } } static int iscsi_load(void) { int error; sc = malloc(sizeof(*sc), M_ISCSI, M_ZERO | M_WAITOK); sx_init(&sc->sc_lock, "iscsi"); TAILQ_INIT(&sc->sc_sessions); cv_init(&sc->sc_cv, "iscsi_cv"); iscsi_outstanding_zone = uma_zcreate("iscsi_outstanding", sizeof(struct iscsi_outstanding), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); error = make_dev_p(MAKEDEV_CHECKNAME, &sc->sc_cdev, &iscsi_cdevsw, NULL, UID_ROOT, GID_WHEEL, 0600, "iscsi"); if (error != 0) { ISCSI_WARN("failed to create device node, error %d", error); return (error); } sc->sc_cdev->si_drv1 = sc; sc->sc_shutdown_pre_eh = EVENTHANDLER_REGISTER(shutdown_pre_sync, iscsi_shutdown_pre, sc, SHUTDOWN_PRI_FIRST); /* * shutdown_post_sync needs to run after filesystem shutdown and before * CAM shutdown - otherwise when rebooting with an iSCSI session that is * disconnected but has outstanding requests, dashutdown() will hang on * cam_periph_runccb(). */ sc->sc_shutdown_post_eh = EVENTHANDLER_REGISTER(shutdown_post_sync, iscsi_shutdown_post, sc, SHUTDOWN_PRI_DEFAULT - 1); return (0); } static int iscsi_unload(void) { if (sc->sc_cdev != NULL) { ISCSI_DEBUG("removing device node"); destroy_dev(sc->sc_cdev); ISCSI_DEBUG("device node removed"); } if (sc->sc_shutdown_pre_eh != NULL) EVENTHANDLER_DEREGISTER(shutdown_pre_sync, sc->sc_shutdown_pre_eh); if (sc->sc_shutdown_post_eh != NULL) EVENTHANDLER_DEREGISTER(shutdown_post_sync, sc->sc_shutdown_post_eh); iscsi_terminate_sessions(sc); uma_zdestroy(iscsi_outstanding_zone); sx_destroy(&sc->sc_lock); cv_destroy(&sc->sc_cv); free(sc, M_ISCSI); return (0); } static int iscsi_quiesce(void) { sx_slock(&sc->sc_lock); if (!TAILQ_EMPTY(&sc->sc_sessions)) { sx_sunlock(&sc->sc_lock); return (EBUSY); } sx_sunlock(&sc->sc_lock); return (0); } static int iscsi_modevent(module_t mod, int what, void *arg) { int error; switch (what) { case MOD_LOAD: error = iscsi_load(); break; case MOD_UNLOAD: error = iscsi_unload(); break; case MOD_QUIESCE: error = iscsi_quiesce(); break; default: error = EINVAL; break; } return (error); } moduledata_t iscsi_data = { "iscsi", iscsi_modevent, 0 }; DECLARE_MODULE(iscsi, iscsi_data, SI_SUB_DRIVERS, SI_ORDER_MIDDLE); MODULE_DEPEND(iscsi, cam, 1, 1, 1); MODULE_DEPEND(iscsi, icl, 1, 1, 1); Index: head/sys/dev/isp/isp.c =================================================================== --- head/sys/dev/isp/isp.c (revision 367043) +++ head/sys/dev/isp/isp.c (revision 367044) @@ -1,8199 +1,8201 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2018 Alexander Motin * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * Machine and OS Independent (well, as best as possible) * code for the Qlogic ISP SCSI and FC-SCSI adapters. */ /* * Inspiration and ideas about this driver are from Erik Moe's Linux driver * (qlogicisp.c) and Dave Miller's SBus version of same (qlogicisp.c). Some * ideas dredged from the Solaris driver. */ /* * Include header file appropriate for platform we're building on. */ #ifdef __NetBSD__ #include __KERNEL_RCSID(0, "$NetBSD$"); #include #endif #ifdef __FreeBSD__ #include __FBSDID("$FreeBSD$"); #include #endif #ifdef __OpenBSD__ #include #endif #ifdef __linux__ #include "isp_linux.h" #endif #ifdef __svr4__ #include "isp_solaris.h" #endif /* * General defines */ #define MBOX_DELAY_COUNT 1000000 / 100 /* * Local static data */ static const char notresp[] = "Unknown IOCB in RESPONSE Queue (type 0x%x) @ idx %d (next %d)"; static const char bun[] = "bad underrun (count %d, resid %d, status %s)"; static const char lipd[] = "Chan %d LIP destroyed %d active commands"; static const char sacq[] = "unable to acquire scratch area"; static const uint8_t alpa_map[] = { 0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6, 0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca, 0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5, 0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9, 0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97, 0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79, 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56, 0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a, 0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17, 0x10, 0x0f, 0x08, 0x04, 0x02, 0x01, 0x00 }; /* * Local function prototypes. 
*/ static void isp_parse_async(ispsoftc_t *, uint16_t); static void isp_parse_async_fc(ispsoftc_t *, uint16_t); static int isp_handle_other_response(ispsoftc_t *, int, isphdr_t *, uint32_t *); static void isp_parse_status(ispsoftc_t *, ispstatusreq_t *, XS_T *, uint32_t *); static void isp_parse_status_24xx(ispsoftc_t *, isp24xx_statusreq_t *, XS_T *, uint32_t *); static void isp_fastpost_complete(ispsoftc_t *, uint32_t); static void isp_scsi_init(ispsoftc_t *); static void isp_scsi_channel_init(ispsoftc_t *, int); static void isp_fibre_init(ispsoftc_t *); static void isp_fibre_init_2400(ispsoftc_t *); static void isp_clear_portdb(ispsoftc_t *, int); static void isp_mark_portdb(ispsoftc_t *, int); static int isp_plogx(ispsoftc_t *, int, uint16_t, uint32_t, int); static int isp_port_login(ispsoftc_t *, uint16_t, uint32_t); static int isp_port_logout(ispsoftc_t *, uint16_t, uint32_t); static int isp_getpdb(ispsoftc_t *, int, uint16_t, isp_pdb_t *); static int isp_gethandles(ispsoftc_t *, int, uint16_t *, int *, int); static void isp_dump_chip_portdb(ispsoftc_t *, int); static uint64_t isp_get_wwn(ispsoftc_t *, int, int, int); static int isp_fclink_test(ispsoftc_t *, int, int); static int isp_pdb_sync(ispsoftc_t *, int); static int isp_scan_loop(ispsoftc_t *, int); static int isp_gid_pt(ispsoftc_t *, int); static int isp_scan_fabric(ispsoftc_t *, int); static int isp_login_device(ispsoftc_t *, int, uint32_t, isp_pdb_t *, uint16_t *); static int isp_send_change_request(ispsoftc_t *, int); static int isp_register_fc4_type(ispsoftc_t *, int); static int isp_register_fc4_features_24xx(ispsoftc_t *, int); static int isp_register_port_name_24xx(ispsoftc_t *, int); static int isp_register_node_name_24xx(ispsoftc_t *, int); static uint16_t isp_next_handle(ispsoftc_t *, uint16_t *); static int isp_fw_state(ispsoftc_t *, int); static void isp_mboxcmd(ispsoftc_t *, mbreg_t *); static void isp_spi_update(ispsoftc_t *, int); static void isp_setdfltsdparm(ispsoftc_t *); static void isp_setdfltfcparm(ispsoftc_t *, int); static int isp_read_nvram(ispsoftc_t *, int); static int isp_read_nvram_2400(ispsoftc_t *, uint8_t *); static void isp_rdnvram_word(ispsoftc_t *, int, uint16_t *); static void isp_rd_2400_nvram(ispsoftc_t *, uint32_t, uint32_t *); static void isp_parse_nvram_1020(ispsoftc_t *, uint8_t *); static void isp_parse_nvram_1080(ispsoftc_t *, int, uint8_t *); static void isp_parse_nvram_12160(ispsoftc_t *, int, uint8_t *); static void isp_parse_nvram_2100(ispsoftc_t *, uint8_t *); static void isp_parse_nvram_2400(ispsoftc_t *, uint8_t *); static void isp_change_fw_state(ispsoftc_t *isp, int chan, int state) { fcparam *fcp = FCPARAM(isp, chan); if (fcp->isp_fwstate == state) return; isp_prt(isp, ISP_LOGCONFIG|ISP_LOG_SANCFG, "Chan %d Firmware state <%s->%s>", chan, isp_fc_fw_statename(fcp->isp_fwstate), isp_fc_fw_statename(state)); fcp->isp_fwstate = state; } /* * Reset Hardware. * * Hit the chip over the head, download new f/w if available and set it running. * * Locking done elsewhere. */ void isp_reset(ispsoftc_t *isp, int do_load_defaults) { mbreg_t mbs; char *buf; uint64_t fwt; uint32_t code_org, val; int loops, i, dodnld = 1; const char *btype = "????"; static const char dcrc[] = "Downloaded RISC Code Checksum Failure"; /* * Basic types (SCSI, FibreChannel and PCI or SBus) * have been set in the MD code. We figure out more * here. Possibly more refined types based upon PCI * identification. Chip revision has been gathered. 
* * After we've fired this chip up, zero out the conf1 register * for SCSI adapters and do other settings for the 2100. */ isp->isp_state = ISP_NILSTATE; ISP_DISABLE_INTS(isp); /* * Put the board into PAUSE mode (so we can read the SXP registers * or write FPM/FBM registers). */ if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_HOST_INT); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_PAUSE); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); } if (IS_FC(isp)) { switch (isp->isp_type) { case ISP_HA_FC_2100: btype = "2100"; break; case ISP_HA_FC_2200: btype = "2200"; break; case ISP_HA_FC_2300: btype = "2300"; break; case ISP_HA_FC_2312: btype = "2312"; break; case ISP_HA_FC_2322: btype = "2322"; break; case ISP_HA_FC_2400: btype = "2422"; break; case ISP_HA_FC_2500: btype = "2532"; break; case ISP_HA_FC_2600: btype = "2600"; break; case ISP_HA_FC_2700: btype = "2700"; break; default: break; } if (!IS_24XX(isp)) { /* * While we're paused, reset the FPM module and FBM * fifos. */ ISP_WRITE(isp, BIU2100_CSR, BIU2100_FPM0_REGS); ISP_WRITE(isp, FPM_DIAG_CONFIG, FPM_SOFT_RESET); ISP_WRITE(isp, BIU2100_CSR, BIU2100_FB_REGS); ISP_WRITE(isp, FBM_CMD, FBMCMD_FIFO_RESET_ALL); ISP_WRITE(isp, BIU2100_CSR, BIU2100_RISC_REGS); } } else if (IS_1240(isp)) { sdparam *sdp; btype = "1240"; isp->isp_clock = 60; sdp = SDPARAM(isp, 0); sdp->isp_ultramode = 1; sdp = SDPARAM(isp, 1); sdp->isp_ultramode = 1; /* * XXX: Should probably do some bus sensing. */ } else if (IS_ULTRA3(isp)) { sdparam *sdp = isp->isp_param; isp->isp_clock = 100; if (IS_10160(isp)) btype = "10160"; else if (IS_12160(isp)) btype = "12160"; else btype = ""; sdp->isp_lvdmode = 1; if (IS_DUALBUS(isp)) { sdp++; sdp->isp_lvdmode = 1; } } else if (IS_ULTRA2(isp)) { static const char m[] = "bus %d is in %s Mode"; uint16_t l; sdparam *sdp = SDPARAM(isp, 0); isp->isp_clock = 100; if (IS_1280(isp)) btype = "1280"; else if (IS_1080(isp)) btype = "1080"; else btype = ""; l = ISP_READ(isp, SXP_PINS_DIFF) & ISP1080_MODE_MASK; switch (l) { case ISP1080_LVD_MODE: sdp->isp_lvdmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 0, "LVD"); break; case ISP1080_HVD_MODE: sdp->isp_diffmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 0, "Differential"); break; case ISP1080_SE_MODE: sdp->isp_ultramode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 0, "Single-Ended"); break; default: isp_prt(isp, ISP_LOGERR, "unknown mode on bus %d (0x%x)", 0, l); break; } if (IS_DUALBUS(isp)) { sdp = SDPARAM(isp, 1); l = ISP_READ(isp, SXP_PINS_DIFF|SXP_BANK1_SELECT); l &= ISP1080_MODE_MASK; switch (l) { case ISP1080_LVD_MODE: sdp->isp_lvdmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 1, "LVD"); break; case ISP1080_HVD_MODE: sdp->isp_diffmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 1, "Differential"); break; case ISP1080_SE_MODE: sdp->isp_ultramode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 1, "Single-Ended"); break; default: isp_prt(isp, ISP_LOGERR, "unknown mode on bus %d (0x%x)", 1, l); break; } } } else { sdparam *sdp = SDPARAM(isp, 0); i = ISP_READ(isp, BIU_CONF0) & BIU_CONF0_HW_MASK; switch (i) { default: isp_prt(isp, ISP_LOGALL, "Unknown Chip Type 0x%x", i); /* FALLTHROUGH */ case 1: btype = "1020"; isp->isp_type = ISP_HA_SCSI_1020; isp->isp_clock = 40; break; case 2: /* * Some 1020A chips are Ultra Capable, but don't * run the clock rate up for that unless told to * do so by the Ultra Capable bits being set. 
*/ btype = "1020A"; isp->isp_type = ISP_HA_SCSI_1020A; isp->isp_clock = 40; break; case 3: btype = "1040"; isp->isp_type = ISP_HA_SCSI_1040; isp->isp_clock = 60; break; case 4: btype = "1040A"; isp->isp_type = ISP_HA_SCSI_1040A; isp->isp_clock = 60; break; case 5: btype = "1040B"; isp->isp_type = ISP_HA_SCSI_1040B; isp->isp_clock = 60; break; case 6: btype = "1040C"; isp->isp_type = ISP_HA_SCSI_1040C; isp->isp_clock = 60; break; } /* * Now, while we're at it, gather info about ultra * and/or differential mode. */ if (ISP_READ(isp, SXP_PINS_DIFF) & SXP_PINS_DIFF_MODE) { isp_prt(isp, ISP_LOGCONFIG, "Differential Mode"); sdp->isp_diffmode = 1; } else { sdp->isp_diffmode = 0; } i = ISP_READ(isp, RISC_PSR); if (isp->isp_bustype == ISP_BT_SBUS) { i &= RISC_PSR_SBUS_ULTRA; } else { i &= RISC_PSR_PCI_ULTRA; } if (i != 0) { isp_prt(isp, ISP_LOGCONFIG, "Ultra Mode Capable"); sdp->isp_ultramode = 1; /* * If we're in Ultra Mode, we have to be 60MHz clock- * even for the SBus version. */ isp->isp_clock = 60; } else { sdp->isp_ultramode = 0; /* * Clock is known. Gronk. */ } /* * Machine dependent clock (if set) overrides * our generic determinations. */ if (isp->isp_mdvec->dv_clock) { if (isp->isp_mdvec->dv_clock < isp->isp_clock) { isp->isp_clock = isp->isp_mdvec->dv_clock; } } } /* * Hit the chip over the head with hammer, * and give it a chance to recover. */ if (IS_SCSI(isp)) { ISP_WRITE(isp, BIU_ICR, BIU_ICR_SOFT_RESET); /* * A slight delay... */ ISP_DELAY(100); /* * Clear data && control DMA engines. */ ISP_WRITE(isp, CDMA_CONTROL, DMA_CNTRL_CLEAR_CHAN | DMA_CNTRL_RESET_INT); ISP_WRITE(isp, DDMA_CONTROL, DMA_CNTRL_CLEAR_CHAN | DMA_CNTRL_RESET_INT); } else if (IS_24XX(isp)) { /* * Stop DMA and wait for it to stop. */ ISP_WRITE(isp, BIU2400_CSR, BIU2400_DMA_STOP|(3 << 4)); for (val = loops = 0; loops < 30000; loops++) { ISP_DELAY(10); val = ISP_READ(isp, BIU2400_CSR); if ((val & BIU2400_DMA_ACTIVE) == 0) { break; } } if (val & BIU2400_DMA_ACTIVE) { isp_prt(isp, ISP_LOGERR, "DMA Failed to Stop on Reset"); return; } /* * Hold it in SOFT_RESET and STOP state for 100us. */ ISP_WRITE(isp, BIU2400_CSR, BIU2400_SOFT_RESET|BIU2400_DMA_STOP|(3 << 4)); ISP_DELAY(100); for (loops = 0; loops < 10000; loops++) { ISP_DELAY(5); val = ISP_READ(isp, OUTMAILBOX0); } for (val = loops = 0; loops < 500000; loops ++) { val = ISP_READ(isp, BIU2400_CSR); if ((val & BIU2400_SOFT_RESET) == 0) { break; } } if (val & BIU2400_SOFT_RESET) { isp_prt(isp, ISP_LOGERR, "Failed to come out of reset"); return; } } else { ISP_WRITE(isp, BIU2100_CSR, BIU2100_SOFT_RESET); /* * A slight delay... */ ISP_DELAY(100); /* * Clear data && control DMA engines. */ ISP_WRITE(isp, CDMA2100_CONTROL, DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); ISP_WRITE(isp, TDMA2100_CONTROL, DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); ISP_WRITE(isp, RDMA2100_CONTROL, DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); } /* * Wait for ISP to be ready to go... */ loops = MBOX_DELAY_COUNT; for (;;) { if (IS_SCSI(isp)) { if (!(ISP_READ(isp, BIU_ICR) & BIU_ICR_SOFT_RESET)) { break; } } else if (IS_24XX(isp)) { if (ISP_READ(isp, OUTMAILBOX0) == 0) { break; } } else { if (!(ISP_READ(isp, BIU2100_CSR) & BIU2100_SOFT_RESET)) break; } ISP_DELAY(100); if (--loops < 0) { ISP_DUMPREGS(isp, "chip reset timed out"); return; } } /* * After we've fired this chip up, zero out the conf1 register * for SCSI adapters and other settings for the 2100. 
*/ if (IS_SCSI(isp)) { ISP_WRITE(isp, BIU_CONF1, 0); } else if (!IS_24XX(isp)) { ISP_WRITE(isp, BIU2100_CSR, 0); } /* * Reset RISC Processor */ if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RESET); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RELEASE); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RESET); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); ISP_DELAY(100); ISP_WRITE(isp, BIU_SEMA, 0); } /* * Post-RISC Reset stuff. */ if (IS_24XX(isp)) { for (val = loops = 0; loops < 5000000; loops++) { ISP_DELAY(5); val = ISP_READ(isp, OUTMAILBOX0); if (val == 0) { break; } } if (val != 0) { isp_prt(isp, ISP_LOGERR, "reset didn't clear"); return; } } else if (IS_SCSI(isp)) { uint16_t tmp = isp->isp_mdvec->dv_conf1; /* * Busted FIFO. Turn off all but burst enables. */ if (isp->isp_type == ISP_HA_SCSI_1040A) { tmp &= BIU_BURST_ENABLE; } ISP_SETBITS(isp, BIU_CONF1, tmp); if (tmp & BIU_BURST_ENABLE) { ISP_SETBITS(isp, CDMA_CONF, DMA_ENABLE_BURST); ISP_SETBITS(isp, DDMA_CONF, DMA_ENABLE_BURST); } if (SDPARAM(isp, 0)->isp_ptisp) { if (SDPARAM(isp, 0)->isp_ultramode) { while (ISP_READ(isp, RISC_MTR) != 0x1313) { ISP_WRITE(isp, RISC_MTR, 0x1313); ISP_WRITE(isp, HCCR, HCCR_CMD_STEP); } } else { ISP_WRITE(isp, RISC_MTR, 0x1212); } /* * PTI specific register */ ISP_WRITE(isp, RISC_EMB, DUAL_BANK); } else { ISP_WRITE(isp, RISC_MTR, 0x1212); } ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); } else { ISP_WRITE(isp, RISC_MTR2100, 0x1212); if (IS_2200(isp) || IS_23XX(isp)) { ISP_WRITE(isp, HCCR, HCCR_2X00_DISABLE_PARITY_PAUSE); } ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); } /* * Set up default request/response queue in-pointer/out-pointer * register indices. */ if (IS_24XX(isp)) { isp->isp_rqstinrp = BIU2400_REQINP; isp->isp_rqstoutrp = BIU2400_REQOUTP; isp->isp_respinrp = BIU2400_RSPINP; isp->isp_respoutrp = BIU2400_RSPOUTP; } else if (IS_23XX(isp)) { isp->isp_rqstinrp = BIU_REQINP; isp->isp_rqstoutrp = BIU_REQOUTP; isp->isp_respinrp = BIU_RSPINP; isp->isp_respoutrp = BIU_RSPOUTP; } else { isp->isp_rqstinrp = INMAILBOX4; isp->isp_rqstoutrp = OUTMAILBOX4; isp->isp_respinrp = OUTMAILBOX5; isp->isp_respoutrp = INMAILBOX5; } ISP_WRITE(isp, isp->isp_rqstinrp, 0); ISP_WRITE(isp, isp->isp_rqstoutrp, 0); ISP_WRITE(isp, isp->isp_respinrp, 0); ISP_WRITE(isp, isp->isp_respoutrp, 0); if (IS_24XX(isp)) { if (!IS_26XX(isp)) { ISP_WRITE(isp, BIU2400_PRI_REQINP, 0); ISP_WRITE(isp, BIU2400_PRI_REQOUTP, 0); } ISP_WRITE(isp, BIU2400_ATIO_RSPINP, 0); ISP_WRITE(isp, BIU2400_ATIO_RSPOUTP, 0); } if (!IS_24XX(isp) && isp->isp_bustype == ISP_BT_PCI) { /* Make sure the BIOS is disabled */ ISP_WRITE(isp, HCCR, PCI_HCCR_CMD_BIOS); } /* * Wait for everything to finish firing up. * * Avoid doing this on early 2312s because you can generate a PCI * parity error (chip breakage). */ if (IS_2312(isp) && isp->isp_revision < 2) { ISP_DELAY(100); } else { loops = MBOX_DELAY_COUNT; while (ISP_READ(isp, OUTMAILBOX0) == MBOX_BUSY) { ISP_DELAY(100); if (--loops < 0) { isp_prt(isp, ISP_LOGERR, "MBOX_BUSY never cleared on reset"); return; } } } /* * Up until this point we've done everything by just reading or * setting registers. From this point on we rely on at least *some* * kind of firmware running in the card. */ /* * Do some sanity checking by running a NOP command. * If it succeeds, the ROM firmware is now running. 
*/ MBSINIT(&mbs, MBOX_NO_OP, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "NOP command failed (%x)", mbs.param[0]); return; } /* * Do some operational tests */ if (IS_SCSI(isp) || IS_24XX(isp)) { static const uint16_t patterns[MAX_MAILBOX] = { 0x0000, 0xdead, 0xbeef, 0xffff, 0xa5a5, 0x5a5a, 0x7f7f, 0x7ff7, 0x3421, 0xabcd, 0xdcba, 0xfeef, 0xbead, 0xdebe, 0x2222, 0x3333, 0x5555, 0x6666, 0x7777, 0xaaaa, 0xffff, 0xdddd, 0x9999, 0x1fbc, 0x6666, 0x6677, 0x1122, 0x33ff, 0x0000, 0x0001, 0x1000, 0x1010, }; int nmbox = ISP_NMBOX(isp); if (IS_SCSI(isp)) nmbox = 6; MBSINIT(&mbs, MBOX_MAILBOX_REG_TEST, MBLOGALL, 0); for (i = 1; i < nmbox; i++) { mbs.param[i] = patterns[i]; } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } for (i = 1; i < nmbox; i++) { if (mbs.param[i] != patterns[i]) { isp_prt(isp, ISP_LOGERR, "Register Test Failed at Register %d: should have 0x%04x but got 0x%04x", i, patterns[i], mbs.param[i]); return; } } } /* * Download new Firmware, unless requested not to do so. * This is made slightly trickier in some cases where the * firmware of the ROM revision is newer than the revision * compiled into the driver. So, where we used to compare * versions of our f/w and the ROM f/w, now we just see * whether we have f/w at all and whether a config flag * has disabled our download. */ if ((isp->isp_mdvec->dv_ispfw == NULL) || (isp->isp_confopts & ISP_CFG_NORELOAD)) { dodnld = 0; } else { /* * Set up DMA for the request and response queues. * We do this now so we can use the request queue * for dma to load firmware from. */ if (ISP_MBOXDMASETUP(isp) != 0) { isp_prt(isp, ISP_LOGERR, "Cannot setup DMA"); return; } } if (IS_24XX(isp)) { code_org = ISP_CODE_ORG_2400; } else if (IS_23XX(isp)) { code_org = ISP_CODE_ORG_2300; } else { code_org = ISP_CODE_ORG; } isp->isp_loaded_fw = 0; if (dodnld && IS_24XX(isp)) { const uint32_t *ptr = isp->isp_mdvec->dv_ispfw; uint32_t la, wi, wl; /* * Keep loading until we run out of f/w. 
*/ code_org = ptr[2]; /* 1st load address is our start addr */ for (;;) { isp_prt(isp, ISP_LOGDEBUG0, "load 0x%x words of code at load address 0x%x", ptr[3], ptr[2]); wi = 0; la = ptr[2]; wl = ptr[3]; while (wi < ptr[3]) { uint32_t *cp; uint32_t nw; nw = min(wl, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) / 4); cp = isp->isp_rquest; for (i = 0; i < nw; i++) ISP_IOXPUT_32(isp, ptr[wi + i], &cp[i]); MEMORYBARRIER(isp, SYNC_REQUEST, 0, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)), -1); MBSINIT(&mbs, MBOX_LOAD_RISC_RAM, MBLOGALL, 0); mbs.param[1] = la; mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[4] = nw >> 16; mbs.param[5] = nw; mbs.param[6] = DMA_WD3(isp->isp_rquest_dma); mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); mbs.param[8] = la >> 16; isp_prt(isp, ISP_LOGDEBUG0, "LOAD RISC RAM %u words at load address 0x%x", nw, la); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "F/W download failed"); return; } la += nw; wi += nw; wl -= nw; } if (ptr[1] == 0) { break; } ptr += ptr[3]; } isp->isp_loaded_fw = 1; } else if (dodnld && IS_23XX(isp)) { const uint16_t *ptr = isp->isp_mdvec->dv_ispfw; uint16_t wi, wl, segno; uint32_t la; la = code_org; segno = 0; for (;;) { uint32_t nxtaddr; isp_prt(isp, ISP_LOGDEBUG0, "load 0x%x words of code at load address 0x%x", ptr[3], la); wi = 0; wl = ptr[3]; while (wi < ptr[3]) { uint16_t *cp; uint16_t nw; nw = min(wl, min((1 << 15), ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) / 2)); cp = isp->isp_rquest; for (i = 0; i < nw; i++) ISP_IOXPUT_16(isp, ptr[wi + i], &cp[i]); MEMORYBARRIER(isp, SYNC_REQUEST, 0, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)), -1); MBSINIT(&mbs, 0, MBLOGALL, 0); if (la < 0x10000) { mbs.param[0] = MBOX_LOAD_RISC_RAM_2100; mbs.param[1] = la; mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[4] = nw; mbs.param[6] = DMA_WD3(isp->isp_rquest_dma); mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); isp_prt(isp, ISP_LOGDEBUG1, "LOAD RISC RAM 2100 %u words at load address 0x%x\n", nw, la); } else { mbs.param[0] = MBOX_LOAD_RISC_RAM; mbs.param[1] = la; mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[4] = nw; mbs.param[6] = DMA_WD3(isp->isp_rquest_dma); mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); mbs.param[8] = la >> 16; isp_prt(isp, ISP_LOGDEBUG1, "LOAD RISC RAM %u words at load address 0x%x\n", nw, la); } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "F/W download failed"); return; } la += nw; wi += nw; wl -= nw; } if (!IS_2322(isp)) { break; } if (++segno == 3) { break; } /* * If we're a 2322, the firmware actually comes in * three chunks. We loaded the first at the code_org * address. The other two chunks, which follow right * after each other in memory here, get loaded at * addresses specfied at offset 0x9..0xB. 
*/ nxtaddr = ptr[3]; ptr = &ptr[nxtaddr]; la = ptr[5] | ((ptr[4] & 0x3f) << 16); } isp->isp_loaded_fw = 1; } else if (dodnld) { const uint16_t *ptr = isp->isp_mdvec->dv_ispfw; u_int i, wl; wl = ptr[3]; isp_prt(isp, ISP_LOGDEBUG1, "WRITE RAM %u words at load address 0x%x", wl, code_org); for (i = 0; i < wl; i++) { MBSINIT(&mbs, MBOX_WRITE_RAM_WORD, MBLOGNONE, 0); mbs.param[1] = code_org + i; mbs.param[2] = ptr[i]; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "F/W download failed at word %d", i); return; } } } else if (IS_26XX(isp)) { isp_prt(isp, ISP_LOGDEBUG1, "loading firmware from flash"); MBSINIT(&mbs, MBOX_LOAD_FLASH_FIRMWARE, MBLOGALL, 5000000); mbs.ibitm = 0x01; mbs.obitm = 0x07; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "Flash F/W load failed"); return; } } else { isp_prt(isp, ISP_LOGDEBUG2, "skipping f/w download"); } /* * If we loaded firmware, verify its checksum */ if (isp->isp_loaded_fw) { MBSINIT(&mbs, MBOX_VERIFY_CHECKSUM, MBLOGNONE, 0); if (IS_24XX(isp)) { mbs.param[1] = code_org >> 16; mbs.param[2] = code_org; } else { mbs.param[1] = code_org; } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, dcrc); return; } } /* * Now start it rolling. * * If we didn't actually download f/w, * we still need to (re)start it. */ MBSINIT(&mbs, MBOX_EXEC_FIRMWARE, MBLOGALL, 5000000); if (IS_26XX(isp)) { mbs.param[1] = code_org >> 16; mbs.param[2] = code_org; } else if (IS_24XX(isp)) { mbs.param[1] = code_org >> 16; mbs.param[2] = code_org; if (isp->isp_loaded_fw) { mbs.param[3] = 0; } else { mbs.param[3] = 1; } } else if (IS_2322(isp)) { mbs.param[1] = code_org; if (isp->isp_loaded_fw) { mbs.param[2] = 0; } else { mbs.param[2] = 1; } } else { mbs.param[1] = code_org; } isp_mboxcmd(isp, &mbs); if (IS_2322(isp) || IS_24XX(isp)) { if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } } if (IS_SCSI(isp)) { /* * Set CLOCK RATE, but only if asked to. */ if (isp->isp_clock) { MBSINIT(&mbs, MBOX_SET_CLOCK_RATE, MBLOGALL, 0); mbs.param[1] = isp->isp_clock; isp_mboxcmd(isp, &mbs); /* we will try not to care if this fails */ } } /* * Ask the chip for the current firmware version. * This should prove that the new firmware is working. */ MBSINIT(&mbs, MBOX_ABOUT_FIRMWARE, MBLOGALL, 5000000); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } /* * The SBus firmware that we are using apparently does not return * major, minor, micro revisions in the mailbox registers, which * is really, really, annoying. */ if (ISP_SBUS_SUPPORTED && isp->isp_bustype == ISP_BT_SBUS) { if (dodnld) { #ifdef ISP_TARGET_MODE isp->isp_fwrev[0] = 7; isp->isp_fwrev[1] = 55; #else isp->isp_fwrev[0] = 1; isp->isp_fwrev[1] = 37; #endif isp->isp_fwrev[2] = 0; } } else { isp->isp_fwrev[0] = mbs.param[1]; isp->isp_fwrev[1] = mbs.param[2]; isp->isp_fwrev[2] = mbs.param[3]; } if (IS_FC(isp)) { /* * We do not believe firmware attributes for 2100 code less * than 1.17.0, unless it's the firmware we specifically * are loading. * * Note that all 22XX and later f/w is greater than 1.X.0. 
*/ if ((ISP_FW_OLDER_THAN(isp, 1, 17, 1))) { #ifdef USE_SMALLER_2100_FIRMWARE isp->isp_fwattr = ISP_FW_ATTR_SCCLUN; #else isp->isp_fwattr = 0; #endif } else { isp->isp_fwattr = mbs.param[6]; } if (IS_24XX(isp)) { isp->isp_fwattr |= ((uint64_t) mbs.param[15]) << 16; if (isp->isp_fwattr & ISP2400_FW_ATTR_EXTNDED) { isp->isp_fwattr |= (((uint64_t) mbs.param[16]) << 32) | (((uint64_t) mbs.param[17]) << 48); } } } else { isp->isp_fwattr = 0; } isp_prt(isp, ISP_LOGCONFIG, "Board Type %s, Chip Revision 0x%x, %s F/W Revision %d.%d.%d", btype, isp->isp_revision, dodnld? "loaded" : "resident", isp->isp_fwrev[0], isp->isp_fwrev[1], isp->isp_fwrev[2]); fwt = isp->isp_fwattr; if (IS_24XX(isp)) { buf = FCPARAM(isp, 0)->isp_scanscratch; ISP_SNPRINTF(buf, ISP_FC_SCRLEN, "Attributes:"); if (fwt & ISP2400_FW_ATTR_CLASS2) { fwt ^=ISP2400_FW_ATTR_CLASS2; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s Class2", buf); } if (fwt & ISP2400_FW_ATTR_IP) { fwt ^=ISP2400_FW_ATTR_IP; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s IP", buf); } if (fwt & ISP2400_FW_ATTR_MULTIID) { fwt ^=ISP2400_FW_ATTR_MULTIID; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s MultiID", buf); } if (fwt & ISP2400_FW_ATTR_SB2) { fwt ^=ISP2400_FW_ATTR_SB2; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s SB2", buf); } if (fwt & ISP2400_FW_ATTR_T10CRC) { fwt ^=ISP2400_FW_ATTR_T10CRC; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s T10CRC", buf); } if (fwt & ISP2400_FW_ATTR_VI) { fwt ^=ISP2400_FW_ATTR_VI; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s VI", buf); } if (fwt & ISP2400_FW_ATTR_MQ) { fwt ^=ISP2400_FW_ATTR_MQ; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s MQ", buf); } if (fwt & ISP2400_FW_ATTR_MSIX) { fwt ^=ISP2400_FW_ATTR_MSIX; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s MSIX", buf); } if (fwt & ISP2400_FW_ATTR_FCOE) { fwt ^=ISP2400_FW_ATTR_FCOE; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s FCOE", buf); } if (fwt & ISP2400_FW_ATTR_VP0) { fwt ^= ISP2400_FW_ATTR_VP0; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s VP0_Decoupling", buf); } if (fwt & ISP2400_FW_ATTR_EXPFW) { fwt ^= ISP2400_FW_ATTR_EXPFW; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s (Experimental)", buf); } if (fwt & ISP2400_FW_ATTR_HOTFW) { fwt ^= ISP2400_FW_ATTR_HOTFW; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s HotFW", buf); } fwt &= ~ISP2400_FW_ATTR_EXTNDED; if (fwt & ISP2400_FW_ATTR_EXTVP) { fwt ^= ISP2400_FW_ATTR_EXTVP; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s ExtVP", buf); } if (fwt & ISP2400_FW_ATTR_VN2VN) { fwt ^= ISP2400_FW_ATTR_VN2VN; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s VN2VN", buf); } if (fwt & ISP2400_FW_ATTR_EXMOFF) { fwt ^= ISP2400_FW_ATTR_EXMOFF; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s EXMOFF", buf); } if (fwt & ISP2400_FW_ATTR_NPMOFF) { fwt ^= ISP2400_FW_ATTR_NPMOFF; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s NPMOFF", buf); } if (fwt & ISP2400_FW_ATTR_DIFCHOP) { fwt ^= ISP2400_FW_ATTR_DIFCHOP; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s DIFCHOP", buf); } if (fwt & ISP2400_FW_ATTR_SRIOV) { fwt ^= ISP2400_FW_ATTR_SRIOV; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s SRIOV", buf); } if (fwt & ISP2400_FW_ATTR_ASICTMP) { fwt ^= ISP2400_FW_ATTR_ASICTMP; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s ASICTMP", buf); } if (fwt & ISP2400_FW_ATTR_ATIOMQ) { fwt ^= ISP2400_FW_ATTR_ATIOMQ; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s ATIOMQ", buf); } if (fwt) { ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s (unknown 0x%08x%08x)", buf, (uint32_t) 
(fwt >> 32), (uint32_t) fwt); } isp_prt(isp, ISP_LOGCONFIG, "%s", buf); } else if (IS_FC(isp)) { buf = FCPARAM(isp, 0)->isp_scanscratch; ISP_SNPRINTF(buf, ISP_FC_SCRLEN, "Attributes:"); if (fwt & ISP_FW_ATTR_TMODE) { fwt ^=ISP_FW_ATTR_TMODE; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s TargetMode", buf); } if (fwt & ISP_FW_ATTR_SCCLUN) { fwt ^=ISP_FW_ATTR_SCCLUN; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s SCC-Lun", buf); } if (fwt & ISP_FW_ATTR_FABRIC) { fwt ^=ISP_FW_ATTR_FABRIC; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s Fabric", buf); } if (fwt & ISP_FW_ATTR_CLASS2) { fwt ^=ISP_FW_ATTR_CLASS2; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s Class2", buf); } if (fwt & ISP_FW_ATTR_FCTAPE) { fwt ^=ISP_FW_ATTR_FCTAPE; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s FC-Tape", buf); } if (fwt & ISP_FW_ATTR_IP) { fwt ^=ISP_FW_ATTR_IP; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s IP", buf); } if (fwt & ISP_FW_ATTR_VI) { fwt ^=ISP_FW_ATTR_VI; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s VI", buf); } if (fwt & ISP_FW_ATTR_VI_SOLARIS) { fwt ^=ISP_FW_ATTR_VI_SOLARIS; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s VI_SOLARIS", buf); } if (fwt & ISP_FW_ATTR_2KLOGINS) { fwt ^=ISP_FW_ATTR_2KLOGINS; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s 2K-Login", buf); } if (fwt != 0) { ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s (unknown 0x%08x%08x)", buf, (uint32_t) (fwt >> 32), (uint32_t) fwt); } isp_prt(isp, ISP_LOGCONFIG, "%s", buf); } if (IS_24XX(isp)) { MBSINIT(&mbs, MBOX_GET_RESOURCE_COUNT, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_maxcmds = mbs.param[3]; } else { MBSINIT(&mbs, MBOX_GET_FIRMWARE_STATUS, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_maxcmds = mbs.param[2]; } isp_prt(isp, ISP_LOGCONFIG, "%d max I/O command limit set", isp->isp_maxcmds); /* * If we don't have Multi-ID f/w loaded, we need to restrict channels to one. * Only make this check for non-SCSI cards (I'm not sure firmware attributes * work for them). */ if (IS_FC(isp) && isp->isp_nchan > 1) { if (!ISP_CAP_MULTI_ID(isp)) { isp_prt(isp, ISP_LOGWARN, "non-MULTIID f/w loaded, " "only can enable 1 of %d channels", isp->isp_nchan); isp->isp_nchan = 1; } else if (!ISP_CAP_VP0(isp)) { isp_prt(isp, ISP_LOGWARN, "We can not use MULTIID " "feature properly without VP0_Decoupling"); isp->isp_nchan = 1; } } /* * Final DMA setup after we got isp_maxcmds. */ if (ISP_MBOXDMASETUP(isp) != 0) { isp_prt(isp, ISP_LOGERR, "Cannot setup DMA"); return; } /* * Setup interrupts. */ if (ISP_IRQSETUP(isp) != 0) { isp_prt(isp, ISP_LOGERR, "Cannot setup IRQ"); return; } ISP_ENABLE_INTS(isp); if (IS_FC(isp)) { for (i = 0; i < isp->isp_nchan; i++) isp_change_fw_state(isp, i, FW_CONFIG_WAIT); } isp->isp_state = ISP_RESETSTATE; /* * Okay- now that we have new firmware running, we now (re)set our * notion of how many luns we support. This is somewhat tricky because * if we haven't loaded firmware, we sometimes do not have an easy way * of knowing how many luns we support. * * Expanded lun firmware gives you 32 luns for SCSI cards and * unlimited luns for Fibre Channel cards. * * It turns out that even for QLogic 2100s with ROM 1.10 and above * we do get a firmware attributes word returned in mailbox register 6. 
* * Because the lun is in a different position in the Request Queue * Entry structure for Fibre Channel with expanded lun firmware, we * can only support one lun (lun zero) when we don't know what kind * of firmware we're running. */ if (IS_SCSI(isp)) { if (dodnld) { if (IS_ULTRA2(isp) || IS_ULTRA3(isp)) { isp->isp_maxluns = 32; } else { isp->isp_maxluns = 8; } } else { isp->isp_maxluns = 8; } } else { if (ISP_CAP_SCCFW(isp)) { isp->isp_maxluns = 0; /* No limit -- 2/8 bytes */ } else { isp->isp_maxluns = 16; } } /* * We get some default values established. As a side * effect, NVRAM is read here (unless overriden by * a configuration flag). */ if (do_load_defaults) { if (IS_SCSI(isp)) { isp_setdfltsdparm(isp); } else { for (i = 0; i < isp->isp_nchan; i++) { isp_setdfltfcparm(isp, i); } } } } /* * Clean firmware shutdown. */ static int isp_stop(ispsoftc_t *isp) { mbreg_t mbs; isp->isp_state = ISP_NILSTATE; MBSINIT(&mbs, MBOX_STOP_FIRMWARE, MBLOGALL, 500000); mbs.param[1] = 0; mbs.param[2] = 0; mbs.param[3] = 0; mbs.param[4] = 0; mbs.param[5] = 0; mbs.param[6] = 0; mbs.param[7] = 0; mbs.param[8] = 0; isp_mboxcmd(isp, &mbs); return (mbs.param[0] == MBOX_COMMAND_COMPLETE ? 0 : mbs.param[0]); } /* * Hardware shutdown. */ void isp_shutdown(ispsoftc_t *isp) { if (isp->isp_state >= ISP_RESETSTATE) isp_stop(isp); ISP_DISABLE_INTS(isp); if (IS_FC(isp)) { if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_ICR, 0); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_PAUSE); } else { ISP_WRITE(isp, BIU_ICR, 0); ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); ISP_WRITE(isp, BIU2100_CSR, BIU2100_FPM0_REGS); ISP_WRITE(isp, FPM_DIAG_CONFIG, FPM_SOFT_RESET); ISP_WRITE(isp, BIU2100_CSR, BIU2100_FB_REGS); ISP_WRITE(isp, FBM_CMD, FBMCMD_FIFO_RESET_ALL); ISP_WRITE(isp, BIU2100_CSR, BIU2100_RISC_REGS); } } else { ISP_WRITE(isp, BIU_ICR, 0); ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); } } /* * Initialize Parameters of Hardware to a known state. * * Locks are held before coming here. */ void isp_init(ispsoftc_t *isp) { if (IS_FC(isp)) { if (IS_24XX(isp)) { isp_fibre_init_2400(isp); } else { isp_fibre_init(isp); } } else { isp_scsi_init(isp); } } static void isp_scsi_init(ispsoftc_t *isp) { sdparam *sdp_chan0, *sdp_chan1; mbreg_t mbs; isp->isp_state = ISP_INITSTATE; sdp_chan0 = SDPARAM(isp, 0); sdp_chan1 = sdp_chan0; if (IS_DUALBUS(isp)) { sdp_chan1 = SDPARAM(isp, 1); } /* First do overall per-card settings. */ /* * If we have fast memory timing enabled, turn it on. */ if (sdp_chan0->isp_fast_mttr) { ISP_WRITE(isp, RISC_MTR, 0x1313); } /* * Set Retry Delay and Count. * You set both channels at the same time. */ MBSINIT(&mbs, MBOX_SET_RETRY_COUNT, MBLOGALL, 0); mbs.param[1] = sdp_chan0->isp_retry_count; mbs.param[2] = sdp_chan0->isp_retry_delay; mbs.param[6] = sdp_chan1->isp_retry_count; mbs.param[7] = sdp_chan1->isp_retry_delay; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } /* * Set ASYNC DATA SETUP time. This is very important. */ MBSINIT(&mbs, MBOX_SET_ASYNC_DATA_SETUP_TIME, MBLOGALL, 0); mbs.param[1] = sdp_chan0->isp_async_data_setup; mbs.param[2] = sdp_chan1->isp_async_data_setup; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } /* * Set ACTIVE Negation State. 
*/ MBSINIT(&mbs, MBOX_SET_ACT_NEG_STATE, MBLOGNONE, 0); mbs.param[1] = (sdp_chan0->isp_req_ack_active_neg << 4) | (sdp_chan0->isp_data_line_active_neg << 5); mbs.param[2] = (sdp_chan1->isp_req_ack_active_neg << 4) | (sdp_chan1->isp_data_line_active_neg << 5); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "failed to set active negation state (%d,%d), (%d,%d)", sdp_chan0->isp_req_ack_active_neg, sdp_chan0->isp_data_line_active_neg, sdp_chan1->isp_req_ack_active_neg, sdp_chan1->isp_data_line_active_neg); /* * But don't return. */ } /* * Set the Tag Aging limit */ MBSINIT(&mbs, MBOX_SET_TAG_AGE_LIMIT, MBLOGALL, 0); mbs.param[1] = sdp_chan0->isp_tag_aging; mbs.param[2] = sdp_chan1->isp_tag_aging; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "failed to set tag age limit (%d,%d)", sdp_chan0->isp_tag_aging, sdp_chan1->isp_tag_aging); return; } /* * Set selection timeout. */ MBSINIT(&mbs, MBOX_SET_SELECT_TIMEOUT, MBLOGALL, 0); mbs.param[1] = sdp_chan0->isp_selection_timeout; mbs.param[2] = sdp_chan1->isp_selection_timeout; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } /* now do per-channel settings */ isp_scsi_channel_init(isp, 0); if (IS_DUALBUS(isp)) isp_scsi_channel_init(isp, 1); /* * Now enable request/response queues */ if (IS_ULTRA2(isp) || IS_1240(isp)) { MBSINIT(&mbs, MBOX_INIT_RES_QUEUE_A64, MBLOGALL, 0); mbs.param[1] = RESULT_QUEUE_LEN(isp); mbs.param[2] = DMA_WD1(isp->isp_result_dma); mbs.param[3] = DMA_WD0(isp->isp_result_dma); mbs.param[4] = 0; mbs.param[6] = DMA_WD3(isp->isp_result_dma); mbs.param[7] = DMA_WD2(isp->isp_result_dma); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_residx = isp->isp_resodx = mbs.param[5]; MBSINIT(&mbs, MBOX_INIT_REQ_QUEUE_A64, MBLOGALL, 0); mbs.param[1] = RQUEST_QUEUE_LEN(isp); mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[5] = 0; mbs.param[6] = DMA_WD3(isp->isp_result_dma); mbs.param[7] = DMA_WD2(isp->isp_result_dma); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_reqidx = isp->isp_reqodx = mbs.param[4]; } else { MBSINIT(&mbs, MBOX_INIT_RES_QUEUE, MBLOGALL, 0); mbs.param[1] = RESULT_QUEUE_LEN(isp); mbs.param[2] = DMA_WD1(isp->isp_result_dma); mbs.param[3] = DMA_WD0(isp->isp_result_dma); mbs.param[4] = 0; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_residx = isp->isp_resodx = mbs.param[5]; MBSINIT(&mbs, MBOX_INIT_REQ_QUEUE, MBLOGALL, 0); mbs.param[1] = RQUEST_QUEUE_LEN(isp); mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[5] = 0; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_reqidx = isp->isp_reqodx = mbs.param[4]; } /* * Turn on LVD transitions for ULTRA2 or better and other features * * Now that we have 32 bit handles, don't do any fast posting * any more. For Ultra2/Ultra3 cards, we can turn on 32 bit RIO * operation or use fast posting. To be conservative, we'll only * do this for Ultra3 cards now because the other cards are so * rare for this author to find and test with. 
*/ MBSINIT(&mbs, MBOX_SET_FW_FEATURES, MBLOGALL, 0); if (IS_ULTRA2(isp)) mbs.param[1] |= FW_FEATURE_LVD_NOTIFY; #ifdef ISP_NO_RIO if (IS_ULTRA3(isp)) mbs.param[1] |= FW_FEATURE_FAST_POST; #else if (IS_ULTRA3(isp)) mbs.param[1] |= FW_FEATURE_RIO_32BIT; #endif if (mbs.param[1] != 0) { uint16_t sfeat = mbs.param[1]; isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGINFO, "Enabled FW features (0x%x)", sfeat); } } isp->isp_state = ISP_RUNSTATE; } static void isp_scsi_channel_init(ispsoftc_t *isp, int chan) { sdparam *sdp; mbreg_t mbs; int tgt; sdp = SDPARAM(isp, chan); /* * Set (possibly new) Initiator ID. */ MBSINIT(&mbs, MBOX_SET_INIT_SCSI_ID, MBLOGALL, 0); mbs.param[1] = (chan << 7) | sdp->isp_initiator_id; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp_prt(isp, ISP_LOGINFO, "Chan %d Initiator ID is %d", chan, sdp->isp_initiator_id); /* * Set current per-target parameters to an initial safe minimum. */ for (tgt = 0; tgt < MAX_TARGETS; tgt++) { int lun; uint16_t sdf; if (sdp->isp_devparam[tgt].dev_enable == 0) { continue; } #ifndef ISP_TARGET_MODE sdf = sdp->isp_devparam[tgt].goal_flags; sdf &= DPARM_SAFE_DFLT; /* * It is not quite clear when this changed over so that * we could force narrow and async for 1000/1020 cards, * but assume that this is only the case for loaded * firmware. */ if (isp->isp_loaded_fw) { sdf |= DPARM_NARROW | DPARM_ASYNC; } #else /* * The !$*!)$!$)* f/w uses the same index into some * internal table to decide how to respond to negotiations, * so if we've said "let's be safe" for ID X, and ID X * selects *us*, the negotiations will back to 'safe' * (as in narrow/async). What the f/w *should* do is * use the initiator id settings to decide how to respond. */ sdp->isp_devparam[tgt].goal_flags = sdf = DPARM_DEFAULT; #endif MBSINIT(&mbs, MBOX_SET_TARGET_PARAMS, MBLOGNONE, 0); mbs.param[1] = (chan << 15) | (tgt << 8); mbs.param[2] = sdf; if ((sdf & DPARM_SYNC) == 0) { mbs.param[3] = 0; } else { mbs.param[3] = (sdp->isp_devparam[tgt].goal_offset << 8) | (sdp->isp_devparam[tgt].goal_period); } isp_prt(isp, ISP_LOGDEBUG0, "Initial Settings bus%d tgt%d flags 0x%x off 0x%x per 0x%x", chan, tgt, mbs.param[2], mbs.param[3] >> 8, mbs.param[3] & 0xff); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { sdf = DPARM_SAFE_DFLT; MBSINIT(&mbs, MBOX_SET_TARGET_PARAMS, MBLOGALL, 0); mbs.param[1] = (tgt << 8) | (chan << 15); mbs.param[2] = sdf; mbs.param[3] = 0; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { continue; } } /* * We don't update any information directly from the f/w * because we need to run at least one command to cause a * new state to be latched up. So, we just assume that we * converge to the values we just had set. * * Ensure that we don't believe tagged queuing is enabled yet. * It turns out that sometimes the ISP just ignores our * attempts to set parameters for devices that it hasn't * seen yet. */ sdp->isp_devparam[tgt].actv_flags = sdf & ~DPARM_TQING; for (lun = 0; lun < (int) isp->isp_maxluns; lun++) { MBSINIT(&mbs, MBOX_SET_DEV_QUEUE_PARAMS, MBLOGALL, 0); mbs.param[1] = (chan << 15) | (tgt << 8) | lun; mbs.param[2] = sdp->isp_max_queue_depth; mbs.param[3] = sdp->isp_devparam[tgt].exc_throttle; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { break; } } } for (tgt = 0; tgt < MAX_TARGETS; tgt++) { if (sdp->isp_devparam[tgt].dev_refresh) { sdp->sendmarker = 1; sdp->update = 1; break; } } } /* * Fibre Channel specific initialization. 
*/ static void isp_fibre_init(ispsoftc_t *isp) { fcparam *fcp; isp_icb_t local, *icbp = &local; mbreg_t mbs; int ownloopid; /* * We only support one channel on non-24XX cards */ fcp = FCPARAM(isp, 0); if (fcp->role == ISP_ROLE_NONE) return; isp->isp_state = ISP_INITSTATE; ISP_MEMZERO(icbp, sizeof (*icbp)); icbp->icb_version = ICB_VERSION1; icbp->icb_fwoptions = fcp->isp_fwoptions; /* * Firmware Options are either retrieved from NVRAM or * are patched elsewhere. We check them for sanity here * and make changes based on board revision, but otherwise * let others decide policy. */ /* * If this is a 2100 < revision 5, we have to turn off FAIRNESS. */ if (IS_2100(isp) && isp->isp_revision < 5) { icbp->icb_fwoptions &= ~ICBOPT_FAIRNESS; } /* * We have to use FULL LOGIN even though it resets the loop too much * because otherwise port database entries don't get updated after * a LIP- this is a known f/w bug for 2100 f/w less than 1.17.0. */ if (!ISP_FW_NEWER_THAN(isp, 1, 17, 0)) { icbp->icb_fwoptions |= ICBOPT_FULL_LOGIN; } /* * Insist on Port Database Update Async notifications */ icbp->icb_fwoptions |= ICBOPT_PDBCHANGE_AE; /* * Make sure that target role reflects into fwoptions. */ if (fcp->role & ISP_ROLE_TARGET) { icbp->icb_fwoptions |= ICBOPT_TGT_ENABLE; } else { icbp->icb_fwoptions &= ~ICBOPT_TGT_ENABLE; } /* * For some reason my 2200 does not generate ATIOs in target mode * if initiator is disabled. Extra logins are better then target * not working at all. */ if ((fcp->role & ISP_ROLE_INITIATOR) || IS_2100(isp) || IS_2200(isp)) { icbp->icb_fwoptions &= ~ICBOPT_INI_DISABLE; } else { icbp->icb_fwoptions |= ICBOPT_INI_DISABLE; } icbp->icb_maxfrmlen = DEFAULT_FRAMESIZE(isp); if (icbp->icb_maxfrmlen < ICB_MIN_FRMLEN || icbp->icb_maxfrmlen > ICB_MAX_FRMLEN) { isp_prt(isp, ISP_LOGERR, "bad frame length (%d) from NVRAM- using %d", DEFAULT_FRAMESIZE(isp), ICB_DFLT_FRMLEN); icbp->icb_maxfrmlen = ICB_DFLT_FRMLEN; } icbp->icb_maxalloc = fcp->isp_maxalloc; if (icbp->icb_maxalloc < 1) { isp_prt(isp, ISP_LOGERR, "bad maximum allocation (%d)- using 16", fcp->isp_maxalloc); icbp->icb_maxalloc = 16; } icbp->icb_execthrottle = DEFAULT_EXEC_THROTTLE(isp); if (icbp->icb_execthrottle < 1) { isp_prt(isp, ISP_LOGERR, "bad execution throttle of %d- using %d", DEFAULT_EXEC_THROTTLE(isp), ICB_DFLT_THROTTLE); icbp->icb_execthrottle = ICB_DFLT_THROTTLE; } icbp->icb_retry_delay = fcp->isp_retry_delay; icbp->icb_retry_count = fcp->isp_retry_count; icbp->icb_hardaddr = fcp->isp_loopid; ownloopid = (isp->isp_confopts & ISP_CFG_OWNLOOPID) != 0; if (icbp->icb_hardaddr >= LOCAL_LOOP_LIM) { icbp->icb_hardaddr = 0; ownloopid = 0; } /* * Our life seems so much better with 2200s and later with * the latest f/w if we set Hard Address. */ if (ownloopid || ISP_FW_NEWER_THAN(isp, 2, 2, 5)) { icbp->icb_fwoptions |= ICBOPT_HARD_ADDRESS; } /* * Right now we just set extended options to prefer point-to-point * over loop based upon some soft config options. * * NB: for the 2300, ICBOPT_EXTENDED is required. */ if (IS_2100(isp)) { /* * We can't have Fast Posting any more- we now * have 32 bit handles. 
*/ icbp->icb_fwoptions &= ~ICBOPT_FAST_POST; } else if (IS_2200(isp) || IS_23XX(isp)) { icbp->icb_fwoptions |= ICBOPT_EXTENDED; icbp->icb_xfwoptions = fcp->isp_xfwoptions; if (ISP_CAP_FCTAPE(isp)) { if (isp->isp_confopts & ISP_CFG_NOFCTAPE) icbp->icb_xfwoptions &= ~ICBXOPT_FCTAPE; if (isp->isp_confopts & ISP_CFG_FCTAPE) icbp->icb_xfwoptions |= ICBXOPT_FCTAPE; if (icbp->icb_xfwoptions & ICBXOPT_FCTAPE) { icbp->icb_fwoptions &= ~ICBOPT_FULL_LOGIN; /* per documents */ icbp->icb_xfwoptions |= ICBXOPT_FCTAPE_CCQ|ICBXOPT_FCTAPE_CONFIRM; FCPARAM(isp, 0)->fctape_enabled = 1; } else { FCPARAM(isp, 0)->fctape_enabled = 0; } } else { icbp->icb_xfwoptions &= ~ICBXOPT_FCTAPE; FCPARAM(isp, 0)->fctape_enabled = 0; } /* * Prefer or force Point-To-Point instead Loop? */ switch (isp->isp_confopts & ISP_CFG_PORT_PREF) { case ISP_CFG_LPORT_ONLY: icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK; icbp->icb_xfwoptions |= ICBXOPT_LOOP_ONLY; break; case ISP_CFG_NPORT_ONLY: icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK; icbp->icb_xfwoptions |= ICBXOPT_PTP_ONLY; break; case ISP_CFG_LPORT: icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK; icbp->icb_xfwoptions |= ICBXOPT_LOOP_2_PTP; break; case ISP_CFG_NPORT: icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK; icbp->icb_xfwoptions |= ICBXOPT_PTP_2_LOOP; break; default: /* Let NVRAM settings define it if they are sane */ switch (icbp->icb_xfwoptions & ICBXOPT_TOPO_MASK) { case ICBXOPT_PTP_2_LOOP: case ICBXOPT_PTP_ONLY: case ICBXOPT_LOOP_ONLY: case ICBXOPT_LOOP_2_PTP: break; default: icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK; icbp->icb_xfwoptions |= ICBXOPT_LOOP_2_PTP; } break; } if (IS_2200(isp)) { /* * We can't have Fast Posting any more- we now * have 32 bit handles. * * RIO seemed to have to much breakage. * * Just opt for safety. */ icbp->icb_xfwoptions &= ~ICBXOPT_RIO_16BIT; icbp->icb_fwoptions &= ~ICBOPT_FAST_POST; } else { /* * QLogic recommends that FAST Posting be turned * off for 23XX cards and instead allow the HBA * to write response queue entries and interrupt * after a delay (ZIO). */ icbp->icb_fwoptions &= ~ICBOPT_FAST_POST; if ((fcp->isp_xfwoptions & ICBXOPT_TIMER_MASK) == ICBXOPT_ZIO) { icbp->icb_xfwoptions |= ICBXOPT_ZIO; icbp->icb_idelaytimer = 10; } icbp->icb_zfwoptions = fcp->isp_zfwoptions; if (isp->isp_confopts & ISP_CFG_1GB) { icbp->icb_zfwoptions &= ~ICBZOPT_RATE_MASK; icbp->icb_zfwoptions |= ICBZOPT_RATE_1GB; } else if (isp->isp_confopts & ISP_CFG_2GB) { icbp->icb_zfwoptions &= ~ICBZOPT_RATE_MASK; icbp->icb_zfwoptions |= ICBZOPT_RATE_2GB; } else { switch (icbp->icb_zfwoptions & ICBZOPT_RATE_MASK) { case ICBZOPT_RATE_1GB: case ICBZOPT_RATE_2GB: case ICBZOPT_RATE_AUTO: break; default: icbp->icb_zfwoptions &= ~ICBZOPT_RATE_MASK; icbp->icb_zfwoptions |= ICBZOPT_RATE_AUTO; break; } } } } /* * For 22XX > 2.1.26 && 23XX, set some options. 
*/ if (ISP_FW_NEWER_THAN(isp, 2, 26, 0)) { MBSINIT(&mbs, MBOX_SET_FIRMWARE_OPTIONS, MBLOGALL, 0); mbs.param[1] = IFCOPT1_DISF7SWTCH|IFCOPT1_LIPASYNC|IFCOPT1_LIPF8; mbs.param[2] = 0; mbs.param[3] = 0; if (ISP_FW_NEWER_THAN(isp, 3, 16, 0)) { mbs.param[1] |= IFCOPT1_EQFQASYNC|IFCOPT1_CTIO_RETRY; if (fcp->role & ISP_ROLE_TARGET) { if (ISP_FW_NEWER_THAN(isp, 3, 25, 0)) { mbs.param[1] |= IFCOPT1_ENAPURE; } mbs.param[3] = IFCOPT3_NOPRLI; } } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } } icbp->icb_logintime = ICB_LOGIN_TOV; #ifdef ISP_TARGET_MODE if (icbp->icb_fwoptions & ICBOPT_TGT_ENABLE) { icbp->icb_lunenables = 0xffff; icbp->icb_ccnt = 0xff; icbp->icb_icnt = 0xff; icbp->icb_lunetimeout = ICB_LUN_ENABLE_TOV; } #endif if (fcp->isp_wwnn && fcp->isp_wwpn) { icbp->icb_fwoptions |= ICBOPT_BOTH_WWNS; MAKE_NODE_NAME_FROM_WWN(icbp->icb_nodename, fcp->isp_wwnn); MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, fcp->isp_wwpn); isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Node 0x%08x%08x Port 0x%08x%08x", ((uint32_t) (fcp->isp_wwnn >> 32)), ((uint32_t) (fcp->isp_wwnn)), ((uint32_t) (fcp->isp_wwpn >> 32)), ((uint32_t) (fcp->isp_wwpn))); } else if (fcp->isp_wwpn) { icbp->icb_fwoptions &= ~ICBOPT_BOTH_WWNS; MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, fcp->isp_wwpn); isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Port 0x%08x%08x", ((uint32_t) (fcp->isp_wwpn >> 32)), ((uint32_t) (fcp->isp_wwpn))); } else { isp_prt(isp, ISP_LOGERR, "No valid WWNs to use"); return; } icbp->icb_rqstqlen = RQUEST_QUEUE_LEN(isp); if (icbp->icb_rqstqlen < 1) { isp_prt(isp, ISP_LOGERR, "bad request queue length"); } icbp->icb_rsltqlen = RESULT_QUEUE_LEN(isp); if (icbp->icb_rsltqlen < 1) { isp_prt(isp, ISP_LOGERR, "bad result queue length"); } icbp->icb_rqstaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_rquest_dma); icbp->icb_respaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_result_dma); if (FC_SCRATCH_ACQUIRE(isp, 0)) { isp_prt(isp, ISP_LOGERR, sacq); return; } isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init: fwopt 0x%x xfwopt 0x%x zfwopt 0x%x", icbp->icb_fwoptions, icbp->icb_xfwoptions, icbp->icb_zfwoptions); isp_put_icb(isp, icbp, (isp_icb_t *)fcp->isp_scratch); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "isp_fibre_init", sizeof(*icbp), fcp->isp_scratch); } /* * Init the firmware */ MBSINIT(&mbs, MBOX_INIT_FIRMWARE, MBLOGALL, 30000000); mbs.param[1] = 0; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); isp_prt(isp, ISP_LOGDEBUG0, "INIT F/W from %p (%08x%08x)", fcp->isp_scratch, (uint32_t) ((uint64_t)fcp->isp_scdma >> 32), (uint32_t) fcp->isp_scdma); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, sizeof (*icbp), 0); isp_mboxcmd(isp, &mbs); FC_SCRATCH_RELEASE(isp, 0); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) return; isp->isp_reqidx = 0; isp->isp_reqodx = 0; isp->isp_residx = 0; isp->isp_resodx = 0; /* * Whatever happens, we're now committed to being here. 
*/ isp->isp_state = ISP_RUNSTATE; } static void isp_fibre_init_2400(ispsoftc_t *isp) { fcparam *fcp; isp_icb_2400_t local, *icbp = &local; mbreg_t mbs; int chan; int ownloopid = 0; /* * Check to see whether all channels have *some* kind of role */ for (chan = 0; chan < isp->isp_nchan; chan++) { fcp = FCPARAM(isp, chan); if (fcp->role != ISP_ROLE_NONE) { break; } } if (chan == isp->isp_nchan) { isp_prt(isp, ISP_LOG_WARN1, "all %d channels with role 'none'", chan); return; } isp->isp_state = ISP_INITSTATE; /* * Start with channel 0. */ fcp = FCPARAM(isp, 0); /* * Turn on LIP F8 async event (1) */ MBSINIT(&mbs, MBOX_SET_FIRMWARE_OPTIONS, MBLOGALL, 0); mbs.param[1] = 1; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } ISP_MEMZERO(icbp, sizeof (*icbp)); icbp->icb_fwoptions1 = fcp->isp_fwoptions; icbp->icb_fwoptions2 = fcp->isp_xfwoptions; icbp->icb_fwoptions3 = fcp->isp_zfwoptions; if (isp->isp_nchan > 1 && ISP_CAP_VP0(isp)) { icbp->icb_fwoptions1 &= ~ICB2400_OPT1_INI_DISABLE; icbp->icb_fwoptions1 |= ICB2400_OPT1_TGT_ENABLE; } else { if (fcp->role & ISP_ROLE_TARGET) icbp->icb_fwoptions1 |= ICB2400_OPT1_TGT_ENABLE; else icbp->icb_fwoptions1 &= ~ICB2400_OPT1_TGT_ENABLE; if (fcp->role & ISP_ROLE_INITIATOR) icbp->icb_fwoptions1 &= ~ICB2400_OPT1_INI_DISABLE; else icbp->icb_fwoptions1 |= ICB2400_OPT1_INI_DISABLE; } icbp->icb_version = ICB_VERSION1; icbp->icb_maxfrmlen = DEFAULT_FRAMESIZE(isp); if (icbp->icb_maxfrmlen < ICB_MIN_FRMLEN || icbp->icb_maxfrmlen > ICB_MAX_FRMLEN) { isp_prt(isp, ISP_LOGERR, "bad frame length (%d) from NVRAM- using %d", DEFAULT_FRAMESIZE(isp), ICB_DFLT_FRMLEN); icbp->icb_maxfrmlen = ICB_DFLT_FRMLEN; } icbp->icb_execthrottle = DEFAULT_EXEC_THROTTLE(isp); if (icbp->icb_execthrottle < 1 && !IS_26XX(isp)) { isp_prt(isp, ISP_LOGERR, "bad execution throttle of %d- using %d", DEFAULT_EXEC_THROTTLE(isp), ICB_DFLT_THROTTLE); icbp->icb_execthrottle = ICB_DFLT_THROTTLE; } /* * Set target exchange count. Take half if we are supporting both roles. 
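* With the target role enabled, icb_xchgcnt starts at isp_maxcmds and is
* halved when the initiator role is enabled as well.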
*/ if (icbp->icb_fwoptions1 & ICB2400_OPT1_TGT_ENABLE) { icbp->icb_xchgcnt = isp->isp_maxcmds; if ((icbp->icb_fwoptions1 & ICB2400_OPT1_INI_DISABLE) == 0) icbp->icb_xchgcnt >>= 1; } ownloopid = (isp->isp_confopts & ISP_CFG_OWNLOOPID) != 0; icbp->icb_hardaddr = fcp->isp_loopid; if (icbp->icb_hardaddr >= LOCAL_LOOP_LIM) { icbp->icb_hardaddr = 0; ownloopid = 0; } if (ownloopid) icbp->icb_fwoptions1 |= ICB2400_OPT1_HARD_ADDRESS; if (isp->isp_confopts & ISP_CFG_NOFCTAPE) { icbp->icb_fwoptions2 &= ~ICB2400_OPT2_FCTAPE; } if (isp->isp_confopts & ISP_CFG_FCTAPE) { icbp->icb_fwoptions2 |= ICB2400_OPT2_FCTAPE; } for (chan = 0; chan < isp->isp_nchan; chan++) { if (icbp->icb_fwoptions2 & ICB2400_OPT2_FCTAPE) FCPARAM(isp, chan)->fctape_enabled = 1; else FCPARAM(isp, chan)->fctape_enabled = 0; } switch (isp->isp_confopts & ISP_CFG_PORT_PREF) { case ISP_CFG_LPORT_ONLY: icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK; icbp->icb_fwoptions2 |= ICB2400_OPT2_LOOP_ONLY; break; case ISP_CFG_NPORT_ONLY: icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK; icbp->icb_fwoptions2 |= ICB2400_OPT2_PTP_ONLY; break; case ISP_CFG_NPORT: /* ISP_CFG_PTP_2_LOOP not available in 24XX/25XX */ case ISP_CFG_LPORT: icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK; icbp->icb_fwoptions2 |= ICB2400_OPT2_LOOP_2_PTP; break; default: /* Let NVRAM settings define it if they are sane */ switch (icbp->icb_fwoptions2 & ICB2400_OPT2_TOPO_MASK) { case ICB2400_OPT2_LOOP_ONLY: case ICB2400_OPT2_PTP_ONLY: case ICB2400_OPT2_LOOP_2_PTP: break; default: icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK; icbp->icb_fwoptions2 |= ICB2400_OPT2_LOOP_2_PTP; } break; } switch (icbp->icb_fwoptions2 & ICB2400_OPT2_TIMER_MASK) { case ICB2400_OPT2_ZIO: case ICB2400_OPT2_ZIO1: icbp->icb_idelaytimer = 0; break; case 0: break; default: isp_prt(isp, ISP_LOGWARN, "bad value %x in fwopt2 timer field", icbp->icb_fwoptions2 & ICB2400_OPT2_TIMER_MASK); icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TIMER_MASK; break; } if (IS_26XX(isp)) { /* Use handshake to reduce global lock congestion. 
*/ icbp->icb_fwoptions2 |= ICB2400_OPT2_ENA_IHR; icbp->icb_fwoptions2 |= ICB2400_OPT2_ENA_IHA; } if ((icbp->icb_fwoptions3 & ICB2400_OPT3_RSPSZ_MASK) == 0) { icbp->icb_fwoptions3 |= ICB2400_OPT3_RSPSZ_24; } if (isp->isp_confopts & ISP_CFG_1GB) { icbp->icb_fwoptions3 &= ~ICB2400_OPT3_RATE_MASK; icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_1GB; } else if (isp->isp_confopts & ISP_CFG_2GB) { icbp->icb_fwoptions3 &= ~ICB2400_OPT3_RATE_MASK; icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_2GB; } else if (isp->isp_confopts & ISP_CFG_4GB) { icbp->icb_fwoptions3 &= ~ICB2400_OPT3_RATE_MASK; icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_4GB; } else if (isp->isp_confopts & ISP_CFG_8GB) { icbp->icb_fwoptions3 &= ~ICB2400_OPT3_RATE_MASK; icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_8GB; } else if (isp->isp_confopts & ISP_CFG_16GB) { icbp->icb_fwoptions3 &= ~ICB2400_OPT3_RATE_MASK; icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_16GB; } else if (isp->isp_confopts & ISP_CFG_32GB) { icbp->icb_fwoptions3 &= ~ICB2400_OPT3_RATE_MASK; icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_32GB; } else { switch (icbp->icb_fwoptions3 & ICB2400_OPT3_RATE_MASK) { case ICB2400_OPT3_RATE_4GB: case ICB2400_OPT3_RATE_8GB: case ICB2400_OPT3_RATE_16GB: case ICB2400_OPT3_RATE_32GB: case ICB2400_OPT3_RATE_AUTO: break; case ICB2400_OPT3_RATE_2GB: if (isp->isp_type <= ISP_HA_FC_2500) break; /*FALLTHROUGH*/ case ICB2400_OPT3_RATE_1GB: if (isp->isp_type <= ISP_HA_FC_2400) break; /*FALLTHROUGH*/ default: icbp->icb_fwoptions3 &= ~ICB2400_OPT3_RATE_MASK; icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_AUTO; break; } } if (ownloopid == 0) { icbp->icb_fwoptions3 |= ICB2400_OPT3_SOFTID; } icbp->icb_logintime = ICB_LOGIN_TOV; if (fcp->isp_wwnn && fcp->isp_wwpn) { icbp->icb_fwoptions1 |= ICB2400_OPT1_BOTH_WWNS; MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, fcp->isp_wwpn); MAKE_NODE_NAME_FROM_WWN(icbp->icb_nodename, fcp->isp_wwnn); isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Node 0x%08x%08x Port 0x%08x%08x", ((uint32_t) (fcp->isp_wwnn >> 32)), ((uint32_t) (fcp->isp_wwnn)), ((uint32_t) (fcp->isp_wwpn >> 32)), ((uint32_t) (fcp->isp_wwpn))); } else if (fcp->isp_wwpn) { icbp->icb_fwoptions1 &= ~ICB2400_OPT1_BOTH_WWNS; MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, fcp->isp_wwpn); isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Node to be same as Port 0x%08x%08x", ((uint32_t) (fcp->isp_wwpn >> 32)), ((uint32_t) (fcp->isp_wwpn))); } else { isp_prt(isp, ISP_LOGERR, "No valid WWNs to use"); return; } icbp->icb_retry_count = fcp->isp_retry_count; icbp->icb_rqstqlen = RQUEST_QUEUE_LEN(isp); if (icbp->icb_rqstqlen < 8) { isp_prt(isp, ISP_LOGERR, "bad request queue length %d", icbp->icb_rqstqlen); return; } icbp->icb_rsltqlen = RESULT_QUEUE_LEN(isp); if (icbp->icb_rsltqlen < 8) { isp_prt(isp, ISP_LOGERR, "bad result queue length %d", icbp->icb_rsltqlen); return; } icbp->icb_rqstaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_rquest_dma); icbp->icb_respaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_result_dma); #ifdef ISP_TARGET_MODE /* unconditionally set up the ATIO queue if we support target mode */ icbp->icb_atioqlen = RESULT_QUEUE_LEN(isp); if (icbp->icb_atioqlen < 8) { isp_prt(isp, ISP_LOGERR, "bad ATIO queue length 
%d", icbp->icb_atioqlen); return; } icbp->icb_atioqaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_atioq_dma); icbp->icb_atioqaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_atioq_dma); icbp->icb_atioqaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_atioq_dma); icbp->icb_atioqaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_atioq_dma); isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init_2400: atioq %04x%04x%04x%04x", DMA_WD3(isp->isp_atioq_dma), DMA_WD2(isp->isp_atioq_dma), DMA_WD1(isp->isp_atioq_dma), DMA_WD0(isp->isp_atioq_dma)); #endif if (ISP_CAP_MSIX(isp) && isp->isp_nirq >= 2) { icbp->icb_msixresp = 1; if (IS_26XX(isp) && isp->isp_nirq >= 3) icbp->icb_msixatio = 2; } isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init_2400: fwopt1 0x%x fwopt2 0x%x fwopt3 0x%x", icbp->icb_fwoptions1, icbp->icb_fwoptions2, icbp->icb_fwoptions3); isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init_2400: rqst %04x%04x%04x%04x rsp %04x%04x%04x%04x", DMA_WD3(isp->isp_rquest_dma), DMA_WD2(isp->isp_rquest_dma), DMA_WD1(isp->isp_rquest_dma), DMA_WD0(isp->isp_rquest_dma), DMA_WD3(isp->isp_result_dma), DMA_WD2(isp->isp_result_dma), DMA_WD1(isp->isp_result_dma), DMA_WD0(isp->isp_result_dma)); if (FC_SCRATCH_ACQUIRE(isp, 0)) { isp_prt(isp, ISP_LOGERR, sacq); return; } ISP_MEMZERO(fcp->isp_scratch, ISP_FC_SCRLEN); isp_put_icb_2400(isp, icbp, fcp->isp_scratch); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "isp_fibre_init_2400", sizeof (*icbp), fcp->isp_scratch); } /* * Now fill in information about any additional channels */ if (isp->isp_nchan > 1) { isp_icb_2400_vpinfo_t vpinfo, *vdst; vp_port_info_t pi, *pdst; size_t amt = 0; uint8_t *off; vpinfo.vp_global_options = ICB2400_VPGOPT_GEN_RIDA; if (ISP_CAP_VP0(isp)) { vpinfo.vp_global_options |= ICB2400_VPGOPT_VP0_DECOUPLE; vpinfo.vp_count = isp->isp_nchan; chan = 0; } else { vpinfo.vp_count = isp->isp_nchan - 1; chan = 1; } off = fcp->isp_scratch; off += ICB2400_VPINFO_OFF; vdst = (isp_icb_2400_vpinfo_t *) off; isp_put_icb_2400_vpinfo(isp, &vpinfo, vdst); amt = ICB2400_VPINFO_OFF + sizeof (isp_icb_2400_vpinfo_t); for (; chan < isp->isp_nchan; chan++) { fcparam *fcp2; ISP_MEMZERO(&pi, sizeof (pi)); fcp2 = FCPARAM(isp, chan); if (fcp2->role != ISP_ROLE_NONE) { pi.vp_port_options = ICB2400_VPOPT_ENABLED | ICB2400_VPOPT_ENA_SNSLOGIN; if (fcp2->role & ISP_ROLE_INITIATOR) pi.vp_port_options |= ICB2400_VPOPT_INI_ENABLE; if ((fcp2->role & ISP_ROLE_TARGET) == 0) pi.vp_port_options |= ICB2400_VPOPT_TGT_DISABLE; if (fcp2->isp_loopid < LOCAL_LOOP_LIM) { pi.vp_port_loopid = fcp2->isp_loopid; if (isp->isp_confopts & ISP_CFG_OWNLOOPID) pi.vp_port_options |= ICB2400_VPOPT_HARD_ADDRESS; } } MAKE_NODE_NAME_FROM_WWN(pi.vp_port_portname, fcp2->isp_wwpn); MAKE_NODE_NAME_FROM_WWN(pi.vp_port_nodename, fcp2->isp_wwnn); off = fcp->isp_scratch; if (ISP_CAP_VP0(isp)) off += ICB2400_VPINFO_PORT_OFF(chan); else off += ICB2400_VPINFO_PORT_OFF(chan - 1); pdst = (vp_port_info_t *) off; isp_put_vp_port_info(isp, &pi, pdst); amt += ICB2400_VPOPT_WRITE_SIZE; } if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "isp_fibre_init_2400", amt - ICB2400_VPINFO_OFF, (char *)fcp->isp_scratch + ICB2400_VPINFO_OFF); } } /* * Init the firmware */ MBSINIT(&mbs, 0, MBLOGALL, 30000000); if (isp->isp_nchan > 1) { mbs.param[0] = MBOX_INIT_FIRMWARE_MULTI_ID; } else { mbs.param[0] = MBOX_INIT_FIRMWARE; } mbs.param[1] = 0; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); isp_prt(isp, ISP_LOGDEBUG0, "INIT F/W from %04x%04x%04x%04x", 
DMA_WD3(fcp->isp_scdma), DMA_WD2(fcp->isp_scdma), DMA_WD1(fcp->isp_scdma), DMA_WD0(fcp->isp_scdma)); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, sizeof (*icbp), 0); isp_mboxcmd(isp, &mbs); FC_SCRATCH_RELEASE(isp, 0); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_reqidx = 0; isp->isp_reqodx = 0; isp->isp_residx = 0; isp->isp_resodx = 0; isp->isp_atioodx = 0; /* * Whatever happens, we're now committed to being here. */ isp->isp_state = ISP_RUNSTATE; } static int isp_fc_enable_vp(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); vp_modify_t vp; void *reqp; uint8_t resp[QENTRY_LEN]; /* Build a VP MODIFY command in memory */ ISP_MEMZERO(&vp, sizeof(vp)); vp.vp_mod_hdr.rqs_entry_type = RQSTYPE_VP_MODIFY; vp.vp_mod_hdr.rqs_entry_count = 1; vp.vp_mod_cnt = 1; vp.vp_mod_idx0 = chan; vp.vp_mod_cmd = VP_MODIFY_ENA; vp.vp_mod_ports[0].options = ICB2400_VPOPT_ENABLED | ICB2400_VPOPT_ENA_SNSLOGIN; if (fcp->role & ISP_ROLE_INITIATOR) vp.vp_mod_ports[0].options |= ICB2400_VPOPT_INI_ENABLE; if ((fcp->role & ISP_ROLE_TARGET) == 0) vp.vp_mod_ports[0].options |= ICB2400_VPOPT_TGT_DISABLE; if (fcp->isp_loopid < LOCAL_LOOP_LIM) { vp.vp_mod_ports[0].loopid = fcp->isp_loopid; if (isp->isp_confopts & ISP_CFG_OWNLOOPID) vp.vp_mod_ports[0].options |= ICB2400_VPOPT_HARD_ADDRESS; } MAKE_NODE_NAME_FROM_WWN(vp.vp_mod_ports[0].wwpn, fcp->isp_wwpn); MAKE_NODE_NAME_FROM_WWN(vp.vp_mod_ports[0].wwnn, fcp->isp_wwnn); /* Prepare space for response in memory */ memset(resp, 0xff, sizeof(resp)); vp.vp_mod_hdl = isp_allocate_handle(isp, resp, ISP_HANDLE_CTRL); if (vp.vp_mod_hdl == 0) { isp_prt(isp, ISP_LOGERR, "%s: VP_MODIFY of Chan %d out of handles", __func__, chan); return (EIO); } /* Send request and wait for response. */ reqp = isp_getrqentry(isp); if (reqp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: VP_MODIFY of Chan %d out of rqent", __func__, chan); isp_destroy_handle(isp, vp.vp_mod_hdl); return (EIO); } isp_put_vp_modify(isp, &vp, (vp_modify_t *)reqp); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "IOCB VP_MODIFY", QENTRY_LEN, reqp); ISP_SYNC_REQUEST(isp); if (msleep(resp, &isp->isp_lock, 0, "VP_MODIFY", 5*hz) == EWOULDBLOCK) { isp_prt(isp, ISP_LOGERR, "%s: VP_MODIFY of Chan %d timed out", __func__, chan); isp_destroy_handle(isp, vp.vp_mod_hdl); return (EIO); } if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "IOCB VP_MODIFY response", QENTRY_LEN, resp); isp_get_vp_modify(isp, (vp_modify_t *)resp, &vp); if (vp.vp_mod_hdr.rqs_flags != 0 || vp.vp_mod_status != VP_STS_OK) { isp_prt(isp, ISP_LOGERR, "%s: VP_MODIFY of Chan %d failed with flags %x status %d", __func__, chan, vp.vp_mod_hdr.rqs_flags, vp.vp_mod_status); return (EIO); } return (0); } static int isp_fc_disable_vp(ispsoftc_t *isp, int chan) { vp_ctrl_info_t vp; void *reqp; uint8_t resp[QENTRY_LEN]; /* Build a VP CTRL command in memory */ ISP_MEMZERO(&vp, sizeof(vp)); vp.vp_ctrl_hdr.rqs_entry_type = RQSTYPE_VP_CTRL; vp.vp_ctrl_hdr.rqs_entry_count = 1; if (ISP_CAP_VP0(isp)) { vp.vp_ctrl_status = 1; } else { vp.vp_ctrl_status = 0; chan--; /* VP0 can not be controlled in this case. */ } vp.vp_ctrl_command = VP_CTRL_CMD_DISABLE_VP_LOGO_ALL; vp.vp_ctrl_vp_count = 1; vp.vp_ctrl_idmap[chan / 16] |= (1 << chan % 16); /* Prepare space for response in memory */ memset(resp, 0xff, sizeof(resp)); vp.vp_ctrl_handle = isp_allocate_handle(isp, resp, ISP_HANDLE_CTRL); if (vp.vp_ctrl_handle == 0) { isp_prt(isp, ISP_LOGERR, "%s: VP_CTRL of Chan %d out of handles", __func__, chan); return (EIO); } /* Send request and wait for response. 
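* As with VP_MODIFY above, failure to get a request queue entry or a
* 5 second msleep timeout destroys the handle and returns EIO.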
*/ reqp = isp_getrqentry(isp); if (reqp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: VP_CTRL of Chan %d out of rqent", __func__, chan); isp_destroy_handle(isp, vp.vp_ctrl_handle); return (EIO); } isp_put_vp_ctrl_info(isp, &vp, (vp_ctrl_info_t *)reqp); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "IOCB VP_CTRL", QENTRY_LEN, reqp); ISP_SYNC_REQUEST(isp); if (msleep(resp, &isp->isp_lock, 0, "VP_CTRL", 5*hz) == EWOULDBLOCK) { isp_prt(isp, ISP_LOGERR, "%s: VP_CTRL of Chan %d timed out", __func__, chan); isp_destroy_handle(isp, vp.vp_ctrl_handle); return (EIO); } if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "IOCB VP_CTRL response", QENTRY_LEN, resp); isp_get_vp_ctrl_info(isp, (vp_ctrl_info_t *)resp, &vp); if (vp.vp_ctrl_hdr.rqs_flags != 0 || vp.vp_ctrl_status != 0) { isp_prt(isp, ISP_LOGERR, "%s: VP_CTRL of Chan %d failed with flags %x status %d %d", __func__, chan, vp.vp_ctrl_hdr.rqs_flags, vp.vp_ctrl_status, vp.vp_ctrl_index_fail); return (EIO); } return (0); } static int isp_fc_change_role(ispsoftc_t *isp, int chan, int new_role) { fcparam *fcp = FCPARAM(isp, chan); int i, was, res = 0; if (chan >= isp->isp_nchan) { isp_prt(isp, ISP_LOGWARN, "%s: bad channel %d", __func__, chan); return (ENXIO); } if (fcp->role == new_role) return (0); for (was = 0, i = 0; i < isp->isp_nchan; i++) { if (FCPARAM(isp, i)->role != ISP_ROLE_NONE) was++; } if (was == 0 || (was == 1 && fcp->role != ISP_ROLE_NONE)) { fcp->role = new_role; return (isp_reinit(isp, 0)); } if (fcp->role != ISP_ROLE_NONE) { res = isp_fc_disable_vp(isp, chan); isp_clear_portdb(isp, chan); } fcp->role = new_role; if (fcp->role != ISP_ROLE_NONE) res = isp_fc_enable_vp(isp, chan); return (res); } static void isp_clear_portdb(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; int i; for (i = 0; i < MAX_FC_TARG; i++) { lp = &fcp->portdb[i]; switch (lp->state) { case FC_PORTDB_STATE_DEAD: case FC_PORTDB_STATE_CHANGED: case FC_PORTDB_STATE_VALID: lp->state = FC_PORTDB_STATE_NIL; isp_async(isp, ISPASYNC_DEV_GONE, chan, lp); break; case FC_PORTDB_STATE_NIL: case FC_PORTDB_STATE_NEW: lp->state = FC_PORTDB_STATE_NIL; break; case FC_PORTDB_STATE_ZOMBIE: break; default: panic("Don't know how to clear state %d\n", lp->state); } } } static void isp_mark_portdb(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; int i; for (i = 0; i < MAX_FC_TARG; i++) { lp = &fcp->portdb[i]; if (lp->state == FC_PORTDB_STATE_NIL) continue; if (lp->portid >= DOMAIN_CONTROLLER_BASE && lp->portid <= DOMAIN_CONTROLLER_END) continue; fcp->portdb[i].probational = 1; } } /* * Perform an IOCB PLOGI or LOGO via EXECUTE IOCB A64 for 24XX cards * or via FABRIC LOGIN/FABRIC LOGOUT for other cards. */ static int isp_plogx(ispsoftc_t *isp, int chan, uint16_t handle, uint32_t portid, int flags) { isp_plogx_t pl; void *reqp; uint8_t resp[QENTRY_LEN]; uint32_t sst, parm1; int rval, lev; const char *msg; char buf[64]; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d PLOGX %s PortID 0x%06x nphdl 0x%x", chan, (flags & PLOGX_FLG_CMD_MASK) == PLOGX_FLG_CMD_PLOGI ? 
"Login":"Logout", portid, handle); if (!IS_24XX(isp)) { int action = flags & PLOGX_FLG_CMD_MASK; if (action == PLOGX_FLG_CMD_PLOGI) { return (isp_port_login(isp, handle, portid)); } else if (action == PLOGX_FLG_CMD_LOGO) { return (isp_port_logout(isp, handle, portid)); } else { return (MBOX_INVALID_COMMAND); } } ISP_MEMZERO(&pl, sizeof(pl)); pl.plogx_header.rqs_entry_count = 1; pl.plogx_header.rqs_entry_type = RQSTYPE_LOGIN; pl.plogx_nphdl = handle; pl.plogx_vphdl = chan; pl.plogx_portlo = portid; pl.plogx_rspsz_porthi = (portid >> 16) & 0xff; pl.plogx_flags = flags; /* Prepare space for response in memory */ memset(resp, 0xff, sizeof(resp)); pl.plogx_handle = isp_allocate_handle(isp, resp, ISP_HANDLE_CTRL); if (pl.plogx_handle == 0) { isp_prt(isp, ISP_LOGERR, "%s: PLOGX of Chan %d out of handles", __func__, chan); return (-1); } /* Send request and wait for response. */ reqp = isp_getrqentry(isp); if (reqp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: PLOGX of Chan %d out of rqent", __func__, chan); isp_destroy_handle(isp, pl.plogx_handle); return (-1); } isp_put_plogx(isp, &pl, (isp_plogx_t *)reqp); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "IOCB LOGX", QENTRY_LEN, reqp); FCPARAM(isp, chan)->isp_login_hdl = handle; ISP_SYNC_REQUEST(isp); if (msleep(resp, &isp->isp_lock, 0, "PLOGX", 3 * ICB_LOGIN_TOV * hz) == EWOULDBLOCK) { isp_prt(isp, ISP_LOGERR, "%s: PLOGX of Chan %d timed out", __func__, chan); isp_destroy_handle(isp, pl.plogx_handle); return (-1); } FCPARAM(isp, chan)->isp_login_hdl = NIL_HANDLE; if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "IOCB LOGX response", QENTRY_LEN, resp); isp_get_plogx(isp, (isp_plogx_t *)resp, &pl); if (pl.plogx_status == PLOGX_STATUS_OK) { return (0); } else if (pl.plogx_status != PLOGX_STATUS_IOCBERR) { isp_prt(isp, ISP_LOGWARN, "status 0x%x on port login IOCB channel %d", pl.plogx_status, chan); return (-1); } sst = pl.plogx_ioparm[0].lo16 | (pl.plogx_ioparm[0].hi16 << 16); parm1 = pl.plogx_ioparm[1].lo16 | (pl.plogx_ioparm[1].hi16 << 16); rval = -1; lev = ISP_LOGERR; msg = NULL; switch (sst) { case PLOGX_IOCBERR_NOLINK: msg = "no link"; break; case PLOGX_IOCBERR_NOIOCB: msg = "no IOCB buffer"; break; case PLOGX_IOCBERR_NOXGHG: msg = "no Exchange Control Block"; break; case PLOGX_IOCBERR_FAILED: ISP_SNPRINTF(buf, sizeof (buf), "reason 0x%x (last LOGIN state 0x%x)", parm1 & 0xff, (parm1 >> 8) & 0xff); msg = buf; break; case PLOGX_IOCBERR_NOFABRIC: msg = "no fabric"; break; case PLOGX_IOCBERR_NOTREADY: msg = "firmware not ready"; break; case PLOGX_IOCBERR_NOLOGIN: ISP_SNPRINTF(buf, sizeof (buf), "not logged in (last state 0x%x)", parm1); msg = buf; rval = MBOX_NOT_LOGGED_IN; break; case PLOGX_IOCBERR_REJECT: ISP_SNPRINTF(buf, sizeof (buf), "LS_RJT = 0x%x", parm1); msg = buf; break; case PLOGX_IOCBERR_NOPCB: msg = "no PCB allocated"; break; case PLOGX_IOCBERR_EINVAL: ISP_SNPRINTF(buf, sizeof (buf), "invalid parameter at offset 0x%x", parm1); msg = buf; break; case PLOGX_IOCBERR_PORTUSED: lev = ISP_LOG_SANCFG|ISP_LOG_WARN1; ISP_SNPRINTF(buf, sizeof (buf), "already logged in with N-Port handle 0x%x", parm1); msg = buf; rval = MBOX_PORT_ID_USED | (parm1 << 16); break; case PLOGX_IOCBERR_HNDLUSED: lev = ISP_LOG_SANCFG|ISP_LOG_WARN1; ISP_SNPRINTF(buf, sizeof (buf), "handle already used for PortID 0x%06x", parm1); msg = buf; rval = MBOX_LOOP_ID_USED; break; case PLOGX_IOCBERR_NOHANDLE: msg = "no handle allocated"; break; case PLOGX_IOCBERR_NOFLOGI: msg = "no FLOGI_ACC"; break; default: ISP_SNPRINTF(buf, sizeof (buf), "status %x from %x", 
pl.plogx_status, flags); msg = buf; break; } if (msg) { isp_prt(isp, lev, "Chan %d PLOGX PortID 0x%06x to N-Port handle 0x%x: %s", chan, portid, handle, msg); } return (rval); } static int isp_port_login(ispsoftc_t *isp, uint16_t handle, uint32_t portid) { mbreg_t mbs; MBSINIT(&mbs, MBOX_FABRIC_LOGIN, MBLOGNONE, 500000); if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = handle; mbs.ibits = (1 << 10); } else { mbs.param[1] = handle << 8; } mbs.param[2] = portid >> 16; mbs.param[3] = portid; mbs.logval = MBLOGNONE; mbs.timeout = 500000; isp_mboxcmd(isp, &mbs); switch (mbs.param[0]) { case MBOX_PORT_ID_USED: isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "isp_port_login: portid 0x%06x already logged in as 0x%x", portid, mbs.param[1]); return (MBOX_PORT_ID_USED | (mbs.param[1] << 16)); case MBOX_LOOP_ID_USED: isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "isp_port_login: handle 0x%x in use for port id 0x%02xXXXX", handle, mbs.param[1] & 0xff); return (MBOX_LOOP_ID_USED); case MBOX_COMMAND_COMPLETE: return (0); case MBOX_COMMAND_ERROR: isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "isp_port_login: error 0x%x in PLOGI to port 0x%06x", mbs.param[1], portid); return (MBOX_COMMAND_ERROR); case MBOX_ALL_IDS_USED: isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "isp_port_login: all IDs used for fabric login"); return (MBOX_ALL_IDS_USED); default: isp_prt(isp, ISP_LOG_SANCFG, "isp_port_login: error 0x%x on port login of 0x%06x@0x%0x", mbs.param[0], portid, handle); return (mbs.param[0]); } } /* * Pre-24XX fabric port logout * * Note that portid is not used */ static int isp_port_logout(ispsoftc_t *isp, uint16_t handle, uint32_t portid) { mbreg_t mbs; MBSINIT(&mbs, MBOX_FABRIC_LOGOUT, MBLOGNONE, 500000); if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = handle; mbs.ibits = (1 << 10); } else { mbs.param[1] = handle << 8; } isp_mboxcmd(isp, &mbs); return (mbs.param[0] == MBOX_COMMAND_COMPLETE? 
0 : mbs.param[0]); } static int isp_getpdb(ispsoftc_t *isp, int chan, uint16_t id, isp_pdb_t *pdb) { mbreg_t mbs; union { isp_pdb_21xx_t fred; isp_pdb_24xx_t bill; } un; MBSINIT(&mbs, MBOX_GET_PORT_DB, MBLOGALL & ~MBLOGMASK(MBOX_COMMAND_PARAM_ERROR), 250000); if (IS_24XX(isp)) { mbs.ibits = (1 << 9)|(1 << 10); mbs.param[1] = id; mbs.param[9] = chan; } else if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = id; } else { mbs.param[1] = id << 8; } mbs.param[2] = DMA_WD1(isp->isp_iocb_dma); mbs.param[3] = DMA_WD0(isp->isp_iocb_dma); mbs.param[6] = DMA_WD3(isp->isp_iocb_dma); mbs.param[7] = DMA_WD2(isp->isp_iocb_dma); MEMORYBARRIER(isp, SYNC_IFORDEV, 0, sizeof(un), chan); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) return (mbs.param[0] | (mbs.param[1] << 16)); MEMORYBARRIER(isp, SYNC_IFORCPU, 0, sizeof(un), chan); if (IS_24XX(isp)) { isp_get_pdb_24xx(isp, isp->isp_iocb, &un.bill); pdb->handle = un.bill.pdb_handle; pdb->prli_word0 = un.bill.pdb_prli_svc0; pdb->prli_word3 = un.bill.pdb_prli_svc3; pdb->portid = BITS2WORD_24XX(un.bill.pdb_portid_bits); ISP_MEMCPY(pdb->portname, un.bill.pdb_portname, 8); ISP_MEMCPY(pdb->nodename, un.bill.pdb_nodename, 8); isp_prt(isp, ISP_LOGDEBUG0, "Chan %d handle 0x%x Port 0x%06x flags 0x%x curstate %x laststate %x", chan, id, pdb->portid, un.bill.pdb_flags, un.bill.pdb_curstate, un.bill.pdb_laststate); if (un.bill.pdb_curstate < PDB2400_STATE_PLOGI_DONE || un.bill.pdb_curstate > PDB2400_STATE_LOGGED_IN) { mbs.param[0] = MBOX_NOT_LOGGED_IN; return (mbs.param[0]); } } else { isp_get_pdb_21xx(isp, isp->isp_iocb, &un.fred); pdb->handle = un.fred.pdb_loopid; pdb->prli_word0 = un.fred.pdb_prli_svc0; pdb->prli_word3 = un.fred.pdb_prli_svc3; pdb->portid = BITS2WORD(un.fred.pdb_portid_bits); ISP_MEMCPY(pdb->portname, un.fred.pdb_portname, 8); ISP_MEMCPY(pdb->nodename, un.fred.pdb_nodename, 8); isp_prt(isp, ISP_LOGDEBUG1, "Chan %d handle 0x%x Port 0x%06x", chan, id, pdb->portid); } return (0); } static int isp_gethandles(ispsoftc_t *isp, int chan, uint16_t *handles, int *num, int loop) { fcparam *fcp = FCPARAM(isp, chan); mbreg_t mbs; isp_pnhle_21xx_t el1, *elp1; isp_pnhle_23xx_t el3, *elp3; isp_pnhle_24xx_t el4, *elp4; int i, j; uint32_t p; uint16_t h; MBSINIT(&mbs, MBOX_GET_ID_LIST, MBLOGALL, 250000); if (IS_24XX(isp)) { mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); mbs.param[8] = ISP_FC_SCRLEN; mbs.param[9] = chan; } else { mbs.ibits = (1 << 1)|(1 << 2)|(1 << 3)|(1 << 6); mbs.param[1] = DMA_WD1(fcp->isp_scdma); mbs.param[2] = DMA_WD0(fcp->isp_scdma); mbs.param[3] = DMA_WD3(fcp->isp_scdma); mbs.param[6] = DMA_WD2(fcp->isp_scdma); } if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } MEMORYBARRIER(isp, SYNC_SFORDEV, 0, ISP_FC_SCRLEN, chan); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { FC_SCRATCH_RELEASE(isp, chan); return (mbs.param[0] | (mbs.param[1] << 16)); } MEMORYBARRIER(isp, SYNC_SFORCPU, 0, ISP_FC_SCRLEN, chan); elp1 = fcp->isp_scratch; elp3 = fcp->isp_scratch; elp4 = fcp->isp_scratch; for (i = 0, j = 0; i < mbs.param[1] && j < *num; i++) { if (IS_24XX(isp)) { isp_get_pnhle_24xx(isp, &elp4[i], &el4); p = el4.pnhle_port_id_lo | (el4.pnhle_port_id_hi << 16); h = el4.pnhle_handle; } else if (IS_23XX(isp)) { isp_get_pnhle_23xx(isp, &elp3[i], &el3); p = el3.pnhle_port_id_lo | (el3.pnhle_port_id_hi << 16); h = el3.pnhle_handle; } else { /* 21xx */ isp_get_pnhle_21xx(isp, &elp1[i], &el1); p 
= el1.pnhle_port_id_lo | ((el1.pnhle_port_id_hi_handle & 0xff) << 16); h = el1.pnhle_port_id_hi_handle >> 8; } if (loop && (p >> 8) != (fcp->isp_portid >> 8)) continue; handles[j++] = h; } *num = j; FC_SCRATCH_RELEASE(isp, chan); return (0); } static void isp_dump_chip_portdb(ispsoftc_t *isp, int chan) { isp_pdb_t pdb; uint16_t lim, nphdl; isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGINFO, "Chan %d chip port dump", chan); if (ISP_CAP_2KLOGIN(isp)) { lim = NPH_MAX_2K; } else { lim = NPH_MAX; } for (nphdl = 0; nphdl != lim; nphdl++) { if (isp_getpdb(isp, chan, nphdl, &pdb)) { continue; } isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGINFO, "Chan %d Handle 0x%04x " "PortID 0x%06x WWPN 0x%02x%02x%02x%02x%02x%02x%02x%02x", chan, nphdl, pdb.portid, pdb.portname[0], pdb.portname[1], pdb.portname[2], pdb.portname[3], pdb.portname[4], pdb.portname[5], pdb.portname[6], pdb.portname[7]); } } static uint64_t isp_get_wwn(ispsoftc_t *isp, int chan, int nphdl, int nodename) { uint64_t wwn = INI_NONE; mbreg_t mbs; MBSINIT(&mbs, MBOX_GET_PORT_NAME, MBLOGALL & ~MBLOGMASK(MBOX_COMMAND_PARAM_ERROR), 500000); if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = nphdl; if (nodename) { mbs.param[10] = 1; } mbs.param[9] = chan; } else { mbs.ibitm = 3; mbs.param[1] = nphdl << 8; if (nodename) { mbs.param[1] |= 1; } } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return (wwn); } if (IS_24XX(isp)) { wwn = (((uint64_t)(mbs.param[2] >> 8)) << 56) | (((uint64_t)(mbs.param[2] & 0xff)) << 48) | (((uint64_t)(mbs.param[3] >> 8)) << 40) | (((uint64_t)(mbs.param[3] & 0xff)) << 32) | (((uint64_t)(mbs.param[6] >> 8)) << 24) | (((uint64_t)(mbs.param[6] & 0xff)) << 16) | (((uint64_t)(mbs.param[7] >> 8)) << 8) | (((uint64_t)(mbs.param[7] & 0xff))); } else { wwn = (((uint64_t)(mbs.param[2] & 0xff)) << 56) | (((uint64_t)(mbs.param[2] >> 8)) << 48) | (((uint64_t)(mbs.param[3] & 0xff)) << 40) | (((uint64_t)(mbs.param[3] >> 8)) << 32) | (((uint64_t)(mbs.param[6] & 0xff)) << 24) | (((uint64_t)(mbs.param[6] >> 8)) << 16) | (((uint64_t)(mbs.param[7] & 0xff)) << 8) | (((uint64_t)(mbs.param[7] >> 8))); } return (wwn); } /* * Make sure we have good FC link. */ static int isp_fclink_test(ispsoftc_t *isp, int chan, int usdelay) { mbreg_t mbs; int i, r; uint16_t nphdl; fcparam *fcp; isp_pdb_t pdb; NANOTIME_T hra, hrb; fcp = FCPARAM(isp, chan); if (fcp->isp_loopstate < LOOP_HAVE_LINK) return (-1); if (fcp->isp_loopstate >= LOOP_LTEST_DONE) return (0); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC link test", chan); /* * Wait up to N microseconds for F/W to go to a ready state. */ GET_NANOTIME(&hra); while (1) { isp_change_fw_state(isp, chan, isp_fw_state(isp, chan)); if (fcp->isp_fwstate == FW_READY) { break; } if (fcp->isp_loopstate < LOOP_HAVE_LINK) goto abort; GET_NANOTIME(&hrb); if ((NANOTIME_SUB(&hrb, &hra) / 1000 + 1000 >= usdelay)) break; ISP_SLEEP(isp, 1000); } if (fcp->isp_fwstate != FW_READY) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Firmware is not ready (%s)", chan, isp_fc_fw_statename(fcp->isp_fwstate)); return (-1); } /* * Get our Loop ID and Port ID. */ MBSINIT(&mbs, MBOX_GET_LOOP_ID, MBLOGALL, 0); mbs.param[9] = chan; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return (-1); } if (IS_2100(isp)) { /* * Don't bother with fabric if we are using really old * 2100 firmware. It's just not worth it. 
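* Firmware newer than 1.15.37 is trusted for FL_PORT topology; anything
* older is treated as a private loop (NL_PORT).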
*/ if (ISP_FW_NEWER_THAN(isp, 1, 15, 37)) fcp->isp_topo = TOPO_FL_PORT; else fcp->isp_topo = TOPO_NL_PORT; } else { int topo = (int) mbs.param[6]; if (topo < TOPO_NL_PORT || topo > TOPO_PTP_STUB) { topo = TOPO_PTP_STUB; } fcp->isp_topo = topo; } fcp->isp_portid = mbs.param[2] | (mbs.param[3] << 16); if (!TOPO_IS_FABRIC(fcp->isp_topo)) { fcp->isp_loopid = mbs.param[1] & 0xff; } else if (fcp->isp_topo != TOPO_F_PORT) { uint8_t alpa = fcp->isp_portid; for (i = 0; alpa_map[i]; i++) { if (alpa_map[i] == alpa) break; } if (alpa_map[i]) fcp->isp_loopid = i; } #if 0 fcp->isp_loopstate = LOOP_HAVE_ADDR; #endif fcp->isp_loopstate = LOOP_TESTING_LINK; if (fcp->isp_topo == TOPO_F_PORT || fcp->isp_topo == TOPO_FL_PORT) { nphdl = IS_24XX(isp) ? NPH_FL_ID : FL_ID; r = isp_getpdb(isp, chan, nphdl, &pdb); if (r != 0 || pdb.portid == 0) { if (IS_2100(isp)) { fcp->isp_topo = TOPO_NL_PORT; } else { isp_prt(isp, ISP_LOGWARN, "fabric topology, but cannot get info about fabric controller (0x%x)", r); fcp->isp_topo = TOPO_PTP_STUB; } goto not_on_fabric; } if (IS_24XX(isp)) { fcp->isp_fabric_params = mbs.param[7]; fcp->isp_sns_hdl = NPH_SNS_ID; r = isp_register_fc4_type(isp, chan); if (fcp->isp_loopstate < LOOP_TESTING_LINK) goto abort; if (r != 0) goto not_on_fabric; r = isp_register_fc4_features_24xx(isp, chan); if (fcp->isp_loopstate < LOOP_TESTING_LINK) goto abort; if (r != 0) goto not_on_fabric; r = isp_register_port_name_24xx(isp, chan); if (fcp->isp_loopstate < LOOP_TESTING_LINK) goto abort; if (r != 0) goto not_on_fabric; isp_register_node_name_24xx(isp, chan); if (fcp->isp_loopstate < LOOP_TESTING_LINK) goto abort; } else { fcp->isp_sns_hdl = SNS_ID; r = isp_register_fc4_type(isp, chan); if (r != 0) goto not_on_fabric; if (fcp->role == ISP_ROLE_TARGET) isp_send_change_request(isp, chan); } } not_on_fabric: /* Get link speed. */ fcp->isp_gbspeed = 1; if (IS_23XX(isp) || IS_24XX(isp)) { MBSINIT(&mbs, MBOX_GET_SET_DATA_RATE, MBLOGALL, 3000000); mbs.param[1] = MBGSD_GET_RATE; /* mbs.param[2] undefined if we're just getting rate */ isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { if (mbs.param[1] == MBGSD_10GB) fcp->isp_gbspeed = 10; else if (mbs.param[1] == MBGSD_32GB) fcp->isp_gbspeed = 32; else if (mbs.param[1] == MBGSD_16GB) fcp->isp_gbspeed = 16; else if (mbs.param[1] == MBGSD_8GB) fcp->isp_gbspeed = 8; else if (mbs.param[1] == MBGSD_4GB) fcp->isp_gbspeed = 4; else if (mbs.param[1] == MBGSD_2GB) fcp->isp_gbspeed = 2; else if (mbs.param[1] == MBGSD_1GB) fcp->isp_gbspeed = 1; } } if (fcp->isp_loopstate < LOOP_TESTING_LINK) { abort: isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC link test aborted", chan); return (1); } fcp->isp_loopstate = LOOP_LTEST_DONE; isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGCONFIG, "Chan %d WWPN %016jx WWNN %016jx", chan, (uintmax_t)fcp->isp_wwpn, (uintmax_t)fcp->isp_wwnn); isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGCONFIG, "Chan %d %dGb %s PortID 0x%06x LoopID 0x%02x", chan, fcp->isp_gbspeed, isp_fc_toponame(fcp), fcp->isp_portid, fcp->isp_loopid); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC link test done", chan); return (0); } /* * Complete the synchronization of our Port Database. * * At this point, we've scanned the local loop (if any) and the fabric * and performed fabric logins on all new devices. * * Our task here is to go through our port database removing any entities * that are still marked probational (issuing PLOGO for ones which we had * PLOGI'd into) or are dead, and notifying upper layers about new/changed * devices. 
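* The state transitions below are: DEAD -> NIL (with an implicit LOGO for
* fabric devices), NEW -> VALID (device arrived), CHANGED -> VALID (device
* changed), VALID -> VALID (device stayed); ZOMBIE entries are left alone.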
*/ static int isp_pdb_sync(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; uint16_t dbidx; if (fcp->isp_loopstate < LOOP_FSCAN_DONE) return (-1); if (fcp->isp_loopstate >= LOOP_READY) return (0); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC PDB sync", chan); fcp->isp_loopstate = LOOP_SYNCING_PDB; for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { lp = &fcp->portdb[dbidx]; if (lp->state == FC_PORTDB_STATE_NIL) continue; if (lp->probational && lp->state != FC_PORTDB_STATE_ZOMBIE) lp->state = FC_PORTDB_STATE_DEAD; switch (lp->state) { case FC_PORTDB_STATE_DEAD: lp->state = FC_PORTDB_STATE_NIL; isp_async(isp, ISPASYNC_DEV_GONE, chan, lp); if ((lp->portid & 0xffff00) != 0) { (void) isp_plogx(isp, chan, lp->handle, lp->portid, PLOGX_FLG_CMD_LOGO | PLOGX_FLG_IMPLICIT | PLOGX_FLG_FREE_NPHDL); } /* * Note that we might come out of this with our state * set to FC_PORTDB_STATE_ZOMBIE. */ break; case FC_PORTDB_STATE_NEW: lp->state = FC_PORTDB_STATE_VALID; isp_async(isp, ISPASYNC_DEV_ARRIVED, chan, lp); break; case FC_PORTDB_STATE_CHANGED: lp->state = FC_PORTDB_STATE_VALID; isp_async(isp, ISPASYNC_DEV_CHANGED, chan, lp); lp->portid = lp->new_portid; lp->prli_word0 = lp->new_prli_word0; lp->prli_word3 = lp->new_prli_word3; break; case FC_PORTDB_STATE_VALID: isp_async(isp, ISPASYNC_DEV_STAYED, chan, lp); break; case FC_PORTDB_STATE_ZOMBIE: break; default: isp_prt(isp, ISP_LOGWARN, "isp_pdb_sync: state %d for idx %d", lp->state, dbidx); isp_dump_portdb(isp, chan); } } if (fcp->isp_loopstate < LOOP_SYNCING_PDB) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC PDB sync aborted", chan); return (1); } fcp->isp_loopstate = LOOP_READY; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC PDB sync done", chan); return (0); } static void isp_pdb_add_update(ispsoftc_t *isp, int chan, isp_pdb_t *pdb) { fcportdb_t *lp; uint64_t wwnn, wwpn; MAKE_WWN_FROM_NODE_NAME(wwnn, pdb->nodename); MAKE_WWN_FROM_NODE_NAME(wwpn, pdb->portname); /* Search port database for the same WWPN. */ if (isp_find_pdb_by_wwpn(isp, chan, wwpn, &lp)) { if (!lp->probational) { isp_prt(isp, ISP_LOGERR, "Chan %d Port 0x%06x@0x%04x [%d] is not probational (0x%x)", chan, lp->portid, lp->handle, FC_PORTDB_TGT(isp, chan, lp), lp->state); isp_dump_portdb(isp, chan); return; } lp->probational = 0; lp->node_wwn = wwnn; /* Old device, nothing new. */ if (lp->portid == pdb->portid && lp->handle == pdb->handle && lp->prli_word3 == pdb->prli_word3 && ((pdb->prli_word0 & PRLI_WD0_EST_IMAGE_PAIR) == (lp->prli_word0 & PRLI_WD0_EST_IMAGE_PAIR))) { if (lp->state != FC_PORTDB_STATE_NEW) lp->state = FC_PORTDB_STATE_VALID; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port 0x%06x@0x%04x is valid", chan, pdb->portid, pdb->handle); return; } /* Something has changed. */ lp->state = FC_PORTDB_STATE_CHANGED; lp->handle = pdb->handle; lp->new_portid = pdb->portid; lp->new_prli_word0 = pdb->prli_word0; lp->new_prli_word3 = pdb->prli_word3; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port 0x%06x@0x%04x is changed", chan, pdb->portid, pdb->handle); return; } /* It seems like a new port. Find an empty slot for it. 
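* If the port database is full the new device is dropped with an error
* message.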
*/ if (!isp_find_pdb_empty(isp, chan, &lp)) { isp_prt(isp, ISP_LOGERR, "Chan %d out of portdb entries", chan); return; } ISP_MEMZERO(lp, sizeof (fcportdb_t)); lp->probational = 0; lp->state = FC_PORTDB_STATE_NEW; lp->portid = lp->new_portid = pdb->portid; lp->prli_word0 = lp->new_prli_word0 = pdb->prli_word0; lp->prli_word3 = lp->new_prli_word3 = pdb->prli_word3; lp->handle = pdb->handle; lp->port_wwn = wwpn; lp->node_wwn = wwnn; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port 0x%06x@0x%04x is new", chan, pdb->portid, pdb->handle); } /* * Fix port IDs for logged-in initiators on pre-2400 chips. * For those chips we are not receiving login events, adding initiators * based on ATIO requests, but there is no port ID in that structure. */ static void isp_fix_portids(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); isp_pdb_t pdb; uint64_t wwpn; int i, r; for (i = 0; i < MAX_FC_TARG; i++) { fcportdb_t *lp = &fcp->portdb[i]; if (lp->state == FC_PORTDB_STATE_NIL || lp->state == FC_PORTDB_STATE_ZOMBIE) continue; if (VALID_PORT(lp->portid)) continue; r = isp_getpdb(isp, chan, lp->handle, &pdb); if (fcp->isp_loopstate < LOOP_SCANNING_LOOP) return; if (r != 0) { isp_prt(isp, ISP_LOGDEBUG1, "Chan %d FC Scan Loop handle %d returned %x", chan, lp->handle, r); continue; } MAKE_WWN_FROM_NODE_NAME(wwpn, pdb.portname); if (lp->port_wwn != wwpn) continue; lp->portid = lp->new_portid = pdb.portid; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port 0x%06x@0x%04x is fixed", chan, pdb.portid, pdb.handle); } } /* * Scan local loop for devices. */ static int isp_scan_loop(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); int idx, lim, r; isp_pdb_t pdb; uint16_t *handles; uint16_t handle; if (fcp->isp_loopstate < LOOP_LTEST_DONE) return (-1); if (fcp->isp_loopstate >= LOOP_LSCAN_DONE) return (0); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC loop scan", chan); fcp->isp_loopstate = LOOP_SCANNING_LOOP; if (TOPO_IS_FABRIC(fcp->isp_topo)) { if (!IS_24XX(isp)) { isp_fix_portids(isp, chan); if (fcp->isp_loopstate < LOOP_SCANNING_LOOP) goto abort; } isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC loop scan done (no loop)", chan); fcp->isp_loopstate = LOOP_LSCAN_DONE; return (0); } handles = (uint16_t *)fcp->isp_scanscratch; lim = ISP_FC_SCRLEN / 2; r = isp_gethandles(isp, chan, handles, &lim, 1); if (r != 0) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Getting list of handles failed with %x", chan, r); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC loop scan done (bad)", chan); return (-1); } isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Got %d handles", chan, lim); /* * Run through the list and get the port database info for each one. */ isp_mark_portdb(isp, chan); for (idx = 0; idx < lim; idx++) { handle = handles[idx]; /* * Don't scan "special" ids. */ if (ISP_CAP_2KLOGIN(isp)) { if (handle >= NPH_RESERVED) continue; } else { if (handle >= FL_ID && handle <= SNS_ID) continue; } /* * In older cards with older f/w GET_PORT_DATABASE has been * known to hang. This trick gets around that problem. */ if (IS_2100(isp) || IS_2200(isp)) { uint64_t node_wwn = isp_get_wwn(isp, chan, handle, 1); if (fcp->isp_loopstate < LOOP_SCANNING_LOOP) { abort: isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC loop scan aborted", chan); return (1); } if (node_wwn == INI_NONE) { continue; } } /* * Get the port database entity for this index. 
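* A failing GET PORT DATABASE for one handle is not fatal; that handle is
* simply skipped.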
*/ r = isp_getpdb(isp, chan, handle, &pdb); if (fcp->isp_loopstate < LOOP_SCANNING_LOOP) goto abort; if (r != 0) { isp_prt(isp, ISP_LOGDEBUG1, "Chan %d FC Scan Loop handle %d returned %x", chan, handle, r); continue; } isp_pdb_add_update(isp, chan, &pdb); } if (fcp->isp_loopstate < LOOP_SCANNING_LOOP) goto abort; fcp->isp_loopstate = LOOP_LSCAN_DONE; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC loop scan done", chan); return (0); } static int isp_ct_sns(ispsoftc_t *isp, int chan, uint32_t cmd_bcnt, uint32_t rsp_bcnt) { fcparam *fcp = FCPARAM(isp, chan); mbreg_t mbs; if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "CT SNS request", cmd_bcnt, fcp->isp_scratch); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, cmd_bcnt, chan); MBSINIT(&mbs, MBOX_SEND_SNS, MBLOGALL, 10000000); mbs.param[1] = cmd_bcnt >> 1; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { if (mbs.param[0] == MBOX_INVALID_COMMAND) { return (1); } else { return (-1); } } MEMORYBARRIER(isp, SYNC_SFORCPU, 0, rsp_bcnt, chan); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "CT response", rsp_bcnt, fcp->isp_scratch); return (0); } static int isp_ct_passthru(ispsoftc_t *isp, int chan, uint32_t cmd_bcnt, uint32_t rsp_bcnt) { fcparam *fcp = FCPARAM(isp, chan); isp_ct_pt_t pt; void *reqp; uint8_t resp[QENTRY_LEN]; if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "CT request", cmd_bcnt, fcp->isp_scratch); /* * Build a Passthrough IOCB in memory. */ ISP_MEMZERO(&pt, sizeof(pt)); pt.ctp_header.rqs_entry_count = 1; pt.ctp_header.rqs_entry_type = RQSTYPE_CT_PASSTHRU; pt.ctp_nphdl = fcp->isp_sns_hdl; pt.ctp_cmd_cnt = 1; pt.ctp_vpidx = ISP_GET_VPIDX(isp, chan); pt.ctp_time = 10; pt.ctp_rsp_cnt = 1; pt.ctp_rsp_bcnt = rsp_bcnt; pt.ctp_cmd_bcnt = cmd_bcnt; pt.ctp_dataseg[0].ds_base = DMA_LO32(fcp->isp_scdma); pt.ctp_dataseg[0].ds_basehi = DMA_HI32(fcp->isp_scdma); pt.ctp_dataseg[0].ds_count = cmd_bcnt; pt.ctp_dataseg[1].ds_base = DMA_LO32(fcp->isp_scdma); pt.ctp_dataseg[1].ds_basehi = DMA_HI32(fcp->isp_scdma); pt.ctp_dataseg[1].ds_count = rsp_bcnt; /* Prepare space for response in memory */ memset(resp, 0xff, sizeof(resp)); pt.ctp_handle = isp_allocate_handle(isp, resp, ISP_HANDLE_CTRL); if (pt.ctp_handle == 0) { isp_prt(isp, ISP_LOGERR, "%s: CTP of Chan %d out of handles", __func__, chan); return (-1); } /* Send request and wait for response. 
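* The msleep below waits on the response buffer for up to ctp_time (10)
* seconds before destroying the handle and giving up.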
*/ reqp = isp_getrqentry(isp); if (reqp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: CTP of Chan %d out of rqent", __func__, chan); isp_destroy_handle(isp, pt.ctp_handle); return (-1); } isp_put_ct_pt(isp, &pt, (isp_ct_pt_t *)reqp); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "CT IOCB request", QENTRY_LEN, reqp); ISP_SYNC_REQUEST(isp); if (msleep(resp, &isp->isp_lock, 0, "CTP", pt.ctp_time*hz) == EWOULDBLOCK) { isp_prt(isp, ISP_LOGERR, "%s: CTP of Chan %d timed out", __func__, chan); isp_destroy_handle(isp, pt.ctp_handle); return (-1); } if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "CT IOCB response", QENTRY_LEN, resp); isp_get_ct_pt(isp, (isp_ct_pt_t *)resp, &pt); if (pt.ctp_status && pt.ctp_status != RQCS_DATA_UNDERRUN) { isp_prt(isp, ISP_LOGWARN, "Chan %d CT pass-through returned 0x%x", chan, pt.ctp_status); return (-1); } if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "CT response", rsp_bcnt, fcp->isp_scratch); return (0); } /* * Scan the fabric for devices and add them to our port database. * * Use the GID_PT command to get list of all Nx_Port IDs SNS knows. * Use GFF_ID and GFT_ID to check port type (FCP) and features (target). * * For 2100-23XX cards, we use the SNS mailbox command to pass simple name * server commands to the switch management server via the QLogic f/w. * * For the 24XX and above card, we use CT Pass-through IOCB. */ #define GIDLEN ISP_FC_SCRLEN #define NGENT ((GIDLEN - 16) >> 2) static int isp_gid_pt(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); ct_hdr_t ct; sns_gid_pt_req_t rq; uint8_t *scp = fcp->isp_scratch; isp_prt(isp, ISP_LOGDEBUG0, "Chan %d requesting GID_PT", chan); if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } if (IS_24XX(isp)) { /* Build the CT command and execute via pass-through. */ ISP_MEMZERO(&ct, sizeof (ct)); ct.ct_revision = CT_REVISION; ct.ct_fcs_type = CT_FC_TYPE_FC; ct.ct_fcs_subtype = CT_FC_SUBTYPE_NS; ct.ct_cmd_resp = SNS_GID_PT; ct.ct_bcnt_resid = (GIDLEN - 16) >> 2; isp_put_ct_hdr(isp, &ct, (ct_hdr_t *)scp); scp[sizeof(ct)] = 0x7f; /* Port Type = Nx_Port */ scp[sizeof(ct)+1] = 0; /* Domain_ID = any */ scp[sizeof(ct)+2] = 0; /* Area_ID = any */ scp[sizeof(ct)+3] = 0; /* Flags = no Area_ID */ if (isp_ct_passthru(isp, chan, sizeof(ct) + sizeof(uint32_t), GIDLEN)) { FC_SCRATCH_RELEASE(isp, chan); return (-1); } } else { /* Build the SNS request and execute via firmware. 
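* Pre-24XX chips have no CT pass-through IOCB, so the GID_PT request goes
* out via the MBOX_SEND_SNS path in isp_ct_sns() instead.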
*/ ISP_MEMZERO(&rq, SNS_GID_PT_REQ_SIZE); rq.snscb_rblen = GIDLEN >> 1; rq.snscb_addr[RQRSP_ADDR0015] = DMA_WD0(fcp->isp_scdma); rq.snscb_addr[RQRSP_ADDR1631] = DMA_WD1(fcp->isp_scdma); rq.snscb_addr[RQRSP_ADDR3247] = DMA_WD2(fcp->isp_scdma); rq.snscb_addr[RQRSP_ADDR4863] = DMA_WD3(fcp->isp_scdma); rq.snscb_sblen = 6; rq.snscb_cmd = SNS_GID_PT; rq.snscb_mword_div_2 = NGENT; rq.snscb_port_type = 0x7f; /* Port Type = Nx_Port */ rq.snscb_domain = 0; /* Domain_ID = any */ rq.snscb_area = 0; /* Area_ID = any */ rq.snscb_flags = 0; /* Flags = no Area_ID */ isp_put_gid_pt_request(isp, &rq, (sns_gid_pt_req_t *)scp); if (isp_ct_sns(isp, chan, sizeof(rq), NGENT)) { FC_SCRATCH_RELEASE(isp, chan); return (-1); } } isp_get_gid_xx_response(isp, (sns_gid_xx_rsp_t *)scp, (sns_gid_xx_rsp_t *)fcp->isp_scanscratch, NGENT); FC_SCRATCH_RELEASE(isp, chan); return (0); } static int isp_gff_id(ispsoftc_t *isp, int chan, uint32_t portid) { fcparam *fcp = FCPARAM(isp, chan); ct_hdr_t ct; uint32_t *rp; uint8_t *scp = fcp->isp_scratch; sns_gff_id_rsp_t rsp; int i, res = -1; if (!fcp->isp_use_gff_id) /* User may block GFF_ID use. */ return (res); if (!IS_24XX(isp)) /* Old chips can't request GFF_ID. */ return (res); isp_prt(isp, ISP_LOGDEBUG0, "Chan %d requesting GFF_ID", chan); if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (res); } /* Build the CT command and execute via pass-through. */ ISP_MEMZERO(&ct, sizeof (ct)); ct.ct_revision = CT_REVISION; ct.ct_fcs_type = CT_FC_TYPE_FC; ct.ct_fcs_subtype = CT_FC_SUBTYPE_NS; ct.ct_cmd_resp = SNS_GFF_ID; ct.ct_bcnt_resid = (SNS_GFF_ID_RESP_SIZE - sizeof(ct)) / 4; isp_put_ct_hdr(isp, &ct, (ct_hdr_t *)scp); rp = (uint32_t *) &scp[sizeof(ct)]; ISP_IOZPUT_32(isp, portid, rp); if (isp_ct_passthru(isp, chan, sizeof(ct) + sizeof(uint32_t), SNS_GFF_ID_RESP_SIZE)) { FC_SCRATCH_RELEASE(isp, chan); return (res); } isp_get_gff_id_response(isp, (sns_gff_id_rsp_t *)scp, &rsp); if (rsp.snscb_cthdr.ct_cmd_resp == LS_ACC) { for (i = 0; i < 32; i++) { if (rsp.snscb_fc4_features[i] != 0) { res = 0; break; } } if (((rsp.snscb_fc4_features[FC4_SCSI / 8] >> ((FC4_SCSI % 8) * 4)) & 0x01) != 0) res = 1; /* Workaround for broken Brocade firmware. */ if (((ISP_SWAP32(isp, rsp.snscb_fc4_features[FC4_SCSI / 8]) >> ((FC4_SCSI % 8) * 4)) & 0x01) != 0) res = 1; } FC_SCRATCH_RELEASE(isp, chan); isp_prt(isp, ISP_LOGDEBUG0, "Chan %d GFF_ID result is %d", chan, res); return (res); } static int isp_gft_id(ispsoftc_t *isp, int chan, uint32_t portid) { fcparam *fcp = FCPARAM(isp, chan); ct_hdr_t ct; sns_gxx_id_req_t rq; uint32_t *rp; uint8_t *scp = fcp->isp_scratch; sns_gft_id_rsp_t rsp; int i, res = -1; if (!fcp->isp_use_gft_id) /* User may block GFT_ID use. */ return (res); isp_prt(isp, ISP_LOGDEBUG0, "Chan %d requesting GFT_ID", chan); if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (res); } if (IS_24XX(isp)) { /* Build the CT command and execute via pass-through. */ ISP_MEMZERO(&ct, sizeof (ct)); ct.ct_revision = CT_REVISION; ct.ct_fcs_type = CT_FC_TYPE_FC; ct.ct_fcs_subtype = CT_FC_SUBTYPE_NS; ct.ct_cmd_resp = SNS_GFT_ID; ct.ct_bcnt_resid = (SNS_GFT_ID_RESP_SIZE - sizeof(ct)) / 4; isp_put_ct_hdr(isp, &ct, (ct_hdr_t *)scp); rp = (uint32_t *) &scp[sizeof(ct)]; ISP_IOZPUT_32(isp, portid, rp); if (isp_ct_passthru(isp, chan, sizeof(ct) + sizeof(uint32_t), SNS_GFT_ID_RESP_SIZE)) { FC_SCRATCH_RELEASE(isp, chan); return (res); } } else { /* Build the SNS request and execute via firmware. 
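* Same MBOX_SEND_SNS path as isp_gid_pt() above, but issuing a GFT_ID query
* for a single Port ID.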
*/ ISP_MEMZERO(&rq, SNS_GXX_ID_REQ_SIZE); rq.snscb_rblen = SNS_GFT_ID_RESP_SIZE >> 1; rq.snscb_addr[RQRSP_ADDR0015] = DMA_WD0(fcp->isp_scdma); rq.snscb_addr[RQRSP_ADDR1631] = DMA_WD1(fcp->isp_scdma); rq.snscb_addr[RQRSP_ADDR3247] = DMA_WD2(fcp->isp_scdma); rq.snscb_addr[RQRSP_ADDR4863] = DMA_WD3(fcp->isp_scdma); rq.snscb_sblen = 6; rq.snscb_cmd = SNS_GFT_ID; rq.snscb_mword_div_2 = (SNS_GFT_ID_RESP_SIZE - sizeof(ct)) / 4; rq.snscb_portid = portid; isp_put_gxx_id_request(isp, &rq, (sns_gxx_id_req_t *)scp); if (isp_ct_sns(isp, chan, sizeof(rq), SNS_GFT_ID_RESP_SIZE)) { FC_SCRATCH_RELEASE(isp, chan); return (res); } } isp_get_gft_id_response(isp, (sns_gft_id_rsp_t *)scp, &rsp); if (rsp.snscb_cthdr.ct_cmd_resp == LS_ACC) { for (i = 0; i < 8; i++) { if (rsp.snscb_fc4_types[i] != 0) { res = 0; break; } } if (((rsp.snscb_fc4_types[FC4_SCSI / 32] >> (FC4_SCSI % 32)) & 0x01) != 0) res = 1; } FC_SCRATCH_RELEASE(isp, chan); isp_prt(isp, ISP_LOGDEBUG0, "Chan %d GFT_ID result is %d", chan, res); return (res); } static int isp_scan_fabric(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; uint32_t portid; uint16_t nphdl; isp_pdb_t pdb; int portidx, portlim, r; sns_gid_xx_rsp_t *rs; if (fcp->isp_loopstate < LOOP_LSCAN_DONE) return (-1); if (fcp->isp_loopstate >= LOOP_FSCAN_DONE) return (0); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC fabric scan", chan); fcp->isp_loopstate = LOOP_SCANNING_FABRIC; if (!TOPO_IS_FABRIC(fcp->isp_topo)) { fcp->isp_loopstate = LOOP_FSCAN_DONE; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC fabric scan done (no fabric)", chan); return (0); } if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) { abort: FC_SCRATCH_RELEASE(isp, chan); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC fabric scan aborted", chan); return (1); } /* * Make sure we still are logged into the fabric controller. */ nphdl = IS_24XX(isp) ? NPH_FL_ID : FL_ID; r = isp_getpdb(isp, chan, nphdl, &pdb); if ((r & 0xffff) == MBOX_NOT_LOGGED_IN) { isp_dump_chip_portdb(isp, chan); } if (r) { fcp->isp_loopstate = LOOP_LTEST_DONE; fail: isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC fabric scan done (bad)", chan); return (-1); } /* Get list of port IDs from SNS. */ r = isp_gid_pt(isp, chan); if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) goto abort; if (r > 0) { fcp->isp_loopstate = LOOP_FSCAN_DONE; return (-1); } else if (r < 0) { fcp->isp_loopstate = LOOP_LTEST_DONE; /* try again */ return (-1); } rs = (sns_gid_xx_rsp_t *) fcp->isp_scanscratch; if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) goto abort; if (rs->snscb_cthdr.ct_cmd_resp != LS_ACC) { int level; /* FC-4 Type and Port Type not registered are not errors. */ if (rs->snscb_cthdr.ct_reason == 9 && (rs->snscb_cthdr.ct_explanation == 0x07 || rs->snscb_cthdr.ct_explanation == 0x0a)) { level = ISP_LOG_SANCFG; } else { level = ISP_LOGWARN; } isp_prt(isp, level, "Chan %d Fabric Nameserver rejected GID_PT" " (Reason=0x%x Expl=0x%x)", chan, rs->snscb_cthdr.ct_reason, rs->snscb_cthdr.ct_explanation); fcp->isp_loopstate = LOOP_FSCAN_DONE; return (-1); } /* Check our buffer was big enough to get the full list. */ for (portidx = 0; portidx < NGENT-1; portidx++) { if (rs->snscb_ports[portidx].control & 0x80) break; } if ((rs->snscb_ports[portidx].control & 0x80) == 0) { isp_prt(isp, ISP_LOGWARN, "fabric too big for scratch area: increase ISP_FC_SCRLEN"); } portlim = portidx + 1; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Got %d ports back from name server", chan, portlim); /* Go through the list and remove duplicate port ids. 
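* Duplicate entries are zeroed in place; the scan loop below skips Port ID 0
* entries.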
*/ for (portidx = 0; portidx < portlim; portidx++) { int npidx; portid = ((rs->snscb_ports[portidx].portid[0]) << 16) | ((rs->snscb_ports[portidx].portid[1]) << 8) | ((rs->snscb_ports[portidx].portid[2])); for (npidx = portidx + 1; npidx < portlim; npidx++) { uint32_t new_portid = ((rs->snscb_ports[npidx].portid[0]) << 16) | ((rs->snscb_ports[npidx].portid[1]) << 8) | ((rs->snscb_ports[npidx].portid[2])); if (new_portid == portid) { break; } } if (npidx < portlim) { rs->snscb_ports[npidx].portid[0] = 0; rs->snscb_ports[npidx].portid[1] = 0; rs->snscb_ports[npidx].portid[2] = 0; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d removing duplicate PortID 0x%06x entry from list", chan, portid); } } /* * We now have a list of Port IDs for all FC4 SCSI devices * that the Fabric Name server knows about. * * For each entry on this list go through our port database looking * for probational entries- if we find one, then an old entry is * maybe still this one. We get some information to find out. * * Otherwise, it's a new fabric device, and we log into it * (unconditionally). After searching the entire database * again to make sure that we never ever ever ever have more * than one entry that has the same PortID or the same * WWNN/WWPN duple, we enter the device into our database. */ isp_mark_portdb(isp, chan); for (portidx = 0; portidx < portlim; portidx++) { portid = ((rs->snscb_ports[portidx].portid[0]) << 16) | ((rs->snscb_ports[portidx].portid[1]) << 8) | ((rs->snscb_ports[portidx].portid[2])); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Checking fabric port 0x%06x", chan, portid); if (portid == 0) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port at idx %d is zero", chan, portidx); continue; } if (portid == fcp->isp_portid) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port 0x%06x is our", chan, portid); continue; } /* Now search the entire port database for the same portid. */ if (isp_find_pdb_by_portid(isp, chan, portid, &lp)) { if (!lp->probational) { isp_prt(isp, ISP_LOGERR, "Chan %d Port 0x%06x@0x%04x [%d] is not probational (0x%x)", chan, lp->portid, lp->handle, FC_PORTDB_TGT(isp, chan, lp), lp->state); isp_dump_portdb(isp, chan); goto fail; } if (lp->state == FC_PORTDB_STATE_ZOMBIE) goto relogin; /* * See if we're still logged into it. * * If we aren't, mark it as a dead device and * leave the new portid in the database entry * for somebody further along to decide what to * do (policy choice). * * If we are, check to see if it's the same * device still (it should be). If for some * reason it isn't, mark it as a changed device * and leave the new portid and role in the * database entry for somebody further along to * decide what to do (policy choice). 
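* A failing GET PORT DATABASE below marks the entry dead and drops into the
* relogin path.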
*/ r = isp_getpdb(isp, chan, lp->handle, &pdb); if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) goto abort; if (r != 0) { lp->state = FC_PORTDB_STATE_DEAD; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port 0x%06x handle 0x%x is dead (%d)", chan, portid, lp->handle, r); goto relogin; } isp_pdb_add_update(isp, chan, &pdb); continue; } relogin: if ((fcp->role & ISP_ROLE_INITIATOR) == 0) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port 0x%06x is not logged in", chan, portid); continue; } r = isp_gff_id(isp, chan, portid); if (r == 0) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port 0x%06x is not an FCP target", chan, portid); continue; } if (r < 0) r = isp_gft_id(isp, chan, portid); if (r == 0) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port 0x%06x is not FCP", chan, portid); continue; } if (isp_login_device(isp, chan, portid, &pdb, &FCPARAM(isp, 0)->isp_lasthdl)) { if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) goto abort; continue; } isp_pdb_add_update(isp, chan, &pdb); } if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) goto abort; fcp->isp_loopstate = LOOP_FSCAN_DONE; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC fabric scan done", chan); return (0); } /* * Find an unused handle and try and use it to log in to a port. */ static int isp_login_device(ispsoftc_t *isp, int chan, uint32_t portid, isp_pdb_t *p, uint16_t *ohp) { int lim, i, r; uint16_t handle; if (ISP_CAP_2KLOGIN(isp)) { lim = NPH_MAX_2K; } else { lim = NPH_MAX; } handle = isp_next_handle(isp, ohp); for (i = 0; i < lim; i++) { if (FCPARAM(isp, chan)->isp_loopstate != LOOP_SCANNING_FABRIC) return (-1); /* Check if this handle is free. */ r = isp_getpdb(isp, chan, handle, p); if (r == 0) { if (p->portid != portid) { /* This handle is busy, try next one. */ handle = isp_next_handle(isp, ohp); continue; } break; } if (FCPARAM(isp, chan)->isp_loopstate != LOOP_SCANNING_FABRIC) return (-1); /* * Now try and log into the device */ r = isp_plogx(isp, chan, handle, portid, PLOGX_FLG_CMD_PLOGI); if (r == 0) { break; } else if ((r & 0xffff) == MBOX_PORT_ID_USED) { /* * If we get here, then the firmware still thinks we're logged into this device, but with a different * handle. We need to break that association. We used to try and just substitute the handle, but then * failed to get any data via isp_getpdb (below). */ if (isp_plogx(isp, chan, r >> 16, portid, PLOGX_FLG_CMD_LOGO | PLOGX_FLG_IMPLICIT | PLOGX_FLG_FREE_NPHDL)) { isp_prt(isp, ISP_LOGERR, "baw... logout of %x failed", r >> 16); } if (FCPARAM(isp, chan)->isp_loopstate != LOOP_SCANNING_FABRIC) return (-1); r = isp_plogx(isp, chan, handle, portid, PLOGX_FLG_CMD_PLOGI); if (r != 0) i = lim; break; } else if ((r & 0xffff) == MBOX_LOOP_ID_USED) { /* Try the next handle. */ handle = isp_next_handle(isp, ohp); } else { /* Give up. 
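* Any other PLOGX error is treated as fatal for this Port ID: force the
* loop counter to its limit and stop trying.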
*/ i = lim; break; } } if (i == lim) { isp_prt(isp, ISP_LOGWARN, "Chan %d PLOGI 0x%06x failed", chan, portid); return (-1); } /* * If we successfully logged into it, get the PDB for it * so we can crosscheck that it is still what we think it * is and that we also have the role it plays */ r = isp_getpdb(isp, chan, handle, p); if (r != 0) { isp_prt(isp, ISP_LOGERR, "Chan %d new device 0x%06x@0x%x disappeared", chan, portid, handle); return (-1); } if (p->handle != handle || p->portid != portid) { isp_prt(isp, ISP_LOGERR, "Chan %d new device 0x%06x@0x%x changed (0x%06x@0x%0x)", chan, portid, handle, p->portid, p->handle); return (-1); } return (0); } static int isp_send_change_request(ispsoftc_t *isp, int chan) { mbreg_t mbs; MBSINIT(&mbs, MBOX_SEND_CHANGE_REQUEST, MBLOGALL, 500000); mbs.param[1] = 0x03; mbs.param[9] = chan; isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { return (0); } else { isp_prt(isp, ISP_LOGWARN, "Chan %d Send Change Request: 0x%x", chan, mbs.param[0]); return (-1); } } static int isp_register_fc4_type(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); rft_id_t rp; ct_hdr_t *ct = &rp.rftid_hdr; uint8_t local[SNS_RFT_ID_REQ_SIZE]; sns_screq_t *reqp = (sns_screq_t *) local; uint8_t *scp = fcp->isp_scratch; if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } if (IS_24XX(isp)) { /* Build the CT command and execute via pass-through. */ ISP_MEMZERO(&rp, sizeof(rp)); ct->ct_revision = CT_REVISION; ct->ct_fcs_type = CT_FC_TYPE_FC; ct->ct_fcs_subtype = CT_FC_SUBTYPE_NS; ct->ct_cmd_resp = SNS_RFT_ID; ct->ct_bcnt_resid = (sizeof (rft_id_t) - sizeof (ct_hdr_t)) >> 2; rp.rftid_portid[0] = fcp->isp_portid >> 16; rp.rftid_portid[1] = fcp->isp_portid >> 8; rp.rftid_portid[2] = fcp->isp_portid; rp.rftid_fc4types[FC4_SCSI >> 5] = 1 << (FC4_SCSI & 0x1f); isp_put_rft_id(isp, &rp, (rft_id_t *)scp); if (isp_ct_passthru(isp, chan, sizeof(rft_id_t), sizeof(ct_hdr_t))) { FC_SCRATCH_RELEASE(isp, chan); return (-1); } } else { /* Build the SNS request and execute via firmware. */ ISP_MEMZERO((void *) reqp, SNS_RFT_ID_REQ_SIZE); reqp->snscb_rblen = sizeof (ct_hdr_t) >> 1; reqp->snscb_addr[RQRSP_ADDR0015] = DMA_WD0(fcp->isp_scdma); reqp->snscb_addr[RQRSP_ADDR1631] = DMA_WD1(fcp->isp_scdma); reqp->snscb_addr[RQRSP_ADDR3247] = DMA_WD2(fcp->isp_scdma); reqp->snscb_addr[RQRSP_ADDR4863] = DMA_WD3(fcp->isp_scdma); reqp->snscb_sblen = 22; reqp->snscb_data[0] = SNS_RFT_ID; reqp->snscb_data[4] = fcp->isp_portid & 0xffff; reqp->snscb_data[5] = (fcp->isp_portid >> 16) & 0xff; reqp->snscb_data[6] = (1 << FC4_SCSI); isp_put_sns_request(isp, reqp, (sns_screq_t *)scp); if (isp_ct_sns(isp, chan, SNS_RFT_ID_REQ_SIZE, sizeof(ct_hdr_t))) { FC_SCRATCH_RELEASE(isp, chan); return (-1); } } isp_get_ct_hdr(isp, (ct_hdr_t *) scp, ct); FC_SCRATCH_RELEASE(isp, chan); if (ct->ct_cmd_resp == LS_RJT) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "Chan %d Register FC4 Type rejected", chan); return (-1); } else if (ct->ct_cmd_resp == LS_ACC) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Register FC4 Type accepted", chan); } else { isp_prt(isp, ISP_LOGWARN, "Chan %d Register FC4 Type: 0x%x", chan, ct->ct_cmd_resp); return (-1); } return (0); } static int isp_register_fc4_features_24xx(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); ct_hdr_t *ct; rff_id_t rp; uint8_t *scp = fcp->isp_scratch; if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } /* * Build the CT header and command in memory. 
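 *
 * This is an RFF_ID request to the Name Server: it advertises the
 * FC-4 Features for type FC4_SCSI, with bit 0 set when the channel
 * has a target role and bit 1 set when it has an initiator role.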
*/ ISP_MEMZERO(&rp, sizeof(rp)); ct = &rp.rffid_hdr; ct->ct_revision = CT_REVISION; ct->ct_fcs_type = CT_FC_TYPE_FC; ct->ct_fcs_subtype = CT_FC_SUBTYPE_NS; ct->ct_cmd_resp = SNS_RFF_ID; ct->ct_bcnt_resid = (sizeof (rff_id_t) - sizeof (ct_hdr_t)) >> 2; rp.rffid_portid[0] = fcp->isp_portid >> 16; rp.rffid_portid[1] = fcp->isp_portid >> 8; rp.rffid_portid[2] = fcp->isp_portid; rp.rffid_fc4features = 0; if (fcp->role & ISP_ROLE_TARGET) rp.rffid_fc4features |= 1; if (fcp->role & ISP_ROLE_INITIATOR) rp.rffid_fc4features |= 2; rp.rffid_fc4type = FC4_SCSI; isp_put_rff_id(isp, &rp, (rff_id_t *)scp); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "CT request", sizeof(rft_id_t), scp); if (isp_ct_passthru(isp, chan, sizeof(rft_id_t), sizeof(ct_hdr_t))) { FC_SCRATCH_RELEASE(isp, chan); return (-1); } isp_get_ct_hdr(isp, (ct_hdr_t *) scp, ct); FC_SCRATCH_RELEASE(isp, chan); if (ct->ct_cmd_resp == LS_RJT) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "Chan %d Register FC4 Features rejected", chan); return (-1); } else if (ct->ct_cmd_resp == LS_ACC) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Register FC4 Features accepted", chan); } else { isp_prt(isp, ISP_LOGWARN, "Chan %d Register FC4 Features: 0x%x", chan, ct->ct_cmd_resp); return (-1); } return (0); } static int isp_register_port_name_24xx(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); ct_hdr_t *ct; rspn_id_t rp; uint8_t *scp = fcp->isp_scratch; int len; if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } /* * Build the CT header and command in memory. */ ISP_MEMZERO(&rp, sizeof(rp)); ct = &rp.rspnid_hdr; ct->ct_revision = CT_REVISION; ct->ct_fcs_type = CT_FC_TYPE_FC; ct->ct_fcs_subtype = CT_FC_SUBTYPE_NS; ct->ct_cmd_resp = SNS_RSPN_ID; rp.rspnid_portid[0] = fcp->isp_portid >> 16; rp.rspnid_portid[1] = fcp->isp_portid >> 8; rp.rspnid_portid[2] = fcp->isp_portid; rp.rspnid_length = 0; len = offsetof(rspn_id_t, rspnid_name); mtx_lock(&prison0.pr_mtx); rp.rspnid_length += sprintf(&scp[len + rp.rspnid_length], "%s", prison0.pr_hostname[0] ? prison0.pr_hostname : "FreeBSD"); mtx_unlock(&prison0.pr_mtx); rp.rspnid_length += sprintf(&scp[len + rp.rspnid_length], ":%s", device_get_nameunit(isp->isp_dev)); if (chan != 0) { rp.rspnid_length += sprintf(&scp[len + rp.rspnid_length], "/%d", chan); } len += rp.rspnid_length; ct->ct_bcnt_resid = (len - sizeof(ct_hdr_t)) >> 2; isp_put_rspn_id(isp, &rp, (rspn_id_t *)scp); if (isp_ct_passthru(isp, chan, len, sizeof(ct_hdr_t))) { FC_SCRATCH_RELEASE(isp, chan); return (-1); } isp_get_ct_hdr(isp, (ct_hdr_t *) scp, ct); FC_SCRATCH_RELEASE(isp, chan); if (ct->ct_cmd_resp == LS_RJT) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "Chan %d Register Symbolic Port Name rejected", chan); return (-1); } else if (ct->ct_cmd_resp == LS_ACC) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Register Symbolic Port Name accepted", chan); } else { isp_prt(isp, ISP_LOGWARN, "Chan %d Register Symbolic Port Name: 0x%x", chan, ct->ct_cmd_resp); return (-1); } return (0); } static int isp_register_node_name_24xx(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); ct_hdr_t *ct; rsnn_nn_t rp; uint8_t *scp = fcp->isp_scratch; int len; if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } /* * Build the CT header and command in memory. 
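 *
 * This is an RSNN_NN request: it registers a symbolic node name for
 * our WWNN with the Name Server, using the host name (or "FreeBSD"
 * when no host name is set) as the name string.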
*/ ISP_MEMZERO(&rp, sizeof(rp)); ct = &rp.rsnnnn_hdr; ct->ct_revision = CT_REVISION; ct->ct_fcs_type = CT_FC_TYPE_FC; ct->ct_fcs_subtype = CT_FC_SUBTYPE_NS; ct->ct_cmd_resp = SNS_RSNN_NN; MAKE_NODE_NAME_FROM_WWN(rp.rsnnnn_nodename, fcp->isp_wwnn); rp.rsnnnn_length = 0; len = offsetof(rsnn_nn_t, rsnnnn_name); mtx_lock(&prison0.pr_mtx); rp.rsnnnn_length += sprintf(&scp[len + rp.rsnnnn_length], "%s", prison0.pr_hostname[0] ? prison0.pr_hostname : "FreeBSD"); mtx_unlock(&prison0.pr_mtx); len += rp.rsnnnn_length; ct->ct_bcnt_resid = (len - sizeof(ct_hdr_t)) >> 2; isp_put_rsnn_nn(isp, &rp, (rsnn_nn_t *)scp); if (isp_ct_passthru(isp, chan, len, sizeof(ct_hdr_t))) { FC_SCRATCH_RELEASE(isp, chan); return (-1); } isp_get_ct_hdr(isp, (ct_hdr_t *) scp, ct); FC_SCRATCH_RELEASE(isp, chan); if (ct->ct_cmd_resp == LS_RJT) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "Chan %d Register Symbolic Node Name rejected", chan); return (-1); } else if (ct->ct_cmd_resp == LS_ACC) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Register Symbolic Node Name accepted", chan); } else { isp_prt(isp, ISP_LOGWARN, "Chan %d Register Symbolic Node Name: 0x%x", chan, ct->ct_cmd_resp); return (-1); } return (0); } static uint16_t isp_next_handle(ispsoftc_t *isp, uint16_t *ohp) { fcparam *fcp; int i, chan, wrap; uint16_t handle, minh, maxh; handle = *ohp; if (ISP_CAP_2KLOGIN(isp)) { minh = 0; maxh = NPH_RESERVED - 1; } else { minh = SNS_ID + 1; maxh = NPH_MAX - 1; } wrap = 0; next: if (handle == NIL_HANDLE) { handle = minh; } else { handle++; if (handle > maxh) { if (++wrap >= 2) { isp_prt(isp, ISP_LOGERR, "Out of port handles!"); return (NIL_HANDLE); } handle = minh; } } for (chan = 0; chan < isp->isp_nchan; chan++) { fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) continue; for (i = 0; i < MAX_FC_TARG; i++) { if (fcp->portdb[i].state != FC_PORTDB_STATE_NIL && fcp->portdb[i].handle == handle) goto next; } } *ohp = handle; return (handle); } /* * Start a command. Locking is assumed done in the caller. */ int isp_start(XS_T *xs) { ispsoftc_t *isp; uint32_t cdblen; uint8_t local[QENTRY_LEN]; ispreq_t *reqp; void *cdbp, *qep; uint16_t *tptr; fcportdb_t *lp; int target, dmaresult; XS_INITERR(xs); isp = XS_ISP(xs); /* * Check command CDB length, etc.. We really are limited to 16 bytes * for Fibre Channel, but can do up to 44 bytes in parallel SCSI, * but probably only if we're running fairly new firmware (we'll * let the old f/w choke on an extended command queue entry). */ if (XS_CDBLEN(xs) > (IS_FC(isp)? 16 : 44) || XS_CDBLEN(xs) == 0) { isp_prt(isp, ISP_LOGERR, "unsupported cdb length (%d, CDB[0]=0x%x)", XS_CDBLEN(xs), XS_CDBP(xs)[0] & 0xff); XS_SETERR(xs, HBA_REQINVAL); return (CMD_COMPLETE); } /* * Translate the target to device handle as appropriate, checking * for correct device state as well. 
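 *
 * For Fibre Channel this means: verify that we are acting as an
 * initiator, that the chip is at RUNSTATE, that the target index maps
 * to a valid port database entry, and that the loop is ready; zombie
 * or not-yet-ready entries ask for a retry, anything else invalid
 * fails the command as a selection timeout.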
*/ target = XS_TGT(xs); if (IS_FC(isp)) { fcparam *fcp = FCPARAM(isp, XS_CHANNEL(xs)); if ((fcp->role & ISP_ROLE_INITIATOR) == 0) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx I am not an initiator", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_SELTIMEOUT); return (CMD_COMPLETE); } if (isp->isp_state != ISP_RUNSTATE) { isp_prt(isp, ISP_LOGERR, "Adapter not at RUNSTATE"); XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } isp_prt(isp, ISP_LOGDEBUG2, "XS_TGT(xs)=%d", target); lp = &fcp->portdb[target]; if (target < 0 || target >= MAX_FC_TARG || lp->is_target == 0) { XS_SETERR(xs, HBA_SELTIMEOUT); return (CMD_COMPLETE); } if (fcp->isp_loopstate != LOOP_READY) { isp_prt(isp, ISP_LOGDEBUG1, "%d.%d.%jx loop is not ready", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs)); return (CMD_RQLATER); } if (lp->state == FC_PORTDB_STATE_ZOMBIE) { isp_prt(isp, ISP_LOGDEBUG1, "%d.%d.%jx target zombie", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs)); return (CMD_RQLATER); } if (lp->state != FC_PORTDB_STATE_VALID) { isp_prt(isp, ISP_LOGDEBUG1, "%d.%d.%jx bad db port state 0x%x", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs), lp->state); XS_SETERR(xs, HBA_SELTIMEOUT); return (CMD_COMPLETE); } } else { sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); if (isp->isp_state != ISP_RUNSTATE) { isp_prt(isp, ISP_LOGERR, "Adapter not at RUNSTATE"); XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } if (sdp->update) { isp_spi_update(isp, XS_CHANNEL(xs)); } lp = NULL; } start_again: qep = isp_getrqentry(isp); if (qep == NULL) { isp_prt(isp, ISP_LOG_WARN1, "Request Queue Overflow"); XS_SETERR(xs, HBA_BOTCH); return (CMD_EAGAIN); } XS_SETERR(xs, HBA_NOERROR); /* * Now see if we need to synchronize the ISP with respect to anything. * We do dual duty here (cough) for synchronizing for buses other * than which we got here to send a command to. */ reqp = (ispreq_t *) local; ISP_MEMZERO(local, QENTRY_LEN); if (ISP_TST_SENDMARKER(isp, XS_CHANNEL(xs))) { if (IS_24XX(isp)) { isp_marker_24xx_t *m = (isp_marker_24xx_t *) reqp; m->mrk_header.rqs_entry_count = 1; m->mrk_header.rqs_entry_type = RQSTYPE_MARKER; m->mrk_modifier = SYNC_ALL; m->mrk_vphdl = XS_CHANNEL(xs); isp_put_marker_24xx(isp, m, qep); } else { isp_marker_t *m = (isp_marker_t *) reqp; m->mrk_header.rqs_entry_count = 1; m->mrk_header.rqs_entry_type = RQSTYPE_MARKER; m->mrk_target = (XS_CHANNEL(xs) << 7); /* bus # */ m->mrk_modifier = SYNC_ALL; isp_put_marker(isp, m, qep); } ISP_SYNC_REQUEST(isp); ISP_SET_SENDMARKER(isp, XS_CHANNEL(xs), 0); goto start_again; } reqp->req_header.rqs_entry_count = 1; /* * Select and install Header Code. * Note that it might be overridden before going out * if we're on a 64 bit platform. The lower level * code (isp_send_cmd) will select the appropriate * 64 bit variant if it needs to. 
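 *
 * The selection below is: 24xx -> RQSTYPE_T7RQS, other Fibre Channel
 * -> RQSTYPE_T2RQS, parallel SCSI with a CDB longer than 12 bytes ->
 * RQSTYPE_CMDONLY, otherwise RQSTYPE_REQUEST.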
*/ if (IS_24XX(isp)) { reqp->req_header.rqs_entry_type = RQSTYPE_T7RQS; } else if (IS_FC(isp)) { reqp->req_header.rqs_entry_type = RQSTYPE_T2RQS; } else { if (XS_CDBLEN(xs) > 12) { reqp->req_header.rqs_entry_type = RQSTYPE_CMDONLY; } else { reqp->req_header.rqs_entry_type = RQSTYPE_REQUEST; } } /* * Set task attributes */ if (IS_24XX(isp)) { int ttype; if (XS_TAG_P(xs)) { ttype = XS_TAG_TYPE(xs); } else { ttype = REQFLAG_STAG; } if (ttype == REQFLAG_OTAG) { ttype = FCP_CMND_TASK_ATTR_ORDERED; } else if (ttype == REQFLAG_HTAG) { ttype = FCP_CMND_TASK_ATTR_HEAD; } else { ttype = FCP_CMND_TASK_ATTR_SIMPLE; } - ((ispreqt7_t *)reqp)->req_task_attribute = ttype; + ((ispreqt7_t *)reqp)->req_task_attribute = ttype | + ((XS_PRIORITY(xs) << FCP_CMND_PRIO_SHIFT) & + FCP_CMND_PRIO_MASK); } else if (IS_FC(isp)) { /* * See comment in isp_intr_respq */ /* XS_SET_RESID(xs, 0); */ /* * Fibre Channel always requires some kind of tag. * The Qlogic drivers seem be happy not to use a tag, * but this breaks for some devices (IBM drives). */ if (XS_TAG_P(xs)) { ((ispreqt2_t *)reqp)->req_flags = XS_TAG_TYPE(xs); } else { ((ispreqt2_t *)reqp)->req_flags = REQFLAG_STAG; } } else { sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); if ((sdp->isp_devparam[target].actv_flags & DPARM_TQING) && XS_TAG_P(xs)) { reqp->req_flags = XS_TAG_TYPE(xs); } } /* * NB: we do not support long CDBs (yet) */ cdblen = XS_CDBLEN(xs); if (IS_SCSI(isp)) { if (cdblen > sizeof (reqp->req_cdb)) { isp_prt(isp, ISP_LOGERR, "Command Length %u too long for this chip", cdblen); XS_SETERR(xs, HBA_REQINVAL); return (CMD_COMPLETE); } reqp->req_target = target | (XS_CHANNEL(xs) << 7); reqp->req_lun_trn = XS_LUN(xs); reqp->req_cdblen = cdblen; tptr = &reqp->req_time; cdbp = reqp->req_cdb; } else if (IS_24XX(isp)) { ispreqt7_t *t7 = (ispreqt7_t *)local; if (cdblen > sizeof (t7->req_cdb)) { isp_prt(isp, ISP_LOGERR, "Command Length %u too long for this chip", cdblen); XS_SETERR(xs, HBA_REQINVAL); return (CMD_COMPLETE); } t7->req_nphdl = lp->handle; t7->req_tidlo = lp->portid; t7->req_tidhi = lp->portid >> 16; t7->req_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(xs)); be64enc(t7->req_lun, CAM_EXTLUN_BYTE_SWIZZLE(XS_LUN(xs))); if (FCPARAM(isp, XS_CHANNEL(xs))->fctape_enabled && (lp->prli_word3 & PRLI_WD3_RETRY)) { if (FCP_NEXT_CRN(isp, &t7->req_crn, xs)) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx cannot generate next CRN", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_BOTCH); return (CMD_EAGAIN); } } tptr = &t7->req_time; cdbp = t7->req_cdb; } else { ispreqt2_t *t2 = (ispreqt2_t *)local; if (cdblen > sizeof t2->req_cdb) { isp_prt(isp, ISP_LOGERR, "Command Length %u too long for this chip", cdblen); XS_SETERR(xs, HBA_REQINVAL); return (CMD_COMPLETE); } if (FCPARAM(isp, XS_CHANNEL(xs))->fctape_enabled && (lp->prli_word3 & PRLI_WD3_RETRY)) { if (FCP_NEXT_CRN(isp, &t2->req_crn, xs)) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx cannot generate next CRN", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_BOTCH); return (CMD_EAGAIN); } } if (ISP_CAP_2KLOGIN(isp)) { ispreqt2e_t *t2e = (ispreqt2e_t *)local; t2e->req_target = lp->handle; t2e->req_scclun = XS_LUN(xs); tptr = &t2e->req_time; cdbp = t2e->req_cdb; } else if (ISP_CAP_SCCFW(isp)) { t2->req_target = lp->handle; t2->req_scclun = XS_LUN(xs); tptr = &t2->req_time; cdbp = t2->req_cdb; } else { t2->req_target = lp->handle; t2->req_lun_trn = XS_LUN(xs); tptr = &t2->req_time; cdbp = t2->req_cdb; } } *tptr = XS_TIME(xs); ISP_MEMCPY(cdbp, XS_CDBP(xs), cdblen); /* Whew. 
Thankfully the same for type 7 requests */ reqp->req_handle = isp_allocate_handle(isp, xs, ISP_HANDLE_INITIATOR); if (reqp->req_handle == 0) { isp_prt(isp, ISP_LOG_WARN1, "out of xflist pointers"); XS_SETERR(xs, HBA_BOTCH); return (CMD_EAGAIN); } /* * Set up DMA and/or do any platform dependent swizzling of the request entry * so that the Qlogic F/W understands what is being asked of it. * * The callee is responsible for adding all requests at this point. */ dmaresult = ISP_DMASETUP(isp, xs, reqp); if (dmaresult != CMD_QUEUED) { isp_destroy_handle(isp, reqp->req_handle); /* * dmasetup sets actual error in packet, and * return what we were given to return. */ return (dmaresult); } isp_xs_prt(isp, xs, ISP_LOGDEBUG0, "START cmd cdb[0]=0x%x datalen %ld", XS_CDBP(xs)[0], (long) XS_XFRLEN(xs)); return (CMD_QUEUED); } /* * isp control * Locks (ints blocked) assumed held. */ int isp_control(ispsoftc_t *isp, ispctl_t ctl, ...) { XS_T *xs; mbreg_t *mbr, mbs; int chan, tgt; uint32_t handle; va_list ap; switch (ctl) { case ISPCTL_RESET_BUS: /* * Issue a bus reset. */ if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGERR, "BUS RESET NOT IMPLEMENTED"); break; } else if (IS_FC(isp)) { mbs.param[1] = 10; chan = 0; } else { va_start(ap, ctl); chan = va_arg(ap, int); va_end(ap); mbs.param[1] = SDPARAM(isp, chan)->isp_bus_reset_delay; if (mbs.param[1] < 2) { mbs.param[1] = 2; } mbs.param[2] = chan; } MBSINIT(&mbs, MBOX_BUS_RESET, MBLOGALL, 0); ISP_SET_SENDMARKER(isp, chan, 1); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { break; } isp_prt(isp, ISP_LOGINFO, "driver initiated bus reset of bus %d", chan); return (0); case ISPCTL_RESET_DEV: va_start(ap, ctl); chan = va_arg(ap, int); tgt = va_arg(ap, int); va_end(ap); if (IS_24XX(isp)) { uint8_t local[QENTRY_LEN]; isp24xx_tmf_t *tmf; isp24xx_statusreq_t *sp; fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; if (tgt < 0 || tgt >= MAX_FC_TARG) { isp_prt(isp, ISP_LOGWARN, "Chan %d trying to reset bad target %d", chan, tgt); break; } lp = &fcp->portdb[tgt]; if (lp->is_target == 0 || lp->state != FC_PORTDB_STATE_VALID) { isp_prt(isp, ISP_LOGWARN, "Chan %d abort of no longer valid target %d", chan, tgt); break; } tmf = (isp24xx_tmf_t *) local; ISP_MEMZERO(tmf, QENTRY_LEN); tmf->tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT; tmf->tmf_header.rqs_entry_count = 1; tmf->tmf_nphdl = lp->handle; tmf->tmf_delay = 2; tmf->tmf_timeout = 4; tmf->tmf_flags = ISP24XX_TMF_TARGET_RESET; tmf->tmf_tidlo = lp->portid; tmf->tmf_tidhi = lp->portid >> 16; tmf->tmf_vpidx = ISP_GET_VPIDX(isp, chan); isp_put_24xx_tmf(isp, tmf, isp->isp_iocb); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "TMF IOCB request", QENTRY_LEN, isp->isp_iocb); MEMORYBARRIER(isp, SYNC_IFORDEV, 0, QENTRY_LEN, chan); fcp->sendmarker = 1; isp_prt(isp, ISP_LOGALL, "Chan %d Reset N-Port Handle 0x%04x @ Port 0x%06x", chan, lp->handle, lp->portid); MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, MBCMD_DEFAULT_TIMEOUT + tmf->tmf_timeout * 1000000); mbs.param[1] = QENTRY_LEN; mbs.param[2] = DMA_WD1(isp->isp_iocb_dma); mbs.param[3] = DMA_WD0(isp->isp_iocb_dma); mbs.param[6] = DMA_WD3(isp->isp_iocb_dma); mbs.param[7] = DMA_WD2(isp->isp_iocb_dma); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) break; MEMORYBARRIER(isp, SYNC_IFORCPU, QENTRY_LEN, QENTRY_LEN, chan); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "TMF IOCB response", QENTRY_LEN, &((isp24xx_statusreq_t *)isp->isp_iocb)[1]); sp = (isp24xx_statusreq_t *) local; isp_get_24xx_response(isp, &((isp24xx_statusreq_t 
*)isp->isp_iocb)[1], sp); if (sp->req_completion_status == 0) { return (0); } isp_prt(isp, ISP_LOGWARN, "Chan %d reset of target %d returned 0x%x", chan, tgt, sp->req_completion_status); break; } else if (IS_FC(isp)) { if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = tgt; mbs.ibits = (1 << 10); } else { mbs.param[1] = (tgt << 8); } } else { mbs.param[1] = (chan << 15) | (tgt << 8); } MBSINIT(&mbs, MBOX_ABORT_TARGET, MBLOGALL, 0); mbs.param[2] = 3; /* 'delay', in seconds */ isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { break; } isp_prt(isp, ISP_LOGINFO, "Target %d on Bus %d Reset Succeeded", tgt, chan); ISP_SET_SENDMARKER(isp, chan, 1); return (0); case ISPCTL_ABORT_CMD: va_start(ap, ctl); xs = va_arg(ap, XS_T *); va_end(ap); tgt = XS_TGT(xs); chan = XS_CHANNEL(xs); handle = isp_find_handle(isp, xs); if (handle == 0) { isp_prt(isp, ISP_LOGWARN, "cannot find handle for command to abort"); break; } if (IS_24XX(isp)) { isp24xx_abrt_t local, *ab = &local; fcparam *fcp; fcportdb_t *lp; fcp = FCPARAM(isp, chan); if (tgt < 0 || tgt >= MAX_FC_TARG) { isp_prt(isp, ISP_LOGWARN, "Chan %d trying to abort bad target %d", chan, tgt); break; } lp = &fcp->portdb[tgt]; if (lp->is_target == 0 || lp->state != FC_PORTDB_STATE_VALID) { isp_prt(isp, ISP_LOGWARN, "Chan %d abort of no longer valid target %d", chan, tgt); break; } isp_prt(isp, ISP_LOGALL, "Chan %d Abort Cmd for N-Port 0x%04x @ Port 0x%06x", chan, lp->handle, lp->portid); ISP_MEMZERO(ab, QENTRY_LEN); ab->abrt_header.rqs_entry_type = RQSTYPE_ABORT_IO; ab->abrt_header.rqs_entry_count = 1; ab->abrt_handle = lp->handle; ab->abrt_cmd_handle = handle; ab->abrt_tidlo = lp->portid; ab->abrt_tidhi = lp->portid >> 16; ab->abrt_vpidx = ISP_GET_VPIDX(isp, chan); isp_put_24xx_abrt(isp, ab, isp->isp_iocb); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "AB IOCB quest", QENTRY_LEN, isp->isp_iocb); MEMORYBARRIER(isp, SYNC_IFORDEV, 0, 2 * QENTRY_LEN, chan); ISP_MEMZERO(&mbs, sizeof (mbs)); MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 5000000); mbs.param[1] = QENTRY_LEN; mbs.param[2] = DMA_WD1(isp->isp_iocb_dma); mbs.param[3] = DMA_WD0(isp->isp_iocb_dma); mbs.param[6] = DMA_WD3(isp->isp_iocb_dma); mbs.param[7] = DMA_WD2(isp->isp_iocb_dma); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) break; MEMORYBARRIER(isp, SYNC_IFORCPU, QENTRY_LEN, QENTRY_LEN, chan); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "AB IOCB response", QENTRY_LEN, &((isp24xx_abrt_t *)isp->isp_iocb)[1]); isp_get_24xx_abrt(isp, &((isp24xx_abrt_t *)isp->isp_iocb)[1], ab); if (ab->abrt_nphdl == ISP24XX_ABRT_OKAY) { return (0); } isp_prt(isp, ISP_LOGWARN, "Chan %d handle %d abort returned 0x%x", chan, tgt, ab->abrt_nphdl); break; } else if (IS_FC(isp)) { if (ISP_CAP_SCCFW(isp)) { if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = tgt; } else { mbs.param[1] = tgt << 8; } mbs.param[6] = XS_LUN(xs); } else { mbs.param[1] = tgt << 8 | XS_LUN(xs); } } else { mbs.param[1] = (chan << 15) | (tgt << 8) | XS_LUN(xs); } MBSINIT(&mbs, MBOX_ABORT, MBLOGALL & ~MBLOGMASK(MBOX_COMMAND_ERROR), 0); mbs.param[2] = handle; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { break; } return (0); case ISPCTL_UPDATE_PARAMS: va_start(ap, ctl); chan = va_arg(ap, int); va_end(ap); isp_spi_update(isp, chan); return (0); case ISPCTL_FCLINK_TEST: if (IS_FC(isp)) { int usdelay; va_start(ap, ctl); chan = va_arg(ap, int); usdelay = va_arg(ap, int); va_end(ap); if (usdelay == 0) { usdelay = 250000; } return (isp_fclink_test(isp, chan, usdelay)); } break; case 
ISPCTL_SCAN_FABRIC: if (IS_FC(isp)) { va_start(ap, ctl); chan = va_arg(ap, int); va_end(ap); return (isp_scan_fabric(isp, chan)); } break; case ISPCTL_SCAN_LOOP: if (IS_FC(isp)) { va_start(ap, ctl); chan = va_arg(ap, int); va_end(ap); return (isp_scan_loop(isp, chan)); } break; case ISPCTL_PDB_SYNC: if (IS_FC(isp)) { va_start(ap, ctl); chan = va_arg(ap, int); va_end(ap); return (isp_pdb_sync(isp, chan)); } break; case ISPCTL_SEND_LIP: if (IS_FC(isp) && !IS_24XX(isp)) { MBSINIT(&mbs, MBOX_INIT_LIP, MBLOGALL, 0); if (ISP_CAP_2KLOGIN(isp)) { mbs.ibits = (1 << 10); } isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { return (0); } } break; case ISPCTL_GET_PDB: if (IS_FC(isp)) { isp_pdb_t *pdb; va_start(ap, ctl); chan = va_arg(ap, int); tgt = va_arg(ap, int); pdb = va_arg(ap, isp_pdb_t *); va_end(ap); return (isp_getpdb(isp, chan, tgt, pdb)); } break; case ISPCTL_GET_NAMES: { uint64_t *wwnn, *wwnp; va_start(ap, ctl); chan = va_arg(ap, int); tgt = va_arg(ap, int); wwnn = va_arg(ap, uint64_t *); wwnp = va_arg(ap, uint64_t *); va_end(ap); if (wwnn == NULL && wwnp == NULL) { break; } if (wwnn) { *wwnn = isp_get_wwn(isp, chan, tgt, 1); if (*wwnn == INI_NONE) { break; } } if (wwnp) { *wwnp = isp_get_wwn(isp, chan, tgt, 0); if (*wwnp == INI_NONE) { break; } } return (0); } case ISPCTL_RUN_MBOXCMD: { va_start(ap, ctl); mbr = va_arg(ap, mbreg_t *); va_end(ap); isp_mboxcmd(isp, mbr); return (0); } case ISPCTL_PLOGX: { isp_plcmd_t *p; int r; va_start(ap, ctl); p = va_arg(ap, isp_plcmd_t *); va_end(ap); if ((p->flags & PLOGX_FLG_CMD_MASK) != PLOGX_FLG_CMD_PLOGI || (p->handle != NIL_HANDLE)) { return (isp_plogx(isp, p->channel, p->handle, p->portid, p->flags)); } do { isp_next_handle(isp, &p->handle); r = isp_plogx(isp, p->channel, p->handle, p->portid, p->flags); if ((r & 0xffff) == MBOX_PORT_ID_USED) { p->handle = r >> 16; r = 0; break; } } while ((r & 0xffff) == MBOX_LOOP_ID_USED); return (r); } case ISPCTL_CHANGE_ROLE: if (IS_FC(isp)) { int role, r; va_start(ap, ctl); chan = va_arg(ap, int); role = va_arg(ap, int); va_end(ap); r = isp_fc_change_role(isp, chan, role); return (r); } break; default: isp_prt(isp, ISP_LOGERR, "Unknown Control Opcode 0x%x", ctl); break; } return (-1); } /* * Interrupt Service Routine(s). * * External (OS) framework has done the appropriate locking, * and the locking will be held throughout this function. 
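 *
 * The handlers that follow service the ATIO queue (target mode only),
 * asynchronous events, mailbox completions and the response queue.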
*/ #ifdef ISP_TARGET_MODE void isp_intr_atioq(ispsoftc_t *isp) { uint8_t qe[QENTRY_LEN]; isphdr_t *hp; void *addr; uint32_t iptr, optr, oop; iptr = ISP_READ(isp, BIU2400_ATIO_RSPINP); optr = isp->isp_atioodx; while (optr != iptr) { oop = optr; MEMORYBARRIER(isp, SYNC_ATIOQ, oop, QENTRY_LEN, -1); addr = ISP_QUEUE_ENTRY(isp->isp_atioq, oop); isp_get_hdr(isp, addr, (isphdr_t *)qe); hp = (isphdr_t *)qe; switch (hp->rqs_entry_type) { case RQSTYPE_NOTIFY: case RQSTYPE_ATIO: (void) isp_target_notify(isp, addr, &oop); break; default: isp_print_qentry(isp, "?ATIOQ entry?", oop, addr); break; } optr = ISP_NXT_QENTRY(oop, RESULT_QUEUE_LEN(isp)); } if (isp->isp_atioodx != optr) { ISP_WRITE(isp, BIU2400_ATIO_RSPOUTP, optr); isp->isp_atioodx = optr; } } #endif void isp_intr_async(ispsoftc_t *isp, uint16_t event) { if (IS_FC(isp)) isp_parse_async_fc(isp, event); else isp_parse_async(isp, event); } void isp_intr_mbox(ispsoftc_t *isp, uint16_t mbox0) { int i, obits; if (!isp->isp_mboxbsy) { isp_prt(isp, ISP_LOGWARN, "mailbox 0x%x with no waiters", mbox0); return; } obits = isp->isp_obits; isp->isp_mboxtmp[0] = mbox0; for (i = 1; i < ISP_NMBOX(isp); i++) { if ((obits & (1 << i)) == 0) continue; isp->isp_mboxtmp[i] = ISP_READ(isp, MBOX_OFF(i)); } MBOX_NOTIFY_COMPLETE(isp); } void isp_intr_respq(ispsoftc_t *isp) { XS_T *xs, *cont_xs; uint8_t qe[QENTRY_LEN]; ispstatusreq_t *sp = (ispstatusreq_t *)qe; isp24xx_statusreq_t *sp2 = (isp24xx_statusreq_t *)qe; isphdr_t *hp; uint8_t *resp, *snsp; int buddaboom, completion_status, cont = 0, etype, i; int req_status_flags, req_state_flags, scsi_status; uint32_t iptr, junk, cptr, optr, rlen, slen, sptr, totslen, resid; /* * We can't be getting this now. */ if (isp->isp_state != ISP_RUNSTATE) { isp_prt(isp, ISP_LOGINFO, "respq interrupt when not ready"); return; } iptr = ISP_READ(isp, isp->isp_respinrp); /* Debounce the 2300 if revision less than 2. */ if (IS_2100(isp) || (IS_2300(isp) && isp->isp_revision < 2)) { do { junk = iptr; iptr = ISP_READ(isp, isp->isp_respinrp); } while (junk != iptr); } isp->isp_residx = iptr; optr = isp->isp_resodx; while (optr != iptr) { sptr = cptr = optr; hp = (isphdr_t *) ISP_QUEUE_ENTRY(isp->isp_result, cptr); optr = ISP_NXT_QENTRY(optr, RESULT_QUEUE_LEN(isp)); /* * Synchronize our view of this response queue entry. */ MEMORYBARRIER(isp, SYNC_RESULT, cptr, QENTRY_LEN, -1); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_qentry(isp, "Response Queue Entry", cptr, hp); isp_get_hdr(isp, hp, &sp->req_header); etype = sp->req_header.rqs_entry_type; /* We expected Status Continuation, but got different IOCB. 
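 * In that case the command that was accumulating sense data is
 * completed with whatever sense bytes have been gathered so far.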
*/ if (cont > 0 && etype != RQSTYPE_STATUS_CONT) { cont = 0; isp_done(cont_xs); } if (IS_24XX(isp) && etype == RQSTYPE_RESPONSE) { isp_get_24xx_response(isp, (isp24xx_statusreq_t *)hp, sp2); scsi_status = sp2->req_scsi_status; completion_status = sp2->req_completion_status; req_status_flags = 0; if ((scsi_status & 0xff) != 0) req_state_flags = RQSF_GOT_STATUS; else req_state_flags = 0; resid = sp2->req_resid; } else if (etype == RQSTYPE_RESPONSE) { isp_get_response(isp, (ispstatusreq_t *) hp, sp); scsi_status = sp->req_scsi_status; completion_status = sp->req_completion_status; req_status_flags = sp->req_status_flags; req_state_flags = sp->req_state_flags; resid = sp->req_resid; } else if (etype == RQSTYPE_RIO1) { isp_rio1_t *rio = (isp_rio1_t *) qe; isp_get_rio1(isp, (isp_rio1_t *) hp, rio); for (i = 0; i < rio->req_header.rqs_seqno; i++) { isp_fastpost_complete(isp, rio->req_handles[i]); } ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ continue; } else if (etype == RQSTYPE_RIO2) { isp_prt(isp, ISP_LOGERR, "dropping RIO2 response"); ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ continue; } else if (etype == RQSTYPE_STATUS_CONT) { ispstatus_cont_t *scp = (ispstatus_cont_t *)qe; isp_get_cont_response(isp, (ispstatus_cont_t *)hp, scp); if (cont > 0) { i = min(cont, sizeof(scp->req_sense_data)); XS_SENSE_APPEND(cont_xs, scp->req_sense_data, i); cont -= i; if (cont == 0) { isp_done(cont_xs); } else { isp_prt(isp, ISP_LOGDEBUG0|ISP_LOG_CWARN, "Expecting Status Continuations for %u bytes", cont); } } else { isp_prt(isp, ISP_LOG_WARN1, "Ignored Continuation Response"); } ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ continue; } else if (isp_handle_other_response(isp, etype, hp, &cptr)) { /* More then one IOCB could be consumed. */ while (sptr != cptr) { ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ sptr = ISP_NXT_QENTRY(sptr, RESULT_QUEUE_LEN(isp)); hp = (isphdr_t *)ISP_QUEUE_ENTRY(isp->isp_result, sptr); } ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ optr = ISP_NXT_QENTRY(cptr, RESULT_QUEUE_LEN(isp)); continue; } else { /* We don't know what was this -- log and skip. */ isp_prt(isp, ISP_LOGERR, notresp, etype, cptr, optr); ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ continue; } buddaboom = 0; if (sp->req_header.rqs_flags & RQSFLAG_MASK) { if (sp->req_header.rqs_flags & RQSFLAG_CONTINUATION) { isp_print_qentry(isp, "unexpected continuation segment", cptr, hp); continue; } if (sp->req_header.rqs_flags & RQSFLAG_FULL) { isp_prt(isp, ISP_LOG_WARN1, "internal queues full"); /* * We'll synthesize a QUEUE FULL message below. */ } if (sp->req_header.rqs_flags & RQSFLAG_BADHEADER) { isp_print_qentry(isp, "bad header flag", cptr, hp); buddaboom++; } if (sp->req_header.rqs_flags & RQSFLAG_BADPACKET) { isp_print_qentry(isp, "bad request packet", cptr, hp); buddaboom++; } if (sp->req_header.rqs_flags & RQSFLAG_BADCOUNT) { isp_print_qentry(isp, "invalid entry count", cptr, hp); buddaboom++; } if (sp->req_header.rqs_flags & RQSFLAG_BADORDER) { isp_print_qentry(isp, "invalid IOCB ordering", cptr, hp); continue; } } xs = isp_find_xs(isp, sp->req_handle); if (xs == NULL) { uint8_t ts = completion_status & 0xff; /* * Only whine if this isn't the expected fallout of * aborting the command or resetting the target. 
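 * That is, a missing handle on a RQSTYPE_RESPONSE entry whose
 * completion status is RQCS_ABORTED or RQCS_RESET_OCCURRED is
 * dropped quietly; anything else is logged as an error.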
*/ if (etype != RQSTYPE_RESPONSE) { isp_prt(isp, ISP_LOGERR, "cannot find handle 0x%x (type 0x%x)", sp->req_handle, etype); } else if (ts != RQCS_ABORTED && ts != RQCS_RESET_OCCURRED) { isp_prt(isp, ISP_LOGERR, "cannot find handle 0x%x (status 0x%x)", sp->req_handle, ts); } ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ continue; } if (req_status_flags & RQSTF_BUS_RESET) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx bus was reset", XS_CHANNEL(xs), XS_TGT(xs), (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_BUSRESET); ISP_SET_SENDMARKER(isp, XS_CHANNEL(xs), 1); } if (buddaboom) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx buddaboom", XS_CHANNEL(xs), XS_TGT(xs), (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_BOTCH); } resp = snsp = NULL; rlen = slen = totslen = 0; if (IS_24XX(isp) && (scsi_status & (RQCS_RV|RQCS_SV)) != 0) { resp = sp2->req_rsp_sense; rlen = sp2->req_response_len; } else if (IS_FC(isp) && (scsi_status & RQCS_RV) != 0) { resp = sp->req_response; rlen = sp->req_response_len; } if (IS_FC(isp) && (scsi_status & RQCS_SV) != 0) { /* * Fibre Channel F/W doesn't say we got status * if there's Sense Data instead. I guess they * think it goes w/o saying. */ req_state_flags |= RQSF_GOT_STATUS|RQSF_GOT_SENSE; if (IS_24XX(isp)) { snsp = sp2->req_rsp_sense; snsp += rlen; totslen = sp2->req_sense_len; slen = sizeof(sp2->req_rsp_sense) - rlen; } else { snsp = sp->req_sense_data; totslen = sp->req_sense_len; slen = sizeof(sp->req_sense_data); } } else if (IS_SCSI(isp) && (req_state_flags & RQSF_GOT_SENSE)) { snsp = sp->req_sense_data; totslen = sp->req_sense_len; slen = sizeof (sp->req_sense_data); } if (slen > totslen) slen = totslen; if (req_state_flags & RQSF_GOT_STATUS) *XS_STSP(xs) = scsi_status & 0xff; if (rlen >= 4 && resp[FCP_RSPNS_CODE_OFFSET] != 0) { const char *ptr; char lb[64]; const char *rnames[10] = { "Task Management function complete", "FCP_DATA length different than FCP_BURST_LEN", "FCP_CMND fields invalid", "FCP_DATA parameter mismatch with FCP_DATA_RO", "Task Management function rejected", "Task Management function failed", NULL, NULL, "Task Management function succeeded", "Task Management function incorrect logical unit number", }; uint8_t code = resp[FCP_RSPNS_CODE_OFFSET]; if (code >= 10 || rnames[code] == NULL) { ISP_SNPRINTF(lb, sizeof(lb), "Unknown FCP Response Code 0x%x", code); ptr = lb; } else { ptr = rnames[code]; } isp_xs_prt(isp, xs, ISP_LOGWARN, "FCP RESPONSE, LENGTH %u: %s CDB0=0x%02x", rlen, ptr, XS_CDBP(xs)[0] & 0xff); if (code != 0 && code != 8) XS_SETERR(xs, HBA_BOTCH); } if (IS_24XX(isp)) isp_parse_status_24xx(isp, sp2, xs, &resid); else isp_parse_status(isp, sp, xs, &resid); if ((XS_NOERR(xs) || XS_ERR(xs) == HBA_NOERROR) && (*XS_STSP(xs) == SCSI_BUSY)) XS_SETERR(xs, HBA_TGTBSY); if (IS_SCSI(isp)) { XS_SET_RESID(xs, resid); /* * A new synchronous rate was negotiated for * this target. Mark state such that we'll go * look up that which has changed later. 
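 * This only sets dev_refresh and the per-bus update flag; the actual
 * refresh of the negotiated parameters is done later by
 * isp_spi_update().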
*/ if (req_status_flags & RQSTF_NEGOTIATION) { int t = XS_TGT(xs); sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); sdp->isp_devparam[t].dev_refresh = 1; sdp->update = 1; } } else { if (req_status_flags & RQSF_XFER_COMPLETE) { XS_SET_RESID(xs, 0); } else if (scsi_status & RQCS_RESID) { XS_SET_RESID(xs, resid); } else { XS_SET_RESID(xs, 0); } } if (slen > 0) { XS_SAVE_SENSE(xs, snsp, slen); if (totslen > slen) { cont = totslen - slen; cont_xs = xs; isp_prt(isp, ISP_LOGDEBUG0|ISP_LOG_CWARN, "Expecting Status Continuations for %u bytes", cont); } } isp_prt(isp, ISP_LOGDEBUG2, "asked for %lu got raw resid %lu settled for %lu", (u_long)XS_XFRLEN(xs), (u_long)resid, (u_long)XS_GET_RESID(xs)); if (XS_XFRLEN(xs)) ISP_DMAFREE(isp, xs, sp->req_handle); isp_destroy_handle(isp, sp->req_handle); ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ /* Complete command if we expect no Status Continuations. */ if (cont == 0) isp_done(xs); } /* We haven't received all Status Continuations, but that is it. */ if (cont > 0) isp_done(cont_xs); /* If we processed any IOCBs, let ISP know about it. */ if (optr != isp->isp_resodx) { ISP_WRITE(isp, isp->isp_respoutrp, optr); isp->isp_resodx = optr; } } /* * Parse an ASYNC mailbox complete */ static void isp_parse_async(ispsoftc_t *isp, uint16_t mbox) { uint32_t h1 = 0, h2 = 0; uint16_t chan = 0; /* * Pick up the channel, but not if this is a ASYNC_RIO32_2, * where Mailboxes 6/7 have the second handle. */ if (mbox != ASYNC_RIO32_2) { if (IS_DUALBUS(isp)) { chan = ISP_READ(isp, OUTMAILBOX6); } } isp_prt(isp, ISP_LOGDEBUG2, "Async Mbox 0x%x", mbox); switch (mbox) { case ASYNC_BUS_RESET: ISP_SET_SENDMARKER(isp, chan, 1); #ifdef ISP_TARGET_MODE isp_target_async(isp, chan, mbox); #endif isp_async(isp, ISPASYNC_BUS_RESET, chan); break; case ASYNC_SYSTEM_ERROR: isp->isp_state = ISP_CRASHED; /* * Were we waiting for a mailbox command to complete? * If so, it's dead, so wake up the waiter. */ if (isp->isp_mboxbsy) { isp->isp_obits = 1; isp->isp_mboxtmp[0] = MBOX_HOST_INTERFACE_ERROR; MBOX_NOTIFY_COMPLETE(isp); } /* * It's up to the handler for isp_async to reinit stuff and * restart the firmware */ isp_async(isp, ISPASYNC_FW_CRASH); break; case ASYNC_RQS_XFER_ERR: isp_prt(isp, ISP_LOGERR, "Request Queue Transfer Error"); break; case ASYNC_RSP_XFER_ERR: isp_prt(isp, ISP_LOGERR, "Response Queue Transfer Error"); break; case ASYNC_QWAKEUP: /* * We've just been notified that the Queue has woken up. * We don't need to be chatty about this- just unlatch things * and move on. 
*/ mbox = ISP_READ(isp, isp->isp_rqstoutrp); break; case ASYNC_TIMEOUT_RESET: isp_prt(isp, ISP_LOGWARN, "timeout initiated SCSI bus reset of chan %d", chan); ISP_SET_SENDMARKER(isp, chan, 1); #ifdef ISP_TARGET_MODE isp_target_async(isp, chan, mbox); #endif break; case ASYNC_DEVICE_RESET: isp_prt(isp, ISP_LOGINFO, "device reset on chan %d", chan); ISP_SET_SENDMARKER(isp, chan, 1); #ifdef ISP_TARGET_MODE isp_target_async(isp, chan, mbox); #endif break; case ASYNC_EXTMSG_UNDERRUN: isp_prt(isp, ISP_LOGWARN, "extended message underrun"); break; case ASYNC_SCAM_INT: isp_prt(isp, ISP_LOGINFO, "SCAM interrupt"); break; case ASYNC_HUNG_SCSI: isp_prt(isp, ISP_LOGERR, "stalled SCSI Bus after DATA Overrun"); /* XXX: Need to issue SCSI reset at this point */ break; case ASYNC_KILLED_BUS: isp_prt(isp, ISP_LOGERR, "SCSI Bus reset after DATA Overrun"); break; case ASYNC_BUS_TRANSIT: mbox = ISP_READ(isp, OUTMAILBOX2); switch (mbox & SXP_PINS_MODE_MASK) { case SXP_PINS_LVD_MODE: isp_prt(isp, ISP_LOGINFO, "Transition to LVD mode"); SDPARAM(isp, chan)->isp_diffmode = 0; SDPARAM(isp, chan)->isp_ultramode = 0; SDPARAM(isp, chan)->isp_lvdmode = 1; break; case SXP_PINS_HVD_MODE: isp_prt(isp, ISP_LOGINFO, "Transition to Differential mode"); SDPARAM(isp, chan)->isp_diffmode = 1; SDPARAM(isp, chan)->isp_ultramode = 0; SDPARAM(isp, chan)->isp_lvdmode = 0; break; case SXP_PINS_SE_MODE: isp_prt(isp, ISP_LOGINFO, "Transition to Single Ended mode"); SDPARAM(isp, chan)->isp_diffmode = 0; SDPARAM(isp, chan)->isp_ultramode = 1; SDPARAM(isp, chan)->isp_lvdmode = 0; break; default: isp_prt(isp, ISP_LOGWARN, "Transition to Unknown Mode 0x%x", mbox); break; } /* * XXX: Set up to renegotiate again! */ /* Can only be for a 1080... */ ISP_SET_SENDMARKER(isp, chan, 1); break; case ASYNC_CMD_CMPLT: case ASYNC_RIO32_1: if (!IS_ULTRA3(isp)) { isp_prt(isp, ISP_LOGERR, "unexpected fast posting completion"); break; } /* FALLTHROUGH */ h1 = (ISP_READ(isp, OUTMAILBOX2) << 16) | ISP_READ(isp, OUTMAILBOX1); break; case ASYNC_RIO32_2: h1 = (ISP_READ(isp, OUTMAILBOX2) << 16) | ISP_READ(isp, OUTMAILBOX1); h2 = (ISP_READ(isp, OUTMAILBOX7) << 16) | ISP_READ(isp, OUTMAILBOX6); break; case ASYNC_RIO16_5: case ASYNC_RIO16_4: case ASYNC_RIO16_3: case ASYNC_RIO16_2: case ASYNC_RIO16_1: isp_prt(isp, ISP_LOGERR, "unexpected 16 bit RIO handle"); break; default: isp_prt(isp, ISP_LOGWARN, "%s: unhandled async code 0x%x", __func__, mbox); break; } if (h1 || h2) { isp_prt(isp, ISP_LOGDEBUG3, "fast post/rio completion of 0x%08x", h1); isp_fastpost_complete(isp, h1); if (h2) { isp_prt(isp, ISP_LOGDEBUG3, "fast post/rio completion of 0x%08x", h2); isp_fastpost_complete(isp, h2); } } } static void isp_parse_async_fc(ispsoftc_t *isp, uint16_t mbox) { fcparam *fcp; uint16_t chan; if (IS_DUALBUS(isp)) { chan = ISP_READ(isp, OUTMAILBOX6); } else { chan = 0; } isp_prt(isp, ISP_LOGDEBUG2, "Async Mbox 0x%x", mbox); switch (mbox) { case ASYNC_SYSTEM_ERROR: isp->isp_state = ISP_CRASHED; FCPARAM(isp, chan)->isp_loopstate = LOOP_NIL; isp_change_fw_state(isp, chan, FW_CONFIG_WAIT); /* * Were we waiting for a mailbox command to complete? * If so, it's dead, so wake up the waiter. 
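 * The waiter is woken with a synthesized MBOX_HOST_INTERFACE_ERROR
 * result rather than being left to wait on a dead chip.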
*/ if (isp->isp_mboxbsy) { isp->isp_obits = 1; isp->isp_mboxtmp[0] = MBOX_HOST_INTERFACE_ERROR; MBOX_NOTIFY_COMPLETE(isp); } /* * It's up to the handler for isp_async to reinit stuff and * restart the firmware */ isp_async(isp, ISPASYNC_FW_CRASH); break; case ASYNC_RQS_XFER_ERR: isp_prt(isp, ISP_LOGERR, "Request Queue Transfer Error"); break; case ASYNC_RSP_XFER_ERR: isp_prt(isp, ISP_LOGERR, "Response Queue Transfer Error"); break; case ASYNC_QWAKEUP: #ifdef ISP_TARGET_MODE if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGERR, "ATIO Queue Transfer Error"); break; } #endif isp_prt(isp, ISP_LOGERR, "%s: unexpected ASYNC_QWAKEUP code", __func__); break; case ASYNC_CMD_CMPLT: isp_fastpost_complete(isp, (ISP_READ(isp, OUTMAILBOX2) << 16) | ISP_READ(isp, OUTMAILBOX1)); break; case ASYNC_RIOZIO_STALL: isp_intr_respq(isp); break; case ASYNC_CTIO_DONE: #ifdef ISP_TARGET_MODE isp_target_async(isp, (ISP_READ(isp, OUTMAILBOX2) << 16) | ISP_READ(isp, OUTMAILBOX1), mbox); #else isp_prt(isp, ISP_LOGWARN, "unexpected ASYNC CTIO done"); #endif break; case ASYNC_LIP_ERROR: case ASYNC_LIP_NOS_OLS_RECV: case ASYNC_LIP_OCCURRED: case ASYNC_PTPMODE: /* * These are broadcast events that have to be sent across * all active channels. */ for (chan = 0; chan < isp->isp_nchan; chan++) { fcp = FCPARAM(isp, chan); int topo = fcp->isp_topo; if (fcp->role == ISP_ROLE_NONE) continue; if (fcp->isp_loopstate > LOOP_HAVE_LINK) fcp->isp_loopstate = LOOP_HAVE_LINK; ISP_SET_SENDMARKER(isp, chan, 1); isp_async(isp, ISPASYNC_LIP, chan); #ifdef ISP_TARGET_MODE isp_target_async(isp, chan, mbox); #endif /* * We've had problems with data corruption occurring on * commands that complete (with no apparent error) after * we receive a LIP. This has been observed mostly on * Local Loop topologies. To be safe, let's just mark * all active initiator commands as dead. */ if (topo == TOPO_NL_PORT || topo == TOPO_FL_PORT) { int i, j; for (i = j = 0; i < isp->isp_maxcmds; i++) { XS_T *xs; isp_hdl_t *hdp; hdp = &isp->isp_xflist[i]; if (ISP_H2HT(hdp->handle) != ISP_HANDLE_INITIATOR) { continue; } xs = hdp->cmd; if (XS_CHANNEL(xs) != chan) { continue; } j++; isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx bus reset set at %s:%u", XS_CHANNEL(xs), XS_TGT(xs), (uintmax_t)XS_LUN(xs), __func__, __LINE__); XS_SETERR(xs, HBA_BUSRESET); } if (j) { isp_prt(isp, ISP_LOGERR, lipd, chan, j); } } } break; case ASYNC_LOOP_UP: /* * This is a broadcast event that has to be sent across * all active channels. */ for (chan = 0; chan < isp->isp_nchan; chan++) { fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) continue; fcp->isp_linkstate = 1; if (fcp->isp_loopstate < LOOP_HAVE_LINK) fcp->isp_loopstate = LOOP_HAVE_LINK; ISP_SET_SENDMARKER(isp, chan, 1); isp_async(isp, ISPASYNC_LOOP_UP, chan); #ifdef ISP_TARGET_MODE isp_target_async(isp, chan, mbox); #endif } break; case ASYNC_LOOP_DOWN: /* * This is a broadcast event that has to be sent across * all active channels. */ for (chan = 0; chan < isp->isp_nchan; chan++) { fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) continue; ISP_SET_SENDMARKER(isp, chan, 1); fcp->isp_linkstate = 0; fcp->isp_loopstate = LOOP_NIL; isp_async(isp, ISPASYNC_LOOP_DOWN, chan); #ifdef ISP_TARGET_MODE isp_target_async(isp, chan, mbox); #endif } break; case ASYNC_LOOP_RESET: /* * This is a broadcast event that has to be sent across * all active channels. 
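 * For every channel with an active role this queues a marker, drops
 * the loop state back to LOOP_HAVE_LINK if it had advanced further,
 * and posts ISPASYNC_LOOP_RESET (plus the target-mode notification
 * when that support is compiled in).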
*/ for (chan = 0; chan < isp->isp_nchan; chan++) { fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) continue; ISP_SET_SENDMARKER(isp, chan, 1); if (fcp->isp_loopstate > LOOP_HAVE_LINK) fcp->isp_loopstate = LOOP_HAVE_LINK; isp_async(isp, ISPASYNC_LOOP_RESET, chan); #ifdef ISP_TARGET_MODE isp_target_async(isp, chan, mbox); #endif } break; case ASYNC_PDB_CHANGED: { int echan, nphdl, nlstate, reason; if (IS_23XX(isp) || IS_24XX(isp)) { nphdl = ISP_READ(isp, OUTMAILBOX1); nlstate = ISP_READ(isp, OUTMAILBOX2); } else { nphdl = nlstate = 0xffff; } if (IS_24XX(isp)) reason = ISP_READ(isp, OUTMAILBOX3) >> 8; else reason = 0xff; if (ISP_CAP_MULTI_ID(isp)) { chan = ISP_READ(isp, OUTMAILBOX3) & 0xff; if (chan == 0xff || nphdl == NIL_HANDLE) { chan = 0; echan = isp->isp_nchan - 1; } else if (chan >= isp->isp_nchan) { break; } else { echan = chan; } } else { chan = echan = 0; } for (; chan <= echan; chan++) { fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) continue; if (fcp->isp_loopstate > LOOP_LTEST_DONE) { if (nphdl != NIL_HANDLE && nphdl == fcp->isp_login_hdl && reason == PDB24XX_AE_OPN_2) continue; fcp->isp_loopstate = LOOP_LTEST_DONE; } else if (fcp->isp_loopstate < LOOP_HAVE_LINK) fcp->isp_loopstate = LOOP_HAVE_LINK; isp_async(isp, ISPASYNC_CHANGE_NOTIFY, chan, ISPASYNC_CHANGE_PDB, nphdl, nlstate, reason); } break; } case ASYNC_CHANGE_NOTIFY: { int portid; portid = ((ISP_READ(isp, OUTMAILBOX1) & 0xff) << 16) | ISP_READ(isp, OUTMAILBOX2); if (ISP_CAP_MULTI_ID(isp)) { chan = ISP_READ(isp, OUTMAILBOX3) & 0xff; if (chan >= isp->isp_nchan) break; } else { chan = 0; } fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) break; if (fcp->isp_loopstate > LOOP_LTEST_DONE) fcp->isp_loopstate = LOOP_LTEST_DONE; else if (fcp->isp_loopstate < LOOP_HAVE_LINK) fcp->isp_loopstate = LOOP_HAVE_LINK; isp_async(isp, ISPASYNC_CHANGE_NOTIFY, chan, ISPASYNC_CHANGE_SNS, portid); break; } case ASYNC_ERR_LOGGING_DISABLED: isp_prt(isp, ISP_LOGWARN, "Error logging disabled (reason 0x%x)", ISP_READ(isp, OUTMAILBOX1)); break; case ASYNC_CONNMODE: /* * This only applies to 2100 amd 2200 cards */ if (!IS_2200(isp) && !IS_2100(isp)) { isp_prt(isp, ISP_LOGWARN, "bad card for ASYNC_CONNMODE event"); break; } chan = 0; mbox = ISP_READ(isp, OUTMAILBOX1); switch (mbox) { case ISP_CONN_LOOP: isp_prt(isp, ISP_LOGINFO, "Point-to-Point -> Loop mode"); break; case ISP_CONN_PTP: isp_prt(isp, ISP_LOGINFO, "Loop -> Point-to-Point mode"); break; case ISP_CONN_BADLIP: isp_prt(isp, ISP_LOGWARN, "Point-to-Point -> Loop mode (BAD LIP)"); break; case ISP_CONN_FATAL: isp->isp_state = ISP_CRASHED; isp_prt(isp, ISP_LOGERR, "FATAL CONNECTION ERROR"); isp_async(isp, ISPASYNC_FW_CRASH); return; case ISP_CONN_LOOPBACK: isp_prt(isp, ISP_LOGWARN, "Looped Back in Point-to-Point mode"); break; default: isp_prt(isp, ISP_LOGWARN, "Unknown connection mode (0x%x)", mbox); break; } ISP_SET_SENDMARKER(isp, chan, 1); FCPARAM(isp, chan)->isp_loopstate = LOOP_HAVE_LINK; isp_async(isp, ISPASYNC_CHANGE_NOTIFY, chan, ISPASYNC_CHANGE_OTHER); break; case ASYNC_P2P_INIT_ERR: isp_prt(isp, ISP_LOGWARN, "P2P init error (reason 0x%x)", ISP_READ(isp, OUTMAILBOX1)); break; case ASYNC_RCV_ERR: if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGWARN, "Receive Error"); } else { isp_prt(isp, ISP_LOGWARN, "unexpected ASYNC_RCV_ERR"); } break; case ASYNC_RJT_SENT: /* same as ASYNC_QFULL_SENT */ if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGTDEBUG0, "LS_RJT sent"); break; } else { isp_prt(isp, ISP_LOGTDEBUG0, "QFULL sent"); break; } case ASYNC_FW_RESTART_COMPLETE: isp_prt(isp, 
ISP_LOGDEBUG0, "FW restart complete"); break; case ASYNC_TEMPERATURE_ALERT: isp_prt(isp, ISP_LOGERR, "Temperature alert (subcode 0x%x)", ISP_READ(isp, OUTMAILBOX1)); break; case ASYNC_TRANSCEIVER_INSERTION: isp_prt(isp, ISP_LOGDEBUG0, "Transceiver insertion (0x%x)", ISP_READ(isp, OUTMAILBOX1)); break; case ASYNC_TRANSCEIVER_REMOVAL: isp_prt(isp, ISP_LOGDEBUG0, "Transceiver removal"); break; case ASYNC_AUTOLOAD_FW_COMPLETE: isp_prt(isp, ISP_LOGDEBUG0, "Autoload FW init complete"); break; case ASYNC_AUTOLOAD_FW_FAILURE: isp_prt(isp, ISP_LOGERR, "Autoload FW init failure"); break; default: isp_prt(isp, ISP_LOGWARN, "Unknown Async Code 0x%x", mbox); break; } } /* * Handle other response entries. A pointer to the request queue output * index is here in case we want to eat several entries at once, although * this is not used currently. */ static int isp_handle_other_response(ispsoftc_t *isp, int type, isphdr_t *hp, uint32_t *optrp) { isp_ridacq_t rid; int chan, c; uint32_t hdl, portid; void *ptr; switch (type) { case RQSTYPE_MARKER: isp_prt(isp, ISP_LOG_WARN1, "Marker Response"); return (1); case RQSTYPE_RPT_ID_ACQ: isp_get_ridacq(isp, (isp_ridacq_t *)hp, &rid); portid = (uint32_t)rid.ridacq_vp_port_hi << 16 | rid.ridacq_vp_port_lo; if (rid.ridacq_format == 0) { for (chan = 0; chan < isp->isp_nchan; chan++) { fcparam *fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) continue; c = (chan == 0) ? 127 : (chan - 1); if (rid.ridacq_map[c / 16] & (1 << (c % 16)) || chan == 0) { fcp->isp_loopstate = LOOP_HAVE_LINK; isp_async(isp, ISPASYNC_CHANGE_NOTIFY, chan, ISPASYNC_CHANGE_OTHER); } else { fcp->isp_loopstate = LOOP_NIL; isp_async(isp, ISPASYNC_LOOP_DOWN, chan); } } } else { fcparam *fcp = FCPARAM(isp, rid.ridacq_vp_index); if (rid.ridacq_vp_status == RIDACQ_STS_COMPLETE || rid.ridacq_vp_status == RIDACQ_STS_CHANGED) { fcp->isp_topo = (rid.ridacq_map[0] >> 9) & 0x7; fcp->isp_portid = portid; fcp->isp_loopstate = LOOP_HAVE_ADDR; isp_async(isp, ISPASYNC_CHANGE_NOTIFY, rid.ridacq_vp_index, ISPASYNC_CHANGE_OTHER); } else { fcp->isp_loopstate = LOOP_NIL; isp_async(isp, ISPASYNC_LOOP_DOWN, rid.ridacq_vp_index); } } return (1); case RQSTYPE_CT_PASSTHRU: case RQSTYPE_VP_MODIFY: case RQSTYPE_VP_CTRL: case RQSTYPE_LOGIN: ISP_IOXGET_32(isp, (uint32_t *)(hp + 1), hdl); ptr = isp_find_xs(isp, hdl); if (ptr != NULL) { isp_destroy_handle(isp, hdl); memcpy(ptr, hp, QENTRY_LEN); wakeup(ptr); } return (1); case RQSTYPE_ATIO: case RQSTYPE_CTIO: case RQSTYPE_NOTIFY: case RQSTYPE_NOTIFY_ACK: case RQSTYPE_CTIO1: case RQSTYPE_ATIO2: case RQSTYPE_CTIO2: case RQSTYPE_CTIO3: case RQSTYPE_CTIO7: case RQSTYPE_ABTS_RCVD: case RQSTYPE_ABTS_RSP: #ifdef ISP_TARGET_MODE return (isp_target_notify(isp, (ispstatusreq_t *) hp, optrp)); #endif /* FALLTHROUGH */ case RQSTYPE_REQUEST: default: return (0); } } static void isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, uint32_t *rp) { switch (sp->req_completion_status & 0xff) { case RQCS_COMPLETE: if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; case RQCS_INCOMPLETE: if ((sp->req_state_flags & RQSF_GOT_TARGET) == 0) { isp_xs_prt(isp, xs, ISP_LOG_WARN1, "Selection Timeout @ %s:%d", __func__, __LINE__); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_SELTIMEOUT); *rp = XS_XFRLEN(xs); } return; } isp_xs_prt(isp, xs, ISP_LOGERR, "Command Incomplete, state 0x%x", sp->req_state_flags); break; case RQCS_DMA_ERROR: isp_xs_prt(isp, xs, ISP_LOGERR, "DMA Error"); *rp = XS_XFRLEN(xs); break; case RQCS_TRANSPORT_ERROR: { char buf[172]; ISP_SNPRINTF(buf, sizeof (buf), "states=>"); 
if (sp->req_state_flags & RQSF_GOT_BUS) { ISP_SNPRINTF(buf, sizeof (buf), "%s GOT_BUS", buf); } if (sp->req_state_flags & RQSF_GOT_TARGET) { ISP_SNPRINTF(buf, sizeof (buf), "%s GOT_TGT", buf); } if (sp->req_state_flags & RQSF_SENT_CDB) { ISP_SNPRINTF(buf, sizeof (buf), "%s SENT_CDB", buf); } if (sp->req_state_flags & RQSF_XFRD_DATA) { ISP_SNPRINTF(buf, sizeof (buf), "%s XFRD_DATA", buf); } if (sp->req_state_flags & RQSF_GOT_STATUS) { ISP_SNPRINTF(buf, sizeof (buf), "%s GOT_STS", buf); } if (sp->req_state_flags & RQSF_GOT_SENSE) { ISP_SNPRINTF(buf, sizeof (buf), "%s GOT_SNS", buf); } if (sp->req_state_flags & RQSF_XFER_COMPLETE) { ISP_SNPRINTF(buf, sizeof (buf), "%s XFR_CMPLT", buf); } ISP_SNPRINTF(buf, sizeof (buf), "%s\nstatus=>", buf); if (sp->req_status_flags & RQSTF_DISCONNECT) { ISP_SNPRINTF(buf, sizeof (buf), "%s Disconnect", buf); } if (sp->req_status_flags & RQSTF_SYNCHRONOUS) { ISP_SNPRINTF(buf, sizeof (buf), "%s Sync_xfr", buf); } if (sp->req_status_flags & RQSTF_PARITY_ERROR) { ISP_SNPRINTF(buf, sizeof (buf), "%s Parity", buf); } if (sp->req_status_flags & RQSTF_BUS_RESET) { ISP_SNPRINTF(buf, sizeof (buf), "%s Bus_Reset", buf); } if (sp->req_status_flags & RQSTF_DEVICE_RESET) { ISP_SNPRINTF(buf, sizeof (buf), "%s Device_Reset", buf); } if (sp->req_status_flags & RQSTF_ABORTED) { ISP_SNPRINTF(buf, sizeof (buf), "%s Aborted", buf); } if (sp->req_status_flags & RQSTF_TIMEOUT) { ISP_SNPRINTF(buf, sizeof (buf), "%s Timeout", buf); } if (sp->req_status_flags & RQSTF_NEGOTIATION) { ISP_SNPRINTF(buf, sizeof (buf), "%s Negotiation", buf); } isp_xs_prt(isp, xs, ISP_LOGERR, "Transport Error: %s", buf); *rp = XS_XFRLEN(xs); break; } case RQCS_RESET_OCCURRED: { int chan; isp_xs_prt(isp, xs, ISP_LOGWARN, "Bus Reset destroyed command"); for (chan = 0; chan < isp->isp_nchan; chan++) { FCPARAM(isp, chan)->sendmarker = 1; } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BUSRESET); } *rp = XS_XFRLEN(xs); return; } case RQCS_ABORTED: isp_xs_prt(isp, xs, ISP_LOGERR, "Command Aborted"); ISP_SET_SENDMARKER(isp, XS_CHANNEL(xs), 1); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ABORTED); } return; case RQCS_TIMEOUT: isp_xs_prt(isp, xs, ISP_LOGWARN, "Command timed out"); /* * XXX: Check to see if we logged out of the device. 
*/ if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_CMDTIMEOUT); } return; case RQCS_DATA_OVERRUN: XS_SET_RESID(xs, sp->req_resid); isp_xs_prt(isp, xs, ISP_LOGERR, "data overrun (%ld)", (long) XS_GET_RESID(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_DATAOVR); } return; case RQCS_COMMAND_OVERRUN: isp_xs_prt(isp, xs, ISP_LOGERR, "command overrun"); break; case RQCS_STATUS_OVERRUN: isp_xs_prt(isp, xs, ISP_LOGERR, "status overrun"); break; case RQCS_BAD_MESSAGE: isp_xs_prt(isp, xs, ISP_LOGERR, "msg not COMMAND COMPLETE after status"); break; case RQCS_NO_MESSAGE_OUT: isp_xs_prt(isp, xs, ISP_LOGERR, "No MESSAGE OUT phase after selection"); break; case RQCS_EXT_ID_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "EXTENDED IDENTIFY failed"); break; case RQCS_IDE_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "INITIATOR DETECTED ERROR rejected"); break; case RQCS_ABORT_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "ABORT OPERATION rejected"); break; case RQCS_REJECT_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "MESSAGE REJECT rejected"); break; case RQCS_NOP_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "NOP rejected"); break; case RQCS_PARITY_ERROR_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "MESSAGE PARITY ERROR rejected"); break; case RQCS_DEVICE_RESET_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGWARN, "BUS DEVICE RESET rejected"); break; case RQCS_ID_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "IDENTIFY rejected"); break; case RQCS_UNEXP_BUS_FREE: isp_xs_prt(isp, xs, ISP_LOGERR, "Unexpected Bus Free"); break; case RQCS_DATA_UNDERRUN: { if (IS_FC(isp)) { int ru_marked = (sp->req_scsi_status & RQCS_RU) != 0; if (!ru_marked || sp->req_resid > XS_XFRLEN(xs)) { isp_xs_prt(isp, xs, ISP_LOGWARN, bun, XS_XFRLEN(xs), sp->req_resid, (ru_marked)? "marked" : "not marked"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } return; } } XS_SET_RESID(xs, sp->req_resid); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; } case RQCS_XACT_ERR1: isp_xs_prt(isp, xs, ISP_LOGERR, "HBA attempted queued transaction with disconnect not set"); break; case RQCS_XACT_ERR2: isp_xs_prt(isp, xs, ISP_LOGERR, "HBA attempted queued transaction to target routine %jx", (uintmax_t)XS_LUN(xs)); break; case RQCS_XACT_ERR3: isp_xs_prt(isp, xs, ISP_LOGERR, "HBA attempted queued cmd when queueing disabled"); break; case RQCS_BAD_ENTRY: isp_prt(isp, ISP_LOGERR, "Invalid IOCB entry type detected"); break; case RQCS_QUEUE_FULL: isp_xs_prt(isp, xs, ISP_LOG_WARN1, "internal queues full status 0x%x", *XS_STSP(xs)); /* * If QFULL or some other status byte is set, then this * isn't an error, per se. * * Unfortunately, some QLogic f/w writers have, in * some cases, omitted to *set* status to QFULL. 
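 * To cope with that, the status byte is forced to SCSI_QFULL here and
 * the command is completed without a host adapter error, leaving the
 * queue-full handling to the upper layers.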
*/ #if 0 if (*XS_STSP(xs) != SCSI_GOOD && XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); return; } #endif *XS_STSP(xs) = SCSI_QFULL; XS_SETERR(xs, HBA_NOERROR); return; case RQCS_PHASE_SKIPPED: isp_xs_prt(isp, xs, ISP_LOGERR, "SCSI phase skipped"); break; case RQCS_ARQS_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "Auto Request Sense Failed"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ARQFAIL); } return; case RQCS_WIDE_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "Wide Negotiation Failed"); if (IS_SCSI(isp)) { sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); sdp->isp_devparam[XS_TGT(xs)].goal_flags &= ~DPARM_WIDE; sdp->isp_devparam[XS_TGT(xs)].dev_update = 1; sdp->update = 1; } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; case RQCS_SYNCXFER_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "SDTR Message Failed"); if (IS_SCSI(isp)) { sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); sdp += XS_CHANNEL(xs); sdp->isp_devparam[XS_TGT(xs)].goal_flags &= ~DPARM_SYNC; sdp->isp_devparam[XS_TGT(xs)].dev_update = 1; sdp->update = 1; } break; case RQCS_LVD_BUSERR: isp_xs_prt(isp, xs, ISP_LOGERR, "Bad LVD condition"); break; case RQCS_PORT_UNAVAILABLE: /* * No such port on the loop. Moral equivalent of SELTIMEO */ case RQCS_PORT_LOGGED_OUT: { const char *reason; uint8_t sts = sp->req_completion_status & 0xff; fcparam *fcp = FCPARAM(isp, 0); fcportdb_t *lp; /* * It was there (maybe)- treat as a selection timeout. */ if (sts == RQCS_PORT_UNAVAILABLE) { reason = "unavailable"; } else { reason = "logout"; } isp_prt(isp, ISP_LOGINFO, "port %s for target %d", reason, XS_TGT(xs)); /* XXX: Should we trigger rescan or FW announce change? */ if (XS_NOERR(xs)) { lp = &fcp->portdb[XS_TGT(xs)]; if (lp->state == FC_PORTDB_STATE_ZOMBIE) { *XS_STSP(xs) = SCSI_BUSY; XS_SETERR(xs, HBA_TGTBSY); } else XS_SETERR(xs, HBA_SELTIMEOUT); } return; } case RQCS_PORT_CHANGED: isp_prt(isp, ISP_LOGWARN, "port changed for target %d", XS_TGT(xs)); if (XS_NOERR(xs)) { *XS_STSP(xs) = SCSI_BUSY; XS_SETERR(xs, HBA_TGTBSY); } return; case RQCS_PORT_BUSY: isp_prt(isp, ISP_LOGWARN, "port busy for target %d", XS_TGT(xs)); if (XS_NOERR(xs)) { *XS_STSP(xs) = SCSI_BUSY; XS_SETERR(xs, HBA_TGTBSY); } return; default: isp_prt(isp, ISP_LOGERR, "Unknown Completion Status 0x%x", sp->req_completion_status); break; } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } } static void isp_parse_status_24xx(ispsoftc_t *isp, isp24xx_statusreq_t *sp, XS_T *xs, uint32_t *rp) { int ru_marked, sv_marked; int chan = XS_CHANNEL(xs); switch (sp->req_completion_status) { case RQCS_COMPLETE: if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; case RQCS_DMA_ERROR: isp_xs_prt(isp, xs, ISP_LOGERR, "DMA error"); break; case RQCS_TRANSPORT_ERROR: isp_xs_prt(isp, xs, ISP_LOGERR, "Transport Error"); break; case RQCS_RESET_OCCURRED: isp_xs_prt(isp, xs, ISP_LOGWARN, "reset destroyed command"); FCPARAM(isp, chan)->sendmarker = 1; if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BUSRESET); } return; case RQCS_ABORTED: isp_xs_prt(isp, xs, ISP_LOGERR, "Command Aborted"); FCPARAM(isp, chan)->sendmarker = 1; if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ABORTED); } return; case RQCS_TIMEOUT: isp_xs_prt(isp, xs, ISP_LOGWARN, "Command Timed Out"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_CMDTIMEOUT); } return; case RQCS_DATA_OVERRUN: XS_SET_RESID(xs, sp->req_resid); isp_xs_prt(isp, xs, ISP_LOGERR, "Data Overrun"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_DATAOVR); } return; case RQCS_24XX_DRE: /* data reassembly error */ isp_prt(isp, ISP_LOGERR, "Chan %d data reassembly error for target %d", chan, XS_TGT(xs)); if (XS_NOERR(xs)) 
{ XS_SETERR(xs, HBA_ABORTED); } *rp = XS_XFRLEN(xs); return; case RQCS_24XX_TABORT: /* aborted by target */ isp_prt(isp, ISP_LOGERR, "Chan %d target %d sent ABTS", chan, XS_TGT(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ABORTED); } return; case RQCS_DATA_UNDERRUN: ru_marked = (sp->req_scsi_status & RQCS_RU) != 0; /* * We can get an underrun w/o things being marked * if we got a non-zero status. */ sv_marked = (sp->req_scsi_status & (RQCS_SV|RQCS_RV)) != 0; if ((ru_marked == 0 && sv_marked == 0) || (sp->req_resid > XS_XFRLEN(xs))) { isp_xs_prt(isp, xs, ISP_LOGWARN, bun, XS_XFRLEN(xs), sp->req_resid, (ru_marked)? "marked" : "not marked"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } return; } XS_SET_RESID(xs, sp->req_resid); isp_xs_prt(isp, xs, ISP_LOG_WARN1, "Data Underrun (%d) for command 0x%x", sp->req_resid, XS_CDBP(xs)[0] & 0xff); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; case RQCS_PORT_UNAVAILABLE: /* * No such port on the loop. Moral equivalent of SELTIMEO */ case RQCS_PORT_LOGGED_OUT: { const char *reason; uint8_t sts = sp->req_completion_status & 0xff; fcparam *fcp = FCPARAM(isp, XS_CHANNEL(xs)); fcportdb_t *lp; /* * It was there (maybe)- treat as a selection timeout. */ if (sts == RQCS_PORT_UNAVAILABLE) { reason = "unavailable"; } else { reason = "logout"; } isp_prt(isp, ISP_LOGINFO, "Chan %d port %s for target %d", chan, reason, XS_TGT(xs)); /* XXX: Should we trigger rescan or FW announce change? */ if (XS_NOERR(xs)) { lp = &fcp->portdb[XS_TGT(xs)]; if (lp->state == FC_PORTDB_STATE_ZOMBIE) { *XS_STSP(xs) = SCSI_BUSY; XS_SETERR(xs, HBA_TGTBSY); } else XS_SETERR(xs, HBA_SELTIMEOUT); } return; } case RQCS_PORT_CHANGED: isp_prt(isp, ISP_LOGWARN, "port changed for target %d chan %d", XS_TGT(xs), chan); if (XS_NOERR(xs)) { *XS_STSP(xs) = SCSI_BUSY; XS_SETERR(xs, HBA_TGTBSY); } return; case RQCS_24XX_ENOMEM: /* f/w resource unavailable */ isp_prt(isp, ISP_LOGWARN, "f/w resource unavailable for target %d chan %d", XS_TGT(xs), chan); if (XS_NOERR(xs)) { *XS_STSP(xs) = SCSI_BUSY; XS_SETERR(xs, HBA_TGTBSY); } return; case RQCS_24XX_TMO: /* task management overrun */ isp_prt(isp, ISP_LOGWARN, "command for target %d overlapped task management for chan %d", XS_TGT(xs), chan); if (XS_NOERR(xs)) { *XS_STSP(xs) = SCSI_BUSY; XS_SETERR(xs, HBA_TGTBSY); } return; default: isp_prt(isp, ISP_LOGERR, "Unknown Completion Status 0x%x on chan %d", sp->req_completion_status, chan); break; } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } } static void isp_fastpost_complete(ispsoftc_t *isp, uint32_t fph) { XS_T *xs; if (fph == 0) { return; } xs = isp_find_xs(isp, fph); if (xs == NULL) { isp_prt(isp, ISP_LOGWARN, "Command for fast post handle 0x%x not found", fph); return; } isp_destroy_handle(isp, fph); /* * Since we don't have a result queue entry item, * we must believe that SCSI status is zero and * that all data transferred. 
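 *
 * Accordingly, the code below reports a zero residual and SCSI_GOOD,
 * releases any DMA mapping keyed by the handle, and completes the command.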
*/ XS_SET_RESID(xs, 0); *XS_STSP(xs) = SCSI_GOOD; if (XS_XFRLEN(xs)) { ISP_DMAFREE(isp, xs, fph); } isp_done(xs); } #define ISP_SCSI_IBITS(op) (mbpscsi[((op)<<1)]) #define ISP_SCSI_OBITS(op) (mbpscsi[((op)<<1) + 1]) #define ISP_SCSI_OPMAP(in, out) in, out static const uint8_t mbpscsi[] = { ISP_SCSI_OPMAP(0x01, 0x01), /* 0x00: MBOX_NO_OP */ ISP_SCSI_OPMAP(0x1f, 0x01), /* 0x01: MBOX_LOAD_RAM */ ISP_SCSI_OPMAP(0x03, 0x01), /* 0x02: MBOX_EXEC_FIRMWARE */ ISP_SCSI_OPMAP(0x1f, 0x01), /* 0x03: MBOX_DUMP_RAM */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x04: MBOX_WRITE_RAM_WORD */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x05: MBOX_READ_RAM_WORD */ ISP_SCSI_OPMAP(0x3f, 0x3f), /* 0x06: MBOX_MAILBOX_REG_TEST */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x07: MBOX_VERIFY_CHECKSUM */ ISP_SCSI_OPMAP(0x01, 0x0f), /* 0x08: MBOX_ABOUT_FIRMWARE */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x09: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x0a: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x0b: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x0c: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x0d: */ ISP_SCSI_OPMAP(0x01, 0x05), /* 0x0e: MBOX_CHECK_FIRMWARE */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x0f: */ ISP_SCSI_OPMAP(0x1f, 0x1f), /* 0x10: MBOX_INIT_REQ_QUEUE */ ISP_SCSI_OPMAP(0x3f, 0x3f), /* 0x11: MBOX_INIT_RES_QUEUE */ ISP_SCSI_OPMAP(0x0f, 0x0f), /* 0x12: MBOX_EXECUTE_IOCB */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x13: MBOX_WAKE_UP */ ISP_SCSI_OPMAP(0x01, 0x3f), /* 0x14: MBOX_STOP_FIRMWARE */ ISP_SCSI_OPMAP(0x0f, 0x0f), /* 0x15: MBOX_ABORT */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x16: MBOX_ABORT_DEVICE */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x17: MBOX_ABORT_TARGET */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x18: MBOX_BUS_RESET */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x19: MBOX_STOP_QUEUE */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x1a: MBOX_START_QUEUE */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x1b: MBOX_SINGLE_STEP_QUEUE */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x1c: MBOX_ABORT_QUEUE */ ISP_SCSI_OPMAP(0x03, 0x4f), /* 0x1d: MBOX_GET_DEV_QUEUE_STATUS */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x1e: */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x1f: MBOX_GET_FIRMWARE_STATUS */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x20: MBOX_GET_INIT_SCSI_ID */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x21: MBOX_GET_SELECT_TIMEOUT */ ISP_SCSI_OPMAP(0x01, 0xc7), /* 0x22: MBOX_GET_RETRY_COUNT */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x23: MBOX_GET_TAG_AGE_LIMIT */ ISP_SCSI_OPMAP(0x01, 0x03), /* 0x24: MBOX_GET_CLOCK_RATE */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x25: MBOX_GET_ACT_NEG_STATE */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x26: MBOX_GET_ASYNC_DATA_SETUP_TIME */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x27: MBOX_GET_PCI_PARAMS */ ISP_SCSI_OPMAP(0x03, 0x4f), /* 0x28: MBOX_GET_TARGET_PARAMS */ ISP_SCSI_OPMAP(0x03, 0x0f), /* 0x29: MBOX_GET_DEV_QUEUE_PARAMS */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x2a: MBOX_GET_RESET_DELAY_PARAMS */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x2b: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x2c: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x2d: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x2e: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x2f: */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x30: MBOX_SET_INIT_SCSI_ID */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x31: MBOX_SET_SELECT_TIMEOUT */ ISP_SCSI_OPMAP(0xc7, 0xc7), /* 0x32: MBOX_SET_RETRY_COUNT */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x33: MBOX_SET_TAG_AGE_LIMIT */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x34: MBOX_SET_CLOCK_RATE */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x35: MBOX_SET_ACT_NEG_STATE */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x36: MBOX_SET_ASYNC_DATA_SETUP_TIME */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x37: MBOX_SET_PCI_CONTROL_PARAMS */ ISP_SCSI_OPMAP(0x4f, 0x4f), /* 0x38: MBOX_SET_TARGET_PARAMS */ ISP_SCSI_OPMAP(0x0f, 0x0f), /* 0x39: 
MBOX_SET_DEV_QUEUE_PARAMS */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x3a: MBOX_SET_RESET_DELAY_PARAMS */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x3b: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x3c: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x3d: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x3e: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x3f: */ ISP_SCSI_OPMAP(0x01, 0x03), /* 0x40: MBOX_RETURN_BIOS_BLOCK_ADDR */ ISP_SCSI_OPMAP(0x3f, 0x01), /* 0x41: MBOX_WRITE_FOUR_RAM_WORDS */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x42: MBOX_EXEC_BIOS_IOCB */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x43: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x44: */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x45: SET SYSTEM PARAMETER */ ISP_SCSI_OPMAP(0x01, 0x03), /* 0x46: GET SYSTEM PARAMETER */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x47: */ ISP_SCSI_OPMAP(0x01, 0xcf), /* 0x48: GET SCAM CONFIGURATION */ ISP_SCSI_OPMAP(0xcf, 0xcf), /* 0x49: SET SCAM CONFIGURATION */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x4a: MBOX_SET_FIRMWARE_FEATURES */ ISP_SCSI_OPMAP(0x01, 0x03), /* 0x4b: MBOX_GET_FIRMWARE_FEATURES */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x4c: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x4d: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x4e: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x4f: */ ISP_SCSI_OPMAP(0xdf, 0xdf), /* 0x50: LOAD RAM A64 */ ISP_SCSI_OPMAP(0xdf, 0xdf), /* 0x51: DUMP RAM A64 */ ISP_SCSI_OPMAP(0xdf, 0xff), /* 0x52: INITIALIZE REQUEST QUEUE A64 */ ISP_SCSI_OPMAP(0xef, 0xff), /* 0x53: INITIALIZE RESPONSE QUEUE A64 */ ISP_SCSI_OPMAP(0xcf, 0x01), /* 0x54: EXECUCUTE COMMAND IOCB A64 */ ISP_SCSI_OPMAP(0x07, 0x01), /* 0x55: ENABLE TARGET MODE */ ISP_SCSI_OPMAP(0x03, 0x0f), /* 0x56: GET TARGET STATUS */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x57: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x58: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x59: */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x5a: SET DATA OVERRUN RECOVERY MODE */ ISP_SCSI_OPMAP(0x01, 0x03), /* 0x5b: GET DATA OVERRUN RECOVERY MODE */ ISP_SCSI_OPMAP(0x0f, 0x0f), /* 0x5c: SET HOST DATA */ ISP_SCSI_OPMAP(0x01, 0x01) /* 0x5d: GET NOST DATA */ }; #define MAX_SCSI_OPCODE 0x5d static const char *scsi_mbcmd_names[] = { "NO-OP", "LOAD RAM", "EXEC FIRMWARE", "DUMP RAM", "WRITE RAM WORD", "READ RAM WORD", "MAILBOX REG TEST", "VERIFY CHECKSUM", "ABOUT FIRMWARE", NULL, NULL, NULL, NULL, NULL, "CHECK FIRMWARE", NULL, "INIT REQUEST QUEUE", "INIT RESULT QUEUE", "EXECUTE IOCB", "WAKE UP", "STOP FIRMWARE", "ABORT", "ABORT DEVICE", "ABORT TARGET", "BUS RESET", "STOP QUEUE", "START QUEUE", "SINGLE STEP QUEUE", "ABORT QUEUE", "GET DEV QUEUE STATUS", NULL, "GET FIRMWARE STATUS", "GET INIT SCSI ID", "GET SELECT TIMEOUT", "GET RETRY COUNT", "GET TAG AGE LIMIT", "GET CLOCK RATE", "GET ACT NEG STATE", "GET ASYNC DATA SETUP TIME", "GET PCI PARAMS", "GET TARGET PARAMS", "GET DEV QUEUE PARAMS", "GET RESET DELAY PARAMS", NULL, NULL, NULL, NULL, NULL, "SET INIT SCSI ID", "SET SELECT TIMEOUT", "SET RETRY COUNT", "SET TAG AGE LIMIT", "SET CLOCK RATE", "SET ACT NEG STATE", "SET ASYNC DATA SETUP TIME", "SET PCI CONTROL PARAMS", "SET TARGET PARAMS", "SET DEV QUEUE PARAMS", "SET RESET DELAY PARAMS", NULL, NULL, NULL, NULL, NULL, "RETURN BIOS BLOCK ADDR", "WRITE FOUR RAM WORDS", "EXEC BIOS IOCB", NULL, NULL, "SET SYSTEM PARAMETER", "GET SYSTEM PARAMETER", NULL, "GET SCAM CONFIGURATION", "SET SCAM CONFIGURATION", "SET FIRMWARE FEATURES", "GET FIRMWARE FEATURES", NULL, NULL, NULL, NULL, "LOAD RAM A64", "DUMP RAM A64", "INITIALIZE REQUEST QUEUE A64", "INITIALIZE RESPONSE QUEUE A64", "EXECUTE IOCB A64", "ENABLE TARGET MODE", "GET TARGET MODE STATE", NULL, NULL, NULL, "SET DATA OVERRUN RECOVERY MODE", "GET DATA OVERRUN RECOVERY MODE", "SET HOST DATA", 
"GET NOST DATA", }; #define ISP_FC_IBITS(op) ((mbpfc[((op)<<3) + 0] << 24) | (mbpfc[((op)<<3) + 1] << 16) | (mbpfc[((op)<<3) + 2] << 8) | (mbpfc[((op)<<3) + 3])) #define ISP_FC_OBITS(op) ((mbpfc[((op)<<3) + 4] << 24) | (mbpfc[((op)<<3) + 5] << 16) | (mbpfc[((op)<<3) + 6] << 8) | (mbpfc[((op)<<3) + 7])) #define ISP_FC_OPMAP(in0, out0) 0, 0, 0, in0, 0, 0, 0, out0 #define ISP_FC_OPMAP_HALF(in1, in0, out1, out0) 0, 0, in1, in0, 0, 0, out1, out0 #define ISP_FC_OPMAP_FULL(in3, in2, in1, in0, out3, out2, out1, out0) in3, in2, in1, in0, out3, out2, out1, out0 static const uint32_t mbpfc[] = { ISP_FC_OPMAP(0x01, 0x01), /* 0x00: MBOX_NO_OP */ ISP_FC_OPMAP(0x1f, 0x01), /* 0x01: MBOX_LOAD_RAM */ ISP_FC_OPMAP_HALF(0x07, 0xff, 0x00, 0x1f), /* 0x02: MBOX_EXEC_FIRMWARE */ ISP_FC_OPMAP(0xdf, 0x01), /* 0x03: MBOX_DUMP_RAM */ ISP_FC_OPMAP(0x07, 0x07), /* 0x04: MBOX_WRITE_RAM_WORD */ ISP_FC_OPMAP(0x03, 0x07), /* 0x05: MBOX_READ_RAM_WORD */ ISP_FC_OPMAP_FULL(0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff), /* 0x06: MBOX_MAILBOX_REG_TEST */ ISP_FC_OPMAP(0x07, 0x07), /* 0x07: MBOX_VERIFY_CHECKSUM */ ISP_FC_OPMAP_FULL(0x0, 0x0, 0x0, 0x01, 0x0, 0x3, 0x80, 0x7f), /* 0x08: MBOX_ABOUT_FIRMWARE */ ISP_FC_OPMAP(0xdf, 0x01), /* 0x09: MBOX_LOAD_RISC_RAM_2100 */ ISP_FC_OPMAP(0xdf, 0x01), /* 0x0a: DUMP RAM */ ISP_FC_OPMAP_HALF(0x1, 0xff, 0x0, 0x01), /* 0x0b: MBOX_LOAD_RISC_RAM */ ISP_FC_OPMAP(0x00, 0x00), /* 0x0c: */ ISP_FC_OPMAP_HALF(0x1, 0x0f, 0x0, 0x01), /* 0x0d: MBOX_WRITE_RAM_WORD_EXTENDED */ ISP_FC_OPMAP(0x01, 0x05), /* 0x0e: MBOX_CHECK_FIRMWARE */ ISP_FC_OPMAP_HALF(0x1, 0x03, 0x0, 0x0d), /* 0x0f: MBOX_READ_RAM_WORD_EXTENDED */ ISP_FC_OPMAP(0x1f, 0x11), /* 0x10: MBOX_INIT_REQ_QUEUE */ ISP_FC_OPMAP(0x2f, 0x21), /* 0x11: MBOX_INIT_RES_QUEUE */ ISP_FC_OPMAP(0x0f, 0x01), /* 0x12: MBOX_EXECUTE_IOCB */ ISP_FC_OPMAP(0x03, 0x03), /* 0x13: MBOX_WAKE_UP */ ISP_FC_OPMAP_HALF(0x1, 0xff, 0x0, 0x03), /* 0x14: MBOX_STOP_FIRMWARE */ ISP_FC_OPMAP(0x4f, 0x01), /* 0x15: MBOX_ABORT */ ISP_FC_OPMAP(0x07, 0x01), /* 0x16: MBOX_ABORT_DEVICE */ ISP_FC_OPMAP(0x07, 0x01), /* 0x17: MBOX_ABORT_TARGET */ ISP_FC_OPMAP(0x03, 0x03), /* 0x18: MBOX_BUS_RESET */ ISP_FC_OPMAP(0x07, 0x05), /* 0x19: MBOX_STOP_QUEUE */ ISP_FC_OPMAP(0x07, 0x05), /* 0x1a: MBOX_START_QUEUE */ ISP_FC_OPMAP(0x07, 0x05), /* 0x1b: MBOX_SINGLE_STEP_QUEUE */ ISP_FC_OPMAP(0x07, 0x05), /* 0x1c: MBOX_ABORT_QUEUE */ ISP_FC_OPMAP(0x07, 0x03), /* 0x1d: MBOX_GET_DEV_QUEUE_STATUS */ ISP_FC_OPMAP(0x00, 0x00), /* 0x1e: */ ISP_FC_OPMAP(0x01, 0x07), /* 0x1f: MBOX_GET_FIRMWARE_STATUS */ ISP_FC_OPMAP_HALF(0x2, 0x01, 0x7e, 0xcf), /* 0x20: MBOX_GET_LOOP_ID */ ISP_FC_OPMAP(0x00, 0x00), /* 0x21: */ ISP_FC_OPMAP(0x03, 0x4b), /* 0x22: MBOX_GET_TIMEOUT_PARAMS */ ISP_FC_OPMAP(0x00, 0x00), /* 0x23: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x24: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x25: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x26: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x27: */ ISP_FC_OPMAP(0x01, 0x03), /* 0x28: MBOX_GET_FIRMWARE_OPTIONS */ ISP_FC_OPMAP(0x03, 0x07), /* 0x29: MBOX_GET_PORT_QUEUE_PARAMS */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2a: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2b: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2c: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2d: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2e: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2f: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x30: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x31: */ ISP_FC_OPMAP(0x4b, 0x4b), /* 0x32: MBOX_SET_TIMEOUT_PARAMS */ ISP_FC_OPMAP(0x00, 0x00), /* 0x33: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x34: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x35: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x36: */ ISP_FC_OPMAP(0x00, 
0x00), /* 0x37: */ ISP_FC_OPMAP(0x0f, 0x01), /* 0x38: MBOX_SET_FIRMWARE_OPTIONS */ ISP_FC_OPMAP(0x0f, 0x07), /* 0x39: MBOX_SET_PORT_QUEUE_PARAMS */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3a: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3b: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3c: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3d: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3e: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3f: */ ISP_FC_OPMAP(0x03, 0x01), /* 0x40: MBOX_LOOP_PORT_BYPASS */ ISP_FC_OPMAP(0x03, 0x01), /* 0x41: MBOX_LOOP_PORT_ENABLE */ ISP_FC_OPMAP_HALF(0x0, 0x01, 0x1f, 0xcf), /* 0x42: MBOX_GET_RESOURCE_COUNT */ ISP_FC_OPMAP(0x01, 0x01), /* 0x43: MBOX_REQUEST_OFFLINE_MODE */ ISP_FC_OPMAP(0x00, 0x00), /* 0x44: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x45: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x46: */ ISP_FC_OPMAP(0xcf, 0x03), /* 0x47: GET PORT_DATABASE ENHANCED */ ISP_FC_OPMAP(0xcf, 0x0f), /* 0x48: MBOX_INIT_FIRMWARE_MULTI_ID */ ISP_FC_OPMAP(0xcd, 0x01), /* 0x49: MBOX_GET_VP_DATABASE */ ISP_FC_OPMAP_HALF(0x2, 0xcd, 0x0, 0x01), /* 0x4a: MBOX_GET_VP_DATABASE_ENTRY */ ISP_FC_OPMAP(0x00, 0x00), /* 0x4b: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x4c: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x4d: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x4e: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x4f: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x50: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x51: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x52: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x53: */ ISP_FC_OPMAP(0xcf, 0x01), /* 0x54: EXECUTE IOCB A64 */ ISP_FC_OPMAP(0x00, 0x00), /* 0x55: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x56: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x57: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x58: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x59: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x5a: */ ISP_FC_OPMAP(0x03, 0x01), /* 0x5b: MBOX_DRIVER_HEARTBEAT */ ISP_FC_OPMAP(0xcf, 0x01), /* 0x5c: MBOX_FW_HEARTBEAT */ ISP_FC_OPMAP(0x07, 0x1f), /* 0x5d: MBOX_GET_SET_DATA_RATE */ ISP_FC_OPMAP(0x00, 0x00), /* 0x5e: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x5f: */ ISP_FC_OPMAP(0xcf, 0x0f), /* 0x60: MBOX_INIT_FIRMWARE */ ISP_FC_OPMAP(0x00, 0x00), /* 0x61: */ ISP_FC_OPMAP(0x01, 0x01), /* 0x62: MBOX_INIT_LIP */ ISP_FC_OPMAP(0xcd, 0x03), /* 0x63: MBOX_GET_FC_AL_POSITION_MAP */ ISP_FC_OPMAP(0xcf, 0x01), /* 0x64: MBOX_GET_PORT_DB */ ISP_FC_OPMAP(0x07, 0x01), /* 0x65: MBOX_CLEAR_ACA */ ISP_FC_OPMAP(0x07, 0x01), /* 0x66: MBOX_TARGET_RESET */ ISP_FC_OPMAP(0x07, 0x01), /* 0x67: MBOX_CLEAR_TASK_SET */ ISP_FC_OPMAP(0x07, 0x01), /* 0x68: MBOX_ABORT_TASK_SET */ ISP_FC_OPMAP_HALF(0x00, 0x01, 0x0f, 0x1f), /* 0x69: MBOX_GET_FW_STATE */ ISP_FC_OPMAP_HALF(0x6, 0x03, 0x0, 0xcf), /* 0x6a: MBOX_GET_PORT_NAME */ ISP_FC_OPMAP(0xcf, 0x01), /* 0x6b: MBOX_GET_LINK_STATUS */ ISP_FC_OPMAP(0x0f, 0x01), /* 0x6c: MBOX_INIT_LIP_RESET */ ISP_FC_OPMAP(0x00, 0x00), /* 0x6d: */ ISP_FC_OPMAP(0xcf, 0x03), /* 0x6e: MBOX_SEND_SNS */ ISP_FC_OPMAP(0x0f, 0x07), /* 0x6f: MBOX_FABRIC_LOGIN */ ISP_FC_OPMAP_HALF(0x02, 0x03, 0x00, 0x03), /* 0x70: MBOX_SEND_CHANGE_REQUEST */ ISP_FC_OPMAP(0x03, 0x03), /* 0x71: MBOX_FABRIC_LOGOUT */ ISP_FC_OPMAP(0x0f, 0x0f), /* 0x72: MBOX_INIT_LIP_LOGIN */ ISP_FC_OPMAP(0x00, 0x00), /* 0x73: */ ISP_FC_OPMAP(0x07, 0x01), /* 0x74: LOGIN LOOP PORT */ ISP_FC_OPMAP_HALF(0x03, 0xcf, 0x00, 0x07), /* 0x75: GET PORT/NODE NAME LIST */ ISP_FC_OPMAP(0x4f, 0x01), /* 0x76: SET VENDOR ID */ ISP_FC_OPMAP(0xcd, 0x01), /* 0x77: INITIALIZE IP MAILBOX */ ISP_FC_OPMAP(0x00, 0x00), /* 0x78: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x79: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x7a: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x7b: */ ISP_FC_OPMAP_HALF(0x03, 0x4f, 0x00, 0x07), /* 0x7c: Get ID List */ ISP_FC_OPMAP(0xcf, 0x01), /* 0x7d: SEND LFA */ 
ISP_FC_OPMAP(0x0f, 0x01) /* 0x7e: LUN RESET */ }; #define MAX_FC_OPCODE 0x7e /* * Footnotes * * (1): this sets bits 21..16 in mailbox register #8, which we nominally * do not access at this time in the core driver. The caller is * responsible for setting this register first (Gross!). The assumption * is that we won't overflow. */ static const char *fc_mbcmd_names[] = { "NO-OP", /* 00h */ "LOAD RAM", "EXEC FIRMWARE", "DUMP RAM", "WRITE RAM WORD", "READ RAM WORD", "MAILBOX REG TEST", "VERIFY CHECKSUM", "ABOUT FIRMWARE", "LOAD RAM (2100)", "DUMP RAM", "LOAD RISC RAM", "DUMP RISC RAM", "WRITE RAM WORD EXTENDED", "CHECK FIRMWARE", "READ RAM WORD EXTENDED", "INIT REQUEST QUEUE", /* 10h */ "INIT RESULT QUEUE", "EXECUTE IOCB", "WAKE UP", "STOP FIRMWARE", "ABORT", "ABORT DEVICE", "ABORT TARGET", "BUS RESET", "STOP QUEUE", "START QUEUE", "SINGLE STEP QUEUE", "ABORT QUEUE", "GET DEV QUEUE STATUS", NULL, "GET FIRMWARE STATUS", "GET LOOP ID", /* 20h */ NULL, "GET TIMEOUT PARAMS", NULL, NULL, NULL, NULL, NULL, "GET FIRMWARE OPTIONS", "GET PORT QUEUE PARAMS", "GENERATE SYSTEM ERROR", NULL, NULL, NULL, NULL, NULL, "WRITE SFP", /* 30h */ "READ SFP", "SET TIMEOUT PARAMS", NULL, NULL, NULL, NULL, NULL, "SET FIRMWARE OPTIONS", "SET PORT QUEUE PARAMS", NULL, "SET FC LED CONF", NULL, "RESTART NIC FIRMWARE", "ACCESS CONTROL", NULL, "LOOP PORT BYPASS", /* 40h */ "LOOP PORT ENABLE", "GET RESOURCE COUNT", "REQUEST NON PARTICIPATING MODE", "DIAGNOSTIC ECHO TEST", "DIAGNOSTIC LOOPBACK", NULL, "GET PORT DATABASE ENHANCED", "INIT FIRMWARE MULTI ID", "GET VP DATABASE", "GET VP DATABASE ENTRY", NULL, NULL, NULL, NULL, NULL, "GET FCF LIST", /* 50h */ "GET DCBX PARAMETERS", NULL, "HOST MEMORY COPY", "EXECUTE IOCB A64", NULL, NULL, "SEND RNID", NULL, "SET PARAMETERS", "GET PARAMETERS", "DRIVER HEARTBEAT", "FIRMWARE HEARTBEAT", "GET/SET DATA RATE", "SEND RNFT", NULL, "INIT FIRMWARE", /* 60h */ "GET INIT CONTROL BLOCK", "INIT LIP", "GET FC-AL POSITION MAP", "GET PORT DATABASE", "CLEAR ACA", "TARGET RESET", "CLEAR TASK SET", "ABORT TASK SET", "GET FW STATE", "GET PORT NAME", "GET LINK STATUS", "INIT LIP RESET", "GET LINK STATS & PRIVATE DATA CNTS", "SEND SNS", "FABRIC LOGIN", "SEND CHANGE REQUEST", /* 70h */ "FABRIC LOGOUT", "INIT LIP LOGIN", NULL, "LOGIN LOOP PORT", "GET PORT/NODE NAME LIST", "SET VENDOR ID", "INITIALIZE IP MAILBOX", NULL, NULL, "GET XGMAC STATS", NULL, "GET ID LIST", "SEND LFA", "LUN RESET" }; static void isp_mboxcmd(ispsoftc_t *isp, mbreg_t *mbp) { const char *cname, *xname, *sname; char tname[16], mname[16]; unsigned int ibits, obits, box, opcode; opcode = mbp->param[0]; if (IS_FC(isp)) { if (opcode > MAX_FC_OPCODE) { mbp->param[0] = MBOX_INVALID_COMMAND; isp_prt(isp, ISP_LOGERR, "Unknown Command 0x%x", opcode); return; } cname = fc_mbcmd_names[opcode]; ibits = ISP_FC_IBITS(opcode); obits = ISP_FC_OBITS(opcode); } else { if (opcode > MAX_SCSI_OPCODE) { mbp->param[0] = MBOX_INVALID_COMMAND; isp_prt(isp, ISP_LOGERR, "Unknown Command 0x%x", opcode); return; } cname = scsi_mbcmd_names[opcode]; ibits = ISP_SCSI_IBITS(opcode); obits = ISP_SCSI_OBITS(opcode); } if (cname == NULL) { cname = tname; ISP_SNPRINTF(tname, sizeof tname, "opcode %x", opcode); } isp_prt(isp, ISP_LOGDEBUG3, "Mailbox Command '%s'", cname); /* * Pick up any additional bits that the caller might have set. 
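 *
 * For example: bit N of ibits/obits selects mailbox register N, so the
 * ISP_FC_OPMAP(0x01, 0x01) entry for MBOX_NO_OP loads and reads back only
 * mailbox register 0 (the opcode/completion register), while the
 * ISP_FC_OPMAP_HALF(0x00, 0x01, 0x0f, 0x1f) entry for MBOX_GET_FW_STATE
 * yields ibits 0x0001 and obits 0x0f1f, i.e. register 0 going in and
 * registers 0-4 and 8-11 coming back. These masks drive the register
 * copy loops below.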
*/ ibits |= mbp->ibits; obits |= mbp->obits; /* * Mask any bits that the caller wants us to mask */ ibits &= mbp->ibitm; obits &= mbp->obitm; if (ibits == 0 && obits == 0) { mbp->param[0] = MBOX_COMMAND_PARAM_ERROR; isp_prt(isp, ISP_LOGERR, "no parameters for 0x%x", opcode); return; } /* * Get exclusive usage of mailbox registers. */ if (MBOX_ACQUIRE(isp)) { mbp->param[0] = MBOX_REGS_BUSY; goto out; } for (box = 0; box < ISP_NMBOX(isp); box++) { if (ibits & (1 << box)) { isp_prt(isp, ISP_LOGDEBUG3, "IN mbox %d = 0x%04x", box, mbp->param[box]); ISP_WRITE(isp, MBOX_OFF(box), mbp->param[box]); } isp->isp_mboxtmp[box] = mbp->param[box] = 0; } isp->isp_lastmbxcmd = opcode; /* * We assume that we can't overwrite a previous command. */ isp->isp_obits = obits; isp->isp_mboxbsy = 1; /* * Set Host Interrupt condition so that RISC will pick up mailbox regs. */ if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_SET_HOST_INT); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_SET_HOST_INT); } /* * While we haven't finished the command, spin our wheels here. */ MBOX_WAIT_COMPLETE(isp, mbp); /* * Did the command time out? */ if (mbp->param[0] == MBOX_TIMEOUT) { isp->isp_mboxbsy = 0; MBOX_RELEASE(isp); goto out; } /* * Copy back output registers. */ for (box = 0; box < ISP_NMBOX(isp); box++) { if (obits & (1 << box)) { mbp->param[box] = isp->isp_mboxtmp[box]; isp_prt(isp, ISP_LOGDEBUG3, "OUT mbox %d = 0x%04x", box, mbp->param[box]); } } isp->isp_mboxbsy = 0; MBOX_RELEASE(isp); out: if (mbp->logval == 0 || mbp->param[0] == MBOX_COMMAND_COMPLETE) return; if ((mbp->param[0] & 0xbfe0) == 0 && (mbp->logval & MBLOGMASK(mbp->param[0])) == 0) return; xname = NULL; sname = ""; switch (mbp->param[0]) { case MBOX_INVALID_COMMAND: xname = "INVALID COMMAND"; break; case MBOX_HOST_INTERFACE_ERROR: xname = "HOST INTERFACE ERROR"; break; case MBOX_TEST_FAILED: xname = "TEST FAILED"; break; case MBOX_COMMAND_ERROR: xname = "COMMAND ERROR"; ISP_SNPRINTF(mname, sizeof(mname), " subcode 0x%x", mbp->param[1]); sname = mname; break; case MBOX_COMMAND_PARAM_ERROR: xname = "COMMAND PARAMETER ERROR"; break; case MBOX_PORT_ID_USED: xname = "PORT ID ALREADY IN USE"; break; case MBOX_LOOP_ID_USED: xname = "LOOP ID ALREADY IN USE"; break; case MBOX_ALL_IDS_USED: xname = "ALL LOOP IDS IN USE"; break; case MBOX_NOT_LOGGED_IN: xname = "NOT LOGGED IN"; break; case MBOX_LINK_DOWN_ERROR: xname = "LINK DOWN ERROR"; break; case MBOX_LOOPBACK_ERROR: xname = "LOOPBACK ERROR"; break; case MBOX_CHECKSUM_ERROR: xname = "CHECKSUM ERROR"; break; case MBOX_INVALID_PRODUCT_KEY: xname = "INVALID PRODUCT KEY"; break; case MBOX_REGS_BUSY: xname = "REGISTERS BUSY"; break; case MBOX_TIMEOUT: xname = "TIMEOUT"; break; default: ISP_SNPRINTF(mname, sizeof mname, "error 0x%x", mbp->param[0]); xname = mname; break; } if (xname) { isp_prt(isp, ISP_LOGALL, "Mailbox Command '%s' failed (%s%s)", cname, xname, sname); } } static int isp_fw_state(ispsoftc_t *isp, int chan) { if (IS_FC(isp)) { mbreg_t mbs; MBSINIT(&mbs, MBOX_GET_FW_STATE, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { return (mbs.param[1]); } } return (FW_ERROR); } static void isp_spi_update(ispsoftc_t *isp, int chan) { int tgt; mbreg_t mbs; sdparam *sdp; if (IS_FC(isp)) { /* * There are no 'per-bus' settings for Fibre Channel. 
*/ return; } sdp = SDPARAM(isp, chan); sdp->update = 0; for (tgt = 0; tgt < MAX_TARGETS; tgt++) { uint16_t flags, period, offset; int get; if (sdp->isp_devparam[tgt].dev_enable == 0) { sdp->isp_devparam[tgt].dev_update = 0; sdp->isp_devparam[tgt].dev_refresh = 0; isp_prt(isp, ISP_LOGDEBUG0, "skipping target %d bus %d update", tgt, chan); continue; } /* * If the goal is to update the status of the device, * take what's in goal_flags and try and set the device * toward that. Otherwise, if we're just refreshing the * current device state, get the current parameters. */ MBSINIT(&mbs, 0, MBLOGALL, 0); /* * Refresh overrides set */ if (sdp->isp_devparam[tgt].dev_refresh) { mbs.param[0] = MBOX_GET_TARGET_PARAMS; get = 1; } else if (sdp->isp_devparam[tgt].dev_update) { mbs.param[0] = MBOX_SET_TARGET_PARAMS; /* * Make sure goal_flags has "Renegotiate on Error" * on and "Freeze Queue on Error" off. */ sdp->isp_devparam[tgt].goal_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].goal_flags &= ~DPARM_QFRZ; mbs.param[2] = sdp->isp_devparam[tgt].goal_flags; /* * Insist that PARITY must be enabled * if SYNC or WIDE is enabled. */ if ((mbs.param[2] & (DPARM_SYNC|DPARM_WIDE)) != 0) { mbs.param[2] |= DPARM_PARITY; } if (mbs.param[2] & DPARM_SYNC) { mbs.param[3] = (sdp->isp_devparam[tgt].goal_offset << 8) | (sdp->isp_devparam[tgt].goal_period); } /* * A command completion later that has * RQSTF_NEGOTIATION set can cause * the dev_refresh/announce cycle also. * * Note: It is really important to update our current * flags with at least the state of TAG capabilities- * otherwise we might try and send a tagged command * when we have it all turned off. So change it here * to say that current already matches goal. */ sdp->isp_devparam[tgt].actv_flags &= ~DPARM_TQING; sdp->isp_devparam[tgt].actv_flags |= (sdp->isp_devparam[tgt].goal_flags & DPARM_TQING); isp_prt(isp, ISP_LOGDEBUG0, "bus %d set tgt %d flags 0x%x off 0x%x period 0x%x", chan, tgt, mbs.param[2], mbs.param[3] >> 8, mbs.param[3] & 0xff); get = 0; } else { continue; } mbs.param[1] = (chan << 15) | (tgt << 8); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { continue; } if (get == 0) { sdp->sendmarker = 1; sdp->isp_devparam[tgt].dev_update = 0; sdp->isp_devparam[tgt].dev_refresh = 1; } else { sdp->isp_devparam[tgt].dev_refresh = 0; flags = mbs.param[2]; period = mbs.param[3] & 0xff; offset = mbs.param[3] >> 8; sdp->isp_devparam[tgt].actv_flags = flags; sdp->isp_devparam[tgt].actv_period = period; sdp->isp_devparam[tgt].actv_offset = offset; isp_async(isp, ISPASYNC_NEW_TGT_PARAMS, chan, tgt); } } for (tgt = 0; tgt < MAX_TARGETS; tgt++) { if (sdp->isp_devparam[tgt].dev_update || sdp->isp_devparam[tgt].dev_refresh) { sdp->update = 1; break; } } } static void isp_setdfltsdparm(ispsoftc_t *isp) { int tgt; sdparam *sdp, *sdp1; sdp = SDPARAM(isp, 0); if (IS_DUALBUS(isp)) sdp1 = sdp + 1; else sdp1 = NULL; /* * Establish some default parameters. */ sdp->isp_cmd_dma_burst_enable = 0; sdp->isp_data_dma_burst_enabl = 1; sdp->isp_fifo_threshold = 0; sdp->isp_initiator_id = DEFAULT_IID(isp, 0); if (isp->isp_type >= ISP_HA_SCSI_1040) { sdp->isp_async_data_setup = 9; } else { sdp->isp_async_data_setup = 6; } sdp->isp_selection_timeout = 250; sdp->isp_max_queue_depth = MAXISPREQUEST(isp); sdp->isp_tag_aging = 8; sdp->isp_bus_reset_delay = 5; /* * Don't retry selection, busy or queue full automatically- reflect * these back to us. 
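 *
 * A retry count and delay of zero leaves those conditions to the upper
 * layers instead of having the firmware retry them silently.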
*/ sdp->isp_retry_count = 0; sdp->isp_retry_delay = 0; for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].exc_throttle = ISP_EXEC_THROTTLE; sdp->isp_devparam[tgt].dev_enable = 1; } /* * The trick here is to establish a default for the default (honk!) * state (goal_flags). Then try and get the current status from * the card to fill in the current state. We don't, in fact, set * the default to the SAFE default state- that's not the goal state. */ for (tgt = 0; tgt < MAX_TARGETS; tgt++) { uint8_t off, per; sdp->isp_devparam[tgt].actv_offset = 0; sdp->isp_devparam[tgt].actv_period = 0; sdp->isp_devparam[tgt].actv_flags = 0; sdp->isp_devparam[tgt].goal_flags = sdp->isp_devparam[tgt].nvrm_flags = DPARM_DEFAULT; /* * We default to Wide/Fast for versions less than a 1040 * (unless it's SBus). */ if (IS_ULTRA3(isp)) { off = ISP_80M_SYNCPARMS >> 8; per = ISP_80M_SYNCPARMS & 0xff; } else if (IS_ULTRA2(isp)) { off = ISP_40M_SYNCPARMS >> 8; per = ISP_40M_SYNCPARMS & 0xff; } else if (IS_1240(isp)) { off = ISP_20M_SYNCPARMS >> 8; per = ISP_20M_SYNCPARMS & 0xff; } else if ((isp->isp_bustype == ISP_BT_SBUS && isp->isp_type < ISP_HA_SCSI_1020A) || (isp->isp_bustype == ISP_BT_PCI && isp->isp_type < ISP_HA_SCSI_1040) || (isp->isp_clock && isp->isp_clock < 60) || (sdp->isp_ultramode == 0)) { off = ISP_10M_SYNCPARMS >> 8; per = ISP_10M_SYNCPARMS & 0xff; } else { off = ISP_20M_SYNCPARMS_1040 >> 8; per = ISP_20M_SYNCPARMS_1040 & 0xff; } sdp->isp_devparam[tgt].goal_offset = sdp->isp_devparam[tgt].nvrm_offset = off; sdp->isp_devparam[tgt].goal_period = sdp->isp_devparam[tgt].nvrm_period = per; } /* * If we're a dual bus card, just copy the data over */ if (sdp1) { *sdp1 = *sdp; sdp1->isp_initiator_id = DEFAULT_IID(isp, 1); } /* * If we've not been told to avoid reading NVRAM, try and read it. * If we're successful reading it, we can then return because NVRAM * will tell us what the desired settings are. Otherwise, we establish * some reasonable 'fake' nvram and goal defaults. */ if ((isp->isp_confopts & ISP_CFG_NONVRAM) == 0) { mbreg_t mbs; if (isp_read_nvram(isp, 0) == 0) { if (IS_DUALBUS(isp)) { if (isp_read_nvram(isp, 1) == 0) { return; } } } MBSINIT(&mbs, MBOX_GET_ACT_NEG_STATE, MBLOGNONE, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { sdp->isp_req_ack_active_neg = 1; sdp->isp_data_line_active_neg = 1; if (sdp1) { sdp1->isp_req_ack_active_neg = 1; sdp1->isp_data_line_active_neg = 1; } } else { sdp->isp_req_ack_active_neg = (mbs.param[1] >> 4) & 0x1; sdp->isp_data_line_active_neg = (mbs.param[1] >> 5) & 0x1; if (sdp1) { sdp1->isp_req_ack_active_neg = (mbs.param[2] >> 4) & 0x1; sdp1->isp_data_line_active_neg = (mbs.param[2] >> 5) & 0x1; } } } } static void isp_setdfltfcparm(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); /* * Establish some default parameters. 
*/ fcp->role = DEFAULT_ROLE(isp, chan); fcp->isp_maxalloc = ICB_DFLT_ALLOC; fcp->isp_retry_delay = ICB_DFLT_RDELAY; fcp->isp_retry_count = ICB_DFLT_RCOUNT; fcp->isp_loopid = DEFAULT_LOOPID(isp, chan); fcp->isp_wwnn_nvram = DEFAULT_NODEWWN(isp, chan); fcp->isp_wwpn_nvram = DEFAULT_PORTWWN(isp, chan); fcp->isp_fwoptions = 0; fcp->isp_xfwoptions = 0; fcp->isp_zfwoptions = 0; fcp->isp_lasthdl = NIL_HANDLE; fcp->isp_login_hdl = NIL_HANDLE; if (IS_24XX(isp)) { fcp->isp_fwoptions |= ICB2400_OPT1_FAIRNESS; fcp->isp_fwoptions |= ICB2400_OPT1_HARD_ADDRESS; if (isp->isp_confopts & ISP_CFG_FULL_DUPLEX) fcp->isp_fwoptions |= ICB2400_OPT1_FULL_DUPLEX; fcp->isp_fwoptions |= ICB2400_OPT1_BOTH_WWNS; fcp->isp_xfwoptions |= ICB2400_OPT2_LOOP_2_PTP; fcp->isp_zfwoptions |= ICB2400_OPT3_RATE_AUTO; } else { fcp->isp_fwoptions |= ICBOPT_FAIRNESS; fcp->isp_fwoptions |= ICBOPT_PDBCHANGE_AE; fcp->isp_fwoptions |= ICBOPT_HARD_ADDRESS; if (isp->isp_confopts & ISP_CFG_FULL_DUPLEX) fcp->isp_fwoptions |= ICBOPT_FULL_DUPLEX; /* * Make sure this is turned off now until we get * extended options from NVRAM */ fcp->isp_fwoptions &= ~ICBOPT_EXTENDED; fcp->isp_xfwoptions |= ICBXOPT_LOOP_2_PTP; fcp->isp_zfwoptions |= ICBZOPT_RATE_AUTO; } /* * Now try and read NVRAM unless told to not do so. * This will set fcparam's isp_wwnn_nvram && isp_wwpn_nvram. */ if ((isp->isp_confopts & ISP_CFG_NONVRAM) == 0) { int i, j = 0; /* * Give a couple of tries at reading NVRAM. */ for (i = 0; i < 2; i++) { j = isp_read_nvram(isp, chan); if (j == 0) { break; } } if (j) { isp->isp_confopts |= ISP_CFG_NONVRAM; } } fcp->isp_wwnn = ACTIVE_NODEWWN(isp, chan); fcp->isp_wwpn = ACTIVE_PORTWWN(isp, chan); isp_prt(isp, ISP_LOGCONFIG, "Chan %d 0x%08x%08x/0x%08x%08x Role %s", chan, (uint32_t) (fcp->isp_wwnn >> 32), (uint32_t) (fcp->isp_wwnn), (uint32_t) (fcp->isp_wwpn >> 32), (uint32_t) (fcp->isp_wwpn), isp_class3_roles[fcp->role]); } /* * Re-initialize the ISP and complete all orphaned commands * with a 'botched' notice. The reset/init routines should * not disturb an already active list of commands. */ int isp_reinit(ispsoftc_t *isp, int do_load_defaults) { int i, res = 0; if (isp->isp_state > ISP_RESETSTATE) isp_stop(isp); if (isp->isp_state != ISP_RESETSTATE) isp_reset(isp, do_load_defaults); if (isp->isp_state != ISP_RESETSTATE) { res = EIO; isp_prt(isp, ISP_LOGERR, "%s: cannot reset card", __func__); goto cleanup; } isp_init(isp); if (isp->isp_state > ISP_RESETSTATE && isp->isp_state != ISP_RUNSTATE) { res = EIO; isp_prt(isp, ISP_LOGERR, "%s: cannot init card", __func__); ISP_DISABLE_INTS(isp); if (IS_FC(isp)) { /* * If we're in ISP_ROLE_NONE, turn off the lasers. 
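 *
 * On pre-24XX chips this is done below by selecting the FPM0 register
 * page and soft-resetting the fibre protocol module, then selecting the
 * framing buffer page and resetting all of its FIFOs, before switching
 * back to the RISC register page.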
*/ if (!IS_24XX(isp)) { ISP_WRITE(isp, BIU2100_CSR, BIU2100_FPM0_REGS); ISP_WRITE(isp, FPM_DIAG_CONFIG, FPM_SOFT_RESET); ISP_WRITE(isp, BIU2100_CSR, BIU2100_FB_REGS); ISP_WRITE(isp, FBM_CMD, FBMCMD_FIFO_RESET_ALL); ISP_WRITE(isp, BIU2100_CSR, BIU2100_RISC_REGS); } } } cleanup: isp_clear_commands(isp); if (IS_FC(isp)) { for (i = 0; i < isp->isp_nchan; i++) isp_clear_portdb(isp, i); } return (res); } /* * NVRAM Routines */ static int isp_read_nvram(ispsoftc_t *isp, int bus) { int i, amt, retval; uint8_t csum, minversion; union { uint8_t _x[ISP2400_NVRAM_SIZE]; uint16_t _s[ISP2400_NVRAM_SIZE>>1]; } _n; #define nvram_data _n._x #define nvram_words _n._s if (IS_24XX(isp)) { return (isp_read_nvram_2400(isp, nvram_data)); } else if (IS_FC(isp)) { amt = ISP2100_NVRAM_SIZE; minversion = 1; } else if (IS_ULTRA2(isp)) { amt = ISP1080_NVRAM_SIZE; minversion = 0; } else { amt = ISP_NVRAM_SIZE; minversion = 2; } for (i = 0; i < amt>>1; i++) { isp_rdnvram_word(isp, i, &nvram_words[i]); } if (nvram_data[0] != 'I' || nvram_data[1] != 'S' || nvram_data[2] != 'P') { if (isp->isp_bustype != ISP_BT_SBUS) { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM header"); isp_prt(isp, ISP_LOGDEBUG0, "%x %x %x", nvram_data[0], nvram_data[1], nvram_data[2]); } retval = -1; goto out; } for (csum = 0, i = 0; i < amt; i++) { csum += nvram_data[i]; } if (csum != 0) { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM checksum"); retval = -1; goto out; } if (ISP_NVRAM_VERSION(nvram_data) < minversion) { isp_prt(isp, ISP_LOGWARN, "version %d NVRAM not understood", ISP_NVRAM_VERSION(nvram_data)); retval = -1; goto out; } if (IS_ULTRA3(isp)) { isp_parse_nvram_12160(isp, bus, nvram_data); } else if (IS_1080(isp)) { isp_parse_nvram_1080(isp, bus, nvram_data); } else if (IS_1280(isp) || IS_1240(isp)) { isp_parse_nvram_1080(isp, bus, nvram_data); } else if (IS_SCSI(isp)) { isp_parse_nvram_1020(isp, nvram_data); } else { isp_parse_nvram_2100(isp, nvram_data); } retval = 0; out: return (retval); #undef nvram_data #undef nvram_words } static int isp_read_nvram_2400(ispsoftc_t *isp, uint8_t *nvram_data) { int retval = 0; uint32_t addr, csum, lwrds, *dptr; if (isp->isp_port) { addr = ISP2400_NVRAM_PORT1_ADDR; } else { addr = ISP2400_NVRAM_PORT0_ADDR; } dptr = (uint32_t *) nvram_data; for (lwrds = 0; lwrds < ISP2400_NVRAM_SIZE >> 2; lwrds++) { isp_rd_2400_nvram(isp, addr++, dptr++); } if (nvram_data[0] != 'I' || nvram_data[1] != 'S' || nvram_data[2] != 'P') { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM header (%x %x %x)", nvram_data[0], nvram_data[1], nvram_data[2]); retval = -1; goto out; } dptr = (uint32_t *) nvram_data; for (csum = 0, lwrds = 0; lwrds < ISP2400_NVRAM_SIZE >> 2; lwrds++) { uint32_t tmp; ISP_IOXGET_32(isp, &dptr[lwrds], tmp); csum += tmp; } if (csum != 0) { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM checksum"); retval = -1; goto out; } isp_parse_nvram_2400(isp, nvram_data); out: return (retval); } static void isp_rdnvram_word(ispsoftc_t *isp, int wo, uint16_t *rp) { int i, cbits; uint16_t bit, rqst, junk; ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT); ISP_DELAY(10); ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT|BIU_NVRAM_CLOCK); ISP_DELAY(10); if (IS_FC(isp)) { wo &= ((ISP2100_NVRAM_SIZE >> 1) - 1); if (IS_2312(isp) && isp->isp_port) { wo += 128; } rqst = (ISP_NVRAM_READ << 8) | wo; cbits = 10; } else if (IS_ULTRA2(isp)) { wo &= ((ISP1080_NVRAM_SIZE >> 1) - 1); rqst = (ISP_NVRAM_READ << 8) | wo; cbits = 10; } else { wo &= ((ISP_NVRAM_SIZE >> 1) - 1); rqst = (ISP_NVRAM_READ << 6) | wo; cbits = 8; } /* * Clock the word select request out... 
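 *
 * Each of the (cbits + 1) request bits is presented MSB first on the
 * NVRAM data-out line with chip select held, then latched by raising and
 * dropping BIU_NVRAM_CLOCK; the throwaway reads after each write only
 * flush posted PCI writes. The 16 result bits are then shifted back in,
 * MSB first, by the loop that follows.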
*/ for (i = cbits; i >= 0; i--) { if ((rqst >> i) & 1) { bit = BIU_NVRAM_SELECT | BIU_NVRAM_DATAOUT; } else { bit = BIU_NVRAM_SELECT; } ISP_WRITE(isp, BIU_NVRAM, bit); ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ ISP_WRITE(isp, BIU_NVRAM, bit | BIU_NVRAM_CLOCK); ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ ISP_WRITE(isp, BIU_NVRAM, bit); ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ } /* * Now read the result back in (bits come back in MSB format). */ *rp = 0; for (i = 0; i < 16; i++) { uint16_t rv; *rp <<= 1; ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT|BIU_NVRAM_CLOCK); ISP_DELAY(10); rv = ISP_READ(isp, BIU_NVRAM); if (rv & BIU_NVRAM_DATAIN) { *rp |= 1; } ISP_DELAY(10); ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT); ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ } ISP_WRITE(isp, BIU_NVRAM, 0); ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ ISP_SWIZZLE_NVRAM_WORD(isp, rp); } static void isp_rd_2400_nvram(ispsoftc_t *isp, uint32_t addr, uint32_t *rp) { int loops = 0; uint32_t base = 0x7ffe0000; uint32_t tmp = 0; if (IS_26XX(isp)) { base = 0x7fe7c000; /* XXX: Observation, may be wrong. */ } else if (IS_25XX(isp)) { base = 0x7ff00000 | 0x48000; } ISP_WRITE(isp, BIU2400_FLASH_ADDR, base | addr); for (loops = 0; loops < 5000; loops++) { ISP_DELAY(10); tmp = ISP_READ(isp, BIU2400_FLASH_ADDR); if ((tmp & (1U << 31)) != 0) { break; } } if (tmp & (1U << 31)) { *rp = ISP_READ(isp, BIU2400_FLASH_DATA); ISP_SWIZZLE_NVRAM_LONG(isp, rp); } else { *rp = 0xffffffff; } } static void isp_parse_nvram_1020(ispsoftc_t *isp, uint8_t *nvram_data) { sdparam *sdp = SDPARAM(isp, 0); int tgt; sdp->isp_fifo_threshold = ISP_NVRAM_FIFO_THRESHOLD(nvram_data) | (ISP_NVRAM_FIFO_THRESHOLD_128(nvram_data) << 2); if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) sdp->isp_initiator_id = ISP_NVRAM_INITIATOR_ID(nvram_data); sdp->isp_bus_reset_delay = ISP_NVRAM_BUS_RESET_DELAY(nvram_data); sdp->isp_retry_count = ISP_NVRAM_BUS_RETRY_COUNT(nvram_data); sdp->isp_retry_delay = ISP_NVRAM_BUS_RETRY_DELAY(nvram_data); sdp->isp_async_data_setup = ISP_NVRAM_ASYNC_DATA_SETUP_TIME(nvram_data); if (isp->isp_type >= ISP_HA_SCSI_1040) { if (sdp->isp_async_data_setup < 9) { sdp->isp_async_data_setup = 9; } } else { if (sdp->isp_async_data_setup != 6) { sdp->isp_async_data_setup = 6; } } sdp->isp_req_ack_active_neg = ISP_NVRAM_REQ_ACK_ACTIVE_NEGATION(nvram_data); sdp->isp_data_line_active_neg = ISP_NVRAM_DATA_LINE_ACTIVE_NEGATION(nvram_data); sdp->isp_data_dma_burst_enabl = ISP_NVRAM_DATA_DMA_BURST_ENABLE(nvram_data); sdp->isp_cmd_dma_burst_enable = ISP_NVRAM_CMD_DMA_BURST_ENABLE(nvram_data); sdp->isp_tag_aging = ISP_NVRAM_TAG_AGE_LIMIT(nvram_data); sdp->isp_selection_timeout = ISP_NVRAM_SELECTION_TIMEOUT(nvram_data); sdp->isp_max_queue_depth = ISP_NVRAM_MAX_QUEUE_DEPTH(nvram_data); sdp->isp_fast_mttr = ISP_NVRAM_FAST_MTTR_ENABLE(nvram_data); for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].dev_enable = ISP_NVRAM_TGT_DEVICE_ENABLE(nvram_data, tgt); sdp->isp_devparam[tgt].exc_throttle = ISP_NVRAM_TGT_EXEC_THROTTLE(nvram_data, tgt); sdp->isp_devparam[tgt].nvrm_offset = ISP_NVRAM_TGT_SYNC_OFFSET(nvram_data, tgt); sdp->isp_devparam[tgt].nvrm_period = ISP_NVRAM_TGT_SYNC_PERIOD(nvram_data, tgt); /* * We probably shouldn't lie about this, but it * it makes it much safer if we limit NVRAM values * to sanity. 
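 *
 * For pre-1040 (non-Ultra) parts this clamps the period factor to no
 * faster than 0x19 (Fast-10) and the offset to 12; Ultra-capable parts
 * only have their offset clamped to 8.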
*/ if (isp->isp_type < ISP_HA_SCSI_1040) { /* * If we're not ultra, we can't possibly * be a shorter period than this. */ if (sdp->isp_devparam[tgt].nvrm_period < 0x19) { sdp->isp_devparam[tgt].nvrm_period = 0x19; } if (sdp->isp_devparam[tgt].nvrm_offset > 0xc) { sdp->isp_devparam[tgt].nvrm_offset = 0x0c; } } else { if (sdp->isp_devparam[tgt].nvrm_offset > 0x8) { sdp->isp_devparam[tgt].nvrm_offset = 0x8; } } sdp->isp_devparam[tgt].nvrm_flags = 0; if (ISP_NVRAM_TGT_RENEG(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].nvrm_flags |= DPARM_ARQ; if (ISP_NVRAM_TGT_TQING(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_TQING; if (ISP_NVRAM_TGT_SYNC(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_SYNC; if (ISP_NVRAM_TGT_WIDE(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_WIDE; if (ISP_NVRAM_TGT_PARITY(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_PARITY; if (ISP_NVRAM_TGT_DISC(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_DISC; sdp->isp_devparam[tgt].actv_flags = 0; /* we don't know */ sdp->isp_devparam[tgt].goal_offset = sdp->isp_devparam[tgt].nvrm_offset; sdp->isp_devparam[tgt].goal_period = sdp->isp_devparam[tgt].nvrm_period; sdp->isp_devparam[tgt].goal_flags = sdp->isp_devparam[tgt].nvrm_flags; } } static void isp_parse_nvram_1080(ispsoftc_t *isp, int bus, uint8_t *nvram_data) { sdparam *sdp = SDPARAM(isp, bus); int tgt; sdp->isp_fifo_threshold = ISP1080_NVRAM_FIFO_THRESHOLD(nvram_data); if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) sdp->isp_initiator_id = ISP1080_NVRAM_INITIATOR_ID(nvram_data, bus); sdp->isp_bus_reset_delay = ISP1080_NVRAM_BUS_RESET_DELAY(nvram_data, bus); sdp->isp_retry_count = ISP1080_NVRAM_BUS_RETRY_COUNT(nvram_data, bus); sdp->isp_retry_delay = ISP1080_NVRAM_BUS_RETRY_DELAY(nvram_data, bus); sdp->isp_async_data_setup = ISP1080_NVRAM_ASYNC_DATA_SETUP_TIME(nvram_data, bus); sdp->isp_req_ack_active_neg = ISP1080_NVRAM_REQ_ACK_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_line_active_neg = ISP1080_NVRAM_DATA_LINE_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_dma_burst_enabl = ISP1080_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_cmd_dma_burst_enable = ISP1080_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_selection_timeout = ISP1080_NVRAM_SELECTION_TIMEOUT(nvram_data, bus); sdp->isp_max_queue_depth = ISP1080_NVRAM_MAX_QUEUE_DEPTH(nvram_data, bus); for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].dev_enable = ISP1080_NVRAM_TGT_DEVICE_ENABLE(nvram_data, tgt, bus); sdp->isp_devparam[tgt].exc_throttle = ISP1080_NVRAM_TGT_EXEC_THROTTLE(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_offset = ISP1080_NVRAM_TGT_SYNC_OFFSET(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_period = ISP1080_NVRAM_TGT_SYNC_PERIOD(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_flags = 0; if (ISP1080_NVRAM_TGT_RENEG(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].nvrm_flags |= DPARM_ARQ; if (ISP1080_NVRAM_TGT_TQING(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_TQING; if (ISP1080_NVRAM_TGT_SYNC(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_SYNC; if (ISP1080_NVRAM_TGT_WIDE(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_WIDE; if (ISP1080_NVRAM_TGT_PARITY(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_PARITY; if (ISP1080_NVRAM_TGT_DISC(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_DISC; sdp->isp_devparam[tgt].actv_flags = 0; 
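		/*
		 * Actual negotiated state is not known yet; the negotiation
		 * goals below start from the NVRAM values.
		 */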
sdp->isp_devparam[tgt].goal_offset = sdp->isp_devparam[tgt].nvrm_offset; sdp->isp_devparam[tgt].goal_period = sdp->isp_devparam[tgt].nvrm_period; sdp->isp_devparam[tgt].goal_flags = sdp->isp_devparam[tgt].nvrm_flags; } } static void isp_parse_nvram_12160(ispsoftc_t *isp, int bus, uint8_t *nvram_data) { sdparam *sdp = SDPARAM(isp, bus); int tgt; sdp->isp_fifo_threshold = ISP12160_NVRAM_FIFO_THRESHOLD(nvram_data); if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) sdp->isp_initiator_id = ISP12160_NVRAM_INITIATOR_ID(nvram_data, bus); sdp->isp_bus_reset_delay = ISP12160_NVRAM_BUS_RESET_DELAY(nvram_data, bus); sdp->isp_retry_count = ISP12160_NVRAM_BUS_RETRY_COUNT(nvram_data, bus); sdp->isp_retry_delay = ISP12160_NVRAM_BUS_RETRY_DELAY(nvram_data, bus); sdp->isp_async_data_setup = ISP12160_NVRAM_ASYNC_DATA_SETUP_TIME(nvram_data, bus); sdp->isp_req_ack_active_neg = ISP12160_NVRAM_REQ_ACK_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_line_active_neg = ISP12160_NVRAM_DATA_LINE_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_dma_burst_enabl = ISP12160_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_cmd_dma_burst_enable = ISP12160_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_selection_timeout = ISP12160_NVRAM_SELECTION_TIMEOUT(nvram_data, bus); sdp->isp_max_queue_depth = ISP12160_NVRAM_MAX_QUEUE_DEPTH(nvram_data, bus); for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].dev_enable = ISP12160_NVRAM_TGT_DEVICE_ENABLE(nvram_data, tgt, bus); sdp->isp_devparam[tgt].exc_throttle = ISP12160_NVRAM_TGT_EXEC_THROTTLE(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_offset = ISP12160_NVRAM_TGT_SYNC_OFFSET(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_period = ISP12160_NVRAM_TGT_SYNC_PERIOD(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_flags = 0; if (ISP12160_NVRAM_TGT_RENEG(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].nvrm_flags |= DPARM_ARQ; if (ISP12160_NVRAM_TGT_TQING(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_TQING; if (ISP12160_NVRAM_TGT_SYNC(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_SYNC; if (ISP12160_NVRAM_TGT_WIDE(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_WIDE; if (ISP12160_NVRAM_TGT_PARITY(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_PARITY; if (ISP12160_NVRAM_TGT_DISC(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_DISC; sdp->isp_devparam[tgt].actv_flags = 0; sdp->isp_devparam[tgt].goal_offset = sdp->isp_devparam[tgt].nvrm_offset; sdp->isp_devparam[tgt].goal_period = sdp->isp_devparam[tgt].nvrm_period; sdp->isp_devparam[tgt].goal_flags = sdp->isp_devparam[tgt].nvrm_flags; } } static void isp_parse_nvram_2100(ispsoftc_t *isp, uint8_t *nvram_data) { fcparam *fcp = FCPARAM(isp, 0); uint64_t wwn; /* * There is NVRAM storage for both Port and Node entities- * but the Node entity appears to be unused on all the cards * I can find. However, we should account for this being set * at some point in the future. * * Qlogic WWNs have an NAA of 2, but usually nothing shows up in * bits 48..60. In the case of the 2202, it appears that they do * use bit 48 to distinguish between the two instances on the card. * The 2204, which I've never seen, *probably* extends this method. 
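 *
 * The code below forces an NAA of 2 into bits 63..60 whenever NVRAM left
 * the top nibble zero and, when no separate node name is stored, derives
 * one from the port name by clearing bits 59..48.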
*/ wwn = ISP2100_NVRAM_PORT_NAME(nvram_data); if (wwn) { isp_prt(isp, ISP_LOGCONFIG, "NVRAM Port WWN 0x%08x%08x", (uint32_t) (wwn >> 32), (uint32_t) (wwn)); if ((wwn >> 60) == 0) { wwn |= (((uint64_t) 2)<< 60); } } fcp->isp_wwpn_nvram = wwn; if (IS_2200(isp) || IS_23XX(isp)) { wwn = ISP2100_NVRAM_NODE_NAME(nvram_data); if (wwn) { isp_prt(isp, ISP_LOGCONFIG, "NVRAM Node WWN 0x%08x%08x", (uint32_t) (wwn >> 32), (uint32_t) (wwn)); if ((wwn >> 60) == 0) { wwn |= (((uint64_t) 2)<< 60); } } else { wwn = fcp->isp_wwpn_nvram & ~((uint64_t) 0xfff << 48); } } else { wwn &= ~((uint64_t) 0xfff << 48); } fcp->isp_wwnn_nvram = wwn; fcp->isp_maxalloc = ISP2100_NVRAM_MAXIOCBALLOCATION(nvram_data); if ((isp->isp_confopts & ISP_CFG_OWNFSZ) == 0) { DEFAULT_FRAMESIZE(isp) = ISP2100_NVRAM_MAXFRAMELENGTH(nvram_data); } fcp->isp_retry_delay = ISP2100_NVRAM_RETRY_DELAY(nvram_data); fcp->isp_retry_count = ISP2100_NVRAM_RETRY_COUNT(nvram_data); if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) { fcp->isp_loopid = ISP2100_NVRAM_HARDLOOPID(nvram_data); } if ((isp->isp_confopts & ISP_CFG_OWNEXCTHROTTLE) == 0) { DEFAULT_EXEC_THROTTLE(isp) = ISP2100_NVRAM_EXECUTION_THROTTLE(nvram_data); } fcp->isp_fwoptions = ISP2100_NVRAM_OPTIONS(nvram_data); isp_prt(isp, ISP_LOGDEBUG0, "NVRAM 0x%08x%08x 0x%08x%08x maxalloc %d maxframelen %d", (uint32_t) (fcp->isp_wwnn_nvram >> 32), (uint32_t) fcp->isp_wwnn_nvram, (uint32_t) (fcp->isp_wwpn_nvram >> 32), (uint32_t) fcp->isp_wwpn_nvram, ISP2100_NVRAM_MAXIOCBALLOCATION(nvram_data), ISP2100_NVRAM_MAXFRAMELENGTH(nvram_data)); isp_prt(isp, ISP_LOGDEBUG0, "execthrottle %d fwoptions 0x%x hardloop %d tov %d", ISP2100_NVRAM_EXECUTION_THROTTLE(nvram_data), ISP2100_NVRAM_OPTIONS(nvram_data), ISP2100_NVRAM_HARDLOOPID(nvram_data), ISP2100_NVRAM_TOV(nvram_data)); fcp->isp_xfwoptions = ISP2100_XFW_OPTIONS(nvram_data); fcp->isp_zfwoptions = ISP2100_ZFW_OPTIONS(nvram_data); isp_prt(isp, ISP_LOGDEBUG0, "xfwoptions 0x%x zfw options 0x%x", ISP2100_XFW_OPTIONS(nvram_data), ISP2100_ZFW_OPTIONS(nvram_data)); } static void isp_parse_nvram_2400(ispsoftc_t *isp, uint8_t *nvram_data) { fcparam *fcp = FCPARAM(isp, 0); uint64_t wwn; isp_prt(isp, ISP_LOGDEBUG0, "NVRAM 0x%08x%08x 0x%08x%08x exchg_cnt %d maxframelen %d", (uint32_t) (ISP2400_NVRAM_NODE_NAME(nvram_data) >> 32), (uint32_t) (ISP2400_NVRAM_NODE_NAME(nvram_data)), (uint32_t) (ISP2400_NVRAM_PORT_NAME(nvram_data) >> 32), (uint32_t) (ISP2400_NVRAM_PORT_NAME(nvram_data)), ISP2400_NVRAM_EXCHANGE_COUNT(nvram_data), ISP2400_NVRAM_MAXFRAMELENGTH(nvram_data)); isp_prt(isp, ISP_LOGDEBUG0, "NVRAM execthr %d loopid %d fwopt1 0x%x fwopt2 0x%x fwopt3 0x%x", ISP2400_NVRAM_EXECUTION_THROTTLE(nvram_data), ISP2400_NVRAM_HARDLOOPID(nvram_data), ISP2400_NVRAM_FIRMWARE_OPTIONS1(nvram_data), ISP2400_NVRAM_FIRMWARE_OPTIONS2(nvram_data), ISP2400_NVRAM_FIRMWARE_OPTIONS3(nvram_data)); wwn = ISP2400_NVRAM_PORT_NAME(nvram_data); fcp->isp_wwpn_nvram = wwn; wwn = ISP2400_NVRAM_NODE_NAME(nvram_data); if (wwn) { if ((wwn >> 60) != 2 && (wwn >> 60) != 5) { wwn = 0; } } if (wwn == 0 && (fcp->isp_wwpn_nvram >> 60) == 2) { wwn = fcp->isp_wwpn_nvram; wwn &= ~((uint64_t) 0xfff << 48); } fcp->isp_wwnn_nvram = wwn; if (ISP2400_NVRAM_EXCHANGE_COUNT(nvram_data)) { fcp->isp_maxalloc = ISP2400_NVRAM_EXCHANGE_COUNT(nvram_data); } if ((isp->isp_confopts & ISP_CFG_OWNFSZ) == 0) { DEFAULT_FRAMESIZE(isp) = ISP2400_NVRAM_MAXFRAMELENGTH(nvram_data); } if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) { fcp->isp_loopid = ISP2400_NVRAM_HARDLOOPID(nvram_data); } if ((isp->isp_confopts & ISP_CFG_OWNEXCTHROTTLE) 
== 0) { DEFAULT_EXEC_THROTTLE(isp) = ISP2400_NVRAM_EXECUTION_THROTTLE(nvram_data); } fcp->isp_fwoptions = ISP2400_NVRAM_FIRMWARE_OPTIONS1(nvram_data); fcp->isp_xfwoptions = ISP2400_NVRAM_FIRMWARE_OPTIONS2(nvram_data); fcp->isp_zfwoptions = ISP2400_NVRAM_FIRMWARE_OPTIONS3(nvram_data); } Index: head/sys/dev/isp/isp_freebsd.c =================================================================== --- head/sys/dev/isp/isp_freebsd.c (revision 367043) +++ head/sys/dev/isp/isp_freebsd.c (revision 367044) @@ -1,4329 +1,4331 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2017 Alexander Motin * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include MODULE_VERSION(isp, 1); MODULE_DEPEND(isp, cam, 1, 1, 1); int isp_announced = 0; int isp_loop_down_limit = 60; /* default loop down limit */ int isp_quickboot_time = 7; /* don't wait more than N secs for loop up */ int isp_gone_device_time = 30; /* grace time before reporting device lost */ static const char prom3[] = "Chan %d [%u] PortID 0x%06x Departed because of %s"; static void isp_freeze_loopdown(ispsoftc_t *, int); static void isp_loop_changed(ispsoftc_t *isp, int chan); static d_ioctl_t ispioctl; static void isp_cam_async(void *, uint32_t, struct cam_path *, void *); static void isp_poll(struct cam_sim *); static callout_func_t isp_watchdog; static callout_func_t isp_gdt; static task_fn_t isp_gdt_task; static void isp_kthread(void *); static void isp_action(struct cam_sim *, union ccb *); static int isp_timer_count; static void isp_timer(void *); static struct cdevsw isp_cdevsw = { .d_version = D_VERSION, .d_ioctl = ispioctl, .d_name = "isp", }; static int isp_role_sysctl(SYSCTL_HANDLER_ARGS) { ispsoftc_t *isp = (ispsoftc_t *)arg1; int chan = arg2; int error, old, value; value = FCPARAM(isp, chan)->role; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); if (value < ISP_ROLE_NONE || value > ISP_ROLE_BOTH) return (EINVAL); ISP_LOCK(isp); old = FCPARAM(isp, chan)->role; /* We don't allow target mode switch from here. 
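 *
 * Only the initiator bit of the requested value is honored below; the
 * current target role bit is carried over unchanged, so this sysctl can
 * enable or disable initiator mode without disturbing target mode.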
*/ value = (old & ISP_ROLE_TARGET) | (value & ISP_ROLE_INITIATOR); /* If nothing has changed -- we are done. */ if (value == old) { ISP_UNLOCK(isp); return (0); } /* Actually change the role. */ error = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, value); ISP_UNLOCK(isp); return (error); } static int isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan) { struct ccb_setasync csa; struct cam_sim *sim; struct cam_path *path; #ifdef ISP_TARGET_MODE int i; #endif sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, device_get_unit(isp->isp_dev), &isp->isp_lock, isp->isp_maxcmds, isp->isp_maxcmds, devq); if (sim == NULL) return (ENOMEM); ISP_LOCK(isp); if (xpt_bus_register(sim, isp->isp_dev, chan) != CAM_SUCCESS) { ISP_UNLOCK(isp); cam_sim_free(sim, FALSE); return (EIO); } ISP_UNLOCK(isp); if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { ISP_LOCK(isp); xpt_bus_deregister(cam_sim_path(sim)); ISP_UNLOCK(isp); cam_sim_free(sim, FALSE); return (ENXIO); } xpt_setup_ccb(&csa.ccb_h, path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE; csa.callback = isp_cam_async; csa.callback_arg = sim; ISP_LOCK(isp); xpt_action((union ccb *)&csa); ISP_UNLOCK(isp); if (IS_SCSI(isp)) { struct isp_spi *spi = ISP_SPI_PC(isp, chan); spi->sim = sim; spi->path = path; #ifdef ISP_TARGET_MODE TAILQ_INIT(&spi->waitq); STAILQ_INIT(&spi->ntfree); for (i = 0; i < ATPDPSIZE; i++) STAILQ_INSERT_TAIL(&spi->ntfree, &spi->ntpool[i], next); LIST_INIT(&spi->atfree); for (i = ATPDPSIZE-1; i >= 0; i--) LIST_INSERT_HEAD(&spi->atfree, &spi->atpool[i], next); for (i = 0; i < ATPDPHASHSIZE; i++) LIST_INIT(&spi->atused[i]); #endif } else { fcparam *fcp = FCPARAM(isp, chan); struct isp_fc *fc = ISP_FC_PC(isp, chan); struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(isp->isp_osinfo.dev); struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev); char name[16]; ISP_LOCK(isp); fc->sim = sim; fc->path = path; fc->isp = isp; fc->ready = 1; fcp->isp_use_gft_id = 1; fcp->isp_use_gff_id = 1; callout_init_mtx(&fc->gdt, &isp->isp_lock, 0); TASK_INIT(&fc->gtask, 1, isp_gdt_task, fc); #ifdef ISP_TARGET_MODE TAILQ_INIT(&fc->waitq); STAILQ_INIT(&fc->ntfree); for (i = 0; i < ATPDPSIZE; i++) STAILQ_INSERT_TAIL(&fc->ntfree, &fc->ntpool[i], next); LIST_INIT(&fc->atfree); for (i = ATPDPSIZE-1; i >= 0; i--) LIST_INSERT_HEAD(&fc->atfree, &fc->atpool[i], next); for (i = 0; i < ATPDPHASHSIZE; i++) LIST_INIT(&fc->atused[i]); #endif isp_loop_changed(isp, chan); ISP_UNLOCK(isp); if (kproc_create(isp_kthread, fc, &fc->kproc, 0, 0, "%s_%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) { xpt_free_path(fc->path); ISP_LOCK(isp); xpt_bus_deregister(cam_sim_path(fc->sim)); ISP_UNLOCK(isp); cam_sim_free(fc->sim, FALSE); return (ENOMEM); } fc->num_threads += 1; if (chan > 0) { snprintf(name, sizeof(name), "chan%d", chan); tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, name, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Virtual channel"); } SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwnn", CTLFLAG_RD, &fcp->isp_wwnn, "World Wide Node Name"); SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwpn", CTLFLAG_RD, &fcp->isp_wwpn, "World Wide Port Name"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "loop_down_limit", CTLFLAG_RW, &fc->loop_down_limit, 0, "Loop Down Limit"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "gone_device_time", CTLFLAG_RW, &fc->gone_device_time, 0, "Gone Device Time"); #if defined(ISP_TARGET_MODE) && 
defined(DEBUG) SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "inject_lost_data_frame", CTLFLAG_RW, &fc->inject_lost_data_frame, 0, "Cause a Lost Frame on a Read"); #endif SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "role", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, isp, chan, isp_role_sysctl, "I", "Current role"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "speed", CTLFLAG_RD, &fcp->isp_gbspeed, 0, "Connection speed in gigabits"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "linkstate", CTLFLAG_RD, &fcp->isp_linkstate, 0, "Link state"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "fwstate", CTLFLAG_RD, &fcp->isp_fwstate, 0, "Firmware state"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "loopstate", CTLFLAG_RD, &fcp->isp_loopstate, 0, "Loop state"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "topo", CTLFLAG_RD, &fcp->isp_topo, 0, "Connection topology"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "use_gft_id", CTLFLAG_RWTUN, &fcp->isp_use_gft_id, 0, "Use GFT_ID during fabric scan"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "use_gff_id", CTLFLAG_RWTUN, &fcp->isp_use_gff_id, 0, "Use GFF_ID during fabric scan"); } return (0); } static void isp_detach_chan(ispsoftc_t *isp, int chan) { struct cam_sim *sim; struct cam_path *path; struct ccb_setasync csa; int *num_threads; ISP_GET_PC(isp, chan, sim, sim); ISP_GET_PC(isp, chan, path, path); ISP_GET_PC_ADDR(isp, chan, num_threads, num_threads); xpt_setup_ccb(&csa.ccb_h, path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = isp_cam_async; csa.callback_arg = sim; xpt_action((union ccb *)&csa); xpt_free_path(path); xpt_bus_deregister(cam_sim_path(sim)); cam_sim_free(sim, FALSE); /* Wait for the channel's spawned threads to exit. */ wakeup(isp->isp_osinfo.pc.ptr); while (*num_threads != 0) mtx_sleep(isp, &isp->isp_lock, PRIBIO, "isp_reap", 100); } int isp_attach(ispsoftc_t *isp) { const char *nu = device_get_nameunit(isp->isp_osinfo.dev); int du = device_get_unit(isp->isp_dev); int chan; /* * Create the device queue for our SIM(s). */ isp->isp_osinfo.devq = cam_simq_alloc(isp->isp_maxcmds); if (isp->isp_osinfo.devq == NULL) { return (EIO); } for (chan = 0; chan < isp->isp_nchan; chan++) { if (isp_attach_chan(isp, isp->isp_osinfo.devq, chan)) { goto unwind; } } callout_init_mtx(&isp->isp_osinfo.tmo, &isp->isp_lock, 0); isp_timer_count = hz >> 2; callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp); isp->isp_osinfo.cdev = make_dev(&isp_cdevsw, du, UID_ROOT, GID_OPERATOR, 0600, "%s", nu); if (isp->isp_osinfo.cdev) { isp->isp_osinfo.cdev->si_drv1 = isp; } return (0); unwind: while (--chan >= 0) { struct cam_sim *sim; struct cam_path *path; ISP_GET_PC(isp, chan, sim, sim); ISP_GET_PC(isp, chan, path, path); xpt_free_path(path); ISP_LOCK(isp); xpt_bus_deregister(cam_sim_path(sim)); ISP_UNLOCK(isp); cam_sim_free(sim, FALSE); } cam_simq_free(isp->isp_osinfo.devq); isp->isp_osinfo.devq = NULL; return (-1); } int isp_detach(ispsoftc_t *isp) { int chan; if (isp->isp_osinfo.cdev) { destroy_dev(isp->isp_osinfo.cdev); isp->isp_osinfo.cdev = NULL; } ISP_LOCK(isp); /* Tell spawned threads that we're exiting. 
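* isp_detach_chan() wakes each channel's worker threads and waits for its num_threads count to drop to zero.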
*/ isp->isp_osinfo.is_exiting = 1; for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1) isp_detach_chan(isp, chan); ISP_UNLOCK(isp); callout_drain(&isp->isp_osinfo.tmo); cam_simq_free(isp->isp_osinfo.devq); return (0); } static void isp_freeze_loopdown(ispsoftc_t *isp, int chan) { struct isp_fc *fc = ISP_FC_PC(isp, chan); if (fc->sim == NULL) return; if (fc->simqfrozen == 0) { isp_prt(isp, ISP_LOGDEBUG0, "Chan %d Freeze simq (loopdown)", chan); fc->simqfrozen = SIMQFRZ_LOOPDOWN; xpt_hold_boot(); xpt_freeze_simq(fc->sim, 1); } else { isp_prt(isp, ISP_LOGDEBUG0, "Chan %d Mark simq frozen (loopdown)", chan); fc->simqfrozen |= SIMQFRZ_LOOPDOWN; } } static void isp_unfreeze_loopdown(ispsoftc_t *isp, int chan) { struct isp_fc *fc = ISP_FC_PC(isp, chan); if (fc->sim == NULL) return; int wasfrozen = fc->simqfrozen & SIMQFRZ_LOOPDOWN; fc->simqfrozen &= ~SIMQFRZ_LOOPDOWN; if (wasfrozen && fc->simqfrozen == 0) { isp_prt(isp, ISP_LOGDEBUG0, "Chan %d Release simq", chan); xpt_release_simq(fc->sim, 1); xpt_release_boot(); } } static int ispioctl(struct cdev *dev, u_long c, caddr_t addr, int flags, struct thread *td) { ispsoftc_t *isp; int nr, chan, retval = ENOTTY; isp = dev->si_drv1; switch (c) { case ISP_SDBLEV: { int olddblev = isp->isp_dblev; isp->isp_dblev = *(int *)addr; *(int *)addr = olddblev; retval = 0; break; } case ISP_GETROLE: chan = *(int *)addr; if (chan < 0 || chan >= isp->isp_nchan) { retval = -ENXIO; break; } if (IS_FC(isp)) { *(int *)addr = FCPARAM(isp, chan)->role; } else { *(int *)addr = ISP_ROLE_INITIATOR; } retval = 0; break; case ISP_SETROLE: if (IS_SCSI(isp)) break; nr = *(int *)addr; chan = nr >> 8; if (chan < 0 || chan >= isp->isp_nchan) { retval = -ENXIO; break; } nr &= 0xff; if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) { retval = EINVAL; break; } ISP_LOCK(isp); *(int *)addr = FCPARAM(isp, chan)->role; retval = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, nr); ISP_UNLOCK(isp); retval = 0; break; case ISP_RESETHBA: ISP_LOCK(isp); isp_reinit(isp, 0); ISP_UNLOCK(isp); retval = 0; break; case ISP_RESCAN: if (IS_FC(isp)) { chan = *(intptr_t *)addr; if (chan < 0 || chan >= isp->isp_nchan) { retval = -ENXIO; break; } ISP_LOCK(isp); if (isp_fc_runstate(isp, chan, 5 * 1000000) != LOOP_READY) { retval = EIO; } else { retval = 0; } ISP_UNLOCK(isp); } break; case ISP_FC_LIP: if (IS_FC(isp)) { chan = *(intptr_t *)addr; if (chan < 0 || chan >= isp->isp_nchan) { retval = -ENXIO; break; } ISP_LOCK(isp); if (isp_control(isp, ISPCTL_SEND_LIP, chan)) { retval = EIO; } else { retval = 0; } ISP_UNLOCK(isp); } break; case ISP_FC_GETDINFO: { struct isp_fc_device *ifc = (struct isp_fc_device *) addr; fcportdb_t *lp; if (IS_SCSI(isp)) { break; } if (ifc->loopid >= MAX_FC_TARG) { retval = EINVAL; break; } lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid]; if (lp->state != FC_PORTDB_STATE_NIL) { ifc->role = (lp->prli_word3 & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT; ifc->loopid = lp->handle; ifc->portid = lp->portid; ifc->node_wwn = lp->node_wwn; ifc->port_wwn = lp->port_wwn; retval = 0; } else { retval = ENODEV; } break; } case ISP_FC_GETHINFO: { struct isp_hba_device *hba = (struct isp_hba_device *) addr; int chan = hba->fc_channel; if (chan < 0 || chan >= isp->isp_nchan) { retval = ENXIO; break; } hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev); hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev); hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev); hba->fc_nchannels = isp->isp_nchan; if (IS_FC(isp)) { hba->fc_nports = MAX_FC_TARG; hba->fc_speed = FCPARAM(isp, hba->fc_channel)->isp_gbspeed; hba->fc_topology 
= FCPARAM(isp, chan)->isp_topo + 1; hba->fc_loopid = FCPARAM(isp, chan)->isp_loopid; hba->nvram_node_wwn = FCPARAM(isp, chan)->isp_wwnn_nvram; hba->nvram_port_wwn = FCPARAM(isp, chan)->isp_wwpn_nvram; hba->active_node_wwn = FCPARAM(isp, chan)->isp_wwnn; hba->active_port_wwn = FCPARAM(isp, chan)->isp_wwpn; } else { hba->fc_nports = MAX_TARGETS; hba->fc_speed = 0; hba->fc_topology = 0; hba->nvram_node_wwn = 0ull; hba->nvram_port_wwn = 0ull; hba->active_node_wwn = 0ull; hba->active_port_wwn = 0ull; } retval = 0; break; } case ISP_TSK_MGMT: { int needmarker; struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr; uint16_t nphdl; mbreg_t mbs; if (IS_SCSI(isp)) { break; } chan = fct->chan; if (chan < 0 || chan >= isp->isp_nchan) { retval = -ENXIO; break; } needmarker = retval = 0; nphdl = fct->loopid; ISP_LOCK(isp); if (IS_24XX(isp)) { void *reqp; uint8_t resp[QENTRY_LEN]; isp24xx_tmf_t tmf; isp24xx_statusreq_t sp; fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; int i; for (i = 0; i < MAX_FC_TARG; i++) { lp = &fcp->portdb[i]; if (lp->handle == nphdl) { break; } } if (i == MAX_FC_TARG) { retval = ENXIO; ISP_UNLOCK(isp); break; } ISP_MEMZERO(&tmf, sizeof(tmf)); tmf.tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT; tmf.tmf_header.rqs_entry_count = 1; tmf.tmf_nphdl = lp->handle; tmf.tmf_delay = 2; tmf.tmf_timeout = 4; tmf.tmf_tidlo = lp->portid; tmf.tmf_tidhi = lp->portid >> 16; tmf.tmf_vpidx = ISP_GET_VPIDX(isp, chan); tmf.tmf_lun[1] = fct->lun & 0xff; if (fct->lun >= 256) { tmf.tmf_lun[0] = 0x40 | (fct->lun >> 8); } switch (fct->action) { case IPT_CLEAR_ACA: tmf.tmf_flags = ISP24XX_TMF_CLEAR_ACA; break; case IPT_TARGET_RESET: tmf.tmf_flags = ISP24XX_TMF_TARGET_RESET; needmarker = 1; break; case IPT_LUN_RESET: tmf.tmf_flags = ISP24XX_TMF_LUN_RESET; needmarker = 1; break; case IPT_CLEAR_TASK_SET: tmf.tmf_flags = ISP24XX_TMF_CLEAR_TASK_SET; needmarker = 1; break; case IPT_ABORT_TASK_SET: tmf.tmf_flags = ISP24XX_TMF_ABORT_TASK_SET; needmarker = 1; break; default: retval = EINVAL; break; } if (retval) { ISP_UNLOCK(isp); break; } /* Prepare space for response in memory */ memset(resp, 0xff, sizeof(resp)); tmf.tmf_handle = isp_allocate_handle(isp, resp, ISP_HANDLE_CTRL); if (tmf.tmf_handle == 0) { isp_prt(isp, ISP_LOGERR, "%s: TMF of Chan %d out of handles", __func__, chan); ISP_UNLOCK(isp); retval = ENOMEM; break; } /* Send request and wait for response. 
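* The response is delivered through the tmf_handle allocated above and wakes the msleep() below; on timeout we destroy the handle and fail with EIO.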
*/ reqp = isp_getrqentry(isp); if (reqp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: TMF of Chan %d out of rqent", __func__, chan); isp_destroy_handle(isp, tmf.tmf_handle); ISP_UNLOCK(isp); retval = EIO; break; } isp_put_24xx_tmf(isp, &tmf, (isp24xx_tmf_t *)reqp); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "IOCB TMF", QENTRY_LEN, reqp); ISP_SYNC_REQUEST(isp); if (msleep(resp, &isp->isp_lock, 0, "TMF", 5*hz) == EWOULDBLOCK) { isp_prt(isp, ISP_LOGERR, "%s: TMF of Chan %d timed out", __func__, chan); isp_destroy_handle(isp, tmf.tmf_handle); ISP_UNLOCK(isp); retval = EIO; break; } if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "IOCB TMF response", QENTRY_LEN, resp); isp_get_24xx_response(isp, (isp24xx_statusreq_t *)resp, &sp); if (sp.req_completion_status != 0) retval = EIO; else if (needmarker) fcp->sendmarker = 1; } else { MBSINIT(&mbs, 0, MBLOGALL, 0); if (ISP_CAP_2KLOGIN(isp) == 0) { nphdl <<= 8; } switch (fct->action) { case IPT_CLEAR_ACA: mbs.param[0] = MBOX_CLEAR_ACA; mbs.param[1] = nphdl; mbs.param[2] = fct->lun; break; case IPT_TARGET_RESET: mbs.param[0] = MBOX_TARGET_RESET; mbs.param[1] = nphdl; needmarker = 1; break; case IPT_LUN_RESET: mbs.param[0] = MBOX_LUN_RESET; mbs.param[1] = nphdl; mbs.param[2] = fct->lun; needmarker = 1; break; case IPT_CLEAR_TASK_SET: mbs.param[0] = MBOX_CLEAR_TASK_SET; mbs.param[1] = nphdl; mbs.param[2] = fct->lun; needmarker = 1; break; case IPT_ABORT_TASK_SET: mbs.param[0] = MBOX_ABORT_TASK_SET; mbs.param[1] = nphdl; mbs.param[2] = fct->lun; needmarker = 1; break; default: retval = EINVAL; break; } if (retval == 0) { if (needmarker) { FCPARAM(isp, chan)->sendmarker = 1; } retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); if (retval) { retval = EIO; } } } ISP_UNLOCK(isp); break; } default: break; } return (retval); } /* * Local Inlines */ static ISP_INLINE int isp_get_pcmd(ispsoftc_t *, union ccb *); static ISP_INLINE void isp_free_pcmd(ispsoftc_t *, union ccb *); static ISP_INLINE int isp_get_pcmd(ispsoftc_t *isp, union ccb *ccb) { ISP_PCMD(ccb) = isp->isp_osinfo.pcmd_free; if (ISP_PCMD(ccb) == NULL) { return (-1); } isp->isp_osinfo.pcmd_free = ((struct isp_pcmd *)ISP_PCMD(ccb))->next; return (0); } static ISP_INLINE void isp_free_pcmd(ispsoftc_t *isp, union ccb *ccb) { if (ISP_PCMD(ccb)) { #ifdef ISP_TARGET_MODE PISP_PCMD(ccb)->datalen = 0; #endif PISP_PCMD(ccb)->next = isp->isp_osinfo.pcmd_free; isp->isp_osinfo.pcmd_free = ISP_PCMD(ccb); ISP_PCMD(ccb) = NULL; } } /* * Put the target mode functions here, because some are inlines */ #ifdef ISP_TARGET_MODE static ISP_INLINE tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t); static atio_private_data_t *isp_get_atpd(ispsoftc_t *, int, uint32_t); static atio_private_data_t *isp_find_atpd(ispsoftc_t *, int, uint32_t); static void isp_put_atpd(ispsoftc_t *, int, atio_private_data_t *); static inot_private_data_t *isp_get_ntpd(ispsoftc_t *, int); static inot_private_data_t *isp_find_ntpd(ispsoftc_t *, int, uint32_t, uint32_t); static void isp_put_ntpd(ispsoftc_t *, int, inot_private_data_t *); static cam_status create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **); static void destroy_lun_state(ispsoftc_t *, int, tstate_t *); static void isp_enable_lun(ispsoftc_t *, union ccb *); static void isp_disable_lun(ispsoftc_t *, union ccb *); static callout_func_t isp_refire_putback_atio; static callout_func_t isp_refire_notify_ack; static void isp_complete_ctio(union ccb *); static void isp_target_putback_atio(union ccb *); enum Start_Ctio_How { FROM_CAM, FROM_TIMER, FROM_SRR, 
FROM_CTIO_DONE }; static void isp_target_start_ctio(ispsoftc_t *, union ccb *, enum Start_Ctio_How); static void isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *); static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *); static void isp_handle_platform_ctio(ispsoftc_t *, void *); static int isp_handle_platform_target_notify_ack(ispsoftc_t *, isp_notify_t *, uint32_t rsp); static void isp_handle_platform_target_tmf(ispsoftc_t *, isp_notify_t *); static void isp_target_mark_aborted_early(ispsoftc_t *, int chan, tstate_t *, uint32_t); static ISP_INLINE tstate_t * get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun) { tstate_t *tptr = NULL; struct tslist *lhp; if (bus < isp->isp_nchan) { ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(lun)], lhp); SLIST_FOREACH(tptr, lhp, next) { if (tptr->ts_lun == lun) return (tptr); } } return (NULL); } static int isp_atio_restart(ispsoftc_t *isp, int bus, tstate_t *tptr) { inot_private_data_t *ntp; struct ntpdlist rq; if (STAILQ_EMPTY(&tptr->restart_queue)) return (0); STAILQ_INIT(&rq); STAILQ_CONCAT(&rq, &tptr->restart_queue); while ((ntp = STAILQ_FIRST(&rq)) != NULL) { STAILQ_REMOVE_HEAD(&rq, next); if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at7_entry_t *)ntp->data)->at_rxid); isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->data); } else { isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at2_entry_t *)ntp->data)->at_rxid); isp_handle_platform_atio2(isp, (at2_entry_t *) ntp->data); } isp_put_ntpd(isp, bus, ntp); if (!STAILQ_EMPTY(&tptr->restart_queue)) break; } if (!STAILQ_EMPTY(&rq)) { STAILQ_CONCAT(&rq, &tptr->restart_queue); STAILQ_CONCAT(&tptr->restart_queue, &rq); } return (!STAILQ_EMPTY(&tptr->restart_queue)); } static void isp_tmcmd_restart(ispsoftc_t *isp) { tstate_t *tptr; union ccb *ccb; struct tslist *lhp; struct isp_ccbq *waitq; int bus, i; for (bus = 0; bus < isp->isp_nchan; bus++) { for (i = 0; i < LUN_HASH_SIZE; i++) { ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); SLIST_FOREACH(tptr, lhp, next) isp_atio_restart(isp, bus, tptr); } /* * We only need to do this once per channel. 
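* That is, kick only the first CCB on the per-channel CTIO wait queue; isp_target_start_ctio() drains the rest of the queue itself.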
*/ ISP_GET_PC_ADDR(isp, bus, waitq, waitq); ccb = (union ccb *)TAILQ_FIRST(waitq); if (ccb != NULL) { TAILQ_REMOVE(waitq, &ccb->ccb_h, sim_links.tqe); isp_target_start_ctio(isp, ccb, FROM_TIMER); } } } static atio_private_data_t * isp_get_atpd(ispsoftc_t *isp, int chan, uint32_t tag) { struct atpdlist *atfree; struct atpdlist *atused; atio_private_data_t *atp; ISP_GET_PC_ADDR(isp, chan, atfree, atfree); atp = LIST_FIRST(atfree); if (atp) { LIST_REMOVE(atp, next); atp->tag = tag; ISP_GET_PC(isp, chan, atused, atused); LIST_INSERT_HEAD(&atused[ATPDPHASH(tag)], atp, next); } return (atp); } static atio_private_data_t * isp_find_atpd(ispsoftc_t *isp, int chan, uint32_t tag) { struct atpdlist *atused; atio_private_data_t *atp; ISP_GET_PC(isp, chan, atused, atused); LIST_FOREACH(atp, &atused[ATPDPHASH(tag)], next) { if (atp->tag == tag) return (atp); } return (NULL); } static void isp_put_atpd(ispsoftc_t *isp, int chan, atio_private_data_t *atp) { struct atpdlist *atfree; if (atp->ests) { isp_put_ecmd(isp, atp->ests); } LIST_REMOVE(atp, next); memset(atp, 0, sizeof (*atp)); ISP_GET_PC_ADDR(isp, chan, atfree, atfree); LIST_INSERT_HEAD(atfree, atp, next); } static void isp_dump_atpd(ispsoftc_t *isp, int chan) { atio_private_data_t *atp, *atpool; const char *states[8] = { "Free", "ATIO", "CAM", "CTIO", "LAST_CTIO", "PDON", "?6", "7" }; ISP_GET_PC(isp, chan, atpool, atpool); for (atp = atpool; atp < &atpool[ATPDPSIZE]; atp++) { if (atp->state == ATPD_STATE_FREE) continue; isp_prt(isp, ISP_LOGALL, "Chan %d ATP [0x%x] origdlen %u bytes_xfrd %u lun %jx nphdl 0x%04x s_id 0x%06x d_id 0x%06x oxid 0x%04x state %s", chan, atp->tag, atp->orig_datalen, atp->bytes_xfered, (uintmax_t)atp->lun, atp->nphdl, atp->sid, atp->did, atp->oxid, states[atp->state & 0x7]); } } static inot_private_data_t * isp_get_ntpd(ispsoftc_t *isp, int chan) { struct ntpdlist *ntfree; inot_private_data_t *ntp; ISP_GET_PC_ADDR(isp, chan, ntfree, ntfree); ntp = STAILQ_FIRST(ntfree); if (ntp) STAILQ_REMOVE_HEAD(ntfree, next); return (ntp); } static inot_private_data_t * isp_find_ntpd(ispsoftc_t *isp, int chan, uint32_t tag_id, uint32_t seq_id) { inot_private_data_t *ntp, *ntp2; ISP_GET_PC(isp, chan, ntpool, ntp); ISP_GET_PC_ADDR(isp, chan, ntpool[ATPDPSIZE], ntp2); for (; ntp < ntp2; ntp++) { if (ntp->tag_id == tag_id && ntp->seq_id == seq_id) return (ntp); } return (NULL); } static void isp_put_ntpd(ispsoftc_t *isp, int chan, inot_private_data_t *ntp) { struct ntpdlist *ntfree; ntp->tag_id = ntp->seq_id = 0; ISP_GET_PC_ADDR(isp, chan, ntfree, ntfree); STAILQ_INSERT_HEAD(ntfree, ntp, next); } static cam_status create_lun_state(ispsoftc_t *isp, int bus, struct cam_path *path, tstate_t **rslt) { lun_id_t lun; struct tslist *lhp; tstate_t *tptr; lun = xpt_path_lun_id(path); if (lun != CAM_LUN_WILDCARD) { if (ISP_MAX_LUNS(isp) > 0 && lun >= ISP_MAX_LUNS(isp)) { return (CAM_LUN_INVALID); } } tptr = malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); if (tptr == NULL) { return (CAM_RESRC_UNAVAIL); } tptr->ts_lun = lun; SLIST_INIT(&tptr->atios); SLIST_INIT(&tptr->inots); STAILQ_INIT(&tptr->restart_queue); ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(lun)], lhp); SLIST_INSERT_HEAD(lhp, tptr, next); *rslt = tptr; ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, path, "created tstate\n"); return (CAM_REQ_CMP); } static void destroy_lun_state(ispsoftc_t *isp, int bus, tstate_t *tptr) { union ccb *ccb; struct tslist *lhp; inot_private_data_t *ntp; while ((ccb = (union ccb *)SLIST_FIRST(&tptr->atios)) != NULL) { SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 
ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); }; while ((ccb = (union ccb *)SLIST_FIRST(&tptr->inots)) != NULL) { SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle); ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); } while ((ntp = STAILQ_FIRST(&tptr->restart_queue)) != NULL) { isp_endcmd(isp, ntp->data, NIL_HANDLE, bus, SCSI_STATUS_BUSY, 0); STAILQ_REMOVE_HEAD(&tptr->restart_queue, next); isp_put_ntpd(isp, bus, ntp); } ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(tptr->ts_lun)], lhp); SLIST_REMOVE(lhp, tptr, tstate, next); free(tptr, M_DEVBUF); } static void isp_enable_lun(ispsoftc_t *isp, union ccb *ccb) { tstate_t *tptr; int bus; target_id_t target; lun_id_t lun; if (!IS_FC(isp) || !ISP_CAP_TMODE(isp) || !ISP_CAP_SCCFW(isp)) { xpt_print(ccb->ccb_h.path, "Target mode is not supported\n"); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); return; } /* * We only support either target and lun both wildcard * or target and lun both non-wildcard. */ bus = XS_CHANNEL(ccb); target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path, "enabling lun %jx\n", (uintmax_t)lun); if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return; } /* Create the state pointer. It should not already exist. */ tptr = get_lun_statep(isp, bus, lun); if (tptr) { ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; xpt_done(ccb); return; } ccb->ccb_h.status = create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); if (ccb->ccb_h.status != CAM_REQ_CMP) { xpt_done(ccb); return; } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); } static void isp_disable_lun(ispsoftc_t *isp, union ccb *ccb) { tstate_t *tptr = NULL; int bus; target_id_t target; lun_id_t lun; bus = XS_CHANNEL(ccb); target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path, "disabling lun %jx\n", (uintmax_t)lun); if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return; } /* Find the state pointer. */ if ((tptr = get_lun_statep(isp, bus, lun)) == NULL) { ccb->ccb_h.status = CAM_PATH_INVALID; xpt_done(ccb); return; } destroy_lun_state(isp, bus, tptr); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); } static void isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb, enum Start_Ctio_How how) { int fctape, sendstatus, resid; fcparam *fcp; atio_private_data_t *atp; struct ccb_scsiio *cso; struct isp_ccbq *waitq; uint32_t dmaresult, handle, xfrlen, sense_length, tmp; uint8_t local[QENTRY_LEN]; isp_prt(isp, ISP_LOGTDEBUG0, "%s: ENTRY[0x%x] how %u xfrlen %u sendstatus %d sense_len %u", __func__, ccb->csio.tag_id, how, ccb->csio.dxfer_len, (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0, ((ccb->ccb_h.flags & CAM_SEND_SENSE)? 
ccb->csio.sense_len : 0)); ISP_GET_PC_ADDR(isp, XS_CHANNEL(ccb), waitq, waitq); switch (how) { case FROM_CAM: /* * Insert at the tail of the list, if any, waiting CTIO CCBs */ TAILQ_INSERT_TAIL(waitq, &ccb->ccb_h, sim_links.tqe); break; case FROM_TIMER: case FROM_SRR: case FROM_CTIO_DONE: TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); break; } while ((ccb = (union ccb *) TAILQ_FIRST(waitq)) != NULL) { TAILQ_REMOVE(waitq, &ccb->ccb_h, sim_links.tqe); cso = &ccb->csio; xfrlen = cso->dxfer_len; if (xfrlen == 0) { if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { ISP_PATH_PRT(isp, ISP_LOGERR, ccb->ccb_h.path, "a data transfer length of zero but no status to send is wrong\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); continue; } } atp = isp_find_atpd(isp, XS_CHANNEL(ccb), cso->tag_id); if (atp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: [0x%x] cannot find private data adjunct in %s", __func__, cso->tag_id, __func__); isp_dump_atpd(isp, XS_CHANNEL(ccb)); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); continue; } /* * Is this command a dead duck? */ if (atp->dead) { isp_prt(isp, ISP_LOGERR, "%s: [0x%x] not sending a CTIO for a dead command", __func__, cso->tag_id); ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); continue; } /* * Check to make sure we're still in target mode. */ fcp = FCPARAM(isp, XS_CHANNEL(ccb)); if ((fcp->role & ISP_ROLE_TARGET) == 0) { isp_prt(isp, ISP_LOGERR, "%s: [0x%x] stopping sending a CTIO because we're no longer in target mode", __func__, cso->tag_id); ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); continue; } /* * We're only handling ATPD_CCB_OUTSTANDING outstanding CCB at a time (one of which * could be split into two CTIOs to split data and status). */ if (atp->ctcnt >= ATPD_CCB_OUTSTANDING) { isp_prt(isp, ISP_LOGTINFO, "[0x%x] handling only %d CCBs at a time (flags for this ccb: 0x%x)", cso->tag_id, ATPD_CCB_OUTSTANDING, ccb->ccb_h.flags); TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); break; } /* * Does the initiator expect FC-Tape style responses? */ if ((atp->word3 & PRLI_WD3_RETRY) && fcp->fctape_enabled) { fctape = 1; } else { fctape = 0; } /* * If we already did the data xfer portion of a CTIO that sends data * and status, don't do it again and do the status portion now. */ if (atp->sendst) { isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] now sending synthesized status orig_dl=%u xfered=%u bit=%u", cso->tag_id, atp->orig_datalen, atp->bytes_xfered, atp->bytes_in_transit); xfrlen = 0; /* we already did the data transfer */ atp->sendst = 0; } if (ccb->ccb_h.flags & CAM_SEND_STATUS) { sendstatus = 1; } else { sendstatus = 0; } if (ccb->ccb_h.flags & CAM_SEND_SENSE) { KASSERT((sendstatus != 0), ("how can you have CAM_SEND_SENSE w/o CAM_SEND_STATUS?")); /* * Sense length is not the entire sense data structure size. Periph * drivers don't seem to be setting sense_len to reflect the actual * size. We'll peek inside to get the right amount. 
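* Whatever length we use is clamped below so the FCP response plus sense data still fits in a single XCMD_SIZE buffer.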
*/ sense_length = cso->sense_len; /* * This 'cannot' happen */ if (sense_length > (XCMD_SIZE - MIN_FCP_RESPONSE_SIZE)) { sense_length = XCMD_SIZE - MIN_FCP_RESPONSE_SIZE; } } else { sense_length = 0; } memset(local, 0, QENTRY_LEN); /* * Check for overflow */ tmp = atp->bytes_xfered + atp->bytes_in_transit; if (xfrlen > 0 && tmp > atp->orig_datalen) { isp_prt(isp, ISP_LOGERR, "%s: [0x%x] data overflow by %u bytes", __func__, cso->tag_id, tmp + xfrlen - atp->orig_datalen); ccb->ccb_h.status = CAM_DATA_RUN_ERR; xpt_done(ccb); continue; } if (xfrlen > atp->orig_datalen - tmp) { xfrlen = atp->orig_datalen - tmp; if (xfrlen == 0 && !sendstatus) { cso->resid = cso->dxfer_len; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); continue; } } if (IS_24XX(isp)) { ct7_entry_t *cto = (ct7_entry_t *) local; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; cto->ct_header.rqs_entry_count = 1; cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM; ATPD_SET_SEQNO(cto, atp); cto->ct_nphdl = atp->nphdl; cto->ct_rxid = atp->tag; cto->ct_iid_lo = atp->sid; cto->ct_iid_hi = atp->sid >> 16; cto->ct_oxid = atp->oxid; cto->ct_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(ccb)); cto->ct_timeout = XS_TIME(ccb); cto->ct_flags = atp->tattr << CT7_TASK_ATTR_SHIFT; /* * Mode 1, status, no data. Only possible when we are sending status, have * no data to transfer, and any sense data can fit into a ct7_entry_t. * * Mode 2, status, no data. We have to use this in the case that * the sense data won't fit into a ct7_entry_t. * */ if (sendstatus && xfrlen == 0) { cto->ct_flags |= CT7_SENDSTATUS | CT7_NO_DATA; resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit; if (sense_length <= MAXRESPLEN_24XX) { cto->ct_flags |= CT7_FLAG_MODE1; cto->ct_scsi_status = cso->scsi_status; if (resid < 0) { cto->ct_resid = -resid; cto->ct_scsi_status |= (FCP_RESID_OVERFLOW << 8); } else if (resid > 0) { cto->ct_resid = resid; cto->ct_scsi_status |= (FCP_RESID_UNDERFLOW << 8); } if (fctape) { cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; } if (sense_length) { cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8); cto->rsp.m1.ct_resplen = cto->ct_senselen = sense_length; memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, sense_length); } } else { bus_addr_t addr; char buf[XCMD_SIZE]; fcp_rsp_iu_t *rp; if (atp->ests == NULL) { atp->ests = isp_get_ecmd(isp); if (atp->ests == NULL) { TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); break; } } memset(buf, 0, sizeof (buf)); rp = (fcp_rsp_iu_t *)buf; if (fctape) { cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; rp->fcp_rsp_bits |= FCP_CONF_REQ; } cto->ct_flags |= CT7_FLAG_MODE2; rp->fcp_rsp_scsi_status = cso->scsi_status; if (resid < 0) { rp->fcp_rsp_resid = -resid; rp->fcp_rsp_bits |= FCP_RESID_OVERFLOW; } else if (resid > 0) { rp->fcp_rsp_resid = resid; rp->fcp_rsp_bits |= FCP_RESID_UNDERFLOW; } if (sense_length) { rp->fcp_rsp_snslen = sense_length; cto->ct_senselen = sense_length; rp->fcp_rsp_bits |= FCP_SNSLEN_VALID; isp_put_fcp_rsp_iu(isp, rp, atp->ests); memcpy(((fcp_rsp_iu_t *)atp->ests)->fcp_rsp_extra, &cso->sense_data, sense_length); } else { isp_put_fcp_rsp_iu(isp, rp, atp->ests); } if (isp->isp_dblev & ISP_LOGTDEBUG1) { isp_print_bytes(isp, "FCP Response Frame After Swizzling", MIN_FCP_RESPONSE_SIZE + sense_length, atp->ests); } addr = isp->isp_osinfo.ecmd_dma; addr += ((((isp_ecmd_t *)atp->ests) - isp->isp_osinfo.ecmd_base) * XCMD_SIZE); isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests, (uintmax_t) 
isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length); cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length; cto->rsp.m2.ct_fcp_rsp_iudata.ds_base = DMA_LO32(addr); cto->rsp.m2.ct_fcp_rsp_iudata.ds_basehi = DMA_HI32(addr); cto->rsp.m2.ct_fcp_rsp_iudata.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length; } if (sense_length) { isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d slen %u sense: %x %x/%x/%x", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid, sense_length, cso->sense_data.error_code, cso->sense_data.sense_buf[1], cso->sense_data.sense_buf[11], cso->sense_data.sense_buf[12]); } else { isp_prt(isp, ISP_LOGDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid); } atp->state = ATPD_STATE_LAST_CTIO; } /* * Mode 0 data transfers, *possibly* with status. */ if (xfrlen != 0) { cto->ct_flags |= CT7_FLAG_MODE0; if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cto->ct_flags |= CT7_DATA_IN; } else { cto->ct_flags |= CT7_DATA_OUT; } cto->rsp.m0.reloff = atp->bytes_xfered + atp->bytes_in_transit; cto->rsp.m0.ct_xfrlen = xfrlen; #ifdef DEBUG if (ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame && xfrlen > ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame) { isp_prt(isp, ISP_LOGWARN, "%s: truncating data frame with xfrlen %d to %d", __func__, xfrlen, xfrlen - (xfrlen >> 2)); ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame = 0; cto->rsp.m0.ct_xfrlen -= xfrlen >> 2; } #endif if (sendstatus) { resid = atp->orig_datalen - atp->bytes_xfered - xfrlen; if (cso->scsi_status == SCSI_STATUS_OK && resid == 0 /* && fctape == 0 */) { cto->ct_flags |= CT7_SENDSTATUS; atp->state = ATPD_STATE_LAST_CTIO; if (fctape) { cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; } } else { atp->sendst = 1; /* send status later */ cto->ct_header.rqs_seqno &= ~ATPD_SEQ_NOTIFY_CAM; atp->state = ATPD_STATE_CTIO; } } else { atp->state = ATPD_STATE_CTIO; } isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x xfrlen=%u off=%u", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, xfrlen, atp->bytes_xfered); } } else { ct2_entry_t *cto = (ct2_entry_t *) local; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; cto->ct_header.rqs_entry_count = 1; cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM; ATPD_SET_SEQNO(cto, atp); if (ISP_CAP_2KLOGIN(isp)) { ((ct2e_entry_t *)cto)->ct_iid = atp->nphdl; } else { cto->ct_iid = atp->nphdl; if (ISP_CAP_SCCFW(isp) == 0) { cto->ct_lun = ccb->ccb_h.target_lun; } } cto->ct_timeout = XS_TIME(ccb); cto->ct_rxid = cso->tag_id; /* * Mode 1, status, no data. Only possible when we are sending status, have * no data to transfer, and the sense length can fit in the ct7_entry. * * Mode 2, status, no data. We have to use this in the case the response * length won't fit into a ct2_entry_t. * * We'll fill out this structure with information as if this were a * Mode 1. The hardware layer will create the Mode 2 FCP RSP IU as * needed based upon this. 
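* (For Mode 1 the sense data must fit within the ct2_entry_t itself; MAXRESPLEN is the limit checked below.)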
*/ if (sendstatus && xfrlen == 0) { cto->ct_flags |= CT2_SENDSTATUS | CT2_NO_DATA; resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit; if (sense_length <= MAXRESPLEN) { if (resid < 0) { cto->ct_resid = -resid; } else if (resid > 0) { cto->ct_resid = resid; } cto->ct_flags |= CT2_FLAG_MODE1; cto->rsp.m1.ct_scsi_status = cso->scsi_status; if (resid < 0) { cto->rsp.m1.ct_scsi_status |= CT2_DATA_OVER; } else if (resid > 0) { cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER; } if (fctape) { cto->ct_flags |= CT2_CONFIRM; } if (sense_length) { cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; cto->rsp.m1.ct_resplen = cto->rsp.m1.ct_senselen = sense_length; memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, sense_length); } } else { bus_addr_t addr; char buf[XCMD_SIZE]; fcp_rsp_iu_t *rp; if (atp->ests == NULL) { atp->ests = isp_get_ecmd(isp); if (atp->ests == NULL) { TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); break; } } memset(buf, 0, sizeof (buf)); rp = (fcp_rsp_iu_t *)buf; if (fctape) { cto->ct_flags |= CT2_CONFIRM; rp->fcp_rsp_bits |= FCP_CONF_REQ; } cto->ct_flags |= CT2_FLAG_MODE2; rp->fcp_rsp_scsi_status = cso->scsi_status; if (resid < 0) { rp->fcp_rsp_resid = -resid; rp->fcp_rsp_bits |= FCP_RESID_OVERFLOW; } else if (resid > 0) { rp->fcp_rsp_resid = resid; rp->fcp_rsp_bits |= FCP_RESID_UNDERFLOW; } if (sense_length) { rp->fcp_rsp_snslen = sense_length; rp->fcp_rsp_bits |= FCP_SNSLEN_VALID; isp_put_fcp_rsp_iu(isp, rp, atp->ests); memcpy(((fcp_rsp_iu_t *)atp->ests)->fcp_rsp_extra, &cso->sense_data, sense_length); } else { isp_put_fcp_rsp_iu(isp, rp, atp->ests); } if (isp->isp_dblev & ISP_LOGTDEBUG1) { isp_print_bytes(isp, "FCP Response Frame After Swizzling", MIN_FCP_RESPONSE_SIZE + sense_length, atp->ests); } addr = isp->isp_osinfo.ecmd_dma; addr += ((((isp_ecmd_t *)atp->ests) - isp->isp_osinfo.ecmd_base) * XCMD_SIZE); isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests, (uintmax_t) isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length); cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length; cto->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_base = DMA_LO32(addr); cto->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length; } if (sense_length) { isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO2[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d sense: %x %x/%x/%x", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cso->scsi_status, cto->ct_flags, cto->ct_resid, cso->sense_data.error_code, cso->sense_data.sense_buf[1], cso->sense_data.sense_buf[11], cso->sense_data.sense_buf[12]); } else { isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO2[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cso->scsi_status, cto->ct_flags, cto->ct_resid); } atp->state = ATPD_STATE_LAST_CTIO; } if (xfrlen != 0) { cto->ct_flags |= CT2_FLAG_MODE0; if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cto->ct_flags |= CT2_DATA_IN; } else { cto->ct_flags |= CT2_DATA_OUT; } cto->ct_reloff = atp->bytes_xfered + atp->bytes_in_transit; cto->rsp.m0.ct_xfrlen = xfrlen; if (sendstatus) { resid = atp->orig_datalen - atp->bytes_xfered - xfrlen; if (cso->scsi_status == SCSI_STATUS_OK && resid == 0 /*&& fctape == 0*/) { cto->ct_flags |= CT2_SENDSTATUS; atp->state = ATPD_STATE_LAST_CTIO; if (fctape) { cto->ct_flags |= CT2_CONFIRM; } } else { atp->sendst = 1; /* send status later */ 
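/* Don't notify CAM for this intermediate CTIO; status will go out in a later one. */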
cto->ct_header.rqs_seqno &= ~ATPD_SEQ_NOTIFY_CAM; atp->state = ATPD_STATE_CTIO; } } else { atp->state = ATPD_STATE_CTIO; } } isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO2[%x] seq %u nc %d CDB0=%x scsi status %x flags %x resid %d xfrlen %u offset %u", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cso->scsi_status, cto->ct_flags, cto->ct_resid, cso->dxfer_len, atp->bytes_xfered); } if (isp_get_pcmd(isp, ccb)) { ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "out of PCMDs\n"); TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); break; } handle = isp_allocate_handle(isp, ccb, ISP_HANDLE_TARGET); if (handle == 0) { ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "No XFLIST pointers for %s\n", __func__); TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); isp_free_pcmd(isp, ccb); break; } atp->bytes_in_transit += xfrlen; PISP_PCMD(ccb)->datalen = xfrlen; /* * Call the dma setup routines for this entry (and any subsequent * CTIOs) if there's data to move, and then tell the f/w it's got * new things to play with. As with isp_start's usage of DMA setup, * any swizzling is done in the machine dependent layer. Because * of this, we put the request onto the queue area first in native * format. */ if (IS_24XX(isp)) { ct7_entry_t *cto = (ct7_entry_t *) local; cto->ct_syshandle = handle; } else { ct2_entry_t *cto = (ct2_entry_t *) local; cto->ct_syshandle = handle; } dmaresult = ISP_DMASETUP(isp, cso, (ispreq_t *) local); if (dmaresult != CMD_QUEUED) { isp_destroy_handle(isp, handle); isp_free_pcmd(isp, ccb); if (dmaresult == CMD_EAGAIN) { TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); break; } ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); continue; } ccb->ccb_h.status = CAM_REQ_INPROG | CAM_SIM_QUEUED; if (xfrlen) { ccb->ccb_h.spriv_field0 = atp->bytes_xfered; } else { ccb->ccb_h.spriv_field0 = ~0; } atp->ctcnt++; atp->seqno++; } } static void isp_refire_putback_atio(void *arg) { union ccb *ccb = arg; ISP_ASSERT_LOCKED((ispsoftc_t *)XS_ISP(ccb)); isp_target_putback_atio(ccb); } static void isp_refire_notify_ack(void *arg) { isp_tna_t *tp = arg; ispsoftc_t *isp = tp->isp; ISP_ASSERT_LOCKED(isp); if (isp_notify_ack(isp, tp->not)) { callout_schedule(&tp->timer, 5); } else { free(tp, M_DEVBUF); } } static void isp_target_putback_atio(union ccb *ccb) { ispsoftc_t *isp = XS_ISP(ccb); struct ccb_scsiio *cso = &ccb->csio; at2_entry_t local, *at = &local; ISP_MEMZERO(at, sizeof (at2_entry_t)); at->at_header.rqs_entry_type = RQSTYPE_ATIO2; at->at_header.rqs_entry_count = 1; if (ISP_CAP_SCCFW(isp)) { at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; } else { at->at_lun = (uint8_t) ccb->ccb_h.target_lun; } at->at_status = CT_OK; at->at_rxid = cso->tag_id; at->at_iid = cso->init_id; if (isp_target_put_entry(isp, at)) { callout_reset(&PISP_PCMD(ccb)->wdog, 10, isp_refire_putback_atio, ccb); } else isp_complete_ctio(ccb); } static void isp_complete_ctio(union ccb *ccb) { if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; xpt_done(ccb); } } static void isp_handle_platform_atio2(ispsoftc_t *isp, at2_entry_t *aep) { fcparam *fcp; lun_id_t lun; fcportdb_t *lp; tstate_t *tptr; struct ccb_accept_tio *atiop; uint16_t nphdl; atio_private_data_t *atp; inot_private_data_t *ntp; /* * The firmware status (except for the QLTM_SVALID bit) * indicates why this ATIO was sent to us. * * If QLTM_SVALID is set, the firmware has recommended Sense Data. 
*/ if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { isp_prt(isp, ISP_LOGWARN, "bogus atio (0x%x) leaked to platform", aep->at_status); isp_endcmd(isp, aep, NIL_HANDLE, 0, SCSI_STATUS_BUSY, 0); return; } fcp = FCPARAM(isp, 0); if (ISP_CAP_SCCFW(isp)) { lun = aep->at_scclun; } else { lun = aep->at_lun; } if (ISP_CAP_2KLOGIN(isp)) { nphdl = ((at2e_entry_t *)aep)->at_iid; } else { nphdl = aep->at_iid; } tptr = get_lun_statep(isp, 0, lun); if (tptr == NULL) { tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); if (tptr == NULL) { isp_prt(isp, ISP_LOGWARN, "%s: [0x%x] no state pointer for lun %jx or wildcard", __func__, aep->at_rxid, (uintmax_t)lun); if (lun == 0) { isp_endcmd(isp, aep, nphdl, 0, SCSI_STATUS_BUSY, 0); } else { isp_endcmd(isp, aep, nphdl, 0, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0); } return; } } /* * Start any commands pending resources first. */ if (isp_atio_restart(isp, 0, tptr)) goto noresrc; atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); if (atiop == NULL) { goto noresrc; } atp = isp_get_atpd(isp, 0, aep->at_rxid); if (atp == NULL) { goto noresrc; } atp->state = ATPD_STATE_ATIO; SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, atiop->ccb_h.path, "Take FREE ATIO\n"); atiop->ccb_h.target_id = ISP_MAX_TARGETS(isp); atiop->ccb_h.target_lun = lun; /* * We don't get 'suggested' sense data as we do with SCSI cards. */ atiop->sense_len = 0; /* * If we're not in the port database, add ourselves. */ if (IS_2100(isp)) atiop->init_id = nphdl; else { if (isp_find_pdb_by_handle(isp, 0, nphdl, &lp)) { atiop->init_id = FC_PORTDB_TGT(isp, 0, lp); } else { isp_prt(isp, ISP_LOGTINFO, "%s: port %x isn't in PDB", __func__, nphdl); isp_dump_portdb(isp, 0); isp_endcmd(isp, aep, NIL_HANDLE, 0, ECMD_TERMINATE, 0); return; } } atiop->cdb_len = ATIO2_CDBLEN; ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); atiop->ccb_h.status = CAM_CDB_RECVD; atiop->tag_id = atp->tag; switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { case ATIO2_TC_ATTR_SIMPLEQ: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_SIMPLE_Q_TAG; break; case ATIO2_TC_ATTR_HEADOFQ: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_HEAD_OF_Q_TAG; break; case ATIO2_TC_ATTR_ORDERED: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_ORDERED_Q_TAG; break; case ATIO2_TC_ATTR_ACAQ: /* ?? 
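* ACA queueing is not handled specially here, so it falls through and is treated as untagged.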
*/ case ATIO2_TC_ATTR_UNTAGGED: default: atiop->tag_action = 0; break; } atp->orig_datalen = aep->at_datalen; atp->bytes_xfered = 0; atp->lun = lun; atp->nphdl = nphdl; atp->sid = PORT_ANY; atp->oxid = aep->at_oxid; atp->cdb0 = aep->at_cdb[0]; atp->tattr = aep->at_taskflags & ATIO2_TC_ATTR_MASK; atp->state = ATPD_STATE_CAM; xpt_done((union ccb *)atiop); isp_prt(isp, ISP_LOGTDEBUG0, "ATIO2[0x%x] CDB=0x%x lun %jx datalen %u", aep->at_rxid, atp->cdb0, (uintmax_t)lun, atp->orig_datalen); return; noresrc: ntp = isp_get_ntpd(isp, 0); if (ntp == NULL) { isp_endcmd(isp, aep, nphdl, 0, SCSI_STATUS_BUSY, 0); return; } memcpy(ntp->data, aep, QENTRY_LEN); STAILQ_INSERT_TAIL(&tptr->restart_queue, ntp, next); } static void isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep) { int cdbxlen; lun_id_t lun; uint16_t chan, nphdl = NIL_HANDLE; uint32_t did, sid; fcportdb_t *lp; tstate_t *tptr; struct ccb_accept_tio *atiop; atio_private_data_t *atp = NULL; atio_private_data_t *oatp; inot_private_data_t *ntp; did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2]; sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2]; lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(aep->at_cmnd.fcp_cmnd_lun)); if (ISP_CAP_MULTI_ID(isp) && isp->isp_nchan > 1) { /* Channel has to be derived from D_ID */ isp_find_chan_by_did(isp, did, &chan); if (chan == ISP_NOCHAN) { isp_prt(isp, ISP_LOGWARN, "%s: [RX_ID 0x%x] D_ID %x not found on any channel", __func__, aep->at_rxid, did); isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN, ECMD_TERMINATE, 0); return; } } else { chan = 0; } /* * Find the PDB entry for this initiator */ if (isp_find_pdb_by_portid(isp, chan, sid, &lp) == 0) { /* * If we're not in the port database terminate the exchange. */ isp_prt(isp, ISP_LOGTINFO, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x wasn't in PDB already", __func__, aep->at_rxid, did, chan, sid); isp_dump_portdb(isp, chan); isp_endcmd(isp, aep, NIL_HANDLE, chan, ECMD_TERMINATE, 0); return; } nphdl = lp->handle; /* * Get the tstate pointer */ tptr = get_lun_statep(isp, chan, lun); if (tptr == NULL) { tptr = get_lun_statep(isp, chan, CAM_LUN_WILDCARD); if (tptr == NULL) { isp_prt(isp, ISP_LOGWARN, "%s: [0x%x] no state pointer for lun %jx or wildcard", __func__, aep->at_rxid, (uintmax_t)lun); if (lun == 0) { isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0); } else { isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0); } return; } } /* * Start any commands pending resources first. */ if (isp_atio_restart(isp, chan, tptr)) goto noresrc; /* * If the f/w is out of resources, just send a BUSY status back. */ if (aep->at_rxid == AT7_NORESRC_RXID) { isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0); return; } /* * If we're out of resources, just send a BUSY status back. 
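* (or, if a notify slot is still free, park the ATIO on the restart queue and retry it later).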
*/ atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); if (atiop == NULL) { isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atios", aep->at_rxid); goto noresrc; } oatp = isp_find_atpd(isp, chan, aep->at_rxid); if (oatp) { isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] tag wraparound in isp_handle_platforms_atio7 (N-Port Handle 0x%04x S_ID 0x%04x OX_ID 0x%04x) oatp state %d", aep->at_rxid, nphdl, sid, aep->at_hdr.ox_id, oatp->state); /* * It's not a "no resource" condition- but we can treat it like one */ goto noresrc; } atp = isp_get_atpd(isp, chan, aep->at_rxid); if (atp == NULL) { isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atps", aep->at_rxid); goto noresrc; } atp->word3 = lp->prli_word3; atp->state = ATPD_STATE_ATIO; SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, atiop->ccb_h.path, "Take FREE ATIO\n"); atiop->init_id = FC_PORTDB_TGT(isp, chan, lp); atiop->ccb_h.target_id = ISP_MAX_TARGETS(isp); atiop->ccb_h.target_lun = lun; atiop->sense_len = 0; cdbxlen = aep->at_cmnd.fcp_cmnd_alen_datadir >> FCP_CMND_ADDTL_CDBLEN_SHIFT; if (cdbxlen) { isp_prt(isp, ISP_LOGWARN, "additional CDBLEN ignored"); } cdbxlen = sizeof (aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb); ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb, cdbxlen); atiop->cdb_len = cdbxlen; atiop->ccb_h.status = CAM_CDB_RECVD; atiop->tag_id = atp->tag; switch (aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK) { case FCP_CMND_TASK_ATTR_SIMPLE: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_SIMPLE_Q_TAG; break; case FCP_CMND_TASK_ATTR_HEAD: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_HEAD_OF_Q_TAG; break; case FCP_CMND_TASK_ATTR_ORDERED: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_ORDERED_Q_TAG; break; default: /* FALLTHROUGH */ case FCP_CMND_TASK_ATTR_ACA: case FCP_CMND_TASK_ATTR_UNTAGGED: atiop->tag_action = 0; break; } + atiop->priority = (aep->at_cmnd.fcp_cmnd_task_attribute & + FCP_CMND_PRIO_MASK) >> FCP_CMND_PRIO_SHIFT; atp->orig_datalen = aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl; atp->bytes_xfered = 0; atp->lun = lun; atp->nphdl = nphdl; atp->sid = sid; atp->did = did; atp->oxid = aep->at_hdr.ox_id; atp->rxid = aep->at_hdr.rx_id; atp->cdb0 = atiop->cdb_io.cdb_bytes[0]; atp->tattr = aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK; atp->state = ATPD_STATE_CAM; isp_prt(isp, ISP_LOGTDEBUG0, "ATIO7[0x%x] CDB=0x%x lun %jx datalen %u", aep->at_rxid, atp->cdb0, (uintmax_t)lun, atp->orig_datalen); xpt_done((union ccb *)atiop); return; noresrc: if (atp) isp_put_atpd(isp, chan, atp); ntp = isp_get_ntpd(isp, chan); if (ntp == NULL) { isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0); return; } memcpy(ntp->data, aep, QENTRY_LEN); STAILQ_INSERT_TAIL(&tptr->restart_queue, ntp, next); } /* * Handle starting an SRR (sequence retransmit request) * We get here when we've gotten the immediate notify * and the return of all outstanding CTIOs for this * transaction. */ static void isp_handle_srr_start(ispsoftc_t *isp, atio_private_data_t *atp) { in_fcentry_24xx_t *inot; uint32_t srr_off, ccb_off, ccb_len, ccb_end; union ccb *ccb; inot = (in_fcentry_24xx_t *)atp->srr; srr_off = inot->in_srr_reloff_lo | (inot->in_srr_reloff_hi << 16); ccb = atp->srr_ccb; atp->srr_ccb = NULL; atp->nsrr++; if (ccb == NULL) { isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] null ccb", atp->tag); goto fail; } ccb_off = ccb->ccb_h.spriv_field0; ccb_len = ccb->csio.dxfer_len; ccb_end = (ccb_off == ~0)? 
~0 : ccb_off + ccb_len; switch (inot->in_srr_iu) { case R_CTL_INFO_SOLICITED_DATA: /* * We have to restart a FCP_DATA data out transaction */ atp->sendst = 0; atp->bytes_xfered = srr_off; if (ccb_len == 0) { isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x but current CCB doesn't transfer data", atp->tag, srr_off); goto mdp; } if (srr_off < ccb_off || ccb_off > srr_off + ccb_len) { isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x not covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end); goto mdp; } isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end); break; case R_CTL_INFO_COMMAND_STATUS: isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] Got an FCP RSP SRR- resending status", atp->tag); atp->sendst = 1; /* * We have to restart a FCP_RSP IU transaction */ break; case R_CTL_INFO_DATA_DESCRIPTOR: /* * We have to restart an FCP DATA in transaction */ isp_prt(isp, ISP_LOGWARN, "Got an FCP DATA IN SRR- dropping"); goto fail; default: isp_prt(isp, ISP_LOGWARN, "Got an unknown information (%x) SRR- dropping", inot->in_srr_iu); goto fail; } /* * We can't do anything until this is acked, so we might as well start it now. * We aren't going to do the usual asynchronous ack issue because we need * to make sure this gets on the wire first. */ if (isp_notify_ack(isp, inot)) { isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose"); goto fail; } isp_target_start_ctio(isp, ccb, FROM_SRR); return; fail: inot->in_reserved = 1; isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQ_CMP_ERR; isp_complete_ctio(ccb); return; mdp: if (isp_notify_ack(isp, inot)) { isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose"); goto fail; } ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status = CAM_MESSAGE_RECV; /* * This is not a strict interpretation of MDP, but it's close */ ccb->csio.msg_ptr = &ccb->csio.sense_data.sense_buf[SSD_FULL_SIZE - 16]; ccb->csio.msg_len = 7; ccb->csio.msg_ptr[0] = MSG_EXTENDED; ccb->csio.msg_ptr[1] = 5; ccb->csio.msg_ptr[2] = 0; /* modify data pointer */ ccb->csio.msg_ptr[3] = srr_off >> 24; ccb->csio.msg_ptr[4] = srr_off >> 16; ccb->csio.msg_ptr[5] = srr_off >> 8; ccb->csio.msg_ptr[6] = srr_off; isp_complete_ctio(ccb); } static void isp_handle_platform_srr(ispsoftc_t *isp, isp_notify_t *notify) { in_fcentry_24xx_t *inot = notify->nt_lreserved; atio_private_data_t *atp; uint32_t tag = notify->nt_tagval & 0xffffffff; atp = isp_find_atpd(isp, notify->nt_channel, tag); if (atp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x in SRR Notify", __func__, tag); isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); return; } atp->srr_notify_rcvd = 1; memcpy(atp->srr, inot, sizeof (atp->srr)); isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] flags 0x%x srr_iu %x reloff 0x%x", inot->in_rxid, inot->in_flags, inot->in_srr_iu, ((uint32_t)inot->in_srr_reloff_hi << 16) | inot->in_srr_reloff_lo); if (atp->srr_ccb) isp_handle_srr_start(isp, atp); } static void isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) { union ccb *ccb; int sentstatus = 0, ok = 0, notify_cam = 0, failure = 0; atio_private_data_t *atp = NULL; int bus; uint32_t handle, data_requested, resid; handle = ((ct2_entry_t *)arg)->ct_syshandle; ccb = isp_find_xs(isp, handle); if (ccb == NULL) { isp_print_bytes(isp, "null ccb in isp_handle_platform_ctio", QENTRY_LEN, arg); return; } isp_destroy_handle(isp, handle); resid = 
data_requested = PISP_PCMD(ccb)->datalen; isp_free_pcmd(isp, ccb); bus = XS_CHANNEL(ccb); if (IS_24XX(isp)) { atp = isp_find_atpd(isp, bus, ((ct7_entry_t *)arg)->ct_rxid); } else { atp = isp_find_atpd(isp, bus, ((ct2_entry_t *)arg)->ct_rxid); } if (atp == NULL) { /* * XXX: isp_clear_commands() generates fake CTIO with zero * ct_rxid value, filling only ct_syshandle. Workaround * that using tag_id from the CCB, pointed by ct_syshandle. */ atp = isp_find_atpd(isp, bus, ccb->csio.tag_id); } if (atp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x after I/O", __func__, ccb->csio.tag_id); return; } KASSERT((atp->ctcnt > 0), ("ctio count not greater than zero")); atp->bytes_in_transit -= data_requested; atp->ctcnt -= 1; ccb->ccb_h.status &= ~CAM_STATUS_MASK; if (IS_24XX(isp)) { ct7_entry_t *ct = arg; if (ct->ct_nphdl == CT7_SRR) { atp->srr_ccb = ccb; if (atp->srr_notify_rcvd) isp_handle_srr_start(isp, atp); return; } if (ct->ct_nphdl == CT_HBA_RESET) { sentstatus = (ccb->ccb_h.flags & CAM_SEND_STATUS) && (atp->sendst == 0); failure = CAM_UNREC_HBA_ERROR; } else { sentstatus = ct->ct_flags & CT7_SENDSTATUS; ok = (ct->ct_nphdl == CT7_OK); notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0; if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA) resid = ct->ct_resid; } isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO7[%x] seq %u nc %d sts 0x%x flg 0x%x sns %d resid %d %s", __func__, ct->ct_rxid, ATPD_GET_SEQNO(ct), notify_cam, ct->ct_nphdl, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID"); } else { ct2_entry_t *ct = arg; if (ct->ct_status == CT_SRR) { atp->srr_ccb = ccb; if (atp->srr_notify_rcvd) isp_handle_srr_start(isp, atp); isp_target_putback_atio(ccb); return; } if (ct->ct_status == CT_HBA_RESET) { sentstatus = (ccb->ccb_h.flags & CAM_SEND_STATUS) && (atp->sendst == 0); failure = CAM_UNREC_HBA_ERROR; } else { sentstatus = ct->ct_flags & CT2_SENDSTATUS; ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0; if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) resid = ct->ct_resid; } isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO2[%x] seq %u nc %d sts 0x%x flg 0x%x sns %d resid %d %s", __func__, ct->ct_rxid, ATPD_GET_SEQNO(ct), notify_cam, ct->ct_status, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID"); } if (ok) { if (data_requested > 0) { atp->bytes_xfered += data_requested - resid; ccb->csio.resid = ccb->csio.dxfer_len - (data_requested - resid); } if (sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) ccb->ccb_h.status |= CAM_SENT_SENSE; ccb->ccb_h.status |= CAM_REQ_CMP; } else { notify_cam = 1; if (failure == CAM_UNREC_HBA_ERROR) ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR; else ccb->ccb_h.status |= CAM_REQ_CMP_ERR; } atp->state = ATPD_STATE_PDON; /* * We never *not* notify CAM when there has been any error (ok == 0), * so we never need to do an ATIO putback if we're not notifying CAM. */ isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done (ok=%d nc=%d nowsendstatus=%d ccb ss=%d)", (sentstatus)? " FINAL " : "MIDTERM ", atp->tag, ok, notify_cam, atp->sendst, (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0); if (notify_cam == 0) { if (atp->sendst) { isp_target_start_ctio(isp, ccb, FROM_CTIO_DONE); } return; } /* * We are done with this ATIO if we successfully sent status. * In all other cases expect either another CTIO or XPT_ABORT. 
*/ if (ok && sentstatus) isp_put_atpd(isp, bus, atp); /* * We're telling CAM we're done with this CTIO transaction. * * 24XX cards never need an ATIO put back. * * Other cards need one put back only on error. * In the latter case, a timeout will re-fire * and try again in case we didn't have * queue resources to do so at first. In any case, * once the putback is done we do the completion * call. */ if (ok || IS_24XX(isp)) { isp_complete_ctio(ccb); } else { isp_target_putback_atio(ccb); } } static int isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp, uint32_t rsp) { if (isp->isp_state != ISP_RUNSTATE) { isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) acked- h/w not ready (dropping)", mp->nt_ncode, mp->nt_lreserved != NULL); return (0); } /* * This case is for a Task Management Function, which shows up as an ATIO7 entry. */ if (IS_24XX(isp) && mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ATIO) { ct7_entry_t local, *cto = &local; at7_entry_t *aep = (at7_entry_t *)mp->nt_lreserved; fcportdb_t *lp; uint32_t sid; uint16_t nphdl; sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2]; if (isp_find_pdb_by_portid(isp, mp->nt_channel, sid, &lp)) { nphdl = lp->handle; } else { nphdl = NIL_HANDLE; } ISP_MEMZERO(&local, sizeof (local)); cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; cto->ct_header.rqs_entry_count = 1; cto->ct_nphdl = nphdl; cto->ct_rxid = aep->at_rxid; cto->ct_vpidx = mp->nt_channel; cto->ct_iid_lo = sid; cto->ct_iid_hi = sid >> 16; cto->ct_oxid = aep->at_hdr.ox_id; cto->ct_flags = CT7_SENDSTATUS|CT7_NOACK|CT7_NO_DATA|CT7_FLAG_MODE1; cto->ct_flags |= (aep->at_ta_len >> 12) << CT7_TASK_ATTR_SHIFT; if (rsp != 0) { cto->ct_scsi_status |= (FCP_RSPLEN_VALID << 8); cto->rsp.m1.ct_resplen = 4; ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp)); cto->rsp.m1.ct_resp[0] = rsp & 0xff; cto->rsp.m1.ct_resp[1] = (rsp >> 8) & 0xff; cto->rsp.m1.ct_resp[2] = (rsp >> 16) & 0xff; cto->rsp.m1.ct_resp[3] = (rsp >> 24) & 0xff; } return (isp_target_put_entry(isp, &local)); } /* * This case is for a responding to an ABTS frame */ if (IS_24XX(isp) && mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) { /* * Overload nt_need_ack here to mark whether we've terminated the associated command. 
*/ if (mp->nt_need_ack) { uint8_t storage[QENTRY_LEN]; ct7_entry_t *cto = (ct7_entry_t *) storage; abts_t *abts = (abts_t *)mp->nt_lreserved; ISP_MEMZERO(cto, sizeof (ct7_entry_t)); isp_prt(isp, ISP_LOGTDEBUG0, "%s: [%x] terminating after ABTS received", __func__, abts->abts_rxid_task); cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; cto->ct_header.rqs_entry_count = 1; cto->ct_nphdl = mp->nt_nphdl; cto->ct_rxid = abts->abts_rxid_task; cto->ct_iid_lo = mp->nt_sid; cto->ct_iid_hi = mp->nt_sid >> 16; cto->ct_oxid = abts->abts_ox_id; cto->ct_vpidx = mp->nt_channel; cto->ct_flags = CT7_NOACK|CT7_TERMINATE; if (isp_target_put_entry(isp, cto)) { return (ENOMEM); } mp->nt_need_ack = 0; } if (isp_acknak_abts(isp, mp->nt_lreserved, 0) == ENOMEM) { return (ENOMEM); } else { return (0); } } /* * Handle logout cases here */ if (mp->nt_ncode == NT_GLOBAL_LOGOUT) { isp_del_all_wwn_entries(isp, mp->nt_channel); } if (mp->nt_ncode == NT_LOGOUT) { if (!IS_2100(isp) && IS_FC(isp)) { isp_del_wwn_entries(isp, mp); } } /* * General purpose acknowledgement */ if (mp->nt_need_ack) { isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) being acked", mp->nt_ncode, mp->nt_lreserved != NULL); /* * Don't need to use the guaranteed send because the caller can retry */ return (isp_notify_ack(isp, mp->nt_lreserved)); } return (0); } /* * Handle task management functions. * * We show up here with a notify structure filled out. * * The nt_lreserved tag points to the original queue entry */ static void isp_handle_platform_target_tmf(ispsoftc_t *isp, isp_notify_t *notify) { tstate_t *tptr; fcportdb_t *lp; struct ccb_immediate_notify *inot; inot_private_data_t *ntp = NULL; atio_private_data_t *atp; lun_id_t lun; isp_prt(isp, ISP_LOGTDEBUG0, "%s: code 0x%x sid 0x%x tagval 0x%016llx chan %d lun %jx", __func__, notify->nt_ncode, notify->nt_sid, (unsigned long long) notify->nt_tagval, notify->nt_channel, notify->nt_lun); if (notify->nt_lun == LUN_ANY) { if (notify->nt_tagval == TAG_ANY) { lun = CAM_LUN_WILDCARD; } else { atp = isp_find_atpd(isp, notify->nt_channel, notify->nt_tagval & 0xffffffff); lun = atp ? 
atp->lun : CAM_LUN_WILDCARD; } } else { lun = notify->nt_lun; } tptr = get_lun_statep(isp, notify->nt_channel, lun); if (tptr == NULL) { tptr = get_lun_statep(isp, notify->nt_channel, CAM_LUN_WILDCARD); if (tptr == NULL) { isp_prt(isp, ISP_LOGWARN, "%s: no state pointer found for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun); goto bad; } } inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots); if (inot == NULL) { isp_prt(isp, ISP_LOGWARN, "%s: out of immediate notify structures for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun); goto bad; } inot->ccb_h.target_id = ISP_MAX_TARGETS(isp); inot->ccb_h.target_lun = lun; if (isp_find_pdb_by_portid(isp, notify->nt_channel, notify->nt_sid, &lp) == 0 && isp_find_pdb_by_handle(isp, notify->nt_channel, notify->nt_nphdl, &lp) == 0) { inot->initiator_id = CAM_TARGET_WILDCARD; } else { inot->initiator_id = FC_PORTDB_TGT(isp, notify->nt_channel, lp); } inot->seq_id = notify->nt_tagval; inot->tag_id = notify->nt_tagval >> 32; switch (notify->nt_ncode) { case NT_ABORT_TASK: isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, inot->tag_id); inot->arg = MSG_ABORT_TASK; break; case NT_ABORT_TASK_SET: isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, TAG_ANY); inot->arg = MSG_ABORT_TASK_SET; break; case NT_CLEAR_ACA: inot->arg = MSG_CLEAR_ACA; break; case NT_CLEAR_TASK_SET: inot->arg = MSG_CLEAR_TASK_SET; break; case NT_LUN_RESET: inot->arg = MSG_LOGICAL_UNIT_RESET; break; case NT_TARGET_RESET: inot->arg = MSG_TARGET_RESET; break; case NT_QUERY_TASK_SET: inot->arg = MSG_QUERY_TASK_SET; break; case NT_QUERY_ASYNC_EVENT: inot->arg = MSG_QUERY_ASYNC_EVENT; break; default: isp_prt(isp, ISP_LOGWARN, "%s: unknown TMF code 0x%x for chan %d lun %#jx", __func__, notify->nt_ncode, notify->nt_channel, (uintmax_t)lun); goto bad; } ntp = isp_get_ntpd(isp, notify->nt_channel); if (ntp == NULL) { isp_prt(isp, ISP_LOGWARN, "%s: out of inotify private structures", __func__); goto bad; } ISP_MEMCPY(&ntp->nt, notify, sizeof (isp_notify_t)); if (notify->nt_lreserved) { ISP_MEMCPY(&ntp->data, notify->nt_lreserved, QENTRY_LEN); ntp->nt.nt_lreserved = &ntp->data; } ntp->seq_id = notify->nt_tagval; ntp->tag_id = notify->nt_tagval >> 32; SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, inot->ccb_h.path, "Take FREE INOT\n"); inot->ccb_h.status = CAM_MESSAGE_RECV; xpt_done((union ccb *)inot); return; bad: if (notify->nt_need_ack) { if (((isphdr_t *)notify->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) { if (isp_acknak_abts(isp, notify->nt_lreserved, ENOMEM)) { isp_prt(isp, ISP_LOGWARN, "you lose- unable to send an ACKNAK"); } } else { isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, notify->nt_lreserved); } } } static void isp_target_mark_aborted_early(ispsoftc_t *isp, int chan, tstate_t *tptr, uint32_t tag_id) { atio_private_data_t *atp, *atpool; inot_private_data_t *ntp, *tmp; uint32_t this_tag_id; /* * First, clean any commands pending restart */ STAILQ_FOREACH_SAFE(ntp, &tptr->restart_queue, next, tmp) { if (IS_24XX(isp)) this_tag_id = ((at7_entry_t *)ntp->data)->at_rxid; else this_tag_id = ((at2_entry_t *)ntp->data)->at_rxid; if ((uint64_t)tag_id == TAG_ANY || tag_id == this_tag_id) { isp_endcmd(isp, ntp->data, NIL_HANDLE, chan, ECMD_TERMINATE, 0); isp_put_ntpd(isp, chan, ntp); STAILQ_REMOVE(&tptr->restart_queue, ntp, inot_private_data, next); } } /* * Now mark other ones dead as well. 
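* Entries in the per-channel ATIO pool for this LUN whose tag matches (or any tag, when TAG_ANY was given) are only flagged dead here; nothing is completed or terminated from this loop.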
*/ ISP_GET_PC(isp, chan, atpool, atpool); for (atp = atpool; atp < &atpool[ATPDPSIZE]; atp++) { if (atp->lun != tptr->ts_lun) continue; if ((uint64_t)tag_id == TAG_ANY || atp->tag == tag_id) atp->dead = 1; } } #endif static void isp_cam_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg) { struct cam_sim *sim; int bus, tgt; ispsoftc_t *isp; sim = (struct cam_sim *)cbarg; isp = (ispsoftc_t *) cam_sim_softc(sim); bus = cam_sim_bus(sim); tgt = xpt_path_target_id(path); switch (code) { case AC_LOST_DEVICE: if (IS_SCSI(isp)) { uint16_t oflags, nflags; sdparam *sdp = SDPARAM(isp, bus); if (tgt >= 0) { nflags = sdp->isp_devparam[tgt].nvrm_flags; nflags &= DPARM_SAFE_DFLT; if (isp->isp_loaded_fw) { nflags |= DPARM_NARROW | DPARM_ASYNC; } oflags = sdp->isp_devparam[tgt].goal_flags; sdp->isp_devparam[tgt].goal_flags = nflags; sdp->isp_devparam[tgt].dev_update = 1; sdp->update = 1; (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, bus); sdp->isp_devparam[tgt].goal_flags = oflags; } } break; default: isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code); break; } } static void isp_poll(struct cam_sim *sim) { ispsoftc_t *isp = cam_sim_softc(sim); ISP_RUN_ISR(isp); } static void isp_watchdog(void *arg) { struct ccb_scsiio *xs = arg; ispsoftc_t *isp; uint32_t ohandle = ISP_HANDLE_FREE, handle; isp = XS_ISP(xs); handle = isp_find_handle(isp, xs); /* * Hand crank the interrupt code just to be sure the command isn't stuck somewhere. */ if (handle != ISP_HANDLE_FREE) { ISP_RUN_ISR(isp); ohandle = handle; handle = isp_find_handle(isp, xs); } if (handle != ISP_HANDLE_FREE) { /* * Try and make sure the command is really dead before * we release the handle (and DMA resources) for reuse. * * If we are successful in aborting the command then * we're done here because we'll get the command returned * back separately. */ if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) { return; } /* * Note that after calling the above, the command may in * fact have been completed. */ xs = isp_find_xs(isp, handle); /* * If the command no longer exists, then we won't * be able to find the xs again with this handle. */ if (xs == NULL) { return; } /* * After this point, the command is really dead. */ if (XS_XFRLEN(xs)) { ISP_DMAFREE(isp, xs, handle); } isp_destroy_handle(isp, handle); isp_prt(isp, ISP_LOGERR, "%s: timeout for handle 0x%x", __func__, handle); XS_SETERR(xs, CAM_CMD_TIMEOUT); isp_done(xs); } else { if (ohandle != ISP_HANDLE_FREE) { isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle 0x%x, recovered during interrupt", __func__, ohandle); } else { isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle already free", __func__); } } } static void isp_make_here(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt) { union ccb *ccb; struct isp_fc *fc = ISP_FC_PC(isp, chan); /* * Allocate a CCB, create a wildcard path for this target and schedule a rescan. 
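* The CCB comes from xpt_alloc_ccb_nowait() since we may be running in a context that cannot sleep; if the allocation or the wildcard path creation fails we just log it and skip the rescan, and on success xpt_rescan() takes ownership of the CCB.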
*/ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { isp_prt(isp, ISP_LOGWARN, "Chan %d unable to alloc CCB for rescan", chan); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan"); xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } static void isp_make_gone(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt) { struct cam_path *tp; struct isp_fc *fc = ISP_FC_PC(isp, chan); if (xpt_create_path(&tp, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) == CAM_REQ_CMP) { xpt_async(AC_LOST_DEVICE, tp, NULL); xpt_free_path(tp); } } /* * Gone Device Timer Function- when we have decided that a device has gone * away, we wait a specific period of time prior to telling the OS it has * gone away. * * This timer function fires once a second and then scans the port database * for devices that are marked dead but still have a virtual target assigned. * We decrement a counter for that port database entry, and when it hits zero, * we tell the OS the device has gone away. */ static void isp_gdt(void *arg) { struct isp_fc *fc = arg; taskqueue_enqueue(taskqueue_thread, &fc->gtask); } static void isp_gdt_task(void *arg, int pending) { struct isp_fc *fc = arg; ispsoftc_t *isp = fc->isp; int chan = fc - isp->isp_osinfo.pc.fc; fcportdb_t *lp; struct ac_contract ac; struct ac_device_changed *adc; int dbidx, more_to_do = 0; ISP_LOCK(isp); isp_prt(isp, ISP_LOGDEBUG0, "Chan %d GDT timer expired", chan); for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { lp = &FCPARAM(isp, chan)->portdb[dbidx]; if (lp->state != FC_PORTDB_STATE_ZOMBIE) { continue; } if (lp->gone_timer != 0) { lp->gone_timer -= 1; more_to_do++; continue; } isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Gone Device Timeout"); if (lp->is_target) { lp->is_target = 0; isp_make_gone(isp, lp, chan, dbidx); } if (lp->is_initiator) { lp->is_initiator = 0; ac.contract_number = AC_CONTRACT_DEV_CHG; adc = (struct ac_device_changed *) ac.contract_data; adc->wwpn = lp->port_wwn; adc->port = lp->portid; adc->target = dbidx; adc->arrived = 0; xpt_async(AC_CONTRACT, fc->path, &ac); } lp->state = FC_PORTDB_STATE_NIL; } if (fc->ready) { if (more_to_do) { callout_reset(&fc->gdt, hz, isp_gdt, fc); } else { callout_deactivate(&fc->gdt); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Stopping Gone Device Timer @ %lu", chan, (unsigned long) time_uptime); } } ISP_UNLOCK(isp); } /* * When loop goes down we remember the time and freeze CAM command queue. * During some time period we are trying to reprobe the loop. But if we * fail, we tell the OS that devices have gone away and drop the freeze. * * We don't clear the devices out of our port database because, when loop * come back up, we have to do some actual cleanup with the chip at that * point (implicit PLOGO, e.g., to get the chip's port database state right). 
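* isp_loop_changed() below records the time of the change and freezes the queue, isp_kthread() does the periodic reprobing, and isp_loop_dead() delivers the final departure notifications once we give up.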
*/ static void isp_loop_changed(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); struct isp_fc *fc = ISP_FC_PC(isp, chan); if (fc->loop_down_time) return; isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop changed", chan); if (fcp->role & ISP_ROLE_INITIATOR) isp_freeze_loopdown(isp, chan); fc->loop_down_time = time_uptime; wakeup(fc); } static void isp_loop_up(ispsoftc_t *isp, int chan) { struct isp_fc *fc = ISP_FC_PC(isp, chan); isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is up", chan); fc->loop_seen_once = 1; fc->loop_down_time = 0; isp_unfreeze_loopdown(isp, chan); } static void isp_loop_dead(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); struct isp_fc *fc = ISP_FC_PC(isp, chan); fcportdb_t *lp; struct ac_contract ac; struct ac_device_changed *adc; int dbidx, i; isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is dead", chan); /* * Notify to the OS all targets who we now consider have departed. */ for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { lp = &fcp->portdb[dbidx]; if (lp->state == FC_PORTDB_STATE_NIL) continue; for (i = 0; i < isp->isp_maxcmds; i++) { struct ccb_scsiio *xs; if (ISP_H2HT(isp->isp_xflist[i].handle) != ISP_HANDLE_INITIATOR) { continue; } if ((xs = isp->isp_xflist[i].cmd) == NULL) { continue; } if (dbidx != XS_TGT(xs)) { continue; } isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx orphaned by loop down timeout", isp->isp_xflist[i].handle, chan, XS_TGT(xs), (uintmax_t)XS_LUN(xs)); /* * Just like in isp_watchdog, abort the outstanding * command or immediately free its resources if it is * not active */ if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) { continue; } if (XS_XFRLEN(xs)) { ISP_DMAFREE(isp, xs, isp->isp_xflist[i].handle); } isp_destroy_handle(isp, isp->isp_xflist[i].handle); isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx could not be aborted and was destroyed", isp->isp_xflist[i].handle, chan, XS_TGT(xs), (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_BUSRESET); isp_done(xs); } isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Loop Down Timeout"); if (lp->is_target) { lp->is_target = 0; isp_make_gone(isp, lp, chan, dbidx); } if (lp->is_initiator) { lp->is_initiator = 0; ac.contract_number = AC_CONTRACT_DEV_CHG; adc = (struct ac_device_changed *) ac.contract_data; adc->wwpn = lp->port_wwn; adc->port = lp->portid; adc->target = dbidx; adc->arrived = 0; xpt_async(AC_CONTRACT, fc->path, &ac); } } isp_unfreeze_loopdown(isp, chan); fc->loop_down_time = 0; } static void isp_kthread(void *arg) { struct isp_fc *fc = arg; ispsoftc_t *isp = fc->isp; int chan = fc - isp->isp_osinfo.pc.fc; int slp = 0, d; int lb, lim; ISP_LOCK(isp); while (isp->isp_osinfo.is_exiting == 0) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Checking FC state", chan); lb = isp_fc_runstate(isp, chan, 250000); isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d FC got to %s state", chan, isp_fc_loop_statename(lb)); /* * Our action is different based upon whether we're supporting * Initiator mode or not. If we are, we might freeze the simq * when loop is down and set all sorts of different delays to * check again. * * If not, we simply just wait for loop to come up. */ if (lb == LOOP_READY || lb < 0) { slp = 0; } else { /* * If we've never seen loop up and we've waited longer * than quickboot time, or we've seen loop up but we've * waited longer than loop_down_limit, give up and go * to sleep until loop comes up. 
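* Until then the polling interval backs off with the age of the outage: every second for the first 10 seconds, every 5 seconds up to 30, every 10 up to 60, every 20 up to 120, and every 30 seconds after that.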
*/ if (fc->loop_seen_once == 0) lim = isp_quickboot_time; else lim = fc->loop_down_limit; d = time_uptime - fc->loop_down_time; if (d >= lim) slp = 0; else if (d < 10) slp = 1; else if (d < 30) slp = 5; else if (d < 60) slp = 10; else if (d < 120) slp = 20; else slp = 30; } if (slp == 0) { if (lb == LOOP_READY) isp_loop_up(isp, chan); else isp_loop_dead(isp, chan); } isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d sleep for %d seconds", chan, slp); msleep(fc, &isp->isp_lock, PRIBIO, "ispf", slp * hz); } fc->num_threads -= 1; ISP_UNLOCK(isp); kthread_exit(); } #ifdef ISP_TARGET_MODE static void isp_abort_atio(ispsoftc_t *isp, union ccb *ccb) { atio_private_data_t *atp; union ccb *accb = ccb->cab.abort_ccb; struct ccb_hdr *sccb; tstate_t *tptr; tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb)); if (tptr != NULL) { /* Search for the ATIO among queueued. */ SLIST_FOREACH(sccb, &tptr->atios, sim_links.sle) { if (sccb != &accb->ccb_h) continue; SLIST_REMOVE(&tptr->atios, sccb, ccb_hdr, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path, "Abort FREE ATIO\n"); accb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(accb); ccb->ccb_h.status = CAM_REQ_CMP; return; } } /* Search for the ATIO among running. */ atp = isp_find_atpd(isp, XS_CHANNEL(accb), accb->atio.tag_id); if (atp != NULL) { /* Send TERMINATE to firmware. */ if (!atp->dead && IS_24XX(isp)) { uint8_t storage[QENTRY_LEN]; ct7_entry_t *cto = (ct7_entry_t *) storage; ISP_MEMZERO(cto, sizeof (ct7_entry_t)); cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; cto->ct_header.rqs_entry_count = 1; cto->ct_nphdl = atp->nphdl; cto->ct_rxid = atp->tag; cto->ct_iid_lo = atp->sid; cto->ct_iid_hi = atp->sid >> 16; cto->ct_oxid = atp->oxid; cto->ct_vpidx = XS_CHANNEL(accb); cto->ct_flags = CT7_NOACK|CT7_TERMINATE; isp_target_put_entry(isp, cto); } isp_put_atpd(isp, XS_CHANNEL(accb), atp); ccb->ccb_h.status = CAM_REQ_CMP; } else { ccb->ccb_h.status = CAM_UA_ABORT; } } static void isp_abort_inot(ispsoftc_t *isp, union ccb *ccb) { inot_private_data_t *ntp; union ccb *accb = ccb->cab.abort_ccb; struct ccb_hdr *sccb; tstate_t *tptr; tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb)); if (tptr != NULL) { /* Search for the INOT among queueued. */ SLIST_FOREACH(sccb, &tptr->inots, sim_links.sle) { if (sccb != &accb->ccb_h) continue; SLIST_REMOVE(&tptr->inots, sccb, ccb_hdr, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path, "Abort FREE INOT\n"); accb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(accb); ccb->ccb_h.status = CAM_REQ_CMP; return; } } /* Search for the INOT among running. */ ntp = isp_find_ntpd(isp, XS_CHANNEL(accb), accb->cin1.tag_id, accb->cin1.seq_id); if (ntp != NULL) { if (ntp->nt.nt_need_ack) { isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, ntp->nt.nt_lreserved); } isp_put_ntpd(isp, XS_CHANNEL(accb), ntp); ccb->ccb_h.status = CAM_REQ_CMP; } else { ccb->ccb_h.status = CAM_UA_ABORT; return; } } #endif static void isp_action(struct cam_sim *sim, union ccb *ccb) { int bus, tgt, error; ispsoftc_t *isp; struct ccb_trans_settings *cts; sbintime_t ts; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); isp = (ispsoftc_t *)cam_sim_softc(sim); ISP_ASSERT_LOCKED(isp); bus = cam_sim_bus(sim); isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); ISP_PCMD(ccb) = NULL; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ /* * Do a couple of preliminary checks... 
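* A CDB passed by pointer (CAM_CDB_POINTER) has to be a virtual address; CAM_CDB_PHYS is not supported and such a request is failed with CAM_REQ_INVALID.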
*/ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { ccb->ccb_h.status = CAM_REQ_INVALID; isp_done((struct ccb_scsiio *) ccb); break; } } ccb->csio.req_map = NULL; #ifdef DIAGNOSTIC if (ccb->ccb_h.target_id >= ISP_MAX_TARGETS(isp)) { xpt_print(ccb->ccb_h.path, "invalid target\n"); ccb->ccb_h.status = CAM_PATH_INVALID; } else if (ISP_MAX_LUNS(isp) > 0 && ccb->ccb_h.target_lun >= ISP_MAX_LUNS(isp)) { xpt_print(ccb->ccb_h.path, "invalid lun\n"); ccb->ccb_h.status = CAM_PATH_INVALID; } if (ccb->ccb_h.status == CAM_PATH_INVALID) { xpt_done(ccb); break; } #endif ccb->csio.scsi_status = SCSI_STATUS_OK; if (isp_get_pcmd(isp, ccb)) { isp_prt(isp, ISP_LOGWARN, "out of PCMDs"); cam_freeze_devq(ccb->ccb_h.path); cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); break; } error = isp_start((XS_T *) ccb); switch (error) { case CMD_QUEUED: ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) break; /* Give firmware extra 10s to handle timeout. */ ts = SBT_1MS * ccb->ccb_h.timeout + 10 * SBT_1S; callout_reset_sbt(&PISP_PCMD(ccb)->wdog, ts, 0, isp_watchdog, ccb, 0); break; case CMD_RQLATER: isp_prt(isp, ISP_LOGDEBUG0, "%d.%jx retry later", XS_TGT(ccb), (uintmax_t)XS_LUN(ccb)); cam_freeze_devq(ccb->ccb_h.path); cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); ccb->ccb_h.status = CAM_REQUEUE_REQ; isp_free_pcmd(isp, ccb); xpt_done(ccb); break; case CMD_EAGAIN: isp_free_pcmd(isp, ccb); cam_freeze_devq(ccb->ccb_h.path); cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 100, 0); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); break; case CMD_COMPLETE: isp_done((struct ccb_scsiio *) ccb); break; default: isp_prt(isp, ISP_LOGERR, "What's this? 
0x%x at %d in file %s", error, __LINE__, __FILE__); ccb->ccb_h.status = CAM_REQUEUE_REQ; isp_free_pcmd(isp, ccb); xpt_done(ccb); } break; #ifdef ISP_TARGET_MODE case XPT_EN_LUN: /* Enable/Disable LUN as a target */ if (ccb->cel.enable) { isp_enable_lun(isp, ccb); } else { isp_disable_lun(isp, ccb); } break; case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */ case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ { tstate_t *tptr = get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); if (tptr == NULL) { const char *str; if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) str = "XPT_IMMEDIATE_NOTIFY"; else str = "XPT_ACCEPT_TARGET_IO"; ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: no state pointer found for %s\n", __func__, str); ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); break; } ccb->ccb_h.spriv_field0 = 0; ccb->ccb_h.spriv_ptr1 = isp; if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { ccb->atio.tag_id = 0; SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path, "Put FREE ATIO\n"); } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { ccb->cin1.seq_id = ccb->cin1.tag_id = 0; SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path, "Put FREE INOT\n"); } ccb->ccb_h.status = CAM_REQ_INPROG; break; } case XPT_NOTIFY_ACKNOWLEDGE: /* notify ack */ { inot_private_data_t *ntp; /* * XXX: Because we cannot guarantee that the path information in the notify acknowledge ccb * XXX: matches that for the immediate notify, we have to *search* for the notify structure */ /* * All the relevant path information is in the associated immediate notify */ ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] NOTIFY ACKNOWLEDGE for 0x%x seen\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id); ntp = isp_find_ntpd(isp, XS_CHANNEL(ccb), ccb->cna2.tag_id, ccb->cna2.seq_id); if (ntp == NULL) { ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] XPT_NOTIFY_ACKNOWLEDGE of 0x%x cannot find ntp private data\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id); ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); break; } if (isp_handle_platform_target_notify_ack(isp, &ntp->nt, (ccb->ccb_h.flags & CAM_SEND_STATUS) ? ccb->cna2.arg : 0)) { cam_freeze_devq(ccb->ccb_h.path); cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQUEUE_REQ; break; } isp_put_ntpd(isp, XS_CHANNEL(ccb), ntp); ccb->ccb_h.status = CAM_REQ_CMP; ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] calling xpt_done for tag 0x%x\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id); xpt_done(ccb); break; } case XPT_CONT_TARGET_IO: isp_target_start_ctio(isp, ccb, FROM_CAM); break; #endif case XPT_RESET_DEV: /* BDR the specified SCSI device */ tgt = ccb->ccb_h.target_id; tgt |= (bus << 16); error = isp_control(isp, ISPCTL_RESET_DEV, bus, tgt); if (error) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; } else { /* * If we have a FC device, reset the Command * Reference Number, because the target will expect * that we re-start the CRN at 1 after a reset. 
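* isp_fcp_reset_crn() clears the crnseed for every nexus on this target, so isp_fcp_next_crn() will hand out 1 for the next command sent to it.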
*/ if (IS_FC(isp)) isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); ccb->ccb_h.status = CAM_REQ_CMP; } xpt_done(ccb); break; case XPT_ABORT: /* Abort the specified CCB */ { union ccb *accb = ccb->cab.abort_ccb; switch (accb->ccb_h.func_code) { #ifdef ISP_TARGET_MODE case XPT_ACCEPT_TARGET_IO: isp_abort_atio(isp, ccb); break; case XPT_IMMEDIATE_NOTIFY: isp_abort_inot(isp, ccb); break; #endif case XPT_SCSI_IO: error = isp_control(isp, ISPCTL_ABORT_CMD, accb); if (error) { ccb->ccb_h.status = CAM_UA_ABORT; } else { ccb->ccb_h.status = CAM_REQ_CMP; } break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } /* * This is not a queued CCB, so the caller expects it to be * complete when control is returned. */ break; } #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ cts = &ccb->cts; if (!IS_CURRENT_SETTINGS(cts)) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } tgt = cts->ccb_h.target_id; if (IS_SCSI(isp)) { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; sdparam *sdp = SDPARAM(isp, bus); uint16_t *dptr; if (spi->valid == 0 && scsi->valid == 0) { ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } /* * We always update (internally) from goal_flags * so any request to change settings just gets * vectored to that location. */ dptr = &sdp->isp_devparam[tgt].goal_flags; if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) *dptr |= DPARM_DISC; else *dptr &= ~DPARM_DISC; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) *dptr |= DPARM_TQING; else *dptr &= ~DPARM_TQING; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) *dptr |= DPARM_WIDE; else *dptr &= ~DPARM_WIDE; } /* * XXX: FIX ME */ if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && (spi->valid & CTS_SPI_VALID_SYNC_RATE) && (spi->sync_period && spi->sync_offset)) { *dptr |= DPARM_SYNC; /* * XXX: CHECK FOR LEGALITY */ sdp->isp_devparam[tgt].goal_period = spi->sync_period; sdp->isp_devparam[tgt].goal_offset = spi->sync_offset; } else { *dptr &= ~DPARM_SYNC; } isp_prt(isp, ISP_LOGDEBUG0, "SET (%d.%d.%jx) to flags %x off %x per %x", bus, tgt, (uintmax_t)cts->ccb_h.target_lun, sdp->isp_devparam[tgt].goal_flags, sdp->isp_devparam[tgt].goal_offset, sdp->isp_devparam[tgt].goal_period); sdp->isp_devparam[tgt].dev_update = 1; sdp->update = 1; } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: cts = &ccb->cts; tgt = cts->ccb_h.target_id; if (IS_FC(isp)) { fcparam *fcp = FCPARAM(isp, bus); struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_FC; cts->transport_version = 0; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; fc->valid = CTS_FC_VALID_SPEED; fc->bitrate = 100000; fc->bitrate *= fcp->isp_gbspeed; if (tgt < MAX_FC_TARG) { fcportdb_t *lp = &fcp->portdb[tgt]; fc->wwnn = lp->node_wwn; fc->wwpn = lp->port_wwn; fc->port = lp->portid; fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; } } else { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; sdparam *sdp = SDPARAM(isp, bus); uint16_t dval, pval, oval; if (IS_CURRENT_SETTINGS(cts)) { 
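/* Have the core refresh the active (negotiated) parameters from the chip before we report them. */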
sdp->isp_devparam[tgt].dev_refresh = 1; sdp->update = 1; (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, bus); dval = sdp->isp_devparam[tgt].actv_flags; oval = sdp->isp_devparam[tgt].actv_offset; pval = sdp->isp_devparam[tgt].actv_period; } else { dval = sdp->isp_devparam[tgt].nvrm_flags; oval = sdp->isp_devparam[tgt].nvrm_offset; pval = sdp->isp_devparam[tgt].nvrm_period; } cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->valid = 0; scsi->valid = 0; spi->flags = 0; scsi->flags = 0; if (dval & DPARM_DISC) { spi->flags |= CTS_SPI_FLAGS_DISC_ENB; } if ((dval & DPARM_SYNC) && oval && pval) { spi->sync_offset = oval; spi->sync_period = pval; } else { spi->sync_offset = 0; spi->sync_period = 0; } spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->valid |= CTS_SPI_VALID_BUS_WIDTH; if (dval & DPARM_WIDE) { spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else { spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; if (dval & DPARM_TQING) { scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } spi->valid |= CTS_SPI_VALID_DISC; } isp_prt(isp, ISP_LOGDEBUG0, "GET %s (%d.%d.%jx) to flags %x off %x per %x", IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM", bus, tgt, (uintmax_t)cts->ccb_h.target_lun, dval, oval, pval); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); break; case XPT_RESET_BUS: /* Reset the specified bus */ error = isp_control(isp, ISPCTL_RESET_BUS, bus); if (error) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); break; } if (bootverbose) { xpt_print(ccb->ccb_h.path, "reset bus on channel %d\n", bus); } if (IS_FC(isp)) { xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, 0); } else { xpt_async(AC_BUS_RESET, ISP_SPI_PC(isp, bus)->path, 0); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_TERM_IO: /* Terminate the I/O process */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_SIM_KNOB: /* Set SIM knobs */ { struct ccb_sim_knob *kp = &ccb->knob; fcparam *fcp; if (!IS_FC(isp)) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } fcp = FCPARAM(isp, bus); if (kp->xport_specific.fc.valid & KNOB_VALID_ADDRESS) { fcp->isp_wwnn = ISP_FC_PC(isp, bus)->def_wwnn = kp->xport_specific.fc.wwnn; fcp->isp_wwpn = ISP_FC_PC(isp, bus)->def_wwpn = kp->xport_specific.fc.wwpn; isp_prt(isp, ISP_LOGALL, "Setting Channel %d wwns to 0x%jx 0x%jx", bus, fcp->isp_wwnn, fcp->isp_wwpn); } ccb->ccb_h.status = CAM_REQ_CMP; if (kp->xport_specific.fc.valid & KNOB_VALID_ROLE) { int rchange = 0; int newrole = 0; switch (kp->xport_specific.fc.role) { case KNOB_ROLE_NONE: if (fcp->role != ISP_ROLE_NONE) { rchange = 1; newrole = ISP_ROLE_NONE; } break; case KNOB_ROLE_TARGET: if (fcp->role != ISP_ROLE_TARGET) { rchange = 1; newrole = ISP_ROLE_TARGET; } break; case KNOB_ROLE_INITIATOR: if (fcp->role != ISP_ROLE_INITIATOR) { rchange = 1; newrole = ISP_ROLE_INITIATOR; } break; case KNOB_ROLE_BOTH: if (fcp->role != ISP_ROLE_BOTH) { rchange = 1; newrole = ISP_ROLE_BOTH; } break; } if (rchange) { ISP_PATH_PRT(isp, ISP_LOGCONFIG, ccb->ccb_h.path, "changing role on from %d to %d\n", fcp->role, newrole); if (isp_control(isp, ISPCTL_CHANGE_ROLE, bus, newrole) != 0) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); break; } } } xpt_done(ccb); break; } case XPT_GET_SIM_KNOB_OLD: /* Get SIM knobs -- compat value */ case XPT_GET_SIM_KNOB: /* Get SIM 
knobs */ { struct ccb_sim_knob *kp = &ccb->knob; if (IS_FC(isp)) { fcparam *fcp; fcp = FCPARAM(isp, bus); kp->xport_specific.fc.wwnn = fcp->isp_wwnn; kp->xport_specific.fc.wwpn = fcp->isp_wwpn; switch (fcp->role) { case ISP_ROLE_NONE: kp->xport_specific.fc.role = KNOB_ROLE_NONE; break; case ISP_ROLE_TARGET: kp->xport_specific.fc.role = KNOB_ROLE_TARGET; break; case ISP_ROLE_INITIATOR: kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR; break; case ISP_ROLE_BOTH: kp->xport_specific.fc.role = KNOB_ROLE_BOTH; break; } kp->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE; ccb->ccb_h.status = CAM_REQ_CMP; } else { ccb->ccb_h.status = CAM_REQ_INVALID; } xpt_done(ccb); break; } case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; #ifdef ISP_TARGET_MODE if (IS_FC(isp) && ISP_CAP_TMODE(isp) && ISP_CAP_SCCFW(isp)) cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; else #endif cpi->target_sprt = 0; cpi->hba_eng_cnt = 0; cpi->max_target = ISP_MAX_TARGETS(isp) - 1; cpi->max_lun = ISP_MAX_LUNS(isp) == 0 ? 255 : ISP_MAX_LUNS(isp) - 1; cpi->bus_id = cam_sim_bus(sim); if (sizeof (bus_size_t) > 4) cpi->maxio = (ISP_NSEG64_MAX - 1) * PAGE_SIZE; else cpi->maxio = (ISP_NSEG_MAX - 1) * PAGE_SIZE; if (IS_FC(isp)) { fcparam *fcp = FCPARAM(isp, bus); cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; cpi->hba_misc |= PIM_EXTLUNS | PIM_NOSCAN; /* * Because our loop ID can shift from time to time, * make our initiator ID out of range of our bus. */ cpi->initiator_id = cpi->max_target + 1; /* * Set base transfer capabilities for Fibre Channel, for this HBA. */ if (IS_25XX(isp)) { cpi->base_transfer_speed = 8000000; } else if (IS_24XX(isp)) { cpi->base_transfer_speed = 4000000; } else if (IS_23XX(isp)) { cpi->base_transfer_speed = 2000000; } else { cpi->base_transfer_speed = 1000000; } cpi->hba_inquiry = PI_TAG_ABLE; cpi->transport = XPORT_FC; cpi->transport_version = 0; cpi->xport_specific.fc.wwnn = fcp->isp_wwnn; cpi->xport_specific.fc.wwpn = fcp->isp_wwpn; cpi->xport_specific.fc.port = fcp->isp_portid; cpi->xport_specific.fc.bitrate = fcp->isp_gbspeed * 1000; } else { sdparam *sdp = SDPARAM(isp, bus); cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->hba_misc = PIM_UNMAPPED; cpi->initiator_id = sdp->isp_initiator_id; cpi->base_transfer_speed = 3300; cpi->transport = XPORT_SPI; cpi->transport_version = 2; } cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } void isp_done(XS_T *sccb) { ispsoftc_t *isp = XS_ISP(sccb); uint32_t status; if (XS_NOERR(sccb)) XS_SETERR(sccb, CAM_REQ_CMP); if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (sccb->scsi_status != SCSI_STATUS_OK)) { sccb->ccb_h.status &= ~CAM_STATUS_MASK; if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; } else { sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; } } sccb->ccb_h.status &= ~CAM_SIM_QUEUED; status = sccb->ccb_h.status & CAM_STATUS_MASK; if (status != CAM_REQ_CMP && (sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { sccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(sccb->ccb_h.path, 1); } if (ISP_PCMD(sccb)) { if 
(callout_active(&PISP_PCMD(sccb)->wdog)) callout_stop(&PISP_PCMD(sccb)->wdog); isp_free_pcmd(isp, (union ccb *) sccb); } xpt_done((union ccb *) sccb); } void isp_async(ispsoftc_t *isp, ispasync_t cmd, ...) { int bus; static const char prom[] = "Chan %d [%d] WWPN 0x%16jx PortID 0x%06x handle 0x%x %s %s"; char buf[64]; char *msg = NULL; target_id_t tgt = 0; fcportdb_t *lp; struct isp_fc *fc; struct cam_path *tmppath; struct ac_contract ac; struct ac_device_changed *adc; va_list ap; switch (cmd) { case ISPASYNC_NEW_TGT_PARAMS: { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; int flags, tgt; sdparam *sdp; struct ccb_trans_settings cts; memset(&cts, 0, sizeof (struct ccb_trans_settings)); va_start(ap, cmd); bus = va_arg(ap, int); tgt = va_arg(ap, int); va_end(ap); sdp = SDPARAM(isp, bus); if (xpt_create_path(&tmppath, NULL, cam_sim_path(ISP_SPI_PC(isp, bus)->sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { isp_prt(isp, ISP_LOGWARN, "isp_async cannot make temp path for %d.%d", tgt, bus); break; } flags = sdp->isp_devparam[tgt].actv_flags; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.protocol = PROTO_SCSI; cts.transport = XPORT_SPI; scsi = &cts.proto_specific.scsi; spi = &cts.xport_specific.spi; if (flags & DPARM_TQING) { scsi->valid |= CTS_SCSI_VALID_TQ; scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } if (flags & DPARM_DISC) { spi->valid |= CTS_SPI_VALID_DISC; spi->flags |= CTS_SPI_FLAGS_DISC_ENB; } spi->flags |= CTS_SPI_VALID_BUS_WIDTH; if (flags & DPARM_WIDE) { spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else { spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } if (flags & DPARM_SYNC) { spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->sync_period = sdp->isp_devparam[tgt].actv_period; spi->sync_offset = sdp->isp_devparam[tgt].actv_offset; } isp_prt(isp, ISP_LOGDEBUG2, "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", bus, tgt, sdp->isp_devparam[tgt].actv_period, sdp->isp_devparam[tgt].actv_offset, flags); xpt_setup_ccb(&cts.ccb_h, tmppath, 1); xpt_async(AC_TRANSFER_NEG, tmppath, &cts); xpt_free_path(tmppath); break; } case ISPASYNC_BUS_RESET: { va_start(ap, cmd); bus = va_arg(ap, int); va_end(ap); isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", bus); if (IS_FC(isp)) { xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, NULL); } else { xpt_async(AC_BUS_RESET, ISP_SPI_PC(isp, bus)->path, NULL); } break; } case ISPASYNC_LOOP_RESET: { uint16_t lipp; fcparam *fcp; va_start(ap, cmd); bus = va_arg(ap, int); va_end(ap); lipp = ISP_READ(isp, OUTMAILBOX1); fcp = FCPARAM(isp, bus); isp_prt(isp, ISP_LOGINFO, "Chan %d LOOP Reset, LIP primitive %x", bus, lipp); /* * Per FCP-4, a Reset LIP should result in a CRN reset. Other * LIPs and loop up/down events should never reset the CRN. For * an as of yet unknown reason, 24xx series cards (and * potentially others) can interrupt with a LIP Reset status * when no LIP reset came down the wire. Additionally, the LIP * primitive accompanying this status would not be a valid LIP * Reset primitive, but some variation of an invalid AL_PA * LIP. As a result, we have to verify the AL_PD in the LIP * addresses our port before blindly resetting. 
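* That is what the FCP_IS_DEST_ALPD() check below does with the low byte of the LIP primitive read from OUTMAILBOX1.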
*/ if (FCP_IS_DEST_ALPD(fcp, (lipp & 0x00FF))) isp_fcp_reset_crn(isp, bus, /*tgt*/0, /*tgt_set*/ 0); isp_loop_changed(isp, bus); break; } case ISPASYNC_LIP: if (msg == NULL) msg = "LIP Received"; /* FALLTHROUGH */ case ISPASYNC_LOOP_DOWN: if (msg == NULL) msg = "LOOP Down"; /* FALLTHROUGH */ case ISPASYNC_LOOP_UP: if (msg == NULL) msg = "LOOP Up"; va_start(ap, cmd); bus = va_arg(ap, int); va_end(ap); isp_loop_changed(isp, bus); isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg); break; case ISPASYNC_DEV_ARRIVED: va_start(ap, cmd); bus = va_arg(ap, int); lp = va_arg(ap, fcportdb_t *); va_end(ap); fc = ISP_FC_PC(isp, bus); tgt = FC_PORTDB_TGT(isp, bus, lp); isp_gen_role_str(buf, sizeof (buf), lp->prli_word3); isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "arrived"); if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) && (lp->prli_word3 & PRLI_WD3_TARGET_FUNCTION)) { lp->is_target = 1; isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); isp_make_here(isp, lp, bus, tgt); } if ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) && (lp->prli_word3 & PRLI_WD3_INITIATOR_FUNCTION)) { lp->is_initiator = 1; ac.contract_number = AC_CONTRACT_DEV_CHG; adc = (struct ac_device_changed *) ac.contract_data; adc->wwpn = lp->port_wwn; adc->port = lp->portid; adc->target = tgt; adc->arrived = 1; xpt_async(AC_CONTRACT, fc->path, &ac); } break; case ISPASYNC_DEV_CHANGED: case ISPASYNC_DEV_STAYED: { int crn_reset_done; crn_reset_done = 0; va_start(ap, cmd); bus = va_arg(ap, int); lp = va_arg(ap, fcportdb_t *); va_end(ap); fc = ISP_FC_PC(isp, bus); tgt = FC_PORTDB_TGT(isp, bus, lp); isp_gen_role_str(buf, sizeof (buf), lp->new_prli_word3); if (cmd == ISPASYNC_DEV_CHANGED) isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->new_portid, lp->handle, buf, "changed"); else isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "stayed"); if (lp->is_target != ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) && (lp->new_prli_word3 & PRLI_WD3_TARGET_FUNCTION))) { lp->is_target = !lp->is_target; if (lp->is_target) { if (cmd == ISPASYNC_DEV_CHANGED) { isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); crn_reset_done = 1; } isp_make_here(isp, lp, bus, tgt); } else { isp_make_gone(isp, lp, bus, tgt); if (cmd == ISPASYNC_DEV_CHANGED) { isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); crn_reset_done = 1; } } } if (lp->is_initiator != ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) && (lp->new_prli_word3 & PRLI_WD3_INITIATOR_FUNCTION))) { lp->is_initiator = !lp->is_initiator; ac.contract_number = AC_CONTRACT_DEV_CHG; adc = (struct ac_device_changed *) ac.contract_data; adc->wwpn = lp->port_wwn; adc->port = lp->portid; adc->target = tgt; adc->arrived = lp->is_initiator; xpt_async(AC_CONTRACT, fc->path, &ac); } if ((cmd == ISPASYNC_DEV_CHANGED) && (crn_reset_done == 0)) isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); break; } case ISPASYNC_DEV_GONE: va_start(ap, cmd); bus = va_arg(ap, int); lp = va_arg(ap, fcportdb_t *); va_end(ap); fc = ISP_FC_PC(isp, bus); tgt = FC_PORTDB_TGT(isp, bus, lp); /* * If this has a virtual target or initiator set the isp_gdt * timer running on it to delay its departure. 
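* The entry is moved to the ZOMBIE state with gone_timer loaded from the channel's gone_device_time; isp_gdt_task() decrements that count once a second and only reports the departure (and recycles the entry) when it reaches zero.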
*/ isp_gen_role_str(buf, sizeof (buf), lp->prli_word3); if (lp->is_target || lp->is_initiator) { lp->state = FC_PORTDB_STATE_ZOMBIE; lp->gone_timer = fc->gone_device_time; isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone zombie"); if (fc->ready && !callout_active(&fc->gdt)) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Starting Gone Device Timer with %u seconds time now %lu", bus, lp->gone_timer, (unsigned long)time_uptime); callout_reset(&fc->gdt, hz, isp_gdt, fc); } break; } isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone"); break; case ISPASYNC_CHANGE_NOTIFY: { char *msg; int evt, nphdl, nlstate, portid, reason; va_start(ap, cmd); bus = va_arg(ap, int); evt = va_arg(ap, int); if (evt == ISPASYNC_CHANGE_PDB) { nphdl = va_arg(ap, int); nlstate = va_arg(ap, int); reason = va_arg(ap, int); } else if (evt == ISPASYNC_CHANGE_SNS) { portid = va_arg(ap, int); } else { nphdl = NIL_HANDLE; nlstate = reason = 0; } va_end(ap); if (evt == ISPASYNC_CHANGE_PDB) { int tgt_set = 0; msg = "Port Database Changed"; isp_prt(isp, ISP_LOGINFO, "Chan %d %s (nphdl 0x%x state 0x%x reason 0x%x)", bus, msg, nphdl, nlstate, reason); /* * Port database syncs are not sufficient for * determining that logins or logouts are done on the * loop, but this information is directly available from * the reason code from the incoming mbox. We must reset * the fcp crn on these events according to FCP-4 */ switch (reason) { case PDB24XX_AE_IMPL_LOGO_1: case PDB24XX_AE_IMPL_LOGO_2: case PDB24XX_AE_IMPL_LOGO_3: case PDB24XX_AE_PLOGI_RCVD: case PDB24XX_AE_PRLI_RCVD: case PDB24XX_AE_PRLO_RCVD: case PDB24XX_AE_LOGO_RCVD: case PDB24XX_AE_PLOGI_DONE: case PDB24XX_AE_PRLI_DONE: /* * If the event is not global, twiddle tgt and * tgt_set to nominate only the target * associated with the nphdl. */ if (nphdl != PDB24XX_AE_GLOBAL) { /* Break if we don't yet have the pdb */ if (!isp_find_pdb_by_handle(isp, bus, nphdl, &lp)) break; tgt = FC_PORTDB_TGT(isp, bus, lp); tgt_set = 1; } isp_fcp_reset_crn(isp, bus, tgt, tgt_set); break; default: break; /* NOP */ } } else if (evt == ISPASYNC_CHANGE_SNS) { msg = "Name Server Database Changed"; isp_prt(isp, ISP_LOGINFO, "Chan %d %s (PortID 0x%06x)", bus, msg, portid); } else { msg = "Other Change Notify"; isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg); } isp_loop_changed(isp, bus); break; } #ifdef ISP_TARGET_MODE case ISPASYNC_TARGET_NOTIFY: { isp_notify_t *notify; va_start(ap, cmd); notify = va_arg(ap, isp_notify_t *); va_end(ap); switch (notify->nt_ncode) { case NT_ABORT_TASK: case NT_ABORT_TASK_SET: case NT_CLEAR_ACA: case NT_CLEAR_TASK_SET: case NT_LUN_RESET: case NT_TARGET_RESET: case NT_QUERY_TASK_SET: case NT_QUERY_ASYNC_EVENT: /* * These are task management functions. */ isp_handle_platform_target_tmf(isp, notify); break; case NT_BUS_RESET: case NT_LIP_RESET: case NT_LINK_UP: case NT_LINK_DOWN: case NT_HBA_RESET: /* * No action need be taken here. 
*/ break; case NT_GLOBAL_LOGOUT: case NT_LOGOUT: /* * This is device arrival/departure notification */ isp_handle_platform_target_notify_ack(isp, notify, 0); break; case NT_SRR: isp_handle_platform_srr(isp, notify); break; default: isp_prt(isp, ISP_LOGALL, "target notify code 0x%x", notify->nt_ncode); isp_handle_platform_target_notify_ack(isp, notify, 0); break; } break; } case ISPASYNC_TARGET_NOTIFY_ACK: { void *inot; va_start(ap, cmd); inot = va_arg(ap, void *); va_end(ap); if (isp_notify_ack(isp, inot)) { isp_tna_t *tp = malloc(sizeof (*tp), M_DEVBUF, M_NOWAIT); if (tp) { tp->isp = isp; memcpy(tp->data, inot, sizeof (tp->data)); tp->not = tp->data; callout_init_mtx(&tp->timer, &isp->isp_lock, 0); callout_reset(&tp->timer, 5, isp_refire_notify_ack, tp); } else { isp_prt(isp, ISP_LOGERR, "you lose- cannot allocate a notify refire"); } } break; } case ISPASYNC_TARGET_ACTION: { isphdr_t *hp; va_start(ap, cmd); hp = va_arg(ap, isphdr_t *); va_end(ap); switch (hp->rqs_entry_type) { case RQSTYPE_ATIO: isp_handle_platform_atio7(isp, (at7_entry_t *) hp); break; case RQSTYPE_ATIO2: isp_handle_platform_atio2(isp, (at2_entry_t *) hp); break; case RQSTYPE_CTIO7: case RQSTYPE_CTIO3: case RQSTYPE_CTIO2: case RQSTYPE_CTIO: isp_handle_platform_ctio(isp, hp); break; default: isp_prt(isp, ISP_LOGWARN, "%s: unhandled target action 0x%x", __func__, hp->rqs_entry_type); break; } break; } #endif case ISPASYNC_FW_CRASH: { uint16_t mbox1, mbox6; mbox1 = ISP_READ(isp, OUTMAILBOX1); if (IS_DUALBUS(isp)) { mbox6 = ISP_READ(isp, OUTMAILBOX6); } else { mbox6 = 0; } isp_prt(isp, ISP_LOGERR, "Internal Firmware Error on bus %d @ RISC Address 0x%x", mbox6, mbox1); #if 0 mbox1 = isp->isp_osinfo.mbox_sleep_ok; isp->isp_osinfo.mbox_sleep_ok = 0; isp_reinit(isp, 1); isp->isp_osinfo.mbox_sleep_ok = mbox1; isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); #endif break; } default: isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); break; } } uint64_t isp_default_wwn(ispsoftc_t * isp, int chan, int isactive, int iswwnn) { uint64_t seed; struct isp_fc *fc = ISP_FC_PC(isp, chan); /* First try to use explicitly configured WWNs. */ seed = iswwnn ? fc->def_wwnn : fc->def_wwpn; if (seed) return (seed); /* Otherwise try to use WWNs from NVRAM. */ if (isactive) { seed = iswwnn ? FCPARAM(isp, chan)->isp_wwnn_nvram : FCPARAM(isp, chan)->isp_wwpn_nvram; if (seed) return (seed); } /* If still no WWNs, try to steal them from the first channel. */ if (chan > 0) { seed = iswwnn ? ISP_FC_PC(isp, 0)->def_wwnn : ISP_FC_PC(isp, 0)->def_wwpn; if (seed == 0) { seed = iswwnn ? FCPARAM(isp, 0)->isp_wwnn_nvram : FCPARAM(isp, 0)->isp_wwpn_nvram; } } /* If still nothing -- improvise. */ if (seed == 0) { seed = 0x400000007F000000ull + device_get_unit(isp->isp_dev); if (!iswwnn) seed ^= 0x0100000000000000ULL; } /* For additional channels we have to improvise even more. */ if (!iswwnn && chan > 0) { /* * We'll stick our channel number plus one first into bits * 57..59 and thence into bits 52..55 which allows for 8 bits * of channel which is enough for our maximum of 255 channels. */ seed ^= 0x0100000000000000ULL; seed ^= ((uint64_t) (chan + 1) & 0xf) << 56; seed ^= ((uint64_t) ((chan + 1) >> 4) & 0xf) << 52; } return (seed); } void isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...) 
{ int loc; char lbuf[200]; va_list ap; if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { return; } snprintf(lbuf, sizeof (lbuf), "%s: ", device_get_nameunit(isp->isp_dev)); loc = strlen(lbuf); va_start(ap, fmt); vsnprintf(&lbuf[loc], sizeof (lbuf) - loc - 1, fmt, ap); va_end(ap); printf("%s\n", lbuf); } void isp_xs_prt(ispsoftc_t *isp, XS_T *xs, int level, const char *fmt, ...) { va_list ap; if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { return; } xpt_print_path(xs->ccb_h.path); va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); printf("\n"); } uint64_t isp_nanotime_sub(struct timespec *b, struct timespec *a) { uint64_t elapsed; struct timespec x; timespecsub(b, a, &x); elapsed = GET_NANOSEC(&x); if (elapsed == 0) elapsed++; return (elapsed); } int isp_mbox_acquire(ispsoftc_t *isp) { if (isp->isp_osinfo.mboxbsy) { return (1); } else { isp->isp_osinfo.mboxcmd_done = 0; isp->isp_osinfo.mboxbsy = 1; return (0); } } void isp_mbox_wait_complete(ispsoftc_t *isp, mbreg_t *mbp) { u_int t, to; to = (mbp->timeout == 0) ? MBCMD_DEFAULT_TIMEOUT : mbp->timeout; if (isp->isp_osinfo.mbox_sleep_ok) { isp->isp_osinfo.mbox_sleep_ok = 0; isp->isp_osinfo.mbox_sleeping = 1; msleep_sbt(&isp->isp_osinfo.mboxcmd_done, &isp->isp_lock, PRIBIO, "ispmbx_sleep", to * SBT_1US, 0, 0); isp->isp_osinfo.mbox_sleep_ok = 1; isp->isp_osinfo.mbox_sleeping = 0; } else { for (t = 0; t < to; t += 100) { if (isp->isp_osinfo.mboxcmd_done) break; ISP_RUN_ISR(isp); if (isp->isp_osinfo.mboxcmd_done) break; ISP_DELAY(100); } } if (isp->isp_osinfo.mboxcmd_done == 0) { isp_prt(isp, ISP_LOGWARN, "%s Mailbox Command (0x%x) Timeout (%uus) (%s:%d)", isp->isp_osinfo.mbox_sleep_ok? "Interrupting" : "Polled", isp->isp_lastmbxcmd, to, mbp->func, mbp->lineno); mbp->param[0] = MBOX_TIMEOUT; isp->isp_osinfo.mboxcmd_done = 1; } } void isp_mbox_notify_done(ispsoftc_t *isp) { isp->isp_osinfo.mboxcmd_done = 1; if (isp->isp_osinfo.mbox_sleeping) wakeup(&isp->isp_osinfo.mboxcmd_done); } void isp_mbox_release(ispsoftc_t *isp) { isp->isp_osinfo.mboxbsy = 0; } int isp_fc_scratch_acquire(ispsoftc_t *isp, int chan) { int ret = 0; if (isp->isp_osinfo.pc.fc[chan].fcbsy) { ret = -1; } else { isp->isp_osinfo.pc.fc[chan].fcbsy = 1; } return (ret); } void isp_platform_intr(void *arg) { ispsoftc_t *isp = arg; ISP_LOCK(isp); ISP_RUN_ISR(isp); ISP_UNLOCK(isp); } void isp_platform_intr_resp(void *arg) { ispsoftc_t *isp = arg; ISP_LOCK(isp); isp_intr_respq(isp); ISP_UNLOCK(isp); /* We have handshake enabled, so explicitly complete interrupt */ ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); } void isp_platform_intr_atio(void *arg) { ispsoftc_t *isp = arg; ISP_LOCK(isp); #ifdef ISP_TARGET_MODE isp_intr_atioq(isp); #endif ISP_UNLOCK(isp); /* We have handshake enabled, so explicitly complete interrupt */ ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); } void isp_common_dmateardown(ispsoftc_t *isp, struct ccb_scsiio *csio, uint32_t hdl) { if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTREAD); } else { bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); } /* * Reset the command reference number for all LUNs on a specific target * (needed when a target arrives again) or for all targets on a port * (needed for events like a LIP). 
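* A tgt_set of zero clears the CRN seed of every nexus on the channel; a non-zero tgt_set restricts the reset to nexuses whose target matches tgt.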
*/ void isp_fcp_reset_crn(ispsoftc_t *isp, int chan, uint32_t tgt, int tgt_set) { struct isp_fc *fc = ISP_FC_PC(isp, chan); struct isp_nexus *nxp; int i; if (tgt_set == 0) isp_prt(isp, ISP_LOGDEBUG0, "Chan %d resetting CRN on all targets", chan); else isp_prt(isp, ISP_LOGDEBUG0, "Chan %d resetting CRN on target %u", chan, tgt); for (i = 0; i < NEXUS_HASH_WIDTH; i++) { for (nxp = fc->nexus_hash[i]; nxp != NULL; nxp = nxp->next) { if (tgt_set == 0 || tgt == nxp->tgt) nxp->crnseed = 0; } } } int isp_fcp_next_crn(ispsoftc_t *isp, uint8_t *crnp, XS_T *cmd) { lun_id_t lun; uint32_t chan, tgt; struct isp_fc *fc; struct isp_nexus *nxp; int idx; if (IS_2100(isp)) return (0); chan = XS_CHANNEL(cmd); tgt = XS_TGT(cmd); lun = XS_LUN(cmd); fc = &isp->isp_osinfo.pc.fc[chan]; idx = NEXUS_HASH(tgt, lun); nxp = fc->nexus_hash[idx]; while (nxp) { if (nxp->tgt == tgt && nxp->lun == lun) break; nxp = nxp->next; } if (nxp == NULL) { nxp = fc->nexus_free_list; if (nxp == NULL) { nxp = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_ZERO|M_NOWAIT); if (nxp == NULL) { return (-1); } } else { fc->nexus_free_list = nxp->next; } nxp->tgt = tgt; nxp->lun = lun; nxp->next = fc->nexus_hash[idx]; fc->nexus_hash[idx] = nxp; } if (nxp->crnseed == 0) nxp->crnseed = 1; *crnp = nxp->crnseed++; return (0); } /* * We enter with the lock held */ void isp_timer(void *arg) { ispsoftc_t *isp = arg; #ifdef ISP_TARGET_MODE isp_tmcmd_restart(isp); #endif callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp); } isp_ecmd_t * isp_get_ecmd(ispsoftc_t *isp) { isp_ecmd_t *ecmd = isp->isp_osinfo.ecmd_free; if (ecmd) { isp->isp_osinfo.ecmd_free = ecmd->next; } return (ecmd); } void isp_put_ecmd(ispsoftc_t *isp, isp_ecmd_t *ecmd) { ecmd->next = isp->isp_osinfo.ecmd_free; isp->isp_osinfo.ecmd_free = ecmd; } Index: head/sys/dev/isp/isp_freebsd.h =================================================================== --- head/sys/dev/isp/isp_freebsd.h (revision 367043) +++ head/sys/dev/isp/isp_freebsd.h (revision 367044) @@ -1,734 +1,735 @@ /* $FreeBSD$ */ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Qlogic ISP SCSI Host Adapter FreeBSD Wrapper Definitions * * Copyright (c) 1997-2008 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef _ISP_FREEBSD_H #define _ISP_FREEBSD_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_ddb.h" #include "opt_isp.h" #define ISP_PLATFORM_VERSION_MAJOR 7 #define ISP_PLATFORM_VERSION_MINOR 10 /* * Efficiency- get rid of SBus code && tests unless we need them. */ #define ISP_SBUS_SUPPORTED 0 #define ISP_IFLAGS INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE #define N_XCMDS 64 #define XCMD_SIZE 512 struct ispsoftc; typedef union isp_ecmd { union isp_ecmd * next; uint8_t data[XCMD_SIZE]; } isp_ecmd_t; isp_ecmd_t * isp_get_ecmd(struct ispsoftc *); void isp_put_ecmd(struct ispsoftc *, isp_ecmd_t *); #ifdef ISP_TARGET_MODE #define ATPDPSIZE 4096 #define ATPDPHASHSIZE 32 #define ATPDPHASH(x) ((((x) >> 24) ^ ((x) >> 16) ^ ((x) >> 8) ^ (x)) & \ ((ATPDPHASHSIZE) - 1)) #include typedef struct atio_private_data { LIST_ENTRY(atio_private_data) next; uint32_t orig_datalen; uint32_t bytes_xfered; uint32_t bytes_in_transit; uint32_t tag; /* typically f/w RX_ID */ lun_id_t lun; uint32_t nphdl; uint32_t sid; uint32_t did; uint16_t rxid; /* wire rxid */ uint16_t oxid; /* wire oxid */ uint16_t word3; /* PRLI word3 params */ uint16_t ctcnt; /* number of CTIOs currently active */ uint8_t seqno; /* CTIO sequence number */ uint32_t srr_notify_rcvd : 1, cdb0 : 8, sendst : 1, dead : 1, tattr : 3, state : 3; void * ests; /* * The current SRR notify copy */ uint8_t srr[64]; /* sb QENTRY_LEN, but order of definitions is wrong */ void * srr_ccb; uint32_t nsrr; } atio_private_data_t; #define ATPD_STATE_FREE 0 #define ATPD_STATE_ATIO 1 #define ATPD_STATE_CAM 2 #define ATPD_STATE_CTIO 3 #define ATPD_STATE_LAST_CTIO 4 #define ATPD_STATE_PDON 5 #define ATPD_CCB_OUTSTANDING 16 #define ATPD_SEQ_MASK 0x7f #define ATPD_SEQ_NOTIFY_CAM 0x80 #define ATPD_SET_SEQNO(hdrp, atp) ((isphdr_t *)hdrp)->rqs_seqno &= ~ATPD_SEQ_MASK, ((isphdr_t *)hdrp)->rqs_seqno |= (atp)->seqno #define ATPD_GET_SEQNO(hdrp) (((isphdr_t *)hdrp)->rqs_seqno & ATPD_SEQ_MASK) #define ATPD_GET_NCAM(hdrp) ((((isphdr_t *)hdrp)->rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0) typedef struct inot_private_data inot_private_data_t; struct inot_private_data { STAILQ_ENTRY(inot_private_data) next; isp_notify_t nt; uint8_t data[64]; /* sb QENTRY_LEN, but order of definitions is wrong */ uint32_t tag_id, seq_id; }; typedef struct isp_timed_notify_ack { void *isp; void *not; uint8_t data[64]; /* sb QENTRY_LEN, but order of definitions is wrong */ struct callout timer; } isp_tna_t; STAILQ_HEAD(ntpdlist, inot_private_data); typedef struct tstate { SLIST_ENTRY(tstate) next; lun_id_t ts_lun; struct ccb_hdr_slist atios; struct ccb_hdr_slist inots; struct ntpdlist restart_queue; } tstate_t; #define LUN_HASH_SIZE 32 #define LUN_HASH_FUNC(lun) ((lun) & (LUN_HASH_SIZE - 1)) #endif /* * Per command info. */ struct isp_pcmd { struct isp_pcmd * next; bus_dmamap_t dmap; /* dma map for this command */ struct callout wdog; /* watchdog timer */ uint32_t datalen; /* data length for this command (target mode only) */ }; #define ISP_PCMD(ccb) (ccb)->ccb_h.spriv_ptr1 #define PISP_PCMD(ccb) ((struct isp_pcmd *)ISP_PCMD(ccb)) /* * Per nexus info. 
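* crnseed is the next FCP command reference number for this I_T_L nexus; isp_fcp_next_crn() skips 0 when the 8-bit counter wraps. Nexuses live in NEXUS_HASH_WIDTH (32) buckets hashed by (tgt + lun) % 32, so e.g. target 5 LUN 2 lands in bucket 7.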
*/ struct isp_nexus { uint64_t lun; /* LUN for target */ uint32_t tgt; /* TGT for target */ uint8_t crnseed; /* next command reference number */ struct isp_nexus *next; }; #define NEXUS_HASH_WIDTH 32 #define INITIAL_NEXUS_COUNT MAX_FC_TARG #define NEXUS_HASH(tgt, lun) ((tgt + lun) % NEXUS_HASH_WIDTH) /* * Per channel information */ SLIST_HEAD(tslist, tstate); TAILQ_HEAD(isp_ccbq, ccb_hdr); LIST_HEAD(atpdlist, atio_private_data); struct isp_fc { struct cam_sim *sim; struct cam_path *path; struct ispsoftc *isp; struct proc *kproc; bus_dmamap_t scmap; uint64_t def_wwpn; uint64_t def_wwnn; time_t loop_down_time; int loop_down_limit; int gone_device_time; /* * Per target/lun info- just to keep a per-ITL nexus crn count */ struct isp_nexus *nexus_hash[NEXUS_HASH_WIDTH]; struct isp_nexus *nexus_free_list; uint32_t simqfrozen : 3, default_id : 8, def_role : 2, /* default role */ loop_seen_once : 1, fcbsy : 1, ready : 1; struct callout gdt; /* gone device timer */ struct task gtask; #ifdef ISP_TARGET_MODE struct tslist lun_hash[LUN_HASH_SIZE]; struct isp_ccbq waitq; /* waiting CCBs */ struct ntpdlist ntfree; inot_private_data_t ntpool[ATPDPSIZE]; struct atpdlist atfree; struct atpdlist atused[ATPDPHASHSIZE]; atio_private_data_t atpool[ATPDPSIZE]; #if defined(DEBUG) unsigned int inject_lost_data_frame; #endif #endif int num_threads; }; struct isp_spi { struct cam_sim *sim; struct cam_path *path; uint32_t simqfrozen : 3, iid : 4; #ifdef ISP_TARGET_MODE struct tslist lun_hash[LUN_HASH_SIZE]; struct isp_ccbq waitq; /* waiting CCBs */ struct ntpdlist ntfree; inot_private_data_t ntpool[ATPDPSIZE]; struct atpdlist atfree; struct atpdlist atused[ATPDPHASHSIZE]; atio_private_data_t atpool[ATPDPSIZE]; #endif int num_threads; }; struct isposinfo { /* * Linkage, locking, and identity */ struct mtx lock; device_t dev; struct cdev * cdev; struct cam_devq * devq; /* * Firmware pointer */ const struct firmware * fw; /* * DMA related stuff */ struct resource * regs; struct resource * regs2; bus_dma_tag_t dmat; bus_dma_tag_t reqdmat; bus_dma_tag_t respdmat; bus_dma_tag_t atiodmat; bus_dma_tag_t iocbdmat; bus_dma_tag_t scdmat; bus_dmamap_t reqmap; bus_dmamap_t respmap; bus_dmamap_t atiomap; bus_dmamap_t iocbmap; /* * Command and transaction related related stuff */ struct isp_pcmd * pcmd_pool; struct isp_pcmd * pcmd_free; int mbox_sleeping; int mbox_sleep_ok; int mboxbsy; int mboxcmd_done; struct callout tmo; /* general timer */ /* * misc- needs to be sorted better XXXXXX */ int framesize; int exec_throttle; int cont_max; bus_addr_t ecmd_dma; isp_ecmd_t * ecmd_base; isp_ecmd_t * ecmd_free; /* * Per-type private storage... */ union { struct isp_fc *fc; struct isp_spi *spi; void *ptr; } pc; int is_exiting; }; #define ISP_FC_PC(isp, chan) (&(isp)->isp_osinfo.pc.fc[(chan)]) #define ISP_SPI_PC(isp, chan) (&(isp)->isp_osinfo.pc.spi[(chan)]) #define ISP_GET_PC(isp, chan, tag, rslt) \ if (IS_SCSI(isp)) { \ rslt = ISP_SPI_PC(isp, chan)-> tag; \ } else { \ rslt = ISP_FC_PC(isp, chan)-> tag; \ } #define ISP_GET_PC_ADDR(isp, chan, tag, rp) \ if (IS_SCSI(isp)) { \ rp = &ISP_SPI_PC(isp, chan)-> tag; \ } else { \ rp = &ISP_FC_PC(isp, chan)-> tag; \ } #define ISP_SET_PC(isp, chan, tag, val) \ if (IS_SCSI(isp)) { \ ISP_SPI_PC(isp, chan)-> tag = val; \ } else { \ ISP_FC_PC(isp, chan)-> tag = val; \ } #define FCP_NEXT_CRN isp_fcp_next_crn #define isp_lock isp_osinfo.lock #define isp_regs isp_osinfo.regs #define isp_regs2 isp_osinfo.regs2 /* * Locking macros... 
*/ #define ISP_LOCK(isp) mtx_lock(&(isp)->isp_lock) #define ISP_UNLOCK(isp) mtx_unlock(&(isp)->isp_lock) #define ISP_ASSERT_LOCKED(isp) mtx_assert(&(isp)->isp_lock, MA_OWNED) /* * Required Macros/Defines */ #define ISP_FC_SCRLEN 0x1000 #define ISP_MEMZERO(a, b) memset(a, 0, b) #define ISP_MEMCPY memcpy #define ISP_SNPRINTF snprintf #define ISP_DELAY(x) DELAY(x) #define ISP_SLEEP(isp, x) msleep_sbt(&(isp)->isp_osinfo.is_exiting, \ &(isp)->isp_lock, 0, "isp_sleep", (x) * SBT_1US, 0, 0) #define ISP_MIN imin #ifndef DIAGNOSTIC #define ISP_INLINE __inline #else #define ISP_INLINE #endif #define NANOTIME_T struct timespec #define GET_NANOTIME nanotime #define GET_NANOSEC(x) ((x)->tv_sec * 1000000000 + (x)->tv_nsec) #define NANOTIME_SUB isp_nanotime_sub #define MAXISPREQUEST(isp) ((IS_FC(isp) || IS_ULTRA2(isp))? 1024 : 256) #define MEMORYBARRIER(isp, type, offset, size, chan) \ switch (type) { \ case SYNC_REQUEST: \ bus_dmamap_sync(isp->isp_osinfo.reqdmat, \ isp->isp_osinfo.reqmap, BUS_DMASYNC_PREWRITE); \ break; \ case SYNC_RESULT: \ bus_dmamap_sync(isp->isp_osinfo.respdmat, \ isp->isp_osinfo.respmap, BUS_DMASYNC_POSTREAD); \ break; \ case SYNC_SFORDEV: \ { \ struct isp_fc *fc = ISP_FC_PC(isp, chan); \ bus_dmamap_sync(isp->isp_osinfo.scdmat, fc->scmap, \ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \ break; \ } \ case SYNC_SFORCPU: \ { \ struct isp_fc *fc = ISP_FC_PC(isp, chan); \ bus_dmamap_sync(isp->isp_osinfo.scdmat, fc->scmap, \ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); \ break; \ } \ case SYNC_REG: \ bus_barrier(isp->isp_osinfo.regs, offset, size, \ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); \ break; \ case SYNC_ATIOQ: \ bus_dmamap_sync(isp->isp_osinfo.atiodmat, \ isp->isp_osinfo.atiomap, BUS_DMASYNC_POSTREAD); \ break; \ case SYNC_IFORDEV: \ bus_dmamap_sync(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap, \ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \ break; \ case SYNC_IFORCPU: \ bus_dmamap_sync(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap, \ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); \ break; \ default: \ break; \ } #define MEMORYBARRIERW(isp, type, offset, size, chan) \ switch (type) { \ case SYNC_REQUEST: \ bus_dmamap_sync(isp->isp_osinfo.reqdmat, \ isp->isp_osinfo.reqmap, BUS_DMASYNC_PREWRITE); \ break; \ case SYNC_SFORDEV: \ { \ struct isp_fc *fc = ISP_FC_PC(isp, chan); \ bus_dmamap_sync(isp->isp_osinfo.scdmat, fc->scmap, \ BUS_DMASYNC_PREWRITE); \ break; \ } \ case SYNC_SFORCPU: \ { \ struct isp_fc *fc = ISP_FC_PC(isp, chan); \ bus_dmamap_sync(isp->isp_osinfo.scdmat, fc->scmap, \ BUS_DMASYNC_POSTWRITE); \ break; \ } \ case SYNC_REG: \ bus_barrier(isp->isp_osinfo.regs, offset, size, \ BUS_SPACE_BARRIER_WRITE); \ break; \ case SYNC_IFORDEV: \ bus_dmamap_sync(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap, \ BUS_DMASYNC_PREWRITE); \ break; \ case SYNC_IFORCPU: \ bus_dmamap_sync(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap, \ BUS_DMASYNC_POSTWRITE); \ break; \ default: \ break; \ } #define MBOX_ACQUIRE isp_mbox_acquire #define MBOX_WAIT_COMPLETE isp_mbox_wait_complete #define MBOX_NOTIFY_COMPLETE isp_mbox_notify_done #define MBOX_RELEASE isp_mbox_release #define FC_SCRATCH_ACQUIRE isp_fc_scratch_acquire #define FC_SCRATCH_RELEASE(isp, chan) isp->isp_osinfo.pc.fc[chan].fcbsy = 0 #ifndef SCSI_GOOD #define SCSI_GOOD SCSI_STATUS_OK #endif #ifndef SCSI_CHECK #define SCSI_CHECK SCSI_STATUS_CHECK_COND #endif #ifndef SCSI_BUSY #define SCSI_BUSY SCSI_STATUS_BUSY #endif #ifndef SCSI_QFULL #define SCSI_QFULL SCSI_STATUS_QUEUE_FULL #endif #define XS_T struct 
ccb_scsiio #define XS_DMA_ADDR_T bus_addr_t #define XS_GET_DMA64_SEG(a, b, c) \ { \ ispds64_t *d = a; \ bus_dma_segment_t *e = b; \ uint32_t f = c; \ e += f; \ d->ds_base = DMA_LO32(e->ds_addr); \ d->ds_basehi = DMA_HI32(e->ds_addr); \ d->ds_count = e->ds_len; \ } #define XS_GET_DMA_SEG(a, b, c) \ { \ ispds_t *d = a; \ bus_dma_segment_t *e = b; \ uint32_t f = c; \ e += f; \ d->ds_base = DMA_LO32(e->ds_addr); \ d->ds_count = e->ds_len; \ } #if (BUS_SPACE_MAXADDR > UINT32_MAX) #define XS_NEED_DMA64_SEG(s, n) \ (((bus_dma_segment_t *)s)[n].ds_addr + \ ((bus_dma_segment_t *)s)[n].ds_len > UINT32_MAX) #else #define XS_NEED_DMA64_SEG(s, n) (0) #endif #define XS_ISP(ccb) cam_sim_softc(xpt_path_sim((ccb)->ccb_h.path)) #define XS_CHANNEL(ccb) cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path)) #define XS_TGT(ccb) (ccb)->ccb_h.target_id #define XS_LUN(ccb) (ccb)->ccb_h.target_lun #define XS_CDBP(ccb) \ (((ccb)->ccb_h.flags & CAM_CDB_POINTER)? \ (ccb)->cdb_io.cdb_ptr : (ccb)->cdb_io.cdb_bytes) #define XS_CDBLEN(ccb) (ccb)->cdb_len #define XS_XFRLEN(ccb) (ccb)->dxfer_len #define XS_TIME(ccb) \ (((ccb)->ccb_h.timeout > 0xffff * 1000 - 999) ? 0 : \ (((ccb)->ccb_h.timeout + 999) / 1000)) #define XS_GET_RESID(ccb) (ccb)->resid #define XS_SET_RESID(ccb, r) (ccb)->resid = r #define XS_STSP(ccb) (&(ccb)->scsi_status) #define XS_SNSP(ccb) (&(ccb)->sense_data) #define XS_TOT_SNSLEN(ccb) ccb->sense_len #define XS_CUR_SNSLEN(ccb) (ccb->sense_len - ccb->sense_resid) #define XS_SNSKEY(ccb) (scsi_get_sense_key(&(ccb)->sense_data, \ ccb->sense_len - ccb->sense_resid, 1)) #define XS_SNSASC(ccb) (scsi_get_asc(&(ccb)->sense_data, \ ccb->sense_len - ccb->sense_resid, 1)) #define XS_SNSASCQ(ccb) (scsi_get_ascq(&(ccb)->sense_data, \ ccb->sense_len - ccb->sense_resid, 1)) #define XS_TAG_P(ccb) \ (((ccb)->ccb_h.flags & CAM_TAG_ACTION_VALID) && \ (ccb)->tag_action != CAM_TAG_ACTION_NONE) #define XS_TAG_TYPE(ccb) \ ((ccb->tag_action == MSG_SIMPLE_Q_TAG)? REQFLAG_STAG : \ ((ccb->tag_action == MSG_HEAD_OF_Q_TAG)? REQFLAG_HTAG : REQFLAG_OTAG)) - + +#define XS_PRIORITY(ccb) (ccb)->priority #define XS_SETERR(ccb, v) (ccb)->ccb_h.status &= ~CAM_STATUS_MASK, \ (ccb)->ccb_h.status |= v # define HBA_NOERROR CAM_REQ_INPROG # define HBA_BOTCH CAM_UNREC_HBA_ERROR # define HBA_CMDTIMEOUT CAM_CMD_TIMEOUT # define HBA_SELTIMEOUT CAM_SEL_TIMEOUT # define HBA_TGTBSY CAM_SCSI_STATUS_ERROR # define HBA_REQINVAL CAM_REQ_INVALID # define HBA_BUSRESET CAM_SCSI_BUS_RESET # define HBA_ABORTED CAM_REQ_ABORTED # define HBA_DATAOVR CAM_DATA_RUN_ERR # define HBA_ARQFAIL CAM_AUTOSENSE_FAIL #define XS_ERR(ccb) ((ccb)->ccb_h.status & CAM_STATUS_MASK) #define XS_NOERR(ccb) (((ccb)->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) #define XS_INITERR(ccb) XS_SETERR(ccb, CAM_REQ_INPROG), ccb->sense_resid = ccb->sense_len #define XS_SAVE_SENSE(xs, sp, len) do { \ uint32_t amt = min(len, (xs)->sense_len); \ memcpy(&(xs)->sense_data, sp, amt); \ (xs)->sense_resid = (xs)->sense_len - amt; \ (xs)->ccb_h.status |= CAM_AUTOSNS_VALID; \ } while (0) #define XS_SENSE_APPEND(xs, sp, len) do { \ uint8_t *ptr = (uint8_t *)(&(xs)->sense_data) + \ ((xs)->sense_len - (xs)->sense_resid); \ uint32_t amt = min((len), (xs)->sense_resid); \ memcpy(ptr, sp, amt); \ (xs)->sense_resid -= amt; \ } while (0) #define XS_SENSE_VALID(xs) (((xs)->ccb_h.status & CAM_AUTOSNS_VALID) != 0) #define DEFAULT_FRAMESIZE(isp) isp->isp_osinfo.framesize #define DEFAULT_EXEC_THROTTLE(isp) isp->isp_osinfo.exec_throttle #define DEFAULT_ROLE(isp, chan) \ (IS_FC(isp)? 
ISP_FC_PC(isp, chan)->def_role : ISP_ROLE_INITIATOR) #define DEFAULT_IID(isp, chan) isp->isp_osinfo.pc.spi[chan].iid #define DEFAULT_LOOPID(x, chan) isp->isp_osinfo.pc.fc[chan].default_id #define DEFAULT_NODEWWN(isp, chan) isp_default_wwn(isp, chan, 0, 1) #define DEFAULT_PORTWWN(isp, chan) isp_default_wwn(isp, chan, 0, 0) #define ACTIVE_NODEWWN(isp, chan) isp_default_wwn(isp, chan, 1, 1) #define ACTIVE_PORTWWN(isp, chan) isp_default_wwn(isp, chan, 1, 0) #if BYTE_ORDER == BIG_ENDIAN #ifdef ISP_SBUS_SUPPORTED #define ISP_IOXPUT_8(isp, s, d) *(d) = s #define ISP_IOXPUT_16(isp, s, d) \ *(d) = (isp->isp_bustype == ISP_BT_SBUS)? s : bswap16(s) #define ISP_IOXPUT_32(isp, s, d) \ *(d) = (isp->isp_bustype == ISP_BT_SBUS)? s : bswap32(s) #define ISP_IOXGET_8(isp, s, d) d = (*((uint8_t *)s)) #define ISP_IOXGET_16(isp, s, d) \ d = (isp->isp_bustype == ISP_BT_SBUS)? \ *((uint16_t *)s) : bswap16(*((uint16_t *)s)) #define ISP_IOXGET_32(isp, s, d) \ d = (isp->isp_bustype == ISP_BT_SBUS)? \ *((uint32_t *)s) : bswap32(*((uint32_t *)s)) #else /* ISP_SBUS_SUPPORTED */ #define ISP_IOXPUT_8(isp, s, d) *(d) = s #define ISP_IOXPUT_16(isp, s, d) *(d) = bswap16(s) #define ISP_IOXPUT_32(isp, s, d) *(d) = bswap32(s) #define ISP_IOXGET_8(isp, s, d) d = (*((uint8_t *)s)) #define ISP_IOXGET_16(isp, s, d) d = bswap16(*((uint16_t *)s)) #define ISP_IOXGET_32(isp, s, d) d = bswap32(*((uint32_t *)s)) #endif #define ISP_SWIZZLE_NVRAM_WORD(isp, rp) *rp = bswap16(*rp) #define ISP_SWIZZLE_NVRAM_LONG(isp, rp) *rp = bswap32(*rp) #define ISP_IOZGET_8(isp, s, d) d = (*((uint8_t *)s)) #define ISP_IOZGET_16(isp, s, d) d = (*((uint16_t *)s)) #define ISP_IOZGET_32(isp, s, d) d = (*((uint32_t *)s)) #define ISP_IOZPUT_8(isp, s, d) *(d) = s #define ISP_IOZPUT_16(isp, s, d) *(d) = s #define ISP_IOZPUT_32(isp, s, d) *(d) = s #else #define ISP_IOXPUT_8(isp, s, d) *(d) = s #define ISP_IOXPUT_16(isp, s, d) *(d) = s #define ISP_IOXPUT_32(isp, s, d) *(d) = s #define ISP_IOXGET_8(isp, s, d) d = *(s) #define ISP_IOXGET_16(isp, s, d) d = *(s) #define ISP_IOXGET_32(isp, s, d) d = *(s) #define ISP_SWIZZLE_NVRAM_WORD(isp, rp) #define ISP_SWIZZLE_NVRAM_LONG(isp, rp) #define ISP_IOZPUT_8(isp, s, d) *(d) = s #define ISP_IOZPUT_16(isp, s, d) *(d) = bswap16(s) #define ISP_IOZPUT_32(isp, s, d) *(d) = bswap32(s) #define ISP_IOZGET_8(isp, s, d) d = (*((uint8_t *)(s))) #define ISP_IOZGET_16(isp, s, d) d = bswap16(*((uint16_t *)(s))) #define ISP_IOZGET_32(isp, s, d) d = bswap32(*((uint32_t *)(s))) #endif #define ISP_SWAP16(isp, s) bswap16(s) #define ISP_SWAP32(isp, s) bswap32(s) /* * Includes of common header files */ #include #include #include /* * isp_osinfo definiitions && shorthand */ #define SIMQFRZ_RESOURCE 0x1 #define SIMQFRZ_LOOPDOWN 0x2 #define SIMQFRZ_TIMED 0x4 #define isp_dev isp_osinfo.dev /* * prototypes for isp_pci && isp_freebsd to share */ extern int isp_attach(ispsoftc_t *); extern int isp_detach(ispsoftc_t *); extern uint64_t isp_default_wwn(ispsoftc_t *, int, int, int); /* * driver global data */ extern int isp_announced; extern int isp_loop_down_limit; extern int isp_gone_device_time; extern int isp_quickboot_time; /* * Platform private flags */ /* * Platform Library Functions */ void isp_prt(ispsoftc_t *, int level, const char *, ...) __printflike(3, 4); void isp_xs_prt(ispsoftc_t *, XS_T *, int level, const char *, ...) 
__printflike(4, 5); uint64_t isp_nanotime_sub(struct timespec *, struct timespec *); int isp_mbox_acquire(ispsoftc_t *); void isp_mbox_wait_complete(ispsoftc_t *, mbreg_t *); void isp_mbox_notify_done(ispsoftc_t *); void isp_mbox_release(ispsoftc_t *); int isp_fc_scratch_acquire(ispsoftc_t *, int); void isp_platform_intr(void *); void isp_platform_intr_resp(void *); void isp_platform_intr_atio(void *); void isp_common_dmateardown(ispsoftc_t *, struct ccb_scsiio *, uint32_t); void isp_fcp_reset_crn(ispsoftc_t *, int, uint32_t, int); int isp_fcp_next_crn(ispsoftc_t *, uint8_t *, XS_T *); /* * Platform Version specific defines */ #define ISP_PATH_PRT(i, l, p, ...) \ if ((l) == ISP_LOGALL || ((l)& (i)->isp_dblev) != 0) { \ xpt_print(p, __VA_ARGS__); \ } /* * Platform specific inline functions */ /* * ISP General Library functions */ #include #endif /* _ISP_FREEBSD_H */ Index: head/sys/dev/isp/isp_stds.h =================================================================== --- head/sys/dev/isp/isp_stds.h (revision 367043) +++ head/sys/dev/isp/isp_stds.h (revision 367044) @@ -1,340 +1,343 @@ /* $FreeBSD$ */ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * Structures that derive directly from public standards. */ #ifndef _ISP_STDS_H #define _ISP_STDS_H /* * FC Frame Header * * Source: dpANS-X3.xxx-199x, section 18 (AKA FC-PH-2) * */ typedef struct { uint8_t r_ctl; uint8_t d_id[3]; uint8_t cs_ctl; uint8_t s_id[3]; uint8_t type; uint8_t f_ctl[3]; uint8_t seq_id; uint8_t df_ctl; uint16_t seq_cnt; uint16_t ox_id; uint16_t rx_id; uint32_t parameter; } fc_hdr_t; /* * FCP_CMND_IU Payload * * Source: NICTS T10, Project 1144D, Revision 07a, Section 9 (AKA fcp2-r07a) * * Notes: * When additional cdb length is defined in fcp_cmnd_alen_datadir, * bits 2..7, the actual cdb length is 16 + ((fcp_cmnd_alen_datadir>>2)*4), * with the datalength following in MSB format just after. 
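 * For example, an additional cdb length of 2 in those bits (the byte reads
 * 0x08 with a zero data direction field) describes a 16 + (2 * 4) = 24 byte
 * CDB.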
*/ typedef struct { uint8_t fcp_cmnd_lun[8]; uint8_t fcp_cmnd_crn; uint8_t fcp_cmnd_task_attribute; uint8_t fcp_cmnd_task_management; uint8_t fcp_cmnd_alen_datadir; union { struct { uint8_t fcp_cmnd_cdb[16]; uint32_t fcp_cmnd_dl; } sf; struct { uint8_t fcp_cmnd_cdb[1]; } lf; } cdb_dl; } fcp_cmnd_iu_t; #define FCP_CMND_TASK_ATTR_SIMPLE 0x00 #define FCP_CMND_TASK_ATTR_HEAD 0x01 #define FCP_CMND_TASK_ATTR_ORDERED 0x02 #define FCP_CMND_TASK_ATTR_ACA 0x04 #define FCP_CMND_TASK_ATTR_UNTAGGED 0x05 #define FCP_CMND_TASK_ATTR_MASK 0x07 +#define FCP_CMND_PRIO_MASK 0x78 +#define FCP_CMND_PRIO_SHIFT 3 + #define FCP_CMND_ADDTL_CDBLEN_SHIFT 2 #define FCP_CMND_DATA_WRITE 0x01 #define FCP_CMND_DATA_READ 0x02 #define FCP_CMND_DATA_DIR_MASK 0x03 #define FCP_CMND_TMF_CLEAR_ACA 0x40 #define FCP_CMND_TMF_TGT_RESET 0x20 #define FCP_CMND_TMF_LUN_RESET 0x10 #define FCP_CMND_TMF_QUERY_ASYNC_EVENT 0x08 #define FCP_CMND_TMF_CLEAR_TASK_SET 0x04 #define FCP_CMND_TMF_ABORT_TASK_SET 0x02 #define FCP_CMND_TMF_QUERY_TASK_SET 0x01 /* * Basic CT IU Header * * Source: X3.288-199x Generic Services 2 Rev 5.3 (FC-GS-2) Section 4.3.1 */ typedef struct { uint8_t ct_revision; uint8_t ct_in_id[3]; uint8_t ct_fcs_type; uint8_t ct_fcs_subtype; uint8_t ct_options; uint8_t ct_reserved0; uint16_t ct_cmd_resp; uint16_t ct_bcnt_resid; uint8_t ct_reserved1; uint8_t ct_reason; uint8_t ct_explanation; uint8_t ct_vunique; } ct_hdr_t; #define CT_REVISION 1 #define CT_FC_TYPE_FC 0xFC #define CT_FC_SUBTYPE_NS 0x02 /* * RFT_ID Requet CT_IU * * Source: NCITS xxx-200x Generic Services- 5 Rev 8.5 Section 5.2.5.30 */ typedef struct { ct_hdr_t rftid_hdr; uint8_t rftid_reserved; uint8_t rftid_portid[3]; uint32_t rftid_fc4types[8]; } rft_id_t; /* * RSPN_ID Requet CT_IU * * Source: INCITS 463-2010 Generic Services 6 Section 5.2.5.32 */ typedef struct { ct_hdr_t rspnid_hdr; uint8_t rspnid_reserved; uint8_t rspnid_portid[3]; uint8_t rspnid_length; uint8_t rspnid_name[0]; } rspn_id_t; /* * RFF_ID Requet CT_IU * * Source: INCITS 463-2010 Generic Services 6 Section 5.2.5.34 */ typedef struct { ct_hdr_t rffid_hdr; uint8_t rffid_reserved; uint8_t rffid_portid[3]; uint16_t rffid_reserved2; uint8_t rffid_fc4features; uint8_t rffid_fc4type; } rff_id_t; /* * RSNN_NN Requet CT_IU * * Source: INCITS 463-2010 Generic Services 6 Section 5.2.5.35 */ typedef struct { ct_hdr_t rsnnnn_hdr; uint8_t rsnnnn_nodename[8]; uint8_t rsnnnn_length; uint8_t rsnnnn_name[0]; } rsnn_nn_t; /* * FCP Response IU and bits of interest * Source: NCITS T10, Project 1828D, Revision 02b (aka FCP4r02b) */ typedef struct { uint8_t fcp_rsp_reserved[8]; uint16_t fcp_rsp_status_qualifier; /* SAM-5 Status Qualifier */ uint8_t fcp_rsp_bits; uint8_t fcp_rsp_scsi_status; /* SAM-5 SCSI Status Byte */ uint32_t fcp_rsp_resid; uint32_t fcp_rsp_snslen; uint32_t fcp_rsp_rsplen; /* * In the bytes that follow, it's going to be * FCP RESPONSE INFO (max 8 bytes, possibly 0) * FCP SENSE INFO (if any) * FCP BIDIRECTIONAL READ RESID (if any) */ uint8_t fcp_rsp_extra[0]; } fcp_rsp_iu_t; #define MIN_FCP_RESPONSE_SIZE 24 #define FCP_BIDIR_RSP 0x80 /* Bi-Directional response */ #define FCP_BIDIR_RESID_UNDERFLOW 0x40 #define FCP_BIDIR_RESID_OVERFLOW 0x20 #define FCP_CONF_REQ 0x10 #define FCP_RESID_UNDERFLOW 0x08 #define FCP_RESID_OVERFLOW 0x04 #define FCP_SNSLEN_VALID 0x02 #define FCP_RSPLEN_VALID 0x01 #define FCP_MAX_RSPLEN 0x08 /* * FCP Response Code Definitions * Source: NCITS T10, Project 1144D, Revision 08 (aka FCP2r08) */ #define FCP_RSPNS_CODE_OFFSET 3 #define FCP_RSPNS_TMF_DONE 0 #define FCP_RSPNS_DLBRSTX 1 
#define FCP_RSPNS_BADCMND 2 #define FCP_RSPNS_EROFS 3 #define FCP_RSPNS_TMF_REJECT 4 #define FCP_RSPNS_TMF_FAILED 5 #define FCP_RSPNS_TMF_SUCCEEDED 8 #define FCP_RSPNS_TMF_INCORRECT_LUN 9 /* * R_CTL field definitions * * Bits 31-28 are ROUTING * Bits 27-24 are INFORMATION * * These are nibble values, not bits */ #define R_CTL_ROUTE_DATA 0x00 #define R_CTL_ROUTE_ELS 0x02 #define R_CTL_ROUTE_FC4_LINK 0x03 #define R_CTL_ROUTE_VDATA 0x04 #define R_CTL_ROUTE_EXENDED 0x05 #define R_CTL_ROUTE_BASIC 0x08 #define R_CTL_ROUTE_LINK 0x0c #define R_CTL_ROUTE_EXT_ROUTING 0x0f #define R_CTL_INFO_UNCATEGORIZED 0x00 #define R_CTL_INFO_SOLICITED_DATA 0x01 #define R_CTL_INFO_UNSOLICITED_CONTROL 0x02 #define R_CTL_INFO_SOLICITED_CONTROL 0x03 #define R_CTL_INFO_UNSOLICITED_DATA 0x04 #define R_CTL_INFO_DATA_DESCRIPTOR 0x05 #define R_CTL_INFO_UNSOLICITED_COMMAND 0x06 #define R_CTL_INFO_COMMAND_STATUS 0x07 #define MAKE_RCTL(a, b) (((a) << 4) | (b)) /* unconverted miscellany */ /* * Basic FC Link Service defines */ /* #define ABTS MAKE_RCTL(R_CTL_ROUTE_BASIC, R_CTL_INFO_SOLICITED_DATA) */ #define BA_ACC MAKE_RCTL(R_CTL_ROUTE_BASIC, R_CTL_INFO_UNSOLICITED_DATA) /* of ABORT */ #define BA_RJT MAKE_RCTL(R_CTL_ROUTE_BASIC, R_CTL_INFO_DATA_DESCRIPTOR) /* of ABORT */ /* * Link Service Accept/Reject */ #define LS_ACC 0x8002 #define LS_RJT 0x8001 /* * FC ELS Codes- bits 31-24 of the first payload word of an ELS frame. */ #define PLOGI 0x03 #define FLOGI 0x04 #define LOGO 0x05 #define ABTX 0x06 #define PRLI 0x20 #define PRLO 0x21 #define SCN 0x22 #define TPRLO 0x24 #define PDISC 0x50 #define ADISC 0x52 #define RNC 0x53 /* * PRLI Word 0 definitions * FPC4-r02b January, 2011 */ #define PRLI_WD0_TYPE_MASK 0xff000000 #define PRLI_WD0_TC_EXT_MASK 0x00ff0000 #define PRLI_WD0_EST_IMAGE_PAIR (1 << 13) /* * PRLI Word 3 definitions * FPC4-r02b January, 2011 */ #define PRLI_WD3_ENHANCED_DISCOVERY (1 << 11) #define PRLI_WD3_REC_SUPPORT (1 << 10) #define PRLI_WD3_TASK_RETRY_IDENTIFICATION_REQUESTED (1 << 9) #define PRLI_WD3_RETRY (1 << 8) #define PRLI_WD3_CONFIRMED_COMPLETION_ALLOWED (1 << 7) #define PRLI_WD3_DATA_OVERLAY_ALLOWED (1 << 6) #define PRLI_WD3_INITIATOR_FUNCTION (1 << 5) #define PRLI_WD3_TARGET_FUNCTION (1 << 4) #define PRLI_WD3_READ_FCP_XFER_RDY_DISABLED (1 << 1) /* definitely supposed to be set */ #define PRLI_WD3_WRITE_FCP_XFER_RDY_DISABLED (1 << 0) /* * FC4 defines */ #define FC4_IP 5 /* ISO/EEC 8802-2 LLC/SNAP */ #define FC4_SCSI 8 /* SCSI-3 via Fibre Channel Protocol (FCP) */ #define FC4_FC_SVC 0x20 /* Fibre Channel Services */ #ifndef MSG_ABORT #define MSG_ABORT 0x06 #endif #ifndef MSG_BUS_DEV_RESET #define MSG_BUS_DEV_RESET 0x0c #endif #ifndef MSG_ABORT_TAG #define MSG_ABORT_TAG 0x0d #endif #ifndef MSG_CLEAR_QUEUE #define MSG_CLEAR_QUEUE 0x0e #endif #ifndef MSG_REL_RECOVERY #define MSG_REL_RECOVERY 0x10 #endif #ifndef MSG_TERM_IO_PROC #define MSG_TERM_IO_PROC 0x11 #endif #ifndef MSG_LUN_RESET #define MSG_LUN_RESET 0x17 #endif #endif /* _ISP_STDS_H */ Index: head/sys/dev/isp/ispvar.h =================================================================== --- head/sys/dev/isp/ispvar.h (revision 367043) +++ head/sys/dev/isp/ispvar.h (revision 367044) @@ -1,1147 +1,1148 @@ /* $FreeBSD$ */ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2018 Alexander Motin * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * Soft Definitions for for Qlogic ISP SCSI adapters. */ #ifndef _ISPVAR_H #define _ISPVAR_H #if defined(__NetBSD__) || defined(__OpenBSD__) #include #include #endif #ifdef __FreeBSD__ #include #include #endif #ifdef __linux__ #include "isp_stds.h" #include "ispmbox.h" #endif #ifdef __svr4__ #include "isp_stds.h" #include "ispmbox.h" #endif #define ISP_CORE_VERSION_MAJOR 7 #define ISP_CORE_VERSION_MINOR 0 /* * Vector for bus specific code to provide specific services. */ typedef struct ispsoftc ispsoftc_t; struct ispmdvec { void (*dv_run_isr) (ispsoftc_t *); uint32_t (*dv_rd_reg) (ispsoftc_t *, int); void (*dv_wr_reg) (ispsoftc_t *, int, uint32_t); int (*dv_mbxdma) (ispsoftc_t *); int (*dv_dmaset) (ispsoftc_t *, XS_T *, void *); void (*dv_dmaclr) (ispsoftc_t *, XS_T *, uint32_t); int (*dv_irqsetup) (ispsoftc_t *); void (*dv_dregs) (ispsoftc_t *, const char *); const void * dv_ispfw; /* ptr to f/w */ uint16_t dv_conf1; uint16_t dv_clock; /* clock frequency */ }; /* * Overall parameters */ #define MAX_TARGETS 16 #ifndef MAX_FC_TARG #define MAX_FC_TARG 1024 #endif #define ISP_MAX_TARGETS(isp) (IS_FC(isp)? MAX_FC_TARG : MAX_TARGETS) #define ISP_MAX_LUNS(isp) (isp)->isp_maxluns #define ISP_MAX_IRQS 3 /* * Macros to access ISP registers through bus specific layers- * mostly wrappers to vector through the mdvec structure. */ #define ISP_RUN_ISR(isp) \ (*(isp)->isp_mdvec->dv_run_isr)(isp) #define ISP_READ(isp, reg) \ (*(isp)->isp_mdvec->dv_rd_reg)((isp), (reg)) #define ISP_WRITE(isp, reg, val) \ (*(isp)->isp_mdvec->dv_wr_reg)((isp), (reg), (val)) #define ISP_MBOXDMASETUP(isp) \ (*(isp)->isp_mdvec->dv_mbxdma)((isp)) #define ISP_DMASETUP(isp, xs, req) \ (*(isp)->isp_mdvec->dv_dmaset)((isp), (xs), (req)) #define ISP_DMAFREE(isp, xs, hndl) \ if ((isp)->isp_mdvec->dv_dmaclr) \ (*(isp)->isp_mdvec->dv_dmaclr)((isp), (xs), (hndl)) #define ISP_IRQSETUP(isp) \ (((isp)->isp_mdvec->dv_irqsetup) ? 
(*(isp)->isp_mdvec->dv_irqsetup)(isp) : 0) #define ISP_DUMPREGS(isp, m) \ if ((isp)->isp_mdvec->dv_dregs) (*(isp)->isp_mdvec->dv_dregs)((isp),(m)) #define ISP_SETBITS(isp, reg, val) \ (*(isp)->isp_mdvec->dv_wr_reg)((isp), (reg), ISP_READ((isp), (reg)) | (val)) #define ISP_CLRBITS(isp, reg, val) \ (*(isp)->isp_mdvec->dv_wr_reg)((isp), (reg), ISP_READ((isp), (reg)) & ~(val)) /* * The MEMORYBARRIER macro is defined per platform (to provide synchronization * on Request and Response Queues, Scratch DMA areas, and Registers) * * Defined Memory Barrier Synchronization Types */ #define SYNC_REQUEST 0 /* request queue synchronization */ #define SYNC_RESULT 1 /* result queue synchronization */ #define SYNC_SFORDEV 2 /* scratch, sync for ISP */ #define SYNC_SFORCPU 3 /* scratch, sync for CPU */ #define SYNC_REG 4 /* for registers */ #define SYNC_ATIOQ 5 /* atio result queue (24xx) */ #define SYNC_IFORDEV 6 /* synchrounous IOCB, sync for ISP */ #define SYNC_IFORCPU 7 /* synchrounous IOCB, sync for CPU */ /* * Request/Response Queue defines and macros. * The maximum is defined per platform (and can be based on board type). */ /* This is the size of a queue entry (request and response) */ #define QENTRY_LEN 64 /* Both request and result queue length must be a power of two */ #define RQUEST_QUEUE_LEN(x) MAXISPREQUEST(x) #ifdef ISP_TARGET_MODE #define RESULT_QUEUE_LEN(x) MAXISPREQUEST(x) #else #define RESULT_QUEUE_LEN(x) \ (((MAXISPREQUEST(x) >> 2) < 64)? 64 : MAXISPREQUEST(x) >> 2) #endif #define ISP_QUEUE_ENTRY(q, idx) (((uint8_t *)q) + ((idx) * QENTRY_LEN)) #define ISP_QUEUE_SIZE(n) ((n) * QENTRY_LEN) #define ISP_NXT_QENTRY(idx, qlen) (((idx) + 1) & ((qlen)-1)) #define ISP_QFREE(in, out, qlen) \ ((in == out)? (qlen - 1) : ((in > out)? \ ((qlen - 1) - (in - out)) : (out - in - 1))) #define ISP_QAVAIL(isp) \ ISP_QFREE(isp->isp_reqidx, isp->isp_reqodx, RQUEST_QUEUE_LEN(isp)) #define ISP_ADD_REQUEST(isp, nxti) \ MEMORYBARRIER(isp, SYNC_REQUEST, isp->isp_reqidx, QENTRY_LEN, -1); \ ISP_WRITE(isp, isp->isp_rqstinrp, nxti); \ isp->isp_reqidx = nxti #define ISP_SYNC_REQUEST(isp) \ MEMORYBARRIER(isp, SYNC_REQUEST, isp->isp_reqidx, QENTRY_LEN, -1); \ isp->isp_reqidx = ISP_NXT_QENTRY(isp->isp_reqidx, RQUEST_QUEUE_LEN(isp)); \ ISP_WRITE(isp, isp->isp_rqstinrp, isp->isp_reqidx) /* * SCSI Specific Host Adapter Parameters- per bus, per target */ typedef struct { uint32_t : 8, update : 1, sendmarker : 1, isp_req_ack_active_neg : 1, isp_data_line_active_neg: 1, isp_cmd_dma_burst_enable: 1, isp_data_dma_burst_enabl: 1, isp_fifo_threshold : 3, isp_ptisp : 1, isp_ultramode : 1, isp_diffmode : 1, isp_lvdmode : 1, isp_fast_mttr : 1, /* fast sram */ isp_initiator_id : 4, isp_async_data_setup : 4; uint16_t isp_selection_timeout; uint16_t isp_max_queue_depth; uint8_t isp_tag_aging; uint8_t isp_bus_reset_delay; uint8_t isp_retry_count; uint8_t isp_retry_delay; struct { uint32_t exc_throttle : 8, : 1, dev_enable : 1, /* ignored */ dev_update : 1, dev_refresh : 1, actv_offset : 4, goal_offset : 4, nvrm_offset : 4; uint8_t actv_period; /* current sync period */ uint8_t goal_period; /* goal sync period */ uint8_t nvrm_period; /* nvram sync period */ uint16_t actv_flags; /* current device flags */ uint16_t goal_flags; /* goal device flags */ uint16_t nvrm_flags; /* nvram device flags */ } isp_devparam[MAX_TARGETS]; } sdparam; /* * Device Flags */ #define DPARM_DISC 0x8000 #define DPARM_PARITY 0x4000 #define DPARM_WIDE 0x2000 #define DPARM_SYNC 0x1000 #define DPARM_TQING 0x0800 #define DPARM_ARQ 0x0400 #define DPARM_QFRZ 0x0200 
#define DPARM_RENEG 0x0100 #define DPARM_NARROW 0x0080 #define DPARM_ASYNC 0x0040 #define DPARM_PPR 0x0020 #define DPARM_DEFAULT (0xFF00 & ~DPARM_QFRZ) #define DPARM_SAFE_DFLT (DPARM_DEFAULT & ~(DPARM_WIDE|DPARM_SYNC|DPARM_TQING)) /* technically, not really correct, as they need to be rated based upon clock */ #define ISP_80M_SYNCPARMS 0x0c09 #define ISP_40M_SYNCPARMS 0x0c0a #define ISP_20M_SYNCPARMS 0x0c0c #define ISP_20M_SYNCPARMS_1040 0x080c #define ISP_10M_SYNCPARMS 0x0c19 #define ISP_08M_SYNCPARMS 0x0c25 #define ISP_05M_SYNCPARMS 0x0c32 #define ISP_04M_SYNCPARMS 0x0c41 /* * Fibre Channel Specifics */ /* These are for non-2K Login Firmware cards */ #define FL_ID 0x7e /* FL_Port Special ID */ #define SNS_ID 0x80 /* SNS Server Special ID */ #define NPH_MAX 0xfe /* These are for 2K Login Firmware cards */ #define NPH_RESERVED 0x7F0 /* begin of reserved N-port handles */ #define NPH_MGT_ID 0x7FA /* Management Server Special ID */ #define NPH_SNS_ID 0x7FC /* SNS Server Special ID */ #define NPH_FABRIC_CTLR 0x7FD /* Fabric Controller (0xFFFFFD) */ #define NPH_FL_ID 0x7FE /* F Port Special ID (0xFFFFFE) */ #define NPH_IP_BCST 0x7FF /* IP Broadcast Special ID (0xFFFFFF) */ #define NPH_MAX_2K 0x800 /* * "Unassigned" handle to be used internally */ #define NIL_HANDLE 0xffff /* * Limit for devices on an arbitrated loop. */ #define LOCAL_LOOP_LIM 126 /* * Limit for (2K login) N-port handle amounts */ #define MAX_NPORT_HANDLE 2048 /* * Special Constants */ #define INI_NONE ((uint64_t) 0) #define ISP_NOCHAN 0xff /* * Special Port IDs */ #define MANAGEMENT_PORT_ID 0xFFFFFA #define SNS_PORT_ID 0xFFFFFC #define FABRIC_PORT_ID 0xFFFFFE #define PORT_ANY 0xFFFFFF #define PORT_NONE 0 #define VALID_PORT(port) (port != PORT_NONE && port != PORT_ANY) #define DOMAIN_CONTROLLER_BASE 0xFFFC00 #define DOMAIN_CONTROLLER_END 0xFFFCFF /* * Command Handles * * Most QLogic initiator or target have 32 bit handles associated with them. * We want to have a quick way to index back and forth between a local SCSI * command context and what the firmware is passing back to us. We also * want to avoid working on stale information. This structure handles both * at the expense of some local memory. * * The handle is architected thusly: * * 0 means "free handle" * bits 0..12 index commands * bits 13..15 bits index usage * bits 16..31 contain a rolling sequence * * */ typedef struct { void * cmd; /* associated command context */ uint32_t handle; /* handle associated with this command */ } isp_hdl_t; #define ISP_HANDLE_FREE 0x00000000 #define ISP_HANDLE_CMD_MASK 0x00001fff #define ISP_HANDLE_USAGE_MASK 0x0000e000 #define ISP_HANDLE_USAGE_SHIFT 13 #define ISP_H2HT(hdl) ((hdl & ISP_HANDLE_USAGE_MASK) >> ISP_HANDLE_USAGE_SHIFT) # define ISP_HANDLE_NONE 0 # define ISP_HANDLE_INITIATOR 1 # define ISP_HANDLE_TARGET 2 # define ISP_HANDLE_CTRL 3 #define ISP_HANDLE_SEQ_MASK 0xffff0000 #define ISP_HANDLE_SEQ_SHIFT 16 #define ISP_H2SEQ(hdl) ((hdl & ISP_HANDLE_SEQ_MASK) >> ISP_HANDLE_SEQ_SHIFT) #define ISP_VALID_HANDLE(c, hdl) \ ((ISP_H2HT(hdl) == ISP_HANDLE_INITIATOR || \ ISP_H2HT(hdl) == ISP_HANDLE_TARGET || \ ISP_H2HT(hdl) == ISP_HANDLE_CTRL) && \ ((hdl) & ISP_HANDLE_CMD_MASK) < (c)->isp_maxcmds && \ (hdl) == ((c)->isp_xflist[(hdl) & ISP_HANDLE_CMD_MASK].handle)) #define ISP_BAD_HANDLE_INDEX 0xffffffff /* * FC Port Database entry. * * It has a handle that the f/w uses to address commands to a device. * This handle's value may be assigned by the firmware (e.g., for local loop * devices) or by the driver (e.g., for fabric devices). 
* * It has a state. If the state if VALID, that means that we've logged into * the device. * * Local loop devices the firmware automatically performs PLOGI on for us * (which is why that handle is imposed upon us). Fabric devices we assign * a handle to and perform the PLOGI on. * * When a PORT DATABASE CHANGED asynchronous event occurs, we mark all VALID * entries as PROBATIONAL. This allows us, if policy says to, just keep track * of devices whose handles change but are otherwise the same device (and * thus keep 'target' constant). * * In any case, we search all possible local loop handles. For each one that * has a port database entity returned, we search for any PROBATIONAL entry * that matches it and update as appropriate. Otherwise, as a new entry, we * find room for it in the Port Database. We *try* and use the handle as the * index to put it into the Database, but that's just an optimization. We mark * the entry VALID and make sure that the target index is updated and correct. * * When we get done searching the local loop, we then search similarly for * a list of devices we've gotten from the fabric name controller (if we're * on a fabric). VALID marking is also done similarly. * * When all of this is done, we can march through the database and clean up * any entry that is still PROBATIONAL (these represent devices which have * departed). Then we're done and can resume normal operations. * * Negative invariants that we try and test for are: * * + There can never be two non-NIL entries with the same { Port, Node } WWN * duples. * * + There can never be two non-NIL entries with the same handle. */ typedef struct { /* * This is the handle that the firmware needs in order for us to * send commands to the device. For pre-24XX cards, this would be * the 'loopid'. */ uint16_t handle; /* * PRLI word 0 contains the Establish Image Pair bit, which is * important for knowing when to reset the CRN. * * PRLI word 3 parameters contains role as well as other things. * * The state is the current state of this entry. * * The is_target is the current state of target on this port. * * The is_initiator is the current state of initiator on this port. * * Portid is obvious, as are node && port WWNs. The new_role and * new_portid is for when we are pending a change. */ uint16_t prli_word0; /* PRLI parameters */ uint16_t prli_word3; /* PRLI parameters */ uint16_t new_prli_word0; /* Incoming new PRLI parameters */ uint16_t new_prli_word3; /* Incoming new PRLI parameters */ uint16_t : 12, probational : 1, state : 3; uint32_t : 6, is_target : 1, is_initiator : 1, portid : 24; uint32_t : 8, new_portid : 24; uint64_t node_wwn; uint64_t port_wwn; uint32_t gone_timer; } fcportdb_t; #define FC_PORTDB_STATE_NIL 0 /* Empty DB slot */ #define FC_PORTDB_STATE_DEAD 1 /* Was valid, but no more. */ #define FC_PORTDB_STATE_CHANGED 2 /* Was valid, but changed. */ #define FC_PORTDB_STATE_NEW 3 /* Logged in, not announced. */ #define FC_PORTDB_STATE_ZOMBIE 4 /* Invalid, but announced. */ #define FC_PORTDB_STATE_VALID 5 /* Valid */ #define FC_PORTDB_TGT(isp, bus, pdb) (int)(lp - FCPARAM(isp, bus)->portdb) /* * FC card specific information * * This structure is replicated across multiple channels for multi-id * capapble chipsets, with some entities different on a per-channel basis. 
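 * One of these is allocated per channel (see isp_nchan in struct ispsoftc
 * below); the FCPARAM(isp, chan) macro returns the instance for a given
 * channel.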
*/ typedef struct { int isp_gbspeed; /* Connection speed */ int isp_linkstate; /* Link state */ int isp_fwstate; /* ISP F/W state */ int isp_loopstate; /* Loop State */ int isp_topo; /* Connection Type */ uint32_t : 4, fctape_enabled : 1, sendmarker : 1, role : 2, isp_portid : 24; /* S_ID */ uint16_t isp_fwoptions; uint16_t isp_xfwoptions; uint16_t isp_zfwoptions; uint16_t isp_loopid; /* hard loop id */ uint16_t isp_sns_hdl; /* N-port handle for SNS */ uint16_t isp_lasthdl; /* only valid for channel 0 */ uint16_t isp_maxalloc; uint16_t isp_fabric_params; uint16_t isp_login_hdl; /* Logging in handle */ uint8_t isp_retry_delay; uint8_t isp_retry_count; int isp_use_gft_id; /* Use GFT_ID */ int isp_use_gff_id; /* Use GFF_ID */ /* * Current active WWNN/WWPN */ uint64_t isp_wwnn; uint64_t isp_wwpn; /* * NVRAM WWNN/WWPN */ uint64_t isp_wwnn_nvram; uint64_t isp_wwpn_nvram; /* * Our Port Data Base */ fcportdb_t portdb[MAX_FC_TARG]; /* * Scratch DMA mapped in area to fetch Port Database stuff, etc. */ void * isp_scratch; XS_DMA_ADDR_T isp_scdma; uint8_t isp_scanscratch[ISP_FC_SCRLEN]; } fcparam; #define FW_CONFIG_WAIT 0 #define FW_WAIT_LINK 1 #define FW_WAIT_LOGIN 2 #define FW_READY 3 #define FW_LOSS_OF_SYNC 4 #define FW_ERROR 5 #define FW_REINIT 6 #define FW_NON_PART 7 #define LOOP_NIL 0 #define LOOP_HAVE_LINK 1 #define LOOP_HAVE_ADDR 2 #define LOOP_TESTING_LINK 3 #define LOOP_LTEST_DONE 4 #define LOOP_SCANNING_LOOP 5 #define LOOP_LSCAN_DONE 6 #define LOOP_SCANNING_FABRIC 7 #define LOOP_FSCAN_DONE 8 #define LOOP_SYNCING_PDB 9 #define LOOP_READY 10 #define TOPO_NL_PORT 0 #define TOPO_FL_PORT 1 #define TOPO_N_PORT 2 #define TOPO_F_PORT 3 #define TOPO_PTP_STUB 4 #define TOPO_IS_FABRIC(x) ((x) == TOPO_FL_PORT || (x) == TOPO_F_PORT) #define FCP_AL_DA_ALL 0xFF #define FCP_AL_PA(fcp) ((uint8_t)(fcp->isp_portid)) #define FCP_IS_DEST_ALPD(fcp, alpd) (FCP_AL_PA((fcp)) == FCP_AL_DA_ALL || FCP_AL_PA((fcp)) == alpd) /* * Soft Structure per host adapter */ struct ispsoftc { /* * Platform (OS) specific data */ struct isposinfo isp_osinfo; /* * Pointer to bus specific functions and data */ struct ispmdvec * isp_mdvec; /* * (Mostly) nonvolatile state. Board specific parameters * may contain some volatile state (e.g., current loop state). 
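 * isp_param points at a per-channel array of sdparam (SPI) or fcparam (FC)
 * structures; the SDPARAM and FCPARAM macros below index into it by channel
 * number.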
*/ void * isp_param; /* type specific */ uint64_t isp_fwattr; /* firmware attributes */ uint16_t isp_fwrev[3]; /* Loaded F/W revision */ uint16_t isp_maxcmds; /* max possible I/O cmds */ uint8_t isp_type; /* HBA Chip Type */ uint8_t isp_revision; /* HBA Chip H/W Revision */ uint8_t isp_nirq; /* number of IRQs */ uint16_t isp_nchan; /* number of channels */ uint32_t isp_maxluns; /* maximum luns supported */ uint32_t isp_clock : 8, /* input clock */ : 5, isp_port : 1, /* 23XX/24XX only */ isp_bustype : 1, /* SBus or PCI */ isp_loaded_fw : 1, /* loaded firmware */ isp_dblev : 16; /* debug log mask */ uint32_t isp_confopts; /* config options */ uint32_t isp_rqstinrp; /* register for REQINP */ uint32_t isp_rqstoutrp; /* register for REQOUTP */ uint32_t isp_respinrp; /* register for RESINP */ uint32_t isp_respoutrp; /* register for RESOUTP */ /* * Volatile state */ volatile u_int isp_mboxbsy; /* mailbox command active */ volatile u_int isp_state; volatile mbreg_t isp_curmbx; /* currently active mailbox command */ volatile uint32_t isp_reqodx; /* index of last ISP pickup */ volatile uint32_t isp_reqidx; /* index of next request */ volatile uint32_t isp_residx; /* index of last ISP write */ volatile uint32_t isp_resodx; /* index of next result */ volatile uint32_t isp_atioodx; /* index of next ATIO */ volatile uint32_t isp_obits; /* mailbox command output */ volatile uint32_t isp_serno; /* rolling serial number */ volatile uint16_t isp_mboxtmp[MAX_MAILBOX]; volatile uint16_t isp_lastmbxcmd; /* last mbox command sent */ volatile uint16_t isp_seqno; /* running sequence number */ /* * Active commands are stored here, indexed by handle functions. */ isp_hdl_t *isp_xflist; isp_hdl_t *isp_xffree; /* * DMA mapped in area for synchronous IOCB requests. */ void * isp_iocb; XS_DMA_ADDR_T isp_iocb_dma; /* * request/result queue pointers and DMA handles for them. */ void * isp_rquest; void * isp_result; XS_DMA_ADDR_T isp_rquest_dma; XS_DMA_ADDR_T isp_result_dma; #ifdef ISP_TARGET_MODE /* for 24XX only */ void * isp_atioq; XS_DMA_ADDR_T isp_atioq_dma; #endif }; #define SDPARAM(isp, chan) (&((sdparam *)(isp)->isp_param)[(chan)]) #define FCPARAM(isp, chan) (&((fcparam *)(isp)->isp_param)[(chan)]) #define ISP_SET_SENDMARKER(isp, chan, val) \ if (IS_FC(isp)) { \ FCPARAM(isp, chan)->sendmarker = val; \ } else { \ SDPARAM(isp, chan)->sendmarker = val; \ } #define ISP_TST_SENDMARKER(isp, chan) \ (IS_FC(isp)? 
\ FCPARAM(isp, chan)->sendmarker != 0 : \ SDPARAM(isp, chan)->sendmarker != 0) /* * ISP Driver Run States */ #define ISP_NILSTATE 0 #define ISP_CRASHED 1 #define ISP_RESETSTATE 2 #define ISP_INITSTATE 3 #define ISP_RUNSTATE 4 /* * ISP Runtime Configuration Options */ #define ISP_CFG_FULL_DUPLEX 0x01 /* Full Duplex (Fibre Channel only) */ #define ISP_CFG_PORT_PREF 0x0e /* Mask for Port Prefs (all FC except 2100) */ #define ISP_CFG_PORT_DEF 0x00 /* prefer connection type from NVRAM */ #define ISP_CFG_LPORT_ONLY 0x02 /* insist on {N/F}L-Port connection */ #define ISP_CFG_NPORT_ONLY 0x04 /* insist on {N/F}-Port connection */ #define ISP_CFG_LPORT 0x06 /* prefer {N/F}L-Port connection */ #define ISP_CFG_NPORT 0x08 /* prefer {N/F}-Port connection */ #define ISP_CFG_1GB 0x10 /* force 1Gb connection (23XX only) */ #define ISP_CFG_2GB 0x20 /* force 2Gb connection (23XX only) */ #define ISP_CFG_NORELOAD 0x80 /* don't download f/w */ #define ISP_CFG_NONVRAM 0x40 /* ignore NVRAM */ #define ISP_CFG_NOFCTAPE 0x100 /* disable FC-Tape */ #define ISP_CFG_FCTAPE 0x200 /* enable FC-Tape */ #define ISP_CFG_OWNFSZ 0x400 /* override NVRAM frame size */ #define ISP_CFG_OWNLOOPID 0x800 /* override NVRAM loopid */ #define ISP_CFG_OWNEXCTHROTTLE 0x1000 /* override NVRAM execution throttle */ #define ISP_CFG_4GB 0x2000 /* force 4Gb connection (24XX only) */ #define ISP_CFG_8GB 0x4000 /* force 8Gb connection (25XX only) */ #define ISP_CFG_16GB 0x8000 /* force 16Gb connection (26XX only) */ #define ISP_CFG_32GB 0x10000 /* force 32Gb connection (27XX only) */ /* * For each channel, the outer layers should know what role that channel * will take: ISP_ROLE_NONE, ISP_ROLE_INITIATOR, ISP_ROLE_TARGET, * ISP_ROLE_BOTH. * * If you set ISP_ROLE_NONE, the cards will be reset, new firmware loaded, * NVRAM read, and defaults set, but any further initialization (e.g. * INITIALIZE CONTROL BLOCK commands for 2X00 cards) won't be done. * * If INITIATOR MODE isn't set, attempts to run commands will be stopped * at isp_start and completed with the equivalent of SELECTION TIMEOUT. * * If TARGET MODE is set, it doesn't mean that the rest of target mode support * needs to be enabled, or will even work. What happens with the 2X00 cards * here is that if you have enabled it with TARGET MODE as part of the ICB * options, but you haven't given the f/w any ram resources for ATIOs or * Immediate Notifies, the f/w just handles what it can and you never see * anything. Basically, it sends a single byte of data (the first byte, * which you can set as part of the INITIALIZE CONTROL BLOCK command) for * INQUIRY, and sends back QUEUE FULL status for any other command. * */ #define ISP_ROLE_NONE 0x0 #define ISP_ROLE_TARGET 0x1 #define ISP_ROLE_INITIATOR 0x2 #define ISP_ROLE_BOTH (ISP_ROLE_TARGET|ISP_ROLE_INITIATOR) #define ISP_ROLE_EITHER ISP_ROLE_BOTH #ifndef ISP_DEFAULT_ROLES /* * Counterintuitively, we prefer to default to role 'none' * if we are enable target mode support. This gives us the * maximum flexibility as to which port will do what. 
*/ #ifdef ISP_TARGET_MODE #define ISP_DEFAULT_ROLES ISP_ROLE_NONE #else #define ISP_DEFAULT_ROLES ISP_ROLE_INITIATOR #endif #endif /* * Firmware related defines */ #define ISP_CODE_ORG 0x1000 /* default f/w code start */ #define ISP_CODE_ORG_2300 0x0800 /* ..except for 2300s */ #define ISP_CODE_ORG_2400 0x100000 /* ..and 2400s */ #define ISP_FW_REV(maj, min, mic) ((maj << 24) | (min << 16) | mic) #define ISP_FW_MAJOR(code) ((code >> 24) & 0xff) #define ISP_FW_MINOR(code) ((code >> 16) & 0xff) #define ISP_FW_MICRO(code) ((code >> 8) & 0xff) #define ISP_FW_REVX(xp) ((xp[0]<<24) | (xp[1] << 16) | xp[2]) #define ISP_FW_MAJORX(xp) (xp[0]) #define ISP_FW_MINORX(xp) (xp[1]) #define ISP_FW_MICROX(xp) (xp[2]) #define ISP_FW_NEWER_THAN(i, major, minor, micro) \ (ISP_FW_REVX((i)->isp_fwrev) > ISP_FW_REV(major, minor, micro)) #define ISP_FW_OLDER_THAN(i, major, minor, micro) \ (ISP_FW_REVX((i)->isp_fwrev) < ISP_FW_REV(major, minor, micro)) /* * Bus (implementation) types */ #define ISP_BT_PCI 0 /* PCI Implementations */ #define ISP_BT_SBUS 1 /* SBus Implementations */ /* * If we have not otherwise defined SBus support away make sure * it is defined here such that the code is included as default */ #ifndef ISP_SBUS_SUPPORTED #define ISP_SBUS_SUPPORTED 1 #endif /* * Chip Types */ #define ISP_HA_SCSI 0xf #define ISP_HA_SCSI_UNKNOWN 0x1 #define ISP_HA_SCSI_1020 0x2 #define ISP_HA_SCSI_1020A 0x3 #define ISP_HA_SCSI_1040 0x4 #define ISP_HA_SCSI_1040A 0x5 #define ISP_HA_SCSI_1040B 0x6 #define ISP_HA_SCSI_1040C 0x7 #define ISP_HA_SCSI_1240 0x8 #define ISP_HA_SCSI_1080 0x9 #define ISP_HA_SCSI_1280 0xa #define ISP_HA_SCSI_10160 0xb #define ISP_HA_SCSI_12160 0xc #define ISP_HA_FC 0xf0 #define ISP_HA_FC_2100 0x10 #define ISP_HA_FC_2200 0x20 #define ISP_HA_FC_2300 0x30 #define ISP_HA_FC_2312 0x40 #define ISP_HA_FC_2322 0x50 #define ISP_HA_FC_2400 0x60 #define ISP_HA_FC_2500 0x70 #define ISP_HA_FC_2600 0x80 #define ISP_HA_FC_2700 0x90 #define IS_SCSI(isp) (isp->isp_type & ISP_HA_SCSI) #define IS_1020(isp) (isp->isp_type < ISP_HA_SCSI_1240) #define IS_1240(isp) (isp->isp_type == ISP_HA_SCSI_1240) #define IS_1080(isp) (isp->isp_type == ISP_HA_SCSI_1080) #define IS_1280(isp) (isp->isp_type == ISP_HA_SCSI_1280) #define IS_10160(isp) (isp->isp_type == ISP_HA_SCSI_10160) #define IS_12160(isp) (isp->isp_type == ISP_HA_SCSI_12160) #define IS_12X0(isp) (IS_1240(isp) || IS_1280(isp)) #define IS_1X160(isp) (IS_10160(isp) || IS_12160(isp)) #define IS_DUALBUS(isp) (IS_12X0(isp) || IS_12160(isp)) #define IS_ULTRA2(isp) (IS_1080(isp) || IS_1280(isp) || IS_1X160(isp)) #define IS_ULTRA3(isp) (IS_1X160(isp)) #define IS_FC(isp) ((isp)->isp_type & ISP_HA_FC) #define IS_2100(isp) ((isp)->isp_type == ISP_HA_FC_2100) #define IS_2200(isp) ((isp)->isp_type == ISP_HA_FC_2200) #define IS_23XX(isp) ((isp)->isp_type >= ISP_HA_FC_2300 && \ (isp)->isp_type < ISP_HA_FC_2400) #define IS_2300(isp) ((isp)->isp_type == ISP_HA_FC_2300) #define IS_2312(isp) ((isp)->isp_type == ISP_HA_FC_2312) #define IS_2322(isp) ((isp)->isp_type == ISP_HA_FC_2322) #define IS_24XX(isp) ((isp)->isp_type >= ISP_HA_FC_2400) #define IS_25XX(isp) ((isp)->isp_type >= ISP_HA_FC_2500) #define IS_26XX(isp) ((isp)->isp_type >= ISP_HA_FC_2600) #define IS_27XX(isp) ((isp)->isp_type >= ISP_HA_FC_2700) /* * DMA related macros */ #define DMA_WD3(x) (((uint16_t)(((uint64_t)x) >> 48)) & 0xffff) #define DMA_WD2(x) (((uint16_t)(((uint64_t)x) >> 32)) & 0xffff) #define DMA_WD1(x) ((uint16_t)((x) >> 16) & 0xffff) #define DMA_WD0(x) ((uint16_t)((x) & 0xffff)) #define DMA_LO32(x) ((uint32_t) 
(x)) #define DMA_HI32(x) ((uint32_t)(((uint64_t)x) >> 32)) /* * Core System Function Prototypes */ /* * Reset Hardware. Totally. Assumes that you'll follow this with a call to isp_init. */ void isp_reset(ispsoftc_t *, int); /* * Initialize Hardware to known state */ void isp_init(ispsoftc_t *); /* * Reset the ISP and call completion for any orphaned commands. */ int isp_reinit(ispsoftc_t *, int); /* * Shutdown hardware after use. */ void isp_shutdown(ispsoftc_t *); /* * Internal Interrupt Service Routine */ #ifdef ISP_TARGET_MODE void isp_intr_atioq(ispsoftc_t *); #endif void isp_intr_async(ispsoftc_t *, uint16_t event); void isp_intr_mbox(ispsoftc_t *, uint16_t mbox0); void isp_intr_respq(ispsoftc_t *); /* * Command Entry Point- Platform Dependent layers call into this */ int isp_start(XS_T *); /* these values are what isp_start returns */ #define CMD_COMPLETE 101 /* command completed */ #define CMD_EAGAIN 102 /* busy- maybe retry later */ #define CMD_QUEUED 103 /* command has been queued for execution */ #define CMD_RQLATER 104 /* requeue this command later */ /* * Command Completion Point- Core layers call out from this with completed cmds */ void isp_done(XS_T *); /* * Platform Dependent to External to Internal Control Function * * Assumes locks are held on entry. You should note that with many of * these commands locks may be released while this function is called. * * ... ISPCTL_RESET_BUS, int channel); * Reset BUS on this channel * ... ISPCTL_RESET_DEV, int channel, int target); * Reset Device on this channel at this target. * ... ISPCTL_ABORT_CMD, XS_T *xs); * Abort active transaction described by xs. * ... IPCTL_UPDATE_PARAMS); * Update any operating parameters (speed, etc.) * ... ISPCTL_FCLINK_TEST, int channel); * Test FC link status on this channel * ... ISPCTL_SCAN_LOOP, int channel); * Scan local loop on this channel * ... ISPCTL_SCAN_FABRIC, int channel); * Scan fabric on this channel * ... ISPCTL_PDB_SYNC, int channel); * Synchronize port database on this channel * ... ISPCTL_SEND_LIP, int channel); * Send a LIP on this channel * ... ISPCTL_GET_NAMES, int channel, int np, uint64_t *wwnn, uint64_t *wwpn) * Get a WWNN/WWPN for this N-port handle on this channel * ... ISPCTL_RUN_MBOXCMD, mbreg_t *mbp) * Run this mailbox command * ... ISPCTL_GET_PDB, int channel, int nphandle, isp_pdb_t *pdb) * Get PDB on this channel for this N-port handle * ... ISPCTL_PLOGX, isp_plcmd_t *) * Performa a port login/logout * ... ISPCTL_CHANGE_ROLE, int channel, int role); * Change role of specified channel * * ISPCTL_PDB_SYNC is somewhat misnamed. It actually is the final step, in * order, of ISPCTL_FCLINK_TEST, ISPCTL_SCAN_LOOP, and ISPCTL_SCAN_FABRIC. * The main purpose of ISPCTL_PDB_SYNC is to complete management of logging * and logging out of fabric devices (if one is on a fabric) and then marking * the 'loop state' as being ready to now be used for sending commands to * devices. 
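 *
 * For example, a platform layer resets a single device with:
 *
 *	isp_control(isp, ISPCTL_RESET_DEV, chan, tgt);
 *
 * where the variable arguments follow the per-opcode lists above.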
*/ typedef enum { ISPCTL_RESET_BUS, ISPCTL_RESET_DEV, ISPCTL_ABORT_CMD, ISPCTL_UPDATE_PARAMS, ISPCTL_FCLINK_TEST, ISPCTL_SCAN_FABRIC, ISPCTL_SCAN_LOOP, ISPCTL_PDB_SYNC, ISPCTL_SEND_LIP, ISPCTL_GET_NAMES, ISPCTL_RUN_MBOXCMD, ISPCTL_GET_PDB, ISPCTL_PLOGX, ISPCTL_CHANGE_ROLE } ispctl_t; int isp_control(ispsoftc_t *, ispctl_t, ...); /* * Platform Dependent to Internal to External Control Function */ typedef enum { ISPASYNC_NEW_TGT_PARAMS, /* SPI New Target Parameters */ ISPASYNC_BUS_RESET, /* All Bus Was Reset */ ISPASYNC_LOOP_DOWN, /* FC Loop Down */ ISPASYNC_LOOP_UP, /* FC Loop Up */ ISPASYNC_LIP, /* FC LIP Received */ ISPASYNC_LOOP_RESET, /* FC Loop Reset Received */ ISPASYNC_CHANGE_NOTIFY, /* FC Change Notification */ ISPASYNC_DEV_ARRIVED, /* FC Device Arrived */ ISPASYNC_DEV_CHANGED, /* FC Device Changed */ ISPASYNC_DEV_STAYED, /* FC Device Stayed */ ISPASYNC_DEV_GONE, /* FC Device Departure */ ISPASYNC_TARGET_NOTIFY, /* All target async notification */ ISPASYNC_TARGET_NOTIFY_ACK, /* All target notify ack required */ ISPASYNC_TARGET_ACTION, /* All target action requested */ ISPASYNC_FW_CRASH, /* All Firmware has crashed */ ISPASYNC_FW_RESTARTED /* All Firmware has been restarted */ } ispasync_t; void isp_async(ispsoftc_t *, ispasync_t, ...); #define ISPASYNC_CHANGE_PDB 0 #define ISPASYNC_CHANGE_SNS 1 #define ISPASYNC_CHANGE_OTHER 2 /* * Platform Dependent Error and Debug Printout * * Two required functions for each platform must be provided: * * void isp_prt(ispsoftc_t *, int level, const char *, ...) * void isp_xs_prt(ispsoftc_t *, XS_T *, int level, const char *, ...) * * but due to compiler differences on different platforms this won't be * formally defined here. Instead, they go in each platform definition file. */ #define ISP_LOGALL 0x0 /* log always */ #define ISP_LOGCONFIG 0x1 /* log configuration messages */ #define ISP_LOGINFO 0x2 /* log informational messages */ #define ISP_LOGWARN 0x4 /* log warning messages */ #define ISP_LOGERR 0x8 /* log error messages */ #define ISP_LOGDEBUG0 0x10 /* log simple debug messages */ #define ISP_LOGDEBUG1 0x20 /* log intermediate debug messages */ #define ISP_LOGDEBUG2 0x40 /* log most debug messages */ #define ISP_LOGDEBUG3 0x80 /* log high frequency debug messages */ #define ISP_LOG_SANCFG 0x100 /* log SAN configuration */ #define ISP_LOG_CWARN 0x200 /* log SCSI command "warnings" (e.g., check conditions) */ #define ISP_LOG_WARN1 0x400 /* log WARNS we might be interested at some time */ #define ISP_LOGTINFO 0x1000 /* log informational messages (target mode) */ #define ISP_LOGTDEBUG0 0x2000 /* log simple debug messages (target mode) */ #define ISP_LOGTDEBUG1 0x4000 /* log intermediate debug messages (target) */ #define ISP_LOGTDEBUG2 0x8000 /* log all debug messages (target) */ /* * Each Platform provides it's own isposinfo substructure of the ispsoftc * defined above. * * Each platform must also provide the following macros/defines: * * * ISP_FC_SCRLEN FC scratch area DMA length * * ISP_MEMZERO(dst, src) platform zeroing function * ISP_MEMCPY(dst, src, count) platform copying function * ISP_SNPRINTF(buf, bufsize, fmt, ...) snprintf * ISP_DELAY(usecs) microsecond spindelay function * ISP_SLEEP(isp, usecs) microsecond sleep function * * ISP_INLINE ___inline or not- depending on how * good your debugger is * ISP_MIN shorthand for ((a) < (b))? (a) : (b) * * NANOTIME_T nanosecond time type * * GET_NANOTIME(NANOTIME_T *) get current nanotime. 
* * GET_NANOSEC(NANOTIME_T *) get uint64_t from NANOTIME_T * * NANOTIME_SUB(NANOTIME_T *, NANOTIME_T *) * subtract two NANOTIME_T values * * MAXISPREQUEST(ispsoftc_t *) maximum request queue size * for this particular board type * * MEMORYBARRIER(ispsoftc_t *, barrier_type, offset, size, chan) * * Function/Macro the provides memory synchronization on * various objects so that the ISP's and the system's view * of the same object is consistent. * * MBOX_ACQUIRE(ispsoftc_t *) acquire lock on mailbox regs * MBOX_WAIT_COMPLETE(ispsoftc_t *, mbreg_t *) wait for cmd to be done * MBOX_NOTIFY_COMPLETE(ispsoftc_t *) notification of mbox cmd donee * MBOX_RELEASE(ispsoftc_t *) release lock on mailbox regs * * FC_SCRATCH_ACQUIRE(ispsoftc_t *, chan) acquire lock on FC scratch area * return -1 if you cannot * FC_SCRATCH_RELEASE(ispsoftc_t *, chan) acquire lock on FC scratch area * * FCP_NEXT_CRN(ispsoftc_t *, XS_T *, rslt, channel, target, lun) generate the next command reference number. XS_T * may be null. * * SCSI_GOOD SCSI 'Good' Status * SCSI_CHECK SCSI 'Check Condition' Status * SCSI_BUSY SCSI 'Busy' Status * SCSI_QFULL SCSI 'Queue Full' Status * * XS_T Platform SCSI transaction type (i.e., command for HBA) * XS_DMA_ADDR_T Platform PCI DMA Address Type * XS_GET_DMA_SEG(..) Get 32 bit dma segment list value * XS_GET_DMA64_SEG(..) Get 64 bit dma segment list value * XS_NEED_DMA64_SEG(..) dma segment needs 64 bit storage * XS_ISP(xs) gets an instance out of an XS_T * XS_CHANNEL(xs) gets the channel (bus # for DUALBUS cards) "" * XS_TGT(xs) gets the target "" * XS_LUN(xs) gets the lun "" * XS_CDBP(xs) gets a pointer to the scsi CDB "" * XS_CDBLEN(xs) gets the CDB's length "" * XS_XFRLEN(xs) gets the associated data transfer length "" * XS_TIME(xs) gets the time (in seconds) for this command * XS_GET_RESID(xs) gets the current residual count * XS_GET_RESID(xs, resid) sets the current residual count * XS_STSP(xs) gets a pointer to the SCSI status byte "" * XS_SNSP(xs) gets a pointer to the associate sense data * XS_TOT_SNSLEN(xs) gets the total length of sense data storage * XS_CUR_SNSLEN(xs) gets the currently used length of sense data storage * XS_SNSKEY(xs) dereferences XS_SNSP to get the current stored Sense Key * XS_SNSASC(xs) dereferences XS_SNSP to get the current stored Additional Sense Code * XS_SNSASCQ(xs) dereferences XS_SNSP to get the current stored Additional Sense Code Qualifier * XS_TAG_P(xs) predicate of whether this command should be tagged * XS_TAG_TYPE(xs) which type of tag to use + * XS_PRIORITY(xs) command priority for SIMPLE tag * XS_SETERR(xs) set error state * * HBA_NOERROR command has no erros * HBA_BOTCH hba botched something * HBA_CMDTIMEOUT command timed out * HBA_SELTIMEOUT selection timed out (also port logouts for FC) * HBA_TGTBSY target returned a BUSY status * HBA_BUSRESET bus reset destroyed command * HBA_ABORTED command was aborted (by request) * HBA_DATAOVR a data overrun was detected * HBA_ARQFAIL Automatic Request Sense failed * * XS_ERR(xs) return current error state * XS_NOERR(xs) there is no error currently set * XS_INITERR(xs) initialize error state * * XS_SAVE_SENSE(xs, sp, len) save sense data * XS_APPEND_SENSE(xs, sp, len) append more sense data * * XS_SENSE_VALID(xs) indicates whether sense is valid * * DEFAULT_FRAMESIZE(ispsoftc_t *) Default Frame Size * DEFAULT_EXEC_THROTTLE(ispsoftc_t *) Default Execution Throttle * * DEFAULT_ROLE(ispsoftc_t *, int) Get Default Role for a channel * DEFAULT_IID(ispsoftc_t *, int) Default SCSI initiator ID * 
DEFAULT_LOOPID(ispsoftc_t *, int) Default FC Loop ID * * These establish reasonable defaults for each platform. * These must be available independent of card NVRAM and are * to be used should NVRAM not be readable. * * DEFAULT_NODEWWN(ispsoftc_t *, chan) Default FC Node WWN to use * DEFAULT_PORTWWN(ispsoftc_t *, chan) Default FC Port WWN to use * * These defines are hooks to allow the setting of node and * port WWNs when NVRAM cannot be read or is to be overriden. * * ACTIVE_NODEWWN(ispsoftc_t *, chan) FC Node WWN to use * ACTIVE_PORTWWN(ispsoftc_t *, chan) FC Port WWN to use * * After NVRAM is read, these will be invoked to get the * node and port WWNs that will actually be used for this * channel. * * * ISP_IOXPUT_8(ispsoftc_t *, uint8_t srcval, uint8_t *dstptr) * ISP_IOXPUT_16(ispsoftc_t *, uint16_t srcval, uint16_t *dstptr) * ISP_IOXPUT_32(ispsoftc_t *, uint32_t srcval, uint32_t *dstptr) * * ISP_IOXGET_8(ispsoftc_t *, uint8_t *srcptr, uint8_t dstrval) * ISP_IOXGET_16(ispsoftc_t *, uint16_t *srcptr, uint16_t dstrval) * ISP_IOXGET_32(ispsoftc_t *, uint32_t *srcptr, uint32_t dstrval) * * ISP_SWIZZLE_NVRAM_WORD(ispsoftc_t *, uint16_t *) * ISP_SWIZZLE_NVRAM_LONG(ispsoftc_t *, uint32_t *) * ISP_SWAP16(ispsoftc_t *, uint16_t srcval) * ISP_SWAP32(ispsoftc_t *, uint32_t srcval) */ #ifdef ISP_TARGET_MODE /* * The functions below are for the publicly available * target mode functions that are internal to the Qlogic driver. */ /* * This function handles new response queue entry appropriate for target mode. */ int isp_target_notify(ispsoftc_t *, void *, uint32_t *); /* * This function externalizes the ability to acknowledge an Immediate Notify request. */ int isp_notify_ack(ispsoftc_t *, void *); /* * This function externalized acknowledging (success/fail) an ABTS frame */ int isp_acknak_abts(ispsoftc_t *, void *, int); /* * General request queue 'put' routine for target mode entries. */ int isp_target_put_entry(ispsoftc_t *isp, void *); /* * General routine to put back an ATIO entry- * used for replenishing f/w resource counts. * The argument is a pointer to a source ATIO * or ATIO2. */ int isp_target_put_atio(ispsoftc_t *, void *); /* * General routine to send a final CTIO for a command- used mostly for * local responses. */ int isp_endcmd(ispsoftc_t *, ...); #define ECMD_SVALID 0x100 #define ECMD_RVALID 0x200 #define ECMD_TERMINATE 0x400 /* * Handle an asynchronous event */ void isp_target_async(ispsoftc_t *, int, int); #endif #endif /* _ISPVAR_H */ Index: head/sys/dev/mpr/mpr_sas.c =================================================================== --- head/sys/dev/mpr/mpr_sas.c (revision 367043) +++ head/sys/dev/mpr/mpr_sas.c (revision 367044) @@ -1,3542 +1,3544 @@ /*- * Copyright (c) 2009 Yahoo! Inc. * Copyright (c) 2011-2015 LSI Corp. * Copyright (c) 2013-2016 Avago Technologies * Copyright 2000-2020 Broadcom Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD * */ #include __FBSDID("$FreeBSD$"); /* Communications core for Avago Technologies (LSI) MPT3 */ /* TODO Move headers to mprvar */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MPRSAS_DISCOVERY_TIMEOUT 20 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */ /* * static array to check SCSI OpCode for EEDP protection bits */ #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP static uint8_t op_code_prot[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory"); static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *); static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *); static void mprsas_action(struct cam_sim *sim, union ccb *ccb); static void mprsas_poll(struct cam_sim *sim); static void mprsas_scsiio_timeout(void *data); static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm); static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *); static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *); static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *); static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *); static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm, struct mpr_command *cm); static void mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); static int mprsas_send_portenable(struct mpr_softc *sc); static void 
mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm); static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm); static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr); static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb); struct mprsas_target * mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start, uint16_t handle) { struct mprsas_target *target; int i; for (i = start; i < sassc->maxtargets; i++) { target = &sassc->targets[i]; if (target->handle == handle) return (target); } return (NULL); } /* we need to freeze the simq during attach and diag reset, to avoid failing * commands before device handles have been found by discovery. Since * discovery involves reading config pages and possibly sending commands, * discovery actions may continue even after we receive the end of discovery * event, so refcount discovery actions instead of assuming we can unfreeze * the simq when we get the event. */ void mprsas_startup_increment(struct mprsas_softc *sassc) { MPR_FUNCTRACE(sassc->sc); if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) { if (sassc->startup_refcount++ == 0) { /* just starting, freeze the simq */ mpr_dprint(sassc->sc, MPR_INIT, "%s freezing simq\n", __func__); xpt_hold_boot(); xpt_freeze_simq(sassc->sim, 1); } mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__, sassc->startup_refcount); } } void mprsas_release_simq_reinit(struct mprsas_softc *sassc) { if (sassc->flags & MPRSAS_QUEUE_FROZEN) { sassc->flags &= ~MPRSAS_QUEUE_FROZEN; xpt_release_simq(sassc->sim, 1); mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n"); } } void mprsas_startup_decrement(struct mprsas_softc *sassc) { MPR_FUNCTRACE(sassc->sc); if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) { if (--sassc->startup_refcount == 0) { /* finished all discovery-related actions, release * the simq and rescan for the latest topology. */ mpr_dprint(sassc->sc, MPR_INIT, "%s releasing simq\n", __func__); sassc->flags &= ~MPRSAS_IN_STARTUP; xpt_release_simq(sassc->sim, 1); xpt_release_boot(); } mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__, sassc->startup_refcount); } } /* * The firmware requires us to stop sending commands when we're doing task * management. * use. * XXX The logic for serializing the device has been made lazy and moved to * mprsas_prepare_for_tm(). */ struct mpr_command * mprsas_alloc_tm(struct mpr_softc *sc) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mpr_command *tm; MPR_FUNCTRACE(sc); tm = mpr_alloc_high_priority_command(sc); if (tm == NULL) return (NULL); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; return tm; } void mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm) { int target_id = 0xFFFFFFFF; MPR_FUNCTRACE(sc); if (tm == NULL) return; /* * For TM's the devq is frozen for the device. Unfreeze it here and * free the resources used for freezing the devq. Must clear the * INRESET flag as well or scsi I/O will not work. 
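 * The path and CCB freed below are the resources that were allocated when
 * the devq was frozen for this TM, so the xpt_release_devq() call is the
 * matching release for that freeze.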
*/ if (tm->cm_targ != NULL) { tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET; target_id = tm->cm_targ->tid; } if (tm->cm_ccb) { mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n", target_id); xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE); xpt_free_path(tm->cm_ccb->ccb_h.path); xpt_free_ccb(tm->cm_ccb); } mpr_free_high_priority_command(sc, tm); } void mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ) { struct mprsas_softc *sassc = sc->sassc; path_id_t pathid; target_id_t targetid; union ccb *ccb; MPR_FUNCTRACE(sc); pathid = cam_sim_path(sassc->sim); if (targ == NULL) targetid = CAM_TARGET_WILDCARD; else targetid = targ - sassc->targets; /* * Allocate a CCB and schedule a rescan. */ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n"); xpt_free_ccb(ccb); return; } if (targetid == CAM_TARGET_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_BUS; else ccb->ccb_h.func_code = XPT_SCAN_TGT; mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid); xpt_rescan(ccb); } static void mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...) { struct sbuf sb; va_list ap; char str[224]; char path_str[64]; if (cm == NULL) return; /* No need to be in here if debugging isn't enabled */ if ((cm->cm_sc->mpr_debug & level) == 0) return; sbuf_new(&sb, str, sizeof(str), 0); va_start(ap, fmt); if (cm->cm_ccb != NULL) { xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str, sizeof(path_str)); sbuf_cat(&sb, path_str); if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) { scsi_command_string(&cm->cm_ccb->csio, &sb); sbuf_printf(&sb, "length %d ", cm->cm_ccb->csio.dxfer_len); } } else { sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ", cam_sim_name(cm->cm_sc->sassc->sim), cam_sim_unit(cm->cm_sc->sassc->sim), cam_sim_bus(cm->cm_sc->sassc->sim), cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF, cm->cm_lun); } sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID); sbuf_vprintf(&sb, fmt, ap); sbuf_finish(&sb); mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb)); va_end(ap); } static void mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *reply; struct mprsas_target *targ; uint16_t handle; MPR_FUNCTRACE(sc); reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; handle = (uint16_t)(uintptr_t)tm->cm_complete_data; targ = tm->cm_targ; if (reply == NULL) { /* XXX retry the remove after the diag reset completes? */ mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device " "0x%04x\n", __func__, handle); mprsas_free_tm(sc, tm); return; } if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) { mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting " "device 0x%x\n", le16toh(reply->IOCStatus), handle); } mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n", le32toh(reply->TerminationCount)); mpr_free_reply(sc, tm->cm_reply_data); tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n", targ->tid, handle); /* * Don't clear target if remove fails because things will get confusing. * Leave the devname and sasaddr intact so that we know to avoid reusing * this target id if possible, and so we can assign the same target id * to this device if it comes back in the future. 
*/ if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_SUCCESS) { targ = tm->cm_targ; targ->handle = 0x0; targ->encl_handle = 0x0; targ->encl_level_valid = 0x0; targ->encl_level = 0x0; targ->connector_name[0] = ' '; targ->connector_name[1] = ' '; targ->connector_name[2] = ' '; targ->connector_name[3] = ' '; targ->encl_slot = 0x0; targ->exp_dev_handle = 0x0; targ->phy_num = 0x0; targ->linkrate = 0x0; targ->devinfo = 0x0; targ->flags = 0x0; targ->scsi_req_desc_type = 0; } mprsas_free_tm(sc, tm); } /* * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal. * Otherwise Volume Delete is same as Bare Drive Removal. */ void mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mpr_softc *sc; struct mpr_command *cm; struct mprsas_target *targ = NULL; MPR_FUNCTRACE(sassc->sc); sc = sassc->sc; targ = mprsas_find_target_by_handle(sassc, 0, handle); if (targ == NULL) { /* FIXME: what is the action? */ /* We don't know about this device? */ mpr_dprint(sc, MPR_ERROR, "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle); return; } targ->flags |= MPRSAS_TARGET_INREMOVAL; cm = mprsas_alloc_tm(sc); if (cm == NULL) { mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n", __func__); return; } mprsas_rescan_target(sc, targ); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; req->DevHandle = targ->handle; req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; if (!targ->is_nvme || sc->custom_nvme_tm_handling) { /* SAS Hard Link Reset / SATA Link Reset */ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; } else { /* PCIe Protocol Level Reset*/ req->MsgFlags = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; } cm->cm_targ = targ; cm->cm_data = NULL; cm->cm_complete = mprsas_remove_volume; cm->cm_complete_data = (void *)(uintptr_t)handle; mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n", __func__, targ->tid); mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD); mpr_map_command(sc, cm); } /* * The firmware performs debounce on the link to avoid transient link errors * and false removals. When it does decide that link has been lost and a * device needs to go away, it expects that the host will perform a target reset * and then an op remove. The reset has the side-effect of aborting any * outstanding requests for the device, which is required for the op-remove to * succeed. It's not clear if the host should check for the device coming back * alive after the reset. */ void mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mpr_softc *sc; struct mpr_command *tm; struct mprsas_target *targ = NULL; MPR_FUNCTRACE(sassc->sc); sc = sassc->sc; targ = mprsas_find_target_by_handle(sassc, 0, handle); if (targ == NULL) { /* FIXME: what is the action? */ /* We don't know about this device? 
*/ mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n", __func__, handle); return; } targ->flags |= MPRSAS_TARGET_INREMOVAL; tm = mprsas_alloc_tm(sc); if (tm == NULL) { mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n", __func__); return; } mprsas_rescan_target(sc, targ); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; req->DevHandle = htole16(targ->handle); req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; /* SAS Hard Link Reset / SATA Link Reset */ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; tm->cm_targ = targ; tm->cm_data = NULL; tm->cm_complete = mprsas_remove_device; tm->cm_complete_data = (void *)(uintptr_t)handle; mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n", __func__, targ->tid); mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD); mpr_map_command(sc, tm); } static void mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *reply; MPI2_SAS_IOUNIT_CONTROL_REQUEST *req; struct mprsas_target *targ; uint16_t handle; MPR_FUNCTRACE(sc); reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; handle = (uint16_t)(uintptr_t)tm->cm_complete_data; targ = tm->cm_targ; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of " "handle %#04x! This should not happen!\n", __func__, tm->cm_flags, handle); } if (reply == NULL) { /* XXX retry the remove after the diag reset completes? */ mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device " "0x%04x\n", __func__, handle); mprsas_free_tm(sc, tm); return; } if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) { mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting " "device 0x%x\n", le16toh(reply->IOCStatus), handle); } mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n", le32toh(reply->TerminationCount)); mpr_free_reply(sc, tm->cm_reply_data); tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ /* Reuse the existing command */ req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req; memset(req, 0, sizeof(*req)); req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; req->Operation = MPI2_SAS_OP_REMOVE_DEVICE; req->DevHandle = htole16(handle); tm->cm_data = NULL; tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; tm->cm_complete = mprsas_remove_complete; tm->cm_complete_data = (void *)(uintptr_t)handle; /* * Wait to send the REMOVE_DEVICE until all the commands have cleared. * They should be aborted or time out and we'll kick thus off there * if so. 
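 * If commands are still outstanding, the request is parked on
 * targ->pending_remove_tm instead of being mapped immediately, so it can
 * be issued once those commands have drained.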
*/ if (TAILQ_FIRST(&targ->commands) == NULL) { mpr_dprint(sc, MPR_INFO, "No pending commands: starting remove_device\n"); mpr_map_command(sc, tm); targ->pending_remove_tm = NULL; } else { targ->pending_remove_tm = tm; } mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n", targ->tid, handle); if (targ->encl_level_valid) { mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, " "connector name (%4s)\n", targ->encl_level, targ->encl_slot, targ->connector_name); } } static void mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm) { MPI2_SAS_IOUNIT_CONTROL_REPLY *reply; uint16_t handle; struct mprsas_target *targ; struct mprsas_lun *lun; MPR_FUNCTRACE(sc); reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply; handle = (uint16_t)(uintptr_t)tm->cm_complete_data; targ = tm->cm_targ; /* * At this point, we should have no pending commands for the target. * The remove target has just completed. */ KASSERT(TAILQ_FIRST(&targ->commands) == NULL, ("%s: no commands should be pending\n", __func__)); /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of " "handle %#04x! This should not happen!\n", __func__, tm->cm_flags, handle); mprsas_free_tm(sc, tm); return; } if (reply == NULL) { /* most likely a chip reset */ mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device " "0x%04x\n", __func__, handle); mprsas_free_tm(sc, tm); return; } mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__, handle, le16toh(reply->IOCStatus)); /* * Don't clear target if remove fails because things will get confusing. * Leave the devname and sasaddr intact so that we know to avoid reusing * this target id if possible, and so we can assign the same target id * to this device if it comes back in the future. 
*/ if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_SUCCESS) { targ->handle = 0x0; targ->encl_handle = 0x0; targ->encl_level_valid = 0x0; targ->encl_level = 0x0; targ->connector_name[0] = ' '; targ->connector_name[1] = ' '; targ->connector_name[2] = ' '; targ->connector_name[3] = ' '; targ->encl_slot = 0x0; targ->exp_dev_handle = 0x0; targ->phy_num = 0x0; targ->linkrate = 0x0; targ->devinfo = 0x0; targ->flags = 0x0; targ->scsi_req_desc_type = 0; while (!SLIST_EMPTY(&targ->luns)) { lun = SLIST_FIRST(&targ->luns); SLIST_REMOVE_HEAD(&targ->luns, lun_link); free(lun, M_MPR); } } mprsas_free_tm(sc, tm); } static int mprsas_register_events(struct mpr_softc *sc) { uint8_t events[16]; bzero(events, 16); setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); setbit(events, MPI2_EVENT_SAS_DISCOVERY); setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE); setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE); setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW); setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST); setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST); setbit(events, MPI2_EVENT_IR_VOLUME); setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK); setbit(events, MPI2_EVENT_IR_OPERATION_STATUS); setbit(events, MPI2_EVENT_TEMP_THRESHOLD); setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR); if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) { setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION); if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) { setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE); setbit(events, MPI2_EVENT_PCIE_ENUMERATION); setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST); } } mpr_register_events(sc, events, mprsas_evt_handler, NULL, &sc->sassc->mprsas_eh); return (0); } int mpr_attach_sas(struct mpr_softc *sc) { struct mprsas_softc *sassc; cam_status status; int unit, error = 0, reqs; MPR_FUNCTRACE(sc); mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO); /* * XXX MaxTargets could change during a reinit. Since we don't * resize the targets[] array during such an event, cache the value * of MaxTargets here so that we don't get into trouble later. This * should move into the reinit logic. */ sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes; sassc->targets = malloc(sizeof(struct mprsas_target) * sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO); sc->sassc = sassc; sassc->sc = sc; reqs = sc->num_reqs - sc->num_prireqs - 1; if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) { mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n"); error = ENOMEM; goto out; } unit = device_get_unit(sc->mpr_dev); sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc, unit, &sc->mpr_mtx, reqs, reqs, sassc->devq); if (sassc->sim == NULL) { mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n"); error = EINVAL; goto out; } TAILQ_INIT(&sassc->ev_queue); /* Initialize taskqueue for Event Handling */ TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc); sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO, taskqueue_thread_enqueue, &sassc->ev_tq); taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq", device_get_nameunit(sc->mpr_dev)); mpr_lock(sc); /* * XXX There should be a bus for every port on the adapter, but since * we're just going to fake the topology for now, we'll pretend that * everything is just a target on a single bus. 
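 * With a single faked bus, a CAM target ID is simply an index into
 * sassc->targets[], and the initiator ID is reported as sassc->maxtargets
 * so it can never collide with a real device or volume.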
*/ if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) { mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Error %d registering SCSI bus\n", error); mpr_unlock(sc); goto out; } /* * Assume that discovery events will start right away. * * Hold off boot until discovery is complete. */ sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY; sc->sassc->startup_refcount = 0; mprsas_startup_increment(sassc); callout_init(&sassc->discovery_callout, 1 /*mpsafe*/); /* * Register for async events so we can determine the EEDP * capabilities of devices. */ status = xpt_create_path(&sassc->path, /*periph*/NULL, cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Error %#x creating sim path\n", status); sassc->path = NULL; } else { int event; event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE; status = xpt_register_async(event, mprsas_async, sc, sassc->path); if (status != CAM_REQ_CMP) { mpr_dprint(sc, MPR_ERROR, "Error %#x registering async handler for " "AC_ADVINFO_CHANGED events\n", status); xpt_free_path(sassc->path); sassc->path = NULL; } } if (status != CAM_REQ_CMP) { /* * EEDP use is the exception, not the rule. * Warn the user, but do not fail to attach. */ mpr_printf(sc, "EEDP capabilities disabled.\n"); } mpr_unlock(sc); mprsas_register_events(sc); out: if (error) mpr_detach_sas(sc); mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error); return (error); } int mpr_detach_sas(struct mpr_softc *sc) { struct mprsas_softc *sassc; struct mprsas_lun *lun, *lun_tmp; struct mprsas_target *targ; int i; MPR_FUNCTRACE(sc); if (sc->sassc == NULL) return (0); sassc = sc->sassc; mpr_deregister_events(sc, sassc->mprsas_eh); /* * Drain and free the event handling taskqueue with the lock * unheld so that any parallel processing tasks drain properly * without deadlocking. */ if (sassc->ev_tq != NULL) taskqueue_free(sassc->ev_tq); /* Make sure CAM doesn't wedge if we had to bail out early. */ mpr_lock(sc); while (sassc->startup_refcount != 0) mprsas_startup_decrement(sassc); /* Deregister our async handler */ if (sassc->path != NULL) { xpt_register_async(0, mprsas_async, sc, sassc->path); xpt_free_path(sassc->path); sassc->path = NULL; } if (sassc->flags & MPRSAS_IN_STARTUP) xpt_release_simq(sassc->sim, 1); if (sassc->sim != NULL) { xpt_bus_deregister(cam_sim_path(sassc->sim)); cam_sim_free(sassc->sim, FALSE); } mpr_unlock(sc); if (sassc->devq != NULL) cam_simq_free(sassc->devq); for (i = 0; i < sassc->maxtargets; i++) { targ = &sassc->targets[i]; SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) { free(lun, M_MPR); } } free(sassc->targets, M_MPR); free(sassc, M_MPR); sc->sassc = NULL; return (0); } void mprsas_discovery_end(struct mprsas_softc *sassc) { struct mpr_softc *sc = sassc->sc; MPR_FUNCTRACE(sc); if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING) callout_stop(&sassc->discovery_callout); /* * After discovery has completed, check the mapping table for any * missing devices and update their missing counts. Only do this once * whenever the driver is initialized so that missing counts aren't * updated unnecessarily. Note that just because discovery has * completed doesn't mean that events have been processed yet. The * check_devices function is a callout timer that checks if ALL devices * are missing. If so, it will wait a little longer for events to * complete and keep resetting itself until some device in the mapping * table is not missing, meaning that event processing has started. 
*/ if (sc->track_mapping_events) { mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has " "completed. Check for missing devices in the mapping " "table.\n"); callout_reset(&sc->device_check_callout, MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices, sc); } } static void mprsas_action(struct cam_sim *sim, union ccb *ccb) { struct mprsas_softc *sassc; sassc = cam_sim_softc(sim); MPR_FUNCTRACE(sassc->sc); mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n", ccb->ccb_h.func_code); mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; struct mpr_softc *sc = sassc->sc; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN; cpi->hba_eng_cnt = 0; cpi->max_target = sassc->maxtargets - 1; cpi->max_lun = 255; /* * initiator_id is set here to an ID outside the set of valid * target IDs (including volumes). */ cpi->initiator_id = sassc->maxtargets; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); /* * XXXSLM-I think this needs to change based on config page or * something instead of hardcoded to 150000. */ cpi->base_transfer_speed = 150000; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC; cpi->maxio = sc->maxio; mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; struct ccb_trans_settings_sas *sas; struct ccb_trans_settings_scsi *scsi; struct mprsas_target *targ; cts = &ccb->cts; sas = &cts->xport_specific.sas; scsi = &cts->proto_specific.scsi; KASSERT(cts->ccb_h.target_id < sassc->maxtargets, ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n", cts->ccb_h.target_id)); targ = &sassc->targets[cts->ccb_h.target_id]; if (targ->handle == 0x0) { mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); break; } cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_SAS; cts->transport_version = 0; sas->valid = CTS_SAS_VALID_SPEED; switch (targ->linkrate) { case 0x08: sas->bitrate = 150000; break; case 0x09: sas->bitrate = 300000; break; case 0x0a: sas->bitrate = 600000; break; case 0x0b: sas->bitrate = 1200000; break; default: sas->valid = 0; } cts->protocol = PROTO_SCSI; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); break; } case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, /*extended*/1); mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); break; case XPT_RESET_DEV: mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action " "XPT_RESET_DEV\n"); mprsas_action_resetdev(sassc, ccb); return; case XPT_RESET_BUS: case XPT_ABORT: case XPT_TERM_IO: mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success " "for abort or reset\n"); mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); break; case XPT_SCSI_IO: mprsas_action_scsiio(sassc, ccb); return; case XPT_SMP_IO: mprsas_action_smpio(sassc, ccb); return; default: mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL); break; } xpt_done(ccb); } static void mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code, target_id_t target_id, lun_id_t lun_id) { path_id_t path_id = cam_sim_path(sc->sassc->sim); struct cam_path *path; mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__, ac_code, target_id, (uintmax_t)lun_id); if 
(xpt_create_path(&path, NULL, path_id, target_id, lun_id) != CAM_REQ_CMP) { mpr_dprint(sc, MPR_ERROR, "unable to create path for reset " "notification\n"); return; } xpt_async(ac_code, path, NULL); xpt_free_path(path); } static void mprsas_complete_all_commands(struct mpr_softc *sc) { struct mpr_command *cm; int i; int completed; MPR_FUNCTRACE(sc); mtx_assert(&sc->mpr_mtx, MA_OWNED); /* complete all commands with a NULL reply */ for (i = 1; i < sc->num_reqs; i++) { cm = &sc->commands[i]; if (cm->cm_state == MPR_CM_STATE_FREE) continue; cm->cm_state = MPR_CM_STATE_BUSY; cm->cm_reply = NULL; completed = 0; if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) { MPASS(cm->cm_data); free(cm->cm_data, M_MPR); cm->cm_data = NULL; } if (cm->cm_flags & MPR_CM_FLAGS_POLLED) cm->cm_flags |= MPR_CM_FLAGS_COMPLETE; if (cm->cm_complete != NULL) { mprsas_log_command(cm, MPR_RECOVERY, "completing cm %p state %x ccb %p for diag reset\n", cm, cm->cm_state, cm->cm_ccb); cm->cm_complete(sc, cm); completed = 1; } else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) { mprsas_log_command(cm, MPR_RECOVERY, "waking up cm %p state %x ccb %p for diag reset\n", cm, cm->cm_state, cm->cm_ccb); wakeup(cm); completed = 1; } if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) { /* this should never happen, but if it does, log */ mprsas_log_command(cm, MPR_RECOVERY, "cm %p state %x flags 0x%x ccb %p during diag " "reset\n", cm, cm->cm_state, cm->cm_flags, cm->cm_ccb); } } sc->io_cmds_active = 0; } void mprsas_handle_reinit(struct mpr_softc *sc) { int i; /* Go back into startup mode and freeze the simq, so that CAM * doesn't send any commands until after we've rediscovered all * targets and found the proper device handles for them. * * After the reset, portenable will trigger discovery, and after all * discovery-related activities have finished, the simq will be * released. */ mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__); sc->sassc->flags |= MPRSAS_IN_STARTUP; sc->sassc->flags |= MPRSAS_IN_DISCOVERY; mprsas_startup_increment(sc->sassc); /* notify CAM of a bus reset */ mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); /* complete and cleanup after all outstanding commands */ mprsas_complete_all_commands(sc); mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n", __func__, sc->sassc->startup_refcount); /* zero all the target handles, since they may change after the * reset, and we have to rediscover all the targets and use the new * handles. 
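 * The loop below also tags each target with MPRSAS_TARGET_INDIAGRESET
 * while its handle is stale, and logs any target that still had commands
 * outstanding when the reset hit.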
*/ for (i = 0; i < sc->sassc->maxtargets; i++) { if (sc->sassc->targets[i].outstanding != 0) mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n", i, sc->sassc->targets[i].outstanding); sc->sassc->targets[i].handle = 0x0; sc->sassc->targets[i].exp_dev_handle = 0x0; sc->sassc->targets[i].outstanding = 0; sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET; } } static void mprsas_tm_timeout(void *data) { struct mpr_command *tm = data; struct mpr_softc *sc = tm->cm_sc; mtx_assert(&sc->mpr_mtx, MA_OWNED); mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed " "out\n", tm); KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE, ("command not inqueue\n")); tm->cm_state = MPR_CM_STATE_BUSY; mpr_reinit(sc); } static void mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *reply; MPI2_SCSI_TASK_MANAGE_REQUEST *req; unsigned int cm_count = 0; struct mpr_command *cm; struct mprsas_target *targ; callout_stop(&tm->cm_callout); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; targ = tm->cm_targ; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR, "%s: cm_flags = %#x for LUN reset! " "This should not happen!\n", __func__, tm->cm_flags); mprsas_free_tm(sc, tm); return; } if (reply == NULL) { mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n", tm); if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { /* this completion was due to a reset, just cleanup */ mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing " "reset, ignoring NULL LUN reset reply\n"); targ->tm = NULL; mprsas_free_tm(sc, tm); } else { /* we should have gotten a reply. */ mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on " "LUN reset attempt, resetting controller\n"); mpr_reinit(sc); } return; } mpr_dprint(sc, MPR_RECOVERY, "logical unit reset status 0x%x code 0x%x count %u\n", le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), le32toh(reply->TerminationCount)); /* * See if there are any outstanding commands for this LUN. * This could be made more efficient by using a per-LU data * structure of some sort. */ TAILQ_FOREACH(cm, &targ->commands, cm_link) { if (cm->cm_lun == tm->cm_lun) cm_count++; } if (cm_count == 0) { mpr_dprint(sc, MPR_RECOVERY|MPR_INFO, "Finished recovery after LUN reset for target %u\n", targ->tid); mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun); /* * We've finished recovery for this logical unit. check and * see if some other logical unit has a timedout command * that needs to be processed. */ cm = TAILQ_FIRST(&targ->timedout_commands); if (cm) { mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "More commands to abort for target %u\n", targ->tid); mprsas_send_abort(sc, tm, cm); } else { targ->tm = NULL; mprsas_free_tm(sc, tm); } } else { /* if we still have commands for this LUN, the reset * effectively failed, regardless of the status reported. * Escalate to a target reset. 
*/ mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "logical unit reset complete for target %u, but still " "have %u command(s), sending target reset\n", targ->tid, cm_count); if (!targ->is_nvme || sc->custom_nvme_tm_handling) mprsas_send_reset(sc, tm, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET); else mpr_reinit(sc); } } static void mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *reply; MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mprsas_target *targ; callout_stop(&tm->cm_callout); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; targ = tm->cm_targ; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target " "reset! This should not happen!\n", __func__, tm->cm_flags); mprsas_free_tm(sc, tm); return; } if (reply == NULL) { mpr_dprint(sc, MPR_RECOVERY, "NULL target reset reply for tm %p TaskMID %u\n", tm, le16toh(req->TaskMID)); if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { /* this completion was due to a reset, just cleanup */ mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing " "reset, ignoring NULL target reset reply\n"); targ->tm = NULL; mprsas_free_tm(sc, tm); } else { /* we should have gotten a reply. */ mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on " "target reset attempt, resetting controller\n"); mpr_reinit(sc); } return; } mpr_dprint(sc, MPR_RECOVERY, "target reset status 0x%x code 0x%x count %u\n", le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), le32toh(reply->TerminationCount)); if (targ->outstanding == 0) { /* * We've finished recovery for this target and all * of its logical units. */ mpr_dprint(sc, MPR_RECOVERY|MPR_INFO, "Finished reset recovery for target %u\n", targ->tid); mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, CAM_LUN_WILDCARD); targ->tm = NULL; mprsas_free_tm(sc, tm); } else { /* * After a target reset, if this target still has * outstanding commands, the reset effectively failed, * regardless of the status reported. escalate. */ mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "Target reset complete for target %u, but still have %u " "command(s), resetting controller\n", targ->tid, targ->outstanding); mpr_reinit(sc); } } #define MPR_RESET_TIMEOUT 30 int mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mprsas_target *target; int err, timeout; target = tm->cm_targ; if (target->handle == 0) { mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id " "%d\n", __func__, target->tid); return -1; } req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; req->DevHandle = htole16(target->handle); req->TaskType = type; if (!target->is_nvme || sc->custom_nvme_tm_handling) { timeout = MPR_RESET_TIMEOUT; /* * Target reset method = * SAS Hard Link Reset / SATA Link Reset */ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; } else { timeout = (target->controller_reset_timeout) ? 
( target->controller_reset_timeout) : (MPR_RESET_TIMEOUT); /* PCIe Protocol Level Reset*/ req->MsgFlags = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; } if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) { /* XXX Need to handle invalid LUNs */ MPR_SET_LUN(req->LUN, tm->cm_lun); tm->cm_targ->logical_unit_resets++; mpr_dprint(sc, MPR_RECOVERY|MPR_INFO, "Sending logical unit reset to target %u lun %d\n", target->tid, tm->cm_lun); tm->cm_complete = mprsas_logical_unit_reset_complete; mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun); } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) { tm->cm_targ->target_resets++; mpr_dprint(sc, MPR_RECOVERY|MPR_INFO, "Sending target reset to target %u\n", target->tid); tm->cm_complete = mprsas_target_reset_complete; mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD); } else { mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type); return -1; } if (target->encl_level_valid) { mpr_dprint(sc, MPR_RECOVERY|MPR_INFO, "At enclosure level %d, slot %d, connector name (%4s)\n", target->encl_level, target->encl_slot, target->connector_name); } tm->cm_data = NULL; tm->cm_complete_data = (void *)tm; callout_reset(&tm->cm_callout, timeout * hz, mprsas_tm_timeout, tm); err = mpr_map_command(sc, tm); if (err) mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY, "error %d sending reset type %u\n", err, type); return err; } static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm) { struct mpr_command *cm; MPI2_SCSI_TASK_MANAGE_REPLY *reply; MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mprsas_target *targ; callout_stop(&tm->cm_callout); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; targ = tm->cm_targ; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR, "cm_flags = %#x for abort %p TaskMID %u!\n", tm->cm_flags, tm, le16toh(req->TaskMID)); mprsas_free_tm(sc, tm); return; } if (reply == NULL) { mpr_dprint(sc, MPR_RECOVERY, "NULL abort reply for tm %p TaskMID %u\n", tm, le16toh(req->TaskMID)); if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { /* this completion was due to a reset, just cleanup */ mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing " "reset, ignoring NULL abort reply\n"); targ->tm = NULL; mprsas_free_tm(sc, tm); } else { /* we should have gotten a reply. */ mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on " "abort attempt, resetting controller\n"); mpr_reinit(sc); } return; } mpr_dprint(sc, MPR_RECOVERY, "abort TaskMID %u status 0x%x code 0x%x count %u\n", le16toh(req->TaskMID), le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), le32toh(reply->TerminationCount)); cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands); if (cm == NULL) { /* * if there are no more timedout commands, we're done with * error recovery for this target. */ mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "Finished abort recovery for target %u\n", targ->tid); targ->tm = NULL; mprsas_free_tm(sc, tm); } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) { /* abort success, but we have more timedout commands to abort */ mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "Continuing abort recovery for target %u\n", targ->tid); mprsas_send_abort(sc, tm, cm); } else { /* * we didn't get a command completion, so the abort * failed as far as we're concerned. escalate. 
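 * The recovery ladder is: abort the task, then a logical unit reset if the
 * abort did not clear it, then a target reset if the LUN still has
 * commands outstanding, and finally a controller reset (mpr_reinit()) as
 * the last resort.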
*/ mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "Abort failed for target %u, sending logical unit reset\n", targ->tid); mprsas_send_reset(sc, tm, MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET); } } #define MPR_ABORT_TIMEOUT 5 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm, struct mpr_command *cm) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mprsas_target *targ; int err, timeout; targ = cm->cm_targ; if (targ->handle == 0) { mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY, "%s null devhandle for target_id %d\n", __func__, cm->cm_ccb->ccb_h.target_id); return -1; } mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO, "Aborting command %p\n", cm); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; req->DevHandle = htole16(targ->handle); req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; /* XXX Need to handle invalid LUNs */ MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun); req->TaskMID = htole16(cm->cm_desc.Default.SMID); tm->cm_data = NULL; tm->cm_complete = mprsas_abort_complete; tm->cm_complete_data = (void *)tm; tm->cm_targ = cm->cm_targ; tm->cm_lun = cm->cm_lun; if (!targ->is_nvme || sc->custom_nvme_tm_handling) timeout = MPR_ABORT_TIMEOUT; else timeout = sc->nvme_abort_timeout; callout_reset(&tm->cm_callout, timeout * hz, mprsas_tm_timeout, tm); targ->aborts++; mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun); err = mpr_map_command(sc, tm); if (err) mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY, "error %d sending abort for cm %p SMID %u\n", err, cm, req->TaskMID); return err; } static void mprsas_scsiio_timeout(void *data) { sbintime_t elapsed, now; union ccb *ccb; struct mpr_softc *sc; struct mpr_command *cm; struct mprsas_target *targ; cm = (struct mpr_command *)data; sc = cm->cm_sc; ccb = cm->cm_ccb; now = sbinuptime(); MPR_FUNCTRACE(sc); mtx_assert(&sc->mpr_mtx, MA_OWNED); mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm); /* * Run the interrupt handler to make sure it's not pending. This * isn't perfect because the command could have already completed * and been re-used, though this is unlikely. */ mpr_intr_locked(sc); if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) { mprsas_log_command(cm, MPR_XINFO, "SCSI command %p almost timed out\n", cm); return; } if (cm->cm_ccb == NULL) { mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n"); return; } targ = cm->cm_targ; targ->timeouts++; elapsed = now - ccb->ccb_h.qos.sim_data; mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY, "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n", targ->tid, targ->handle, ccb->ccb_h.timeout, sbintime_getsec(elapsed), elapsed & 0xffffffff); if (targ->encl_level_valid) { mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "At enclosure level %d, slot %d, connector name (%4s)\n", targ->encl_level, targ->encl_slot, targ->connector_name); } /* XXX first, check the firmware state, to see if it's still * operational. if not, do a diag reset. */ mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT); cm->cm_flags |= MPR_CM_FLAGS_ON_RECOVERY | MPR_CM_FLAGS_TIMEDOUT; TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery); if (targ->tm != NULL) { /* target already in recovery, just queue up another * timedout command to be processed later. 
*/ mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for " "processing by tm %p\n", cm, targ->tm); } else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) { /* start recovery by aborting the first timedout command */ mpr_dprint(sc, MPR_RECOVERY|MPR_INFO, "Sending abort to target %u for SMID %d\n", targ->tid, cm->cm_desc.Default.SMID); mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n", cm, targ->tm); mprsas_send_abort(sc, targ->tm, cm); } else { /* XXX queue this target up for recovery once a TM becomes * available. The firmware only has a limited number of * HighPriority credits for the high priority requests used * for task management, and we ran out. * * Isilon: don't worry about this for now, since we have * more credits than disks in an enclosure, and limit * ourselves to one TM per target for recovery. */ mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY, "timedout cm %p failed to allocate a tm\n", cm); } } /** * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent * to SCSI Unmap. * Return 0 - for success, * 1 - to immediately return back the command with success status to CAM * negative value - to fallback to firmware path i.e. issue scsi unmap * to FW without any translation. */ static int mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm, union ccb *ccb, struct mprsas_target *targ) { Mpi26NVMeEncapsulatedRequest_t *req = NULL; struct ccb_scsiio *csio; struct unmap_parm_list *plist; struct nvme_dsm_range *nvme_dsm_ranges = NULL; struct nvme_command *c; int i, res; uint16_t ndesc, list_len, data_length; struct mpr_prp_page *prp_page_info; uint64_t nvme_dsm_ranges_dma_handle; csio = &ccb->csio; list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]); if (!list_len) { mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n"); return -EINVAL; } plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT); if (!plist) { mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to " "save UNMAP data\n"); return -ENOMEM; } /* Copy SCSI unmap data to a local buffer */ bcopy(csio->data_ptr, plist, csio->dxfer_len); /* return back the unmap command to CAM with success status, * if number of descripts is zero. */ ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4; if (!ndesc) { mpr_dprint(sc, MPR_XINFO, "Number of descriptors in " "UNMAP cmd is Zero\n"); res = 1; goto out; } data_length = ndesc * sizeof(struct nvme_dsm_range); if (data_length > targ->MDTS) { mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than " "Device's MDTS: %d\n", data_length, targ->MDTS); res = -EINVAL; goto out; } prp_page_info = mpr_alloc_prp_page(sc); KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for " "UNMAP command.\n", __func__)); /* * Insert the allocated PRP page into the command's PRP page list. This * will be freed when the command is freed. */ TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page; nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr; bzero(nvme_dsm_ranges, data_length); /* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data * for each descriptors contained in SCSI UNMAP data. 
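 *
 * Each 16-byte UNMAP block descriptor (an 8-byte big-endian LBA plus a
 * 4-byte big-endian block count) becomes one little-endian nvme_dsm_range.
 * With hypothetical values:
 *
 *     UNMAP descriptor { slba 0x1000, nlb 8 }
 *         -> nvme_dsm_range { starting_lba 0x1000, length 8, attributes 0 }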
*/ for (i = 0; i < ndesc; i++) { nvme_dsm_ranges[i].length = htole32(be32toh(plist->desc[i].nlb)); nvme_dsm_ranges[i].starting_lba = htole64(be64toh(plist->desc[i].slba)); nvme_dsm_ranges[i].attributes = 0; } /* Build MPI2.6's NVMe Encapsulated Request Message */ req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req; bzero(req, sizeof(*req)); req->DevHandle = htole16(targ->handle); req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED; req->Flags = MPI26_NVME_FLAGS_WRITE; req->ErrorResponseBaseAddress.High = htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32)); req->ErrorResponseBaseAddress.Low = htole32(cm->cm_sense_busaddr); req->ErrorResponseAllocationLength = htole16(sizeof(struct nvme_completion)); req->EncapsulatedCommandLength = htole16(sizeof(struct nvme_command)); req->DataLength = htole32(data_length); /* Build NVMe DSM command */ c = (struct nvme_command *) req->NVMe_Command; c->opc = NVME_OPC_DATASET_MANAGEMENT; c->nsid = htole32(csio->ccb_h.target_lun + 1); c->cdw10 = htole32(ndesc - 1); c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE); cm->cm_length = data_length; cm->cm_data = NULL; cm->cm_complete = mprsas_scsiio_complete; cm->cm_complete_data = ccb; cm->cm_targ = targ; cm->cm_lun = csio->ccb_h.target_lun; cm->cm_ccb = ccb; cm->cm_desc.Default.RequestFlags = MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED; csio->ccb_h.qos.sim_data = sbinuptime(); callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0, mprsas_scsiio_timeout, cm, 0); targ->issued++; targ->outstanding++; TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link); ccb->ccb_h.status |= CAM_SIM_QUEUED; mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n", __func__, cm, ccb, targ->outstanding); mpr_build_nvme_prp(sc, cm, req, (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length); mpr_map_command(sc, cm); out: free(plist, M_MPR); return 0; } static void mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb) { MPI2_SCSI_IO_REQUEST *req; struct ccb_scsiio *csio; struct mpr_softc *sc; struct mprsas_target *targ; struct mprsas_lun *lun; struct mpr_command *cm; uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode; uint16_t eedp_flags; uint32_t mpi_control; int rc; sc = sassc->sc; MPR_FUNCTRACE(sc); mtx_assert(&sc->mpr_mtx, MA_OWNED); csio = &ccb->csio; KASSERT(csio->ccb_h.target_id < sassc->maxtargets, ("Target %d out of bounds in XPT_SCSI_IO\n", csio->ccb_h.target_id)); targ = &sassc->targets[csio->ccb_h.target_id]; mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags); if (targ->handle == 0x0) { mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n", __func__, csio->ccb_h.target_id); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) { mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO " "supported %u\n", __func__, csio->ccb_h.target_id); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } /* * Sometimes, it is possible to get a command that is not "In * Progress" and was actually aborted by the upper layer. Check for * this here and complete the command without error. */ if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) { mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for " "target %u\n", __func__, csio->ccb_h.target_id); xpt_done(ccb); return; } /* * If devinfo is 0 this will be a volume. In that case don't tell CAM * that the volume has timed out. We want volumes to be enumerated * until they are deleted/removed, not just failed. 
In either event, * we're removing the target due to a firmware event telling us * the device is now gone (as opposed to some transient event). Since * we're opting to remove failed devices from the OS's view, we need * to propagate that status up the stack. */ if (targ->flags & MPRSAS_TARGET_INREMOVAL) { if (targ->devinfo == 0) mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); else mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) { mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } /* * If target has a reset in progress, freeze the devq and return. The * devq will be released when the TM reset is finished. */ if (targ->flags & MPRSAS_TARGET_INRESET) { ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n", __func__, targ->tid); xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } cm = mpr_alloc_command(sc); if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) { if (cm != NULL) { mpr_free_command(sc, cm); } if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) { xpt_freeze_simq(sassc->sim, 1); sassc->flags |= MPRSAS_QUEUE_FROZEN; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status |= CAM_REQUEUE_REQ; xpt_done(ccb); return; } /* For NVME device's issue UNMAP command directly to NVME drives by * constructing equivalent native NVMe DataSetManagement command. */ scsi_opcode = scsiio_cdb_ptr(csio)[0]; if (scsi_opcode == UNMAP && targ->is_nvme && (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) { rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ); if (rc == 1) { /* return command to CAM with success status */ mpr_free_command(sc, cm); mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); xpt_done(ccb); return; } else if (!rc) /* Issued NVMe Encapsulated Request Message */ return; } req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req; bzero(req, sizeof(*req)); req->DevHandle = htole16(targ->handle); req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; req->MsgFlags = 0; req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr); req->SenseBufferLength = MPR_SENSE_LEN; req->SGLFlags = 0; req->ChainOffset = 0; req->SGLOffset0 = 24; /* 32bit word offset to the SGL */ req->SGLOffset1= 0; req->SGLOffset2= 0; req->SGLOffset3= 0; req->SkipCount = 0; req->DataLength = htole32(csio->dxfer_len); req->BidirectionalDataLength = 0; req->IoFlags = htole16(csio->cdb_len); req->EEDPFlags = 0; /* Note: BiDirectional transfers are not supported */ switch (csio->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: mpi_control = MPI2_SCSIIO_CONTROL_READ; cm->cm_flags |= MPR_CM_FLAGS_DATAIN; break; case CAM_DIR_OUT: mpi_control = MPI2_SCSIIO_CONTROL_WRITE; cm->cm_flags |= MPR_CM_FLAGS_DATAOUT; break; case CAM_DIR_NONE: default: mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; break; } if (csio->cdb_len == 32) mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; /* * It looks like the hardware doesn't require an explicit tag * number for each transaction. SAM Task Management not supported * at the moment. 
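 *
 * The new lines just after the tag-action switch fold the CCB's SIMPLE-tag
 * priority into the request: csio->priority is shifted into the CMDPRI
 * field of the Control word and masked with MPI2_SCSIIO_CONTROL_CMDPRI_MASK,
 * so out-of-range values are truncated rather than rejected. A minimal
 * sketch of a caller asking for a priority (hypothetical values, not part
 * of this change) would be:
 *
 *     csio->tag_action = MSG_SIMPLE_Q_TAG;
 *     csio->priority = 1;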
*/ switch (csio->tag_action) { case MSG_HEAD_OF_Q_TAG: mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ; break; case MSG_ORDERED_Q_TAG: mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ; break; case MSG_ACA_TASK: mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ; break; case CAM_TAG_ACTION_NONE: case MSG_SIMPLE_Q_TAG: default: mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; break; } + mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT) & + MPI2_SCSIIO_CONTROL_CMDPRI_MASK; mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits; req->Control = htole32(mpi_control); if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) { mpr_free_command(sc, cm); mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID); xpt_done(ccb); return; } if (csio->ccb_h.flags & CAM_CDB_POINTER) bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len); else { KASSERT(csio->cdb_len <= IOCDBLEN, ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER " "is not set", csio->cdb_len)); bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len); } req->IoFlags = htole16(csio->cdb_len); /* * Check if EEDP is supported and enabled. If it is then check if the * SCSI opcode could be using EEDP. If so, make sure the LUN exists and * is formatted for EEDP support. If all of this is true, set CDB up * for EEDP transfer. */ eedp_flags = op_code_prot[req->CDB.CDB32[0]]; if (sc->eedp_enabled && eedp_flags) { SLIST_FOREACH(lun, &targ->luns, lun_link) { if (lun->lun_id == csio->ccb_h.target_lun) { break; } } if ((lun != NULL) && (lun->eedp_formatted)) { req->EEDPBlockSize = htole16(lun->eedp_block_size); eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) { eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE; } req->EEDPFlags = htole16(eedp_flags); /* * If CDB less than 32, fill in Primary Ref Tag with * low 4 bytes of LBA. If CDB is 32, tag stuff is * already there. Also, set protection bit. FreeBSD * currently does not support CDBs bigger than 16, but * the code doesn't hurt, and will be here for the * future. */ if (csio->cdb_len != 32) { lba_byte = (csio->cdb_len == 16) ? 6 : 2; ref_tag_addr = (uint8_t *)&req->CDB.EEDP32. PrimaryReferenceTag; for (i = 0; i < 4; i++) { *ref_tag_addr = req->CDB.CDB32[lba_byte + i]; ref_tag_addr++; } req->CDB.EEDP32.PrimaryReferenceTag = htole32(req-> CDB.EEDP32.PrimaryReferenceTag); req->CDB.EEDP32.PrimaryApplicationTagMask = 0xFFFF; req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) | 0x20; } else { eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG; req->EEDPFlags = htole16(eedp_flags); req->CDB.CDB32[10] = (req->CDB.CDB32[10] & 0x1F) | 0x20; } } } cm->cm_length = csio->dxfer_len; if (cm->cm_length != 0) { cm->cm_data = ccb; cm->cm_flags |= MPR_CM_FLAGS_USE_CCB; } else { cm->cm_data = NULL; } cm->cm_sge = &req->SGL; cm->cm_sglsize = (32 - 24) * 4; cm->cm_complete = mprsas_scsiio_complete; cm->cm_complete_data = ccb; cm->cm_targ = targ; cm->cm_lun = csio->ccb_h.target_lun; cm->cm_ccb = ccb; /* * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0) * and set descriptor type. 
*/ if (targ->scsi_req_desc_type == MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) { req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH; cm->cm_desc.FastPathSCSIIO.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; if (!sc->atomic_desc_capable) { cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle); } } else { cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; if (!sc->atomic_desc_capable) cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle); } csio->ccb_h.qos.sim_data = sbinuptime(); callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0, mprsas_scsiio_timeout, cm, 0); targ->issued++; targ->outstanding++; TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link); ccb->ccb_h.status |= CAM_SIM_QUEUED; mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n", __func__, cm, ccb, targ->outstanding); mpr_map_command(sc, cm); return; } /** * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request */ static void mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio, Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ) { u32 response_info; u8 *response_bytes; u16 ioc_status = le16toh(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; u8 scsi_state = mpi_reply->SCSIState; u8 scsi_status = mpi_reply->SCSIStatus; char *desc_ioc_state = NULL; char *desc_scsi_status = NULL; u32 log_info = le32toh(mpi_reply->IOCLogInfo); if (log_info == 0x31170000) return; desc_ioc_state = mpr_describe_table(mpr_iocstatus_string, ioc_status); desc_scsi_status = mpr_describe_table(mpr_scsi_status_string, scsi_status); mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n", le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status); if (targ->encl_level_valid) { mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, " "connector name (%4s)\n", targ->encl_level, targ->encl_slot, targ->connector_name); } /* * We can add more detail about underflow data here * TO-DO */ mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), " "scsi_state %b\n", desc_scsi_status, scsi_status, scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed" "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid"); if (sc->mpr_debug & MPR_XINFO && scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n"); scsi_sense_print(csio); mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n"); } if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { response_info = le32toh(mpi_reply->ResponseInfo); response_bytes = (u8 *)&response_info; mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n", response_bytes[0], mpr_describe_table(mpr_scsi_taskmgmt_string, response_bytes[0])); } } /** mprsas_nvme_trans_status_code * * Convert Native NVMe command error status to * equivalent SCSI error status. 
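 * The NVMe status code type (SCT) and status code (SC) select a sense key,
 * ASC and ASCQ, and fixed-format sense data is filled into the CCB before
 * the translated SCSI status is returned.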
* * Returns appropriate scsi_status */ static u8 mprsas_nvme_trans_status_code(uint16_t nvme_status, struct mpr_command *cm) { u8 status = MPI2_SCSI_STATUS_GOOD; int skey, asc, ascq; union ccb *ccb = cm->cm_complete_data; int returned_sense_len; uint8_t sct, sc; sct = NVME_STATUS_GET_SCT(nvme_status); sc = NVME_STATUS_GET_SC(nvme_status); status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_ILLEGAL_REQUEST; asc = SCSI_ASC_NO_SENSE; ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; switch (sct) { case NVME_SCT_GENERIC: switch (sc) { case NVME_SC_SUCCESS: status = MPI2_SCSI_STATUS_GOOD; skey = SSD_KEY_NO_SENSE; asc = SCSI_ASC_NO_SENSE; ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; break; case NVME_SC_INVALID_OPCODE: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_ILLEGAL_REQUEST; asc = SCSI_ASC_ILLEGAL_COMMAND; ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; break; case NVME_SC_INVALID_FIELD: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_ILLEGAL_REQUEST; asc = SCSI_ASC_INVALID_CDB; ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; break; case NVME_SC_DATA_TRANSFER_ERROR: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_MEDIUM_ERROR; asc = SCSI_ASC_NO_SENSE; ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; break; case NVME_SC_ABORTED_POWER_LOSS: status = MPI2_SCSI_STATUS_TASK_ABORTED; skey = SSD_KEY_ABORTED_COMMAND; asc = SCSI_ASC_WARNING; ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED; break; case NVME_SC_INTERNAL_DEVICE_ERROR: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_HARDWARE_ERROR; asc = SCSI_ASC_INTERNAL_TARGET_FAILURE; ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; break; case NVME_SC_ABORTED_BY_REQUEST: case NVME_SC_ABORTED_SQ_DELETION: case NVME_SC_ABORTED_FAILED_FUSED: case NVME_SC_ABORTED_MISSING_FUSED: status = MPI2_SCSI_STATUS_TASK_ABORTED; skey = SSD_KEY_ABORTED_COMMAND; asc = SCSI_ASC_NO_SENSE; ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; break; case NVME_SC_INVALID_NAMESPACE_OR_FORMAT: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_ILLEGAL_REQUEST; asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID; ascq = SCSI_ASCQ_INVALID_LUN_ID; break; case NVME_SC_LBA_OUT_OF_RANGE: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_ILLEGAL_REQUEST; asc = SCSI_ASC_ILLEGAL_BLOCK; ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; break; case NVME_SC_CAPACITY_EXCEEDED: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_MEDIUM_ERROR; asc = SCSI_ASC_NO_SENSE; ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; break; case NVME_SC_NAMESPACE_NOT_READY: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_NOT_READY; asc = SCSI_ASC_LUN_NOT_READY; ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; break; } break; case NVME_SCT_COMMAND_SPECIFIC: switch (sc) { case NVME_SC_INVALID_FORMAT: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_ILLEGAL_REQUEST; asc = SCSI_ASC_FORMAT_COMMAND_FAILED; ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED; break; case NVME_SC_CONFLICTING_ATTRIBUTES: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_ILLEGAL_REQUEST; asc = SCSI_ASC_INVALID_CDB; ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; break; } break; case NVME_SCT_MEDIA_ERROR: switch (sc) { case NVME_SC_WRITE_FAULTS: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_MEDIUM_ERROR; asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT; ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; break; case NVME_SC_UNRECOVERED_READ_ERROR: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_MEDIUM_ERROR; asc = SCSI_ASC_UNRECOVERED_READ_ERROR; ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; break; case NVME_SC_GUARD_CHECK_ERROR: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = 
SSD_KEY_MEDIUM_ERROR; asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED; ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED; break; case NVME_SC_APPLICATION_TAG_CHECK_ERROR: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_MEDIUM_ERROR; asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED; ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED; break; case NVME_SC_REFERENCE_TAG_CHECK_ERROR: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_MEDIUM_ERROR; asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED; ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED; break; case NVME_SC_COMPARE_FAILURE: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_MISCOMPARE; asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY; ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; break; case NVME_SC_ACCESS_DENIED: status = MPI2_SCSI_STATUS_CHECK_CONDITION; skey = SSD_KEY_ILLEGAL_REQUEST; asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID; ascq = SCSI_ASCQ_INVALID_LUN_ID; break; } break; } returned_sense_len = sizeof(struct scsi_sense_data); if (returned_sense_len < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - returned_sense_len; else ccb->csio.sense_resid = 0; scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED, 1, skey, asc, ascq, SSD_ELEM_NONE); ccb->ccb_h.status |= CAM_AUTOSNS_VALID; return status; } /** mprsas_complete_nvme_unmap * * Complete native NVMe command issued using NVMe Encapsulated * Request Message. */ static u8 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm) { Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply; struct nvme_completion *nvme_completion = NULL; u8 scsi_status = MPI2_SCSI_STATUS_GOOD; mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply; if (le16toh(mpi_reply->ErrorResponseCount)){ nvme_completion = (struct nvme_completion *)cm->cm_sense; scsi_status = mprsas_nvme_trans_status_code( nvme_completion->status, cm); } return scsi_status; } static void mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm) { MPI2_SCSI_IO_REPLY *rep; union ccb *ccb; struct ccb_scsiio *csio; struct mprsas_softc *sassc; struct scsi_vpd_supported_page_list *vpd_list = NULL; u8 *TLR_bits, TLR_on, *scsi_cdb; int dir = 0, i; u16 alloc_len; struct mprsas_target *target; target_id_t target_id; MPR_FUNCTRACE(sc); mpr_dprint(sc, MPR_TRACE, "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply, cm->cm_targ->outstanding); callout_stop(&cm->cm_callout); mtx_assert(&sc->mpr_mtx, MA_OWNED); sassc = sc->sassc; ccb = cm->cm_complete_data; csio = &ccb->csio; target_id = csio->ccb_h.target_id; rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply; /* * XXX KDM if the chain allocation fails, does it matter if we do * the sync and unload here? It is simpler to do it in every case, * assuming it doesn't cause problems. 
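 *
 * Aside (editor's illustrative sketch): the sense_resid computation in
 * mprsas_nvme_trans_status_code() above is just "requested sense space
 * minus bytes actually produced, clamped at zero".  A minimal standalone
 * version with hypothetical names:
 */

#include <stdint.h>

static uint32_t
ex_sense_resid(uint32_t requested_len, uint32_t returned_len)
{
	/*
	 * If at least as much sense data was produced as was asked for,
	 * nothing is outstanding; otherwise report the shortfall.
	 */
	if (returned_len >= requested_len)
		return (0);
	return (requested_len - returned_len);
}

/* (end of aside)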
*/ if (cm->cm_data != NULL) { if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) dir = BUS_DMASYNC_POSTREAD; else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) dir = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); } cm->cm_targ->completed++; cm->cm_targ->outstanding--; TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link); ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED); if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) { TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery); KASSERT(cm->cm_state == MPR_CM_STATE_BUSY, ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state)); cm->cm_flags &= ~MPR_CM_FLAGS_ON_RECOVERY; if (cm->cm_reply != NULL) mprsas_log_command(cm, MPR_RECOVERY, "completed timedout cm %p ccb %p during recovery " "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb, le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); else mprsas_log_command(cm, MPR_RECOVERY, "completed timedout cm %p ccb %p during recovery\n", cm, cm->cm_ccb); } else if (cm->cm_targ->tm != NULL) { if (cm->cm_reply != NULL) mprsas_log_command(cm, MPR_RECOVERY, "completed cm %p ccb %p during recovery " "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb, le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); else mprsas_log_command(cm, MPR_RECOVERY, "completed cm %p ccb %p during recovery\n", cm, cm->cm_ccb); } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { mprsas_log_command(cm, MPR_RECOVERY, "reset completed cm %p ccb %p\n", cm, cm->cm_ccb); } if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { /* * We ran into an error after we tried to map the command, * so we're getting a callback without queueing the command * to the hardware. So we set the status here, and it will * be retained below. We'll go through the "fast path", * because there can be no reply when we haven't actually * gone out to the hardware. */ mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ); /* * Currently the only error included in the mask is * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of * chain frames. We need to freeze the queue until we get * a command that completed without this error, which will * hopefully have some chain frames attached that we can * use. If we wanted to get smarter about it, we would * only unfreeze the queue in this condition when we're * sure that we're getting some chain frames back. That's * probably unnecessary. */ if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) { xpt_freeze_simq(sassc->sim, 1); sassc->flags |= MPRSAS_QUEUE_FROZEN; mpr_dprint(sc, MPR_XINFO, "Error sending command, " "freezing SIM queue\n"); } } /* * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER * flag, and use it in a few places in the rest of this function for * convenience. Use the macro if available. */ scsi_cdb = scsiio_cdb_ptr(csio); /* * If this is a Start Stop Unit command and it was issued by the driver * during shutdown, decrement the refcount to account for all of the * commands that were sent. All SSU commands should be completed before * shutdown completes, meaning SSU_refcount will be 0 after SSU_started * is TRUE. 
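 *
 * Aside (editor's illustrative sketch): the MPRSAS_QUEUE_FROZEN handling
 * above is a "freeze once, release once" guard, so repeated mapping errors
 * do not stack extra freezes on the SIM queue.  A standalone sketch of the
 * counting-free flag pattern, with hypothetical names; in the driver the
 * release side is handed to CAM via CAM_RELEASE_SIMQ rather than called
 * directly.
 */

#include <stdbool.h>

struct ex_queue {
	bool	frozen;
	int	freeze_calls;	/* stand-in for xpt_freeze_simq() */
	int	release_calls;	/* stand-in for the release step */
};

static void
ex_freeze_once(struct ex_queue *q)
{
	if (!q->frozen) {		/* only the first error freezes */
		q->freeze_calls++;
		q->frozen = true;
	}
}

static void
ex_release_once(struct ex_queue *q)
{
	if (q->frozen) {		/* matching single release */
		q->release_calls++;
		q->frozen = false;
	}
}

/* (end of aside)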
*/ if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) { mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n"); sc->SSU_refcount--; } /* Take the fast path to completion */ if (cm->cm_reply == NULL) { if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) { if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET); else { mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); csio->scsi_status = SCSI_STATUS_OK; } if (sassc->flags & MPRSAS_QUEUE_FROZEN) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; sassc->flags &= ~MPRSAS_QUEUE_FROZEN; mpr_dprint(sc, MPR_XINFO, "Unfreezing SIM queue\n"); } } /* * There are two scenarios where the status won't be * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is * set, the second is in the MPR_FLAGS_DIAGRESET above. */ if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) { /* * Freeze the dev queue so that commands are * executed in the correct order after error * recovery. */ ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1); } mpr_free_command(sc, cm); xpt_done(ccb); return; } target = &sassc->targets[target_id]; if (scsi_cdb[0] == UNMAP && target->is_nvme && (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) { rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm); csio->scsi_status = rep->SCSIStatus; } mprsas_log_command(cm, MPR_XINFO, "ioc %x scsi %x state %x xfer %u\n", le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) { case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: csio->resid = cm->cm_length - le32toh(rep->TransferCount); /* FALLTHROUGH */ case MPI2_IOCSTATUS_SUCCESS: case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR) mprsas_log_command(cm, MPR_XINFO, "recovered error\n"); /* Completion failed at the transport level. */ if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS | MPI2_SCSI_STATE_TERMINATED)) { mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); break; } /* In a modern packetized environment, an autosense failure * implies that there's not much else that can be done to * recover the command. */ if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) { mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL); break; } /* * CAM doesn't care about SAS Response Info data, but if this is * the state check if TLR should be done. If not, clear the * TLR_bits for the target. */ if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) && ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) == MPR_SCSI_RI_INVALID_FRAME)) { sc->mapping_table[target_id].TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR; } /* * Intentionally override the normal SCSI status reporting * for these two cases. These are likely to happen in a * multi-initiator environment, and we want to make sure that * CAM retries these commands rather than fail them. 
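 *
 * Aside (editor's illustrative sketch): the SCSI_DATA_UNDERRUN residual set
 * above is "bytes requested minus bytes the IOC reports it moved", with the
 * transfer count byte-swapped from the little-endian reply first.  A
 * standalone version with hypothetical names:
 */

#include <stdint.h>

static uint32_t
ex_underrun_resid(uint32_t requested_len, uint32_t transferred_len)
{
	/*
	 * transferred_len is assumed already converted from the wire's
	 * little-endian order (le32toh() in the driver).
	 */
	return (requested_len - transferred_len);
}

/* (end of aside)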
*/ if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) || (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) { mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED); break; } /* Handle normal status and sense */ csio->scsi_status = rep->SCSIStatus; if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD) mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); else mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR); if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) { int sense_len, returned_sense_len; returned_sense_len = min(le32toh(rep->SenseCount), sizeof(struct scsi_sense_data)); if (returned_sense_len < csio->sense_len) csio->sense_resid = csio->sense_len - returned_sense_len; else csio->sense_resid = 0; sense_len = min(returned_sense_len, csio->sense_len - csio->sense_resid); bzero(&csio->sense_data, sizeof(csio->sense_data)); bcopy(cm->cm_sense, &csio->sense_data, sense_len); ccb->ccb_h.status |= CAM_AUTOSNS_VALID; } /* * Check if this is an INQUIRY command. If it's a VPD inquiry, * and it's page code 0 (Supported Page List), and there is * inquiry data, and this is for a sequential access device, and * the device is an SSP target, and TLR is supported by the * controller, turn the TLR_bits value ON if page 0x90 is * supported. */ if ((scsi_cdb[0] == INQUIRY) && (scsi_cdb[1] & SI_EVPD) && (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) && ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) && (csio->data_ptr != NULL) && ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) && (sc->control_TLR) && (sc->mapping_table[target_id].device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)) { vpd_list = (struct scsi_vpd_supported_page_list *) csio->data_ptr; TLR_bits = &sc->mapping_table[target_id].TLR_bits; *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR; TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON; alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4]; alloc_len -= csio->resid; for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) { if (vpd_list->list[i] == 0x90) { *TLR_bits = TLR_on; break; } } } /* * If this is a SATA direct-access end device, mark it so that * a SCSI StartStopUnit command will be sent to it when the * driver is being shutdown. */ if ((scsi_cdb[0] == INQUIRY) && (csio->data_ptr != NULL) && ((csio->data_ptr[0] & 0x1f) == T_DIRECT) && (sc->mapping_table[target_id].device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) && ((sc->mapping_table[target_id].device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) == MPI2_SAS_DEVICE_INFO_END_DEVICE)) { target = &sassc->targets[target_id]; target->supports_SSU = TRUE; mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n", target_id); } break; case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* * If devinfo is 0 this will be a volume. In that case don't * tell CAM that the volume is not there. We want volumes to * be enumerated until they are deleted/removed, not just * failed. */ if (cm->cm_targ->devinfo == 0) mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); else mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); break; case MPI2_IOCSTATUS_INVALID_SGL: mpr_print_scsiio_cmd(sc, cm); mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR); break; case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: /* * This is one of the responses that comes back when an I/O * has been aborted. If it is because of a timeout that we * initiated, just set the status to CAM_CMD_TIMEOUT. * Otherwise set it to CAM_REQ_ABORTED. The effect on the * command is the same (it gets retried, subject to the * retry counter), the only difference is what gets printed * on the console. 
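 *
 * Aside (editor's illustrative sketch): the TLR probe above walks the VPD
 * Supported Pages list looking for page 0x90, but never past the number of
 * bytes the device actually returned (CDB allocation length minus the
 * residual).  A standalone version of that bounded scan, with hypothetical
 * names:
 */

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
ex_vpd_page_supported(const uint8_t *page_list, size_t list_len,
    size_t bytes_received, uint8_t wanted_page)
{
	size_t limit = list_len < bytes_received ? list_len : bytes_received;
	size_t i;

	for (i = 0; i < limit; i++)
		if (page_list[i] == wanted_page)
			return (true);
	return (false);
}

/* (end of aside)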
*/ if (cm->cm_flags & MPR_CM_FLAGS_TIMEDOUT) mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT); else mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED); break; case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: /* resid is ignored for this condition */ csio->resid = 0; mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR); break; case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: /* * These can sometimes be transient transport-related * errors, and sometimes persistent drive-related errors. * We used to retry these without decrementing the retry * count by returning CAM_REQUEUE_REQ. Unfortunately, if * we hit a persistent drive problem that returns one of * these error codes, we would retry indefinitely. So, * return CAM_REQ_CMP_ERROR so that we decrement the retry * count and avoid infinite retries. We're taking the * potential risk of flagging false failures in the event * of a topology-related error (e.g. a SAS expander problem * causes a command addressed to a drive to fail), but * avoiding getting into an infinite retry loop. However, * if we get them while were moving a device, we should * fail the request as 'not there' because the device * is effectively gone. */ if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); else mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); mpr_dprint(sc, MPR_INFO, "Controller reported %s tgt %u SMID %u loginfo %x%s\n", mpr_describe_table(mpr_iocstatus_string, le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK), target_id, cm->cm_desc.Default.SMID, le32toh(rep->IOCLogInfo), (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) ? " departing" : ""); mpr_dprint(sc, MPR_XINFO, "SCSIStatus %x SCSIState %x xfercount %u\n", rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); break; case MPI2_IOCSTATUS_INVALID_FUNCTION: case MPI2_IOCSTATUS_INTERNAL_ERROR: case MPI2_IOCSTATUS_INVALID_VPID: case MPI2_IOCSTATUS_INVALID_FIELD: case MPI2_IOCSTATUS_INVALID_STATE: case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED: case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: default: mprsas_log_command(cm, MPR_XINFO, "completed ioc %x loginfo %x scsi %x state %x xfer %u\n", le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); csio->resid = cm->cm_length; if (scsi_cdb[0] == UNMAP && target->is_nvme && (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); else mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); break; } mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ); if (sassc->flags & MPRSAS_QUEUE_FROZEN) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; sassc->flags &= ~MPRSAS_QUEUE_FROZEN; mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM " "queue\n"); } if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) { ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1); } /* * Check to see if we're removing the device. If so, and this is the * last command on the queue, proceed with the deferred removal of the * device. Note, for removing a volume, this won't trigger because * pending_remove_tm will be NULL. 
*/ if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) { if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL && cm->cm_targ->pending_remove_tm != NULL) { mpr_dprint(sc, MPR_INFO, "Last pending command complete: starting remove_device\n"); mpr_map_command(sc, cm->cm_targ->pending_remove_tm); cm->cm_targ->pending_remove_tm = NULL; } } mpr_free_command(sc, cm); xpt_done(ccb); } static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm) { MPI2_SMP_PASSTHROUGH_REPLY *rpl; MPI2_SMP_PASSTHROUGH_REQUEST *req; uint64_t sasaddr; union ccb *ccb; ccb = cm->cm_complete_data; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and SMP * commands require two S/G elements only. That should be handled * in the standard request size. */ if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP " "request!\n", __func__, cm->cm_flags); mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); goto bailout; } rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply; if (rpl == NULL) { mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__); mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); goto bailout; } req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req; sasaddr = le32toh(req->SASAddress.Low); sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32; if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS || rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) { mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n", __func__, le16toh(rpl->IOCStatus), rpl->SASStatus); mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); goto bailout; } mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx " "completed successfully\n", __func__, (uintmax_t)sasaddr); if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED) mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); else mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR); bailout: /* * We sync in both directions because we had DMAs in the S/G list * in both directions. */ bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); mpr_free_command(sc, cm); xpt_done(ccb); } static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr) { struct mpr_command *cm; uint8_t *request, *response; MPI2_SMP_PASSTHROUGH_REQUEST *req; struct mpr_softc *sc; struct sglist *sg; int error; sc = sassc->sc; sg = NULL; error = 0; switch (ccb->ccb_h.flags & CAM_DATA_MASK) { case CAM_DATA_PADDR: case CAM_DATA_SG_PADDR: /* * XXX We don't yet support physical addresses here. */ mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not " "supported\n", __func__); mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID); xpt_done(ccb); return; case CAM_DATA_SG: /* * The chip does not support more than one buffer for the * request or response. */ if ((ccb->smpio.smp_request_sglist_cnt > 1) || (ccb->smpio.smp_response_sglist_cnt > 1)) { mpr_dprint(sc, MPR_ERROR, "%s: multiple request or " "response buffer segments not supported for SMP\n", __func__); mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID); xpt_done(ccb); return; } /* * The CAM_SCATTER_VALID flag was originally implemented * for the XPT_SCSI_IO CCB, which only has one data pointer. * We have two. So, just take that flag to mean that we * might have S/G lists, and look at the S/G segment count * to figure out whether that is the case for each individual * buffer. 
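 *
 * Aside (editor's illustrative sketch): the SMP request carries the SAS
 * address as two little-endian 32-bit halves, and the completion path above
 * reassembles them into one host-order 64-bit value.  A standalone version,
 * assuming both halves have already been through le32toh():
 */

#include <stdint.h>

static uint64_t
ex_sas_addr(uint32_t high, uint32_t low)
{
	/* High half into bits 63:32, low half into bits 31:0. */
	return (((uint64_t)high << 32) | low);
}

/* (end of aside)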
*/ if (ccb->smpio.smp_request_sglist_cnt != 0) { bus_dma_segment_t *req_sg; req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request; request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr; } else request = ccb->smpio.smp_request; if (ccb->smpio.smp_response_sglist_cnt != 0) { bus_dma_segment_t *rsp_sg; rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response; response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr; } else response = ccb->smpio.smp_response; break; case CAM_DATA_VADDR: request = ccb->smpio.smp_request; response = ccb->smpio.smp_response; break; default: mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID); xpt_done(ccb); return; } cm = mpr_alloc_command(sc); if (cm == NULL) { mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n", __func__); mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL); xpt_done(ccb); return; } req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req; bzero(req, sizeof(*req)); req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; /* Allow the chip to use any route to this SAS address. */ req->PhysicalPort = 0xff; req->RequestDataLength = htole16(ccb->smpio.smp_request_len); req->SGLFlags = MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI; mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address " "%#jx\n", __func__, (uintmax_t)sasaddr); mpr_init_sge(cm, req, &req->SGL); /* * Set up a uio to pass into mpr_map_command(). This allows us to * do one map command, and one busdma call in there. */ cm->cm_uio.uio_iov = cm->cm_iovec; cm->cm_uio.uio_iovcnt = 2; cm->cm_uio.uio_segflg = UIO_SYSSPACE; /* * The read/write flag isn't used by busdma, but set it just in * case. This isn't exactly accurate, either, since we're going in * both directions. */ cm->cm_uio.uio_rw = UIO_WRITE; cm->cm_iovec[0].iov_base = request; cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength); cm->cm_iovec[1].iov_base = response; cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len; cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len + cm->cm_iovec[1].iov_len; /* * Trigger a warning message in mpr_data_cb() for the user if we * wind up exceeding two S/G segments. The chip expects one * segment for the request and another for the response. */ cm->cm_max_segs = 2; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_complete = mprsas_smpio_complete; cm->cm_complete_data = ccb; /* * Tell the mapping code that we're using a uio, and that this is * an SMP passthrough request. There is a little special-case * logic there (in mpr_data_cb()) to handle the bidirectional * transfer. */ cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS | MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT; /* The chip data format is little endian. */ req->SASAddress.High = htole32(sasaddr >> 32); req->SASAddress.Low = htole32(sasaddr); /* * XXX Note that we don't have a timeout/abort mechanism here. * From the manual, it looks like task management requests only * work for SCSI IO and SATA passthrough requests. We may need to * have a mechanism to retry requests in the event of a chip reset * at least. Hopefully the chip will insure that any errors short * of that are relayed back to the driver. 
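 *
 * Aside (editor's illustrative sketch): the SMP passthrough above maps one
 * request buffer and one response buffer through a single two-entry uio, so
 * one busdma mapping covers both directions.  A minimal standalone sketch
 * of filling such a pair and computing the total (uio_resid), with
 * hypothetical names:
 */

#include <stddef.h>
#include <sys/uio.h>

static size_t
ex_fill_smp_iovec(struct iovec iov[2], void *request, size_t request_len,
    void *response, size_t response_len)
{
	iov[0].iov_base = request;	/* outbound SMP frame */
	iov[0].iov_len = request_len;
	iov[1].iov_base = response;	/* inbound SMP response */
	iov[1].iov_len = response_len;

	/* Total bytes the single mapping is expected to cover. */
	return (request_len + response_len);
}

/* (end of aside)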
*/ error = mpr_map_command(sc, cm); if ((error != 0) && (error != EINPROGRESS)) { mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from " "mpr_map_command()\n", __func__, error); goto bailout_error; } return; bailout_error: mpr_free_command(sc, cm); mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL); xpt_done(ccb); return; } static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb) { struct mpr_softc *sc; struct mprsas_target *targ; uint64_t sasaddr = 0; sc = sassc->sc; /* * Make sure the target exists. */ KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id)); targ = &sassc->targets[ccb->ccb_h.target_id]; if (targ->handle == 0x0) { mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n", __func__, ccb->ccb_h.target_id); mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT); xpt_done(ccb); return; } /* * If this device has an embedded SMP target, we'll talk to it * directly. * figure out what the expander's address is. */ if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0) sasaddr = targ->sasaddr; /* * If we don't have a SAS address for the expander yet, try * grabbing it from the page 0x83 information cached in the * transport layer for this target. LSI expanders report the * expander SAS address as the port-associated SAS address in * Inquiry VPD page 0x83. Maxim expanders don't report it in page * 0x83. * * XXX KDM disable this for now, but leave it commented out so that * it is obvious that this is another possible way to get the SAS * address. * * The parent handle method below is a little more reliable, and * the other benefit is that it works for devices other than SES * devices. So you can send a SMP request to a da(4) device and it * will get routed to the expander that device is attached to. * (Assuming the da(4) device doesn't contain an SMP target...) */ #if 0 if (sasaddr == 0) sasaddr = xpt_path_sas_addr(ccb->ccb_h.path); #endif /* * If we still don't have a SAS address for the expander, look for * the parent device of this device, which is probably the expander. 
*/ if (sasaddr == 0) { #ifdef OLD_MPR_PROBE struct mprsas_target *parent_target; #endif if (targ->parent_handle == 0x0) { mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have " "a valid parent handle!\n", __func__, targ->handle); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } #ifdef OLD_MPR_PROBE parent_target = mprsas_find_target_by_handle(sassc, 0, targ->parent_handle); if (parent_target == NULL) { mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have " "a valid parent target!\n", __func__, targ->handle); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } if ((parent_target->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) { mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d " "does not have an SMP target!\n", __func__, targ->handle, parent_target->handle); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } sasaddr = parent_target->sasaddr; #else /* OLD_MPR_PROBE */ if ((targ->parent_devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) { mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d " "does not have an SMP target!\n", __func__, targ->handle, targ->parent_handle); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } if (targ->parent_sasaddr == 0x0) { mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle " "%d does not have a valid SAS address!\n", __func__, targ->handle, targ->parent_handle); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } sasaddr = targ->parent_sasaddr; #endif /* OLD_MPR_PROBE */ } if (sasaddr == 0) { mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for " "handle %d\n", __func__, targ->handle); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } mprsas_send_smpcmd(sassc, ccb, sasaddr); return; bailout: xpt_done(ccb); } static void mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mpr_softc *sc; struct mpr_command *tm; struct mprsas_target *targ; MPR_FUNCTRACE(sassc->sc); mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED); KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of " "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id)); sc = sassc->sc; tm = mprsas_alloc_tm(sc); if (tm == NULL) { mpr_dprint(sc, MPR_ERROR, "command alloc failure in " "mprsas_action_resetdev\n"); mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL); xpt_done(ccb); return; } targ = &sassc->targets[ccb->ccb_h.target_id]; req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; req->DevHandle = htole16(targ->handle); req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; if (!targ->is_nvme || sc->custom_nvme_tm_handling) { /* SAS Hard Link Reset / SATA Link Reset */ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; } else { /* PCIe Protocol Level Reset*/ req->MsgFlags = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; } tm->cm_data = NULL; tm->cm_complete = mprsas_resetdev_complete; tm->cm_complete_data = ccb; mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n", __func__, targ->tid); tm->cm_targ = targ; mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD); mpr_map_command(sc, tm); } static void mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *resp; union ccb *ccb; MPR_FUNCTRACE(sc); mtx_assert(&sc->mpr_mtx, MA_OWNED); resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; ccb = tm->cm_complete_data; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. 
*/ if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of " "handle %#04x! This should not happen!\n", __func__, tm->cm_flags, req->DevHandle); mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); goto bailout; } mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode)); if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) { mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, CAM_LUN_WILDCARD); } else mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); bailout: mprsas_free_tm(sc, tm); xpt_done(ccb); } static void mprsas_poll(struct cam_sim *sim) { struct mprsas_softc *sassc; sassc = cam_sim_softc(sim); if (sassc->sc->mpr_debug & MPR_TRACE) { /* frequent debug messages during a panic just slow * everything down too much. */ mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n", __func__); sassc->sc->mpr_debug &= ~MPR_TRACE; } mpr_intr_locked(sassc->sc); } static void mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct mpr_softc *sc; sc = (struct mpr_softc *)callback_arg; switch (code) { case AC_ADVINFO_CHANGED: { struct mprsas_target *target; struct mprsas_softc *sassc; struct scsi_read_capacity_data_long rcap_buf; struct ccb_dev_advinfo cdai; struct mprsas_lun *lun; lun_id_t lunid; int found_lun; uintptr_t buftype; buftype = (uintptr_t)arg; found_lun = 0; sassc = sc->sassc; /* * We're only interested in read capacity data changes. */ if (buftype != CDAI_TYPE_RCAPLONG) break; /* * We should have a handle for this, but check to make sure. */ KASSERT(xpt_path_target_id(path) < sassc->maxtargets, ("Target %d out of bounds in mprsas_async\n", xpt_path_target_id(path))); target = &sassc->targets[xpt_path_target_id(path)]; if (target->handle == 0) break; lunid = xpt_path_lun_id(path); SLIST_FOREACH(lun, &target->luns, lun_link) { if (lun->lun_id == lunid) { found_lun = 1; break; } } if (found_lun == 0) { lun = malloc(sizeof(struct mprsas_lun), M_MPR, M_NOWAIT | M_ZERO); if (lun == NULL) { mpr_dprint(sc, MPR_ERROR, "Unable to alloc " "LUN for EEDP support.\n"); break; } lun->lun_id = lunid; SLIST_INSERT_HEAD(&target->luns, lun, lun_link); } bzero(&rcap_buf, sizeof(rcap_buf)); xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.ccb_h.flags = CAM_DIR_IN; cdai.buftype = CDAI_TYPE_RCAPLONG; cdai.flags = CDAI_FLAG_NONE; cdai.bufsiz = sizeof(rcap_buf); cdai.buf = (uint8_t *)&rcap_buf; xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP) && (rcap_buf.prot & SRC16_PROT_EN)) { switch (rcap_buf.prot & SRC16_P_TYPE) { case SRC16_PTYPE_1: case SRC16_PTYPE_3: lun->eedp_formatted = TRUE; lun->eedp_block_size = scsi_4btoul(rcap_buf.length); break; case SRC16_PTYPE_2: default: lun->eedp_formatted = FALSE; lun->eedp_block_size = 0; break; } } else { lun->eedp_formatted = FALSE; lun->eedp_block_size = 0; } break; } case AC_FOUND_DEVICE: default: break; } } /* * Set the INRESET flag for this target so that no I/O will be sent to * the target until the reset has completed. If an I/O request does * happen, the devq will be frozen. The CCB holds the path which is * used to release the devq. 
The devq is released and the CCB is freed * when the TM completes. */ void mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm, struct mprsas_target *target, lun_id_t lun_id) { union ccb *ccb; path_id_t path_id; ccb = xpt_alloc_ccb_nowait(); if (ccb) { path_id = cam_sim_path(sc->sassc->sim); if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id, target->tid, lun_id) != CAM_REQ_CMP) { xpt_free_ccb(ccb); } else { tm->cm_ccb = ccb; tm->cm_targ = target; target->flags |= MPRSAS_TARGET_INRESET; } } } int mprsas_startup(struct mpr_softc *sc) { /* * Send the port enable message and set the wait_for_port_enable flag. * This flag helps to keep the simq frozen until all discovery events * are processed. */ sc->wait_for_port_enable = 1; mprsas_send_portenable(sc); return (0); } static int mprsas_send_portenable(struct mpr_softc *sc) { MPI2_PORT_ENABLE_REQUEST *request; struct mpr_command *cm; MPR_FUNCTRACE(sc); if ((cm = mpr_alloc_command(sc)) == NULL) return (EBUSY); request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req; request->Function = MPI2_FUNCTION_PORT_ENABLE; request->MsgFlags = 0; request->VP_ID = 0; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_complete = mprsas_portenable_complete; cm->cm_data = NULL; cm->cm_sge = NULL; mpr_map_command(sc, cm); mpr_dprint(sc, MPR_XINFO, "mpr_send_portenable finished cm %p req %p complete %p\n", cm, cm->cm_req, cm->cm_complete); return (0); } static void mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm) { MPI2_PORT_ENABLE_REPLY *reply; struct mprsas_softc *sassc; MPR_FUNCTRACE(sc); sassc = sc->sassc; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * port enable commands don't have S/G lists. */ if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! " "This should not happen!\n", __func__, cm->cm_flags); } reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply; if (reply == NULL) mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n"); else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) mpr_dprint(sc, MPR_FAULT, "Portenable failed\n"); mpr_free_command(sc, cm); /* * Done waiting for port enable to complete. Decrement the refcount. * If refcount is 0, discovery is complete and a rescan of the bus can * take place. */ sc->wait_for_port_enable = 0; sc->port_enable_complete = 1; wakeup(&sc->port_enable_complete); mprsas_startup_decrement(sassc); } int mprsas_check_id(struct mprsas_softc *sassc, int id) { struct mpr_softc *sc = sassc->sc; char *ids; char *name; ids = &sc->exclude_ids[0]; while((name = strsep(&ids, ",")) != NULL) { if (name[0] == '\0') continue; if (strtol(name, NULL, 0) == (long)id) return (1); } return (0); } void mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets) { struct mprsas_softc *sassc; struct mprsas_lun *lun, *lun_tmp; struct mprsas_target *targ; int i; sassc = sc->sassc; /* * The number of targets is based on IOC Facts, so free all of * the allocated LUNs for each target and then the target buffer * itself. 
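 *
 * Aside (editor's illustrative sketch): mprsas_check_id() above walks a
 * comma-separated exclude list with strsep()/strtol().  strsep() writes NUL
 * terminators into the buffer it parses, so a standalone version works on a
 * writable copy.  Hypothetical names throughout:
 */

#include <stdlib.h>
#include <string.h>

static int
ex_id_is_excluded(const char *exclude_list, long id)
{
	char *copy, *ids, *name;
	int excluded = 0;

	copy = ids = strdup(exclude_list);	/* strsep() mutates its input */
	if (copy == NULL)
		return (0);
	while ((name = strsep(&ids, ",")) != NULL) {
		if (name[0] == '\0')
			continue;		/* skip empty fields like ",," */
		if (strtol(name, NULL, 0) == id) {
			excluded = 1;
			break;
		}
	}
	free(copy);
	return (excluded);
}

/* (end of aside)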
*/ for (i=0; i< maxtargets; i++) { targ = &sassc->targets[i]; SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) { free(lun, M_MPR); } } free(sassc->targets, M_MPR); sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets, M_MPR, M_WAITOK|M_ZERO); } Index: head/sys/dev/mps/mps_sas.c =================================================================== --- head/sys/dev/mps/mps_sas.c (revision 367043) +++ head/sys/dev/mps/mps_sas.c (revision 367044) @@ -1,3403 +1,3405 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Yahoo! Inc. * Copyright (c) 2011-2015 LSI Corp. * Copyright (c) 2013-2015 Avago Technologies * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD * * $FreeBSD$ */ #include __FBSDID("$FreeBSD$"); /* Communications core for Avago Technologies (LSI) MPT2 */ /* TODO Move headers to mpsvar */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MPSSAS_DISCOVERY_TIMEOUT 20 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */ /* * static array to check SCSI OpCode for EEDP protection bits */ #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP static uint8_t op_code_prot[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory"); static void mpssas_remove_device(struct mps_softc *, struct mps_command *); static void mpssas_remove_complete(struct mps_softc *, struct mps_command *); static void mpssas_action(struct cam_sim *sim, union ccb *ccb); static void mpssas_poll(struct cam_sim *sim); static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm); static void mpssas_scsiio_timeout(void *data); static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm); static void mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm, union ccb *ccb); static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *); static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *); static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *); static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm); static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr); static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb); static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *); static void mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); static int mpssas_send_portenable(struct mps_softc *sc); static void mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm); struct mpssas_target * mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle) { struct mpssas_target *target; int i; for (i = start; i < sassc->maxtargets; i++) { target = &sassc->targets[i]; if (target->handle == handle) return (target); } return (NULL); } /* we need to freeze the simq during attach and diag reset, to 
avoid failing * commands before device handles have been found by discovery. Since * discovery involves reading config pages and possibly sending commands, * discovery actions may continue even after we receive the end of discovery * event, so refcount discovery actions instead of assuming we can unfreeze * the simq when we get the event. */ void mpssas_startup_increment(struct mpssas_softc *sassc) { MPS_FUNCTRACE(sassc->sc); if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) { if (sassc->startup_refcount++ == 0) { /* just starting, freeze the simq */ mps_dprint(sassc->sc, MPS_INIT, "%s freezing simq\n", __func__); xpt_hold_boot(); xpt_freeze_simq(sassc->sim, 1); } mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__, sassc->startup_refcount); } } void mpssas_release_simq_reinit(struct mpssas_softc *sassc) { if (sassc->flags & MPSSAS_QUEUE_FROZEN) { sassc->flags &= ~MPSSAS_QUEUE_FROZEN; xpt_release_simq(sassc->sim, 1); mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n"); } } void mpssas_startup_decrement(struct mpssas_softc *sassc) { MPS_FUNCTRACE(sassc->sc); if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) { if (--sassc->startup_refcount == 0) { /* finished all discovery-related actions, release * the simq and rescan for the latest topology. */ mps_dprint(sassc->sc, MPS_INIT, "%s releasing simq\n", __func__); sassc->flags &= ~MPSSAS_IN_STARTUP; xpt_release_simq(sassc->sim, 1); xpt_release_boot(); } mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__, sassc->startup_refcount); } } /* * The firmware requires us to stop sending commands when we're doing task * management. * XXX The logic for serializing the device has been made lazy and moved to * mpssas_prepare_for_tm(). */ struct mps_command * mpssas_alloc_tm(struct mps_softc *sc) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mps_command *tm; tm = mps_alloc_high_priority_command(sc); if (tm == NULL) return (NULL); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; return tm; } void mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm) { int target_id = 0xFFFFFFFF; if (tm == NULL) return; /* * For TM's the devq is frozen for the device. Unfreeze it here and * free the resources used for freezing the devq. Must clear the * INRESET flag as well or scsi I/O will not work. */ if (tm->cm_targ != NULL) { tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET; target_id = tm->cm_targ->tid; } if (tm->cm_ccb) { mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n", target_id); xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE); xpt_free_path(tm->cm_ccb->ccb_h.path); xpt_free_ccb(tm->cm_ccb); } mps_free_high_priority_command(sc, tm); } void mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ) { struct mpssas_softc *sassc = sc->sassc; path_id_t pathid; target_id_t targetid; union ccb *ccb; MPS_FUNCTRACE(sc); pathid = cam_sim_path(sassc->sim); if (targ == NULL) targetid = CAM_TARGET_WILDCARD; else targetid = targ - sassc->targets; /* * Allocate a CCB and schedule a rescan. 
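 *
 * Aside (editor's illustrative sketch): the mpssas_startup_increment() and
 * mpssas_startup_decrement() pair above freezes the SIM queue on the first
 * discovery reference and releases it only when the last reference is
 * dropped, so overlapping discovery work cannot release the queue early.
 * A standalone sketch of the counting pattern, with hypothetical names:
 */

static unsigned int ex_refcount;
static unsigned int ex_freezes, ex_releases;	/* stand-ins for xpt_*_simq() */

static void
ex_discovery_increment(void)
{
	if (ex_refcount++ == 0)
		ex_freezes++;		/* first reference: freeze */
}

static void
ex_discovery_decrement(void)
{
	if (--ex_refcount == 0)
		ex_releases++;		/* last reference gone: release */
}

/* (end of aside)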
*/ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n"); xpt_free_ccb(ccb); return; } if (targetid == CAM_TARGET_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_BUS; else ccb->ccb_h.func_code = XPT_SCAN_TGT; mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid); xpt_rescan(ccb); } static void mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...) { struct sbuf sb; va_list ap; char str[224]; char path_str[64]; if (cm == NULL) return; /* No need to be in here if debugging isn't enabled */ if ((cm->cm_sc->mps_debug & level) == 0) return; sbuf_new(&sb, str, sizeof(str), 0); va_start(ap, fmt); if (cm->cm_ccb != NULL) { xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str, sizeof(path_str)); sbuf_cat(&sb, path_str); if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) { scsi_command_string(&cm->cm_ccb->csio, &sb); sbuf_printf(&sb, "length %d ", cm->cm_ccb->csio.dxfer_len); } } else { sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ", cam_sim_name(cm->cm_sc->sassc->sim), cam_sim_unit(cm->cm_sc->sassc->sim), cam_sim_bus(cm->cm_sc->sassc->sim), cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF, cm->cm_lun); } sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID); sbuf_vprintf(&sb, fmt, ap); sbuf_finish(&sb); mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb)); va_end(ap); } static void mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *reply; struct mpssas_target *targ; uint16_t handle; MPS_FUNCTRACE(sc); reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; handle = (uint16_t)(uintptr_t)tm->cm_complete_data; targ = tm->cm_targ; if (reply == NULL) { /* XXX retry the remove after the diag reset completes? */ mps_dprint(sc, MPS_FAULT, "%s NULL reply resetting device 0x%04x\n", __func__, handle); mpssas_free_tm(sc, tm); return; } if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) { mps_dprint(sc, MPS_ERROR, "IOCStatus = 0x%x while resetting device 0x%x\n", le16toh(reply->IOCStatus), handle); } mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n", reply->TerminationCount); mps_free_reply(sc, tm->cm_reply_data); tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n", targ->tid, handle); /* * Don't clear target if remove fails because things will get confusing. * Leave the devname and sasaddr intact so that we know to avoid reusing * this target id if possible, and so we can assign the same target id * to this device if it comes back in the future. */ if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_SUCCESS) { targ = tm->cm_targ; targ->handle = 0x0; targ->encl_handle = 0x0; targ->encl_slot = 0x0; targ->exp_dev_handle = 0x0; targ->phy_num = 0x0; targ->linkrate = 0x0; targ->devinfo = 0x0; targ->flags = 0x0; } mpssas_free_tm(sc, tm); } /* * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal. * Otherwise Volume Delete is same as Bare Drive Removal. 
*/ void mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mps_softc *sc; struct mps_command *tm; struct mpssas_target *targ = NULL; MPS_FUNCTRACE(sassc->sc); sc = sassc->sc; #ifdef WD_SUPPORT /* * If this is a WD controller, determine if the disk should be exposed * to the OS or not. If disk should be exposed, return from this * function without doing anything. */ if (sc->WD_available && (sc->WD_hide_expose == MPS_WD_EXPOSE_ALWAYS)) { return; } #endif //WD_SUPPORT targ = mpssas_find_target_by_handle(sassc, 0, handle); if (targ == NULL) { /* FIXME: what is the action? */ /* We don't know about this device? */ mps_dprint(sc, MPS_ERROR, "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle); return; } targ->flags |= MPSSAS_TARGET_INREMOVAL; tm = mpssas_alloc_tm(sc); if (tm == NULL) { mps_dprint(sc, MPS_ERROR, "%s: command alloc failure\n", __func__); return; } mpssas_rescan_target(sc, targ); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; req->DevHandle = targ->handle; req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; /* SAS Hard Link Reset / SATA Link Reset */ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; tm->cm_targ = targ; tm->cm_data = NULL; tm->cm_complete = mpssas_remove_volume; tm->cm_complete_data = (void *)(uintptr_t)handle; mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n", __func__, targ->tid); mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD); mps_map_command(sc, tm); } /* * The MPT2 firmware performs debounce on the link to avoid transient link * errors and false removals. When it does decide that link has been lost * and a device need to go away, it expects that the host will perform a * target reset and then an op remove. The reset has the side-effect of * aborting any outstanding requests for the device, which is required for * the op-remove to succeed. It's not clear if the host should check for * the device coming back alive after the reset. */ void mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mps_softc *sc; struct mps_command *cm; struct mpssas_target *targ = NULL; MPS_FUNCTRACE(sassc->sc); sc = sassc->sc; targ = mpssas_find_target_by_handle(sassc, 0, handle); if (targ == NULL) { /* FIXME: what is the action? */ /* We don't know about this device? 
*/ mps_dprint(sc, MPS_ERROR, "%s : invalid handle 0x%x \n", __func__, handle); return; } targ->flags |= MPSSAS_TARGET_INREMOVAL; cm = mpssas_alloc_tm(sc); if (cm == NULL) { mps_dprint(sc, MPS_ERROR, "%s: command alloc failure\n", __func__); return; } mpssas_rescan_target(sc, targ); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; req->DevHandle = htole16(targ->handle); req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; /* SAS Hard Link Reset / SATA Link Reset */ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; cm->cm_targ = targ; cm->cm_data = NULL; cm->cm_complete = mpssas_remove_device; cm->cm_complete_data = (void *)(uintptr_t)handle; mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n", __func__, targ->tid); mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD); mps_map_command(sc, cm); } static void mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *reply; MPI2_SAS_IOUNIT_CONTROL_REQUEST *req; struct mpssas_target *targ; uint16_t handle; MPS_FUNCTRACE(sc); reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; handle = (uint16_t)(uintptr_t)tm->cm_complete_data; targ = tm->cm_targ; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for remove of handle %#04x! " "This should not happen!\n", __func__, tm->cm_flags, handle); } if (reply == NULL) { /* XXX retry the remove after the diag reset completes? */ mps_dprint(sc, MPS_FAULT, "%s NULL reply resetting device 0x%04x\n", __func__, handle); mpssas_free_tm(sc, tm); return; } if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) { mps_dprint(sc, MPS_ERROR, "IOCStatus = 0x%x while resetting device 0x%x\n", le16toh(reply->IOCStatus), handle); } mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n", le32toh(reply->TerminationCount)); mps_free_reply(sc, tm->cm_reply_data); tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ /* Reuse the existing command */ req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req; memset(req, 0, sizeof(*req)); req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; req->Operation = MPI2_SAS_OP_REMOVE_DEVICE; req->DevHandle = htole16(handle); tm->cm_data = NULL; tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; tm->cm_complete = mpssas_remove_complete; tm->cm_complete_data = (void *)(uintptr_t)handle; /* * Wait to send the REMOVE_DEVICE until all the commands have cleared. * They should be aborted or time out and we'll kick thus off there * if so. */ if (TAILQ_FIRST(&targ->commands) == NULL) { mps_dprint(sc, MPS_INFO, "No pending commands: starting remove_device\n"); mps_map_command(sc, tm); targ->pending_remove_tm = NULL; } else { targ->pending_remove_tm = tm; } mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n", targ->tid, handle); } static void mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm) { MPI2_SAS_IOUNIT_CONTROL_REPLY *reply; uint16_t handle; struct mpssas_target *targ; struct mpssas_lun *lun; MPS_FUNCTRACE(sc); reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply; handle = (uint16_t)(uintptr_t)tm->cm_complete_data; targ = tm->cm_targ; /* * At this point, we should have no pending commands for the target. * The remove target has just completed. 
*/ KASSERT(TAILQ_FIRST(&targ->commands) == NULL, ("%s: no commands should be pending\n", __func__)); /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { mps_dprint(sc, MPS_XINFO, "%s: cm_flags = %#x for remove of handle %#04x! " "This should not happen!\n", __func__, tm->cm_flags, handle); mpssas_free_tm(sc, tm); return; } if (reply == NULL) { /* most likely a chip reset */ mps_dprint(sc, MPS_FAULT, "%s NULL reply removing device 0x%04x\n", __func__, handle); mpssas_free_tm(sc, tm); return; } mps_dprint(sc, MPS_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__, handle, le16toh(reply->IOCStatus)); /* * Don't clear target if remove fails because things will get confusing. * Leave the devname and sasaddr intact so that we know to avoid reusing * this target id if possible, and so we can assign the same target id * to this device if it comes back in the future. */ if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_SUCCESS) { targ->handle = 0x0; targ->encl_handle = 0x0; targ->encl_slot = 0x0; targ->exp_dev_handle = 0x0; targ->phy_num = 0x0; targ->linkrate = 0x0; targ->devinfo = 0x0; targ->flags = 0x0; while(!SLIST_EMPTY(&targ->luns)) { lun = SLIST_FIRST(&targ->luns); SLIST_REMOVE_HEAD(&targ->luns, lun_link); free(lun, M_MPT2); } } mpssas_free_tm(sc, tm); } static int mpssas_register_events(struct mps_softc *sc) { u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; bzero(events, 16); setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); setbit(events, MPI2_EVENT_SAS_DISCOVERY); setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE); setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE); setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW); setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST); setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST); setbit(events, MPI2_EVENT_IR_VOLUME); setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK); setbit(events, MPI2_EVENT_IR_OPERATION_STATUS); setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED); mps_register_events(sc, events, mpssas_evt_handler, NULL, &sc->sassc->mpssas_eh); return (0); } int mps_attach_sas(struct mps_softc *sc) { struct mpssas_softc *sassc; cam_status status; int unit, error = 0, reqs; MPS_FUNCTRACE(sc); mps_dprint(sc, MPS_INIT, "%s entered\n", __func__); sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO); /* * XXX MaxTargets could change during a reinit. Since we don't * resize the targets[] array during such an event, cache the value * of MaxTargets here so that we don't get into trouble later. This * should move into the reinit logic. 
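 *
 * Aside (editor's illustrative sketch): the LUN cleanup in
 * mpssas_remove_complete() above is the standard queue(3) idiom for
 * draining a singly-linked list: pop the head until the list is empty and
 * free each element as it comes off.  A standalone version with
 * hypothetical types:
 */

#include <stdlib.h>
#include <sys/queue.h>

struct ex_lun {
	unsigned long		lun_id;
	SLIST_ENTRY(ex_lun)	lun_link;
};
SLIST_HEAD(ex_lun_list, ex_lun);

static void
ex_drain_luns(struct ex_lun_list *luns)
{
	struct ex_lun *lun;

	while (!SLIST_EMPTY(luns)) {
		lun = SLIST_FIRST(luns);
		SLIST_REMOVE_HEAD(luns, lun_link);
		free(lun);	/* userland free(); the driver uses free(9) */
	}
}

/* (end of aside)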
*/ sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes; sassc->targets = malloc(sizeof(struct mpssas_target) * sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO); sc->sassc = sassc; sassc->sc = sc; reqs = sc->num_reqs - sc->num_prireqs - 1; if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) { mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n"); error = ENOMEM; goto out; } unit = device_get_unit(sc->mps_dev); sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc, unit, &sc->mps_mtx, reqs, reqs, sassc->devq); if (sassc->sim == NULL) { mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n"); error = EINVAL; goto out; } TAILQ_INIT(&sassc->ev_queue); /* Initialize taskqueue for Event Handling */ TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc); sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO, taskqueue_thread_enqueue, &sassc->ev_tq); taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq", device_get_nameunit(sc->mps_dev)); mps_lock(sc); /* * XXX There should be a bus for every port on the adapter, but since * we're just going to fake the topology for now, we'll pretend that * everything is just a target on a single bus. */ if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) { mps_dprint(sc, MPS_INIT|MPS_ERROR, "Error %d registering SCSI bus\n", error); mps_unlock(sc); goto out; } /* * Assume that discovery events will start right away. * * Hold off boot until discovery is complete. */ sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY; sc->sassc->startup_refcount = 0; mpssas_startup_increment(sassc); callout_init(&sassc->discovery_callout, 1 /*mpsafe*/); /* * Register for async events so we can determine the EEDP * capabilities of devices. */ status = xpt_create_path(&sassc->path, /*periph*/NULL, cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { mps_dprint(sc, MPS_ERROR|MPS_INIT, "Error %#x creating sim path\n", status); sassc->path = NULL; } else { int event; event = AC_ADVINFO_CHANGED; status = xpt_register_async(event, mpssas_async, sc, sassc->path); if (status != CAM_REQ_CMP) { mps_dprint(sc, MPS_ERROR, "Error %#x registering async handler for " "AC_ADVINFO_CHANGED events\n", status); xpt_free_path(sassc->path); sassc->path = NULL; } } if (status != CAM_REQ_CMP) { /* * EEDP use is the exception, not the rule. * Warn the user, but do not fail to attach. */ mps_printf(sc, "EEDP capabilities disabled.\n"); } mps_unlock(sc); mpssas_register_events(sc); out: if (error) mps_detach_sas(sc); mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error); return (error); } int mps_detach_sas(struct mps_softc *sc) { struct mpssas_softc *sassc; struct mpssas_lun *lun, *lun_tmp; struct mpssas_target *targ; int i; MPS_FUNCTRACE(sc); if (sc->sassc == NULL) return (0); sassc = sc->sassc; mps_deregister_events(sc, sassc->mpssas_eh); /* * Drain and free the event handling taskqueue with the lock * unheld so that any parallel processing tasks drain properly * without deadlocking. */ if (sassc->ev_tq != NULL) taskqueue_free(sassc->ev_tq); /* Make sure CAM doesn't wedge if we had to bail out early. 
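* Drop any leftover startup references and release the simq below so that a partially completed attach doesn't leave the SIM queue frozen forever.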
*/ mps_lock(sc); while (sassc->startup_refcount != 0) mpssas_startup_decrement(sassc); /* Deregister our async handler */ if (sassc->path != NULL) { xpt_register_async(0, mpssas_async, sc, sassc->path); xpt_free_path(sassc->path); sassc->path = NULL; } if (sassc->flags & MPSSAS_IN_STARTUP) xpt_release_simq(sassc->sim, 1); if (sassc->sim != NULL) { xpt_bus_deregister(cam_sim_path(sassc->sim)); cam_sim_free(sassc->sim, FALSE); } mps_unlock(sc); if (sassc->devq != NULL) cam_simq_free(sassc->devq); for(i=0; i< sassc->maxtargets ;i++) { targ = &sassc->targets[i]; SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) { free(lun, M_MPT2); } } free(sassc->targets, M_MPT2); free(sassc, M_MPT2); sc->sassc = NULL; return (0); } void mpssas_discovery_end(struct mpssas_softc *sassc) { struct mps_softc *sc = sassc->sc; MPS_FUNCTRACE(sc); if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING) callout_stop(&sassc->discovery_callout); /* * After discovery has completed, check the mapping table for any * missing devices and update their missing counts. Only do this once * whenever the driver is initialized so that missing counts aren't * updated unnecessarily. Note that just because discovery has * completed doesn't mean that events have been processed yet. The * check_devices function is a callout timer that checks if ALL devices * are missing. If so, it will wait a little longer for events to * complete and keep resetting itself until some device in the mapping * table is not missing, meaning that event processing has started. */ if (sc->track_mapping_events) { mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has " "completed. Check for missing devices in the mapping " "table.\n"); callout_reset(&sc->device_check_callout, MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices, sc); } } static void mpssas_action(struct cam_sim *sim, union ccb *ccb) { struct mpssas_softc *sassc; sassc = cam_sim_softc(sim); MPS_FUNCTRACE(sassc->sc); mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n", ccb->ccb_h.func_code); mtx_assert(&sassc->sc->mps_mtx, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; struct mps_softc *sc = sassc->sc; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN; cpi->hba_eng_cnt = 0; cpi->max_target = sassc->maxtargets - 1; cpi->max_lun = 255; /* * initiator_id is set here to an ID outside the set of valid * target IDs (including volumes). 
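* Using sassc->maxtargets (one past the largest valid target ID) guarantees the initiator ID can never collide with a disk or volume.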
*/ cpi->initiator_id = sassc->maxtargets; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 150000; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC; cpi->maxio = sc->maxio; mpssas_set_ccbstatus(ccb, CAM_REQ_CMP); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; struct ccb_trans_settings_sas *sas; struct ccb_trans_settings_scsi *scsi; struct mpssas_target *targ; cts = &ccb->cts; sas = &cts->xport_specific.sas; scsi = &cts->proto_specific.scsi; KASSERT(cts->ccb_h.target_id < sassc->maxtargets, ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n", cts->ccb_h.target_id)); targ = &sassc->targets[cts->ccb_h.target_id]; if (targ->handle == 0x0) { mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); break; } cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_SAS; cts->transport_version = 0; sas->valid = CTS_SAS_VALID_SPEED; switch (targ->linkrate) { case 0x08: sas->bitrate = 150000; break; case 0x09: sas->bitrate = 300000; break; case 0x0a: sas->bitrate = 600000; break; default: sas->valid = 0; } cts->protocol = PROTO_SCSI; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; mpssas_set_ccbstatus(ccb, CAM_REQ_CMP); break; } case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, /*extended*/1); mpssas_set_ccbstatus(ccb, CAM_REQ_CMP); break; case XPT_RESET_DEV: mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n"); mpssas_action_resetdev(sassc, ccb); return; case XPT_RESET_BUS: case XPT_ABORT: case XPT_TERM_IO: mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action faking success for abort or reset\n"); mpssas_set_ccbstatus(ccb, CAM_REQ_CMP); break; case XPT_SCSI_IO: mpssas_action_scsiio(sassc, ccb); return; case XPT_SMP_IO: mpssas_action_smpio(sassc, ccb); return; default: mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL); break; } xpt_done(ccb); } static void mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code, target_id_t target_id, lun_id_t lun_id) { path_id_t path_id = cam_sim_path(sc->sassc->sim); struct cam_path *path; mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__, ac_code, target_id, (uintmax_t)lun_id); if (xpt_create_path(&path, NULL, path_id, target_id, lun_id) != CAM_REQ_CMP) { mps_dprint(sc, MPS_ERROR, "unable to create path for reset " "notification\n"); return; } xpt_async(ac_code, path, NULL); xpt_free_path(path); } static void mpssas_complete_all_commands(struct mps_softc *sc) { struct mps_command *cm; int i; int completed; MPS_FUNCTRACE(sc); mtx_assert(&sc->mps_mtx, MA_OWNED); /* complete all commands with a NULL reply */ for (i = 1; i < sc->num_reqs; i++) { cm = &sc->commands[i]; if (cm->cm_state == MPS_CM_STATE_FREE) continue; cm->cm_state = MPS_CM_STATE_BUSY; cm->cm_reply = NULL; completed = 0; if (cm->cm_flags & MPS_CM_FLAGS_SATA_ID_TIMEOUT) { MPASS(cm->cm_data); free(cm->cm_data, M_MPT2); cm->cm_data = NULL; } if (cm->cm_flags & MPS_CM_FLAGS_POLLED) cm->cm_flags |= MPS_CM_FLAGS_COMPLETE; if (cm->cm_complete != NULL) { mpssas_log_command(cm, MPS_RECOVERY, "completing cm %p state %x ccb %p for diag reset\n", cm, cm->cm_state, cm->cm_ccb); cm->cm_complete(sc, cm); completed = 1; } else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) { mpssas_log_command(cm, MPS_RECOVERY, "waking up cm %p state %x ccb %p for diag reset\n", cm, cm->cm_state, cm->cm_ccb); 
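/* * Synchronous callers sleep on the command itself (mps_wait_command() sets MPS_CM_FLAGS_WAKEUP and msleeps on cm), so wake them up to see the NULL reply. */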
wakeup(cm); completed = 1; } if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) { /* this should never happen, but if it does, log */ mpssas_log_command(cm, MPS_RECOVERY, "cm %p state %x flags 0x%x ccb %p during diag " "reset\n", cm, cm->cm_state, cm->cm_flags, cm->cm_ccb); } } sc->io_cmds_active = 0; } void mpssas_handle_reinit(struct mps_softc *sc) { int i; /* Go back into startup mode and freeze the simq, so that CAM * doesn't send any commands until after we've rediscovered all * targets and found the proper device handles for them. * * After the reset, portenable will trigger discovery, and after all * discovery-related activities have finished, the simq will be * released. */ mps_dprint(sc, MPS_INIT, "%s startup\n", __func__); sc->sassc->flags |= MPSSAS_IN_STARTUP; sc->sassc->flags |= MPSSAS_IN_DISCOVERY; mpssas_startup_increment(sc->sassc); /* notify CAM of a bus reset */ mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); /* complete and cleanup after all outstanding commands */ mpssas_complete_all_commands(sc); mps_dprint(sc, MPS_INIT, "%s startup %u after command completion\n", __func__, sc->sassc->startup_refcount); /* zero all the target handles, since they may change after the * reset, and we have to rediscover all the targets and use the new * handles. */ for (i = 0; i < sc->sassc->maxtargets; i++) { if (sc->sassc->targets[i].outstanding != 0) mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n", i, sc->sassc->targets[i].outstanding); sc->sassc->targets[i].handle = 0x0; sc->sassc->targets[i].exp_dev_handle = 0x0; sc->sassc->targets[i].outstanding = 0; sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET; } } static void mpssas_tm_timeout(void *data) { struct mps_command *tm = data; struct mps_softc *sc = tm->cm_sc; mtx_assert(&sc->mps_mtx, MA_OWNED); mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY, "task mgmt %p timed out\n", tm); KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE, ("command not inqueue\n")); tm->cm_state = MPS_CM_STATE_BUSY; mps_reinit(sc); } static void mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *reply; MPI2_SCSI_TASK_MANAGE_REQUEST *req; unsigned int cm_count = 0; struct mps_command *cm; struct mpssas_target *targ; callout_stop(&tm->cm_callout); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; targ = tm->cm_targ; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. * XXXSL So should it be an assertion? */ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { mps_dprint(sc, MPS_RECOVERY|MPS_ERROR, "%s: cm_flags = %#x for LUN reset! " "This should not happen!\n", __func__, tm->cm_flags); mpssas_free_tm(sc, tm); return; } if (reply == NULL) { mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n", tm); if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) { /* this completion was due to a reset, just cleanup */ mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing " "reset, ignoring NULL LUN reset reply\n"); targ->tm = NULL; mpssas_free_tm(sc, tm); } else { /* we should have gotten a reply. 
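* A NULL reply when no diag reset is in progress is unexpected, so escalate to a full controller reset.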
*/ mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on " "LUN reset attempt, resetting controller\n"); mps_reinit(sc); } return; } mps_dprint(sc, MPS_RECOVERY, "logical unit reset status 0x%x code 0x%x count %u\n", le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), le32toh(reply->TerminationCount)); /* * See if there are any outstanding commands for this LUN. * This could be made more efficient by using a per-LU data * structure of some sort. */ TAILQ_FOREACH(cm, &targ->commands, cm_link) { if (cm->cm_lun == tm->cm_lun) cm_count++; } if (cm_count == 0) { mps_dprint(sc, MPS_RECOVERY|MPS_INFO, "Finished recovery after LUN reset for target %u\n", targ->tid); mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun); /* * We've finished recovery for this logical unit. check and * see if some other logical unit has a timedout command * that needs to be processed. */ cm = TAILQ_FIRST(&targ->timedout_commands); if (cm) { mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "More commands to abort for target %u\n", targ->tid); mpssas_send_abort(sc, tm, cm); } else { targ->tm = NULL; mpssas_free_tm(sc, tm); } } else { /* * If we still have commands for this LUN, the reset * effectively failed, regardless of the status reported. * Escalate to a target reset. */ mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "logical unit reset complete for target %u, but still " "have %u command(s), sending target reset\n", targ->tid, cm_count); mpssas_send_reset(sc, tm, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET); } } static void mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *reply; MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mpssas_target *targ; callout_stop(&tm->cm_callout); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; targ = tm->cm_targ; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! " "This should not happen!\n", __func__, tm->cm_flags); mpssas_free_tm(sc, tm); return; } if (reply == NULL) { mps_dprint(sc, MPS_RECOVERY, "NULL target reset reply for tm %pi TaskMID %u\n", tm, le16toh(req->TaskMID)); if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) { /* this completion was due to a reset, just cleanup */ mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing " "reset, ignoring NULL target reset reply\n"); targ->tm = NULL; mpssas_free_tm(sc, tm); } else { /* we should have gotten a reply. */ mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on " "target reset attempt, resetting controller\n"); mps_reinit(sc); } return; } mps_dprint(sc, MPS_RECOVERY, "target reset status 0x%x code 0x%x count %u\n", le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), le32toh(reply->TerminationCount)); if (targ->outstanding == 0) { /* we've finished recovery for this target and all * of its logical units. */ mps_dprint(sc, MPS_RECOVERY|MPS_INFO, "Finished reset recovery for target %u\n", targ->tid); mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, CAM_LUN_WILDCARD); targ->tm = NULL; mpssas_free_tm(sc, tm); } else { /* * After a target reset, if this target still has * outstanding commands, the reset effectively failed, * regardless of the status reported. escalate. 
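* The only escalation left after a target reset is a full controller reinit, which is what mps_reinit() below does.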
*/ mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "Target reset complete for target %u, but still have %u " "command(s), resetting controller\n", targ->tid, targ->outstanding); mps_reinit(sc); } } #define MPS_RESET_TIMEOUT 30 int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mpssas_target *target; int err; target = tm->cm_targ; if (target->handle == 0) { mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n", __func__, target->tid); return -1; } req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; req->DevHandle = htole16(target->handle); req->TaskType = type; if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) { /* XXX Need to handle invalid LUNs */ MPS_SET_LUN(req->LUN, tm->cm_lun); tm->cm_targ->logical_unit_resets++; mps_dprint(sc, MPS_RECOVERY|MPS_INFO, "Sending logical unit reset to target %u lun %d\n", target->tid, tm->cm_lun); tm->cm_complete = mpssas_logical_unit_reset_complete; mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun); } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) { /* * Target reset method = * SAS Hard Link Reset / SATA Link Reset */ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; tm->cm_targ->target_resets++; mps_dprint(sc, MPS_RECOVERY|MPS_INFO, "Sending target reset to target %u\n", target->tid); tm->cm_complete = mpssas_target_reset_complete; mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD); } else { mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type); return -1; } tm->cm_data = NULL; tm->cm_complete_data = (void *)tm; callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz, mpssas_tm_timeout, tm); err = mps_map_command(sc, tm); if (err) mps_dprint(sc, MPS_ERROR|MPS_RECOVERY, "error %d sending reset type %u\n", err, type); return err; } static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm) { struct mps_command *cm; MPI2_SCSI_TASK_MANAGE_REPLY *reply; MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mpssas_target *targ; callout_stop(&tm->cm_callout); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; targ = tm->cm_targ; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { mps_dprint(sc, MPS_RECOVERY, "cm_flags = %#x for abort %p TaskMID %u!\n", tm->cm_flags, tm, le16toh(req->TaskMID)); mpssas_free_tm(sc, tm); return; } if (reply == NULL) { mps_dprint(sc, MPS_RECOVERY, "NULL abort reply for tm %p TaskMID %u\n", tm, le16toh(req->TaskMID)); if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) { /* this completion was due to a reset, just cleanup */ mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing " "reset, ignoring NULL abort reply\n"); targ->tm = NULL; mpssas_free_tm(sc, tm); } else { /* we should have gotten a reply. */ mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on " "abort attempt, resetting controller\n"); mps_reinit(sc); } return; } mps_dprint(sc, MPS_RECOVERY, "abort TaskMID %u status 0x%x code 0x%x count %u\n", le16toh(req->TaskMID), le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), le32toh(reply->TerminationCount)); cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands); if (cm == NULL) { /* * If there are no more timedout commands, we're done with * error recovery for this target. 
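* Recovery runs one timed-out command at a time: each abort completion pulls the next entry off targ->timedout_commands until the list is empty.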
*/ mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "Finished abort recovery for target %u\n", targ->tid); targ->tm = NULL; mpssas_free_tm(sc, tm); } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) { /* abort success, but we have more timedout commands to abort */ mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "Continuing abort recovery for target %u\n", targ->tid); mpssas_send_abort(sc, tm, cm); } else { /* we didn't get a command completion, so the abort * failed as far as we're concerned. escalate. */ mps_dprint(sc, MPS_RECOVERY, "Abort failed for target %u, sending logical unit reset\n", targ->tid); mpssas_send_reset(sc, tm, MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET); } } #define MPS_ABORT_TIMEOUT 5 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mpssas_target *targ; int err; targ = cm->cm_targ; if (targ->handle == 0) { mps_dprint(sc, MPS_ERROR|MPS_RECOVERY, "%s null devhandle for target_id %d\n", __func__, cm->cm_ccb->ccb_h.target_id); return -1; } mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO, "Aborting command %p\n", cm); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; req->DevHandle = htole16(targ->handle); req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; /* XXX Need to handle invalid LUNs */ MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun); req->TaskMID = htole16(cm->cm_desc.Default.SMID); tm->cm_data = NULL; tm->cm_complete = mpssas_abort_complete; tm->cm_complete_data = (void *)tm; tm->cm_targ = cm->cm_targ; tm->cm_lun = cm->cm_lun; callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz, mpssas_tm_timeout, tm); targ->aborts++; mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun); err = mps_map_command(sc, tm); if (err) mps_dprint(sc, MPS_ERROR|MPS_RECOVERY, "error %d sending abort for cm %p SMID %u\n", err, cm, req->TaskMID); return err; } static void mpssas_scsiio_timeout(void *data) { sbintime_t elapsed, now; union ccb *ccb; struct mps_softc *sc; struct mps_command *cm; struct mpssas_target *targ; cm = (struct mps_command *)data; sc = cm->cm_sc; ccb = cm->cm_ccb; now = sbinuptime(); MPS_FUNCTRACE(sc); mtx_assert(&sc->mps_mtx, MA_OWNED); mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", sc); /* * Run the interrupt handler to make sure it's not pending. This * isn't perfect because the command could have already completed * and been re-used, though this is unlikely. */ mps_intr_locked(sc); if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) { mpssas_log_command(cm, MPS_XINFO, "SCSI command %p almost timed out\n", cm); return; } if (cm->cm_ccb == NULL) { mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n"); return; } targ = cm->cm_targ; targ->timeouts++; elapsed = now - ccb->ccb_h.qos.sim_data; mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY, "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n", targ->tid, targ->handle, ccb->ccb_h.timeout, sbintime_getsec(elapsed), elapsed & 0xffffffff); /* XXX first, check the firmware state, to see if it's still * operational. if not, do a diag reset. */ mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT); cm->cm_flags |= MPS_CM_FLAGS_ON_RECOVERY | MPS_CM_FLAGS_TIMEDOUT; TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery); if (targ->tm != NULL) { /* target already in recovery, just queue up another * timedout command to be processed later. 
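* The command was already put on targ->timedout_commands above; the active TM will pick it up when the current abort or reset completes.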
*/ mps_dprint(sc, MPS_RECOVERY, "queued timedout cm %p for processing by tm %p\n", cm, targ->tm); } else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) { mps_dprint(sc, MPS_RECOVERY|MPS_INFO, "Sending abort to target %u for SMID %d\n", targ->tid, cm->cm_desc.Default.SMID); mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n", cm, targ->tm); /* start recovery by aborting the first timedout command */ mpssas_send_abort(sc, targ->tm, cm); } else { /* XXX queue this target up for recovery once a TM becomes * available. The firmware only has a limited number of * HighPriority credits for the high priority requests used * for task management, and we ran out. * * Isilon: don't worry about this for now, since we have * more credits than disks in an enclosure, and limit * ourselves to one TM per target for recovery. */ mps_dprint(sc, MPS_ERROR|MPS_RECOVERY, "timedout cm %p failed to allocate a tm\n", cm); } } static void mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb) { MPI2_SCSI_IO_REQUEST *req; struct ccb_scsiio *csio; struct mps_softc *sc; struct mpssas_target *targ; struct mpssas_lun *lun; struct mps_command *cm; uint8_t i, lba_byte, *ref_tag_addr; uint16_t eedp_flags; uint32_t mpi_control; sc = sassc->sc; MPS_FUNCTRACE(sc); mtx_assert(&sc->mps_mtx, MA_OWNED); csio = &ccb->csio; KASSERT(csio->ccb_h.target_id < sassc->maxtargets, ("Target %d out of bounds in XPT_SCSI_IO\n", csio->ccb_h.target_id)); targ = &sassc->targets[csio->ccb_h.target_id]; mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags); if (targ->handle == 0x0) { mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n", __func__, csio->ccb_h.target_id); mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) { mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO " "supported %u\n", __func__, csio->ccb_h.target_id); mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } /* * Sometimes, it is possible to get a command that is not "In * Progress" and was actually aborted by the upper layer. Check for * this here and complete the command without error. */ if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) { mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for " "target %u\n", __func__, csio->ccb_h.target_id); xpt_done(ccb); return; } /* * If devinfo is 0 this will be a volume. In that case don't tell CAM * that the volume has timed out. We want volumes to be enumerated * until they are deleted/removed, not just failed. In either event, * we're removing the target due to a firmware event telling us * the device is now gone (as opposed to some transient event). Since * we're opting to remove failed devices from the OS's view, we need * to propagate that status up the stack. */ if (targ->flags & MPSSAS_TARGET_INREMOVAL) { if (targ->devinfo == 0) mpssas_set_ccbstatus(ccb, CAM_REQ_CMP); else mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) { mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__); mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } /* * If target has a reset in progress, freeze the devq and return. The * devq will be released when the TM reset is finished. 
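* New I/O for the target is parked behind the frozen devq rather than racing the in-progress task management command.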
*/ if (targ->flags & MPSSAS_TARGET_INRESET) { ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n", __func__, targ->tid); xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } cm = mps_alloc_command(sc); if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) { if (cm != NULL) { mps_free_command(sc, cm); } if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) { xpt_freeze_simq(sassc->sim, 1); sassc->flags |= MPSSAS_QUEUE_FROZEN; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status |= CAM_REQUEUE_REQ; xpt_done(ccb); return; } req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req; bzero(req, sizeof(*req)); req->DevHandle = htole16(targ->handle); req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; req->MsgFlags = 0; req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr); req->SenseBufferLength = MPS_SENSE_LEN; req->SGLFlags = 0; req->ChainOffset = 0; req->SGLOffset0 = 24; /* 32bit word offset to the SGL */ req->SGLOffset1= 0; req->SGLOffset2= 0; req->SGLOffset3= 0; req->SkipCount = 0; req->DataLength = htole32(csio->dxfer_len); req->BidirectionalDataLength = 0; req->IoFlags = htole16(csio->cdb_len); req->EEDPFlags = 0; /* Note: BiDirectional transfers are not supported */ switch (csio->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: mpi_control = MPI2_SCSIIO_CONTROL_READ; cm->cm_flags |= MPS_CM_FLAGS_DATAIN; break; case CAM_DIR_OUT: mpi_control = MPI2_SCSIIO_CONTROL_WRITE; cm->cm_flags |= MPS_CM_FLAGS_DATAOUT; break; case CAM_DIR_NONE: default: mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; break; } if (csio->cdb_len == 32) mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; /* * It looks like the hardware doesn't require an explicit tag * number for each transaction. SAM Task Management not supported * at the moment. */ switch (csio->tag_action) { case MSG_HEAD_OF_Q_TAG: mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ; break; case MSG_ORDERED_Q_TAG: mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ; break; case MSG_ACA_TASK: mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ; break; case CAM_TAG_ACTION_NONE: case MSG_SIMPLE_Q_TAG: default: mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; break; } + mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT) & + MPI2_SCSIIO_CONTROL_TASKPRI_MASK; mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits; req->Control = htole32(mpi_control); if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) { mps_free_command(sc, cm); mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID); xpt_done(ccb); return; } if (csio->ccb_h.flags & CAM_CDB_POINTER) bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len); else bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len); req->IoFlags = htole16(csio->cdb_len); /* * Check if EEDP is supported and enabled. If it is then check if the * SCSI opcode could be using EEDP. If so, make sure the LUN exists and * is formatted for EEDP support. If all of this is true, set CDB up * for EEDP transfer. */ eedp_flags = op_code_prot[req->CDB.CDB32[0]]; if (sc->eedp_enabled && eedp_flags) { SLIST_FOREACH(lun, &targ->luns, lun_link) { if (lun->lun_id == csio->ccb_h.target_lun) { break; } } if ((lun != NULL) && (lun->eedp_formatted)) { req->EEDPBlockSize = htole16(lun->eedp_block_size); eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); req->EEDPFlags = htole16(eedp_flags); /* * If CDB less than 32, fill in Primary Ref Tag with * low 4 bytes of LBA. If CDB is 32, tag stuff is * already there. 
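* (With Type 1 protection the reference tag starts at the low 32 bits of the LBA and is incremented per block by the controller, which is why it is seeded from the CDB here.)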
Also, set protection bit. FreeBSD * currently does not support CDBs bigger than 16, but * the code doesn't hurt, and will be here for the * future. */ if (csio->cdb_len != 32) { lba_byte = (csio->cdb_len == 16) ? 6 : 2; ref_tag_addr = (uint8_t *)&req->CDB.EEDP32. PrimaryReferenceTag; for (i = 0; i < 4; i++) { *ref_tag_addr = req->CDB.CDB32[lba_byte + i]; ref_tag_addr++; } req->CDB.EEDP32.PrimaryReferenceTag = htole32(req->CDB.EEDP32.PrimaryReferenceTag); req->CDB.EEDP32.PrimaryApplicationTagMask = 0xFFFF; req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) | 0x20; } else { eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG; req->EEDPFlags = htole16(eedp_flags); req->CDB.CDB32[10] = (req->CDB.CDB32[10] & 0x1F) | 0x20; } } } cm->cm_length = csio->dxfer_len; if (cm->cm_length != 0) { cm->cm_data = ccb; cm->cm_flags |= MPS_CM_FLAGS_USE_CCB; } else { cm->cm_data = NULL; } cm->cm_sge = &req->SGL; cm->cm_sglsize = (32 - 24) * 4; cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle); cm->cm_complete = mpssas_scsiio_complete; cm->cm_complete_data = ccb; cm->cm_targ = targ; cm->cm_lun = csio->ccb_h.target_lun; cm->cm_ccb = ccb; /* * If HBA is a WD and the command is not for a retry, try to build a * direct I/O message. If failed, or the command is for a retry, send * the I/O to the IR volume itself. */ if (sc->WD_valid_config) { if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) { mpssas_direct_drive_io(sassc, cm, ccb); } else { mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG); } } #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (csio->bio != NULL) biotrack(csio->bio, __func__); #endif csio->ccb_h.qos.sim_data = sbinuptime(); callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0, mpssas_scsiio_timeout, cm, 0); targ->issued++; targ->outstanding++; TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link); ccb->ccb_h.status |= CAM_SIM_QUEUED; mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n", __func__, cm, ccb, targ->outstanding); mps_map_command(sc, cm); return; } /** * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request */ static void mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio, Mpi2SCSIIOReply_t *mpi_reply) { u32 response_info; u8 *response_bytes; u16 ioc_status = le16toh(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; u8 scsi_state = mpi_reply->SCSIState; u8 scsi_status = mpi_reply->SCSIStatus; u32 log_info = le32toh(mpi_reply->IOCLogInfo); const char *desc_ioc_state, *desc_scsi_status; if (log_info == 0x31170000) return; desc_ioc_state = mps_describe_table(mps_iocstatus_string, ioc_status); desc_scsi_status = mps_describe_table(mps_scsi_status_string, scsi_status); mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n", le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status); /* *We can add more detail about underflow data here * TO-DO */ mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), " "scsi_state %b\n", desc_scsi_status, scsi_status, scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed" "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid"); if (sc->mps_debug & MPS_XINFO && scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n"); scsi_sense_print(csio); mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n"); } if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { response_info = le32toh(mpi_reply->ResponseInfo); response_bytes = (u8 *)&response_info; mps_dprint(sc, MPS_XINFO, 
"response code(0x%1x): %s\n", response_bytes[0], mps_describe_table(mps_scsi_taskmgmt_string, response_bytes[0])); } } static void mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm) { MPI2_SCSI_IO_REPLY *rep; union ccb *ccb; struct ccb_scsiio *csio; struct mpssas_softc *sassc; struct scsi_vpd_supported_page_list *vpd_list = NULL; u8 *TLR_bits, TLR_on; int dir = 0, i; u16 alloc_len; struct mpssas_target *target; target_id_t target_id; MPS_FUNCTRACE(sc); mps_dprint(sc, MPS_TRACE, "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply, cm->cm_targ->outstanding); callout_stop(&cm->cm_callout); mtx_assert(&sc->mps_mtx, MA_OWNED); sassc = sc->sassc; ccb = cm->cm_complete_data; csio = &ccb->csio; target_id = csio->ccb_h.target_id; rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply; /* * XXX KDM if the chain allocation fails, does it matter if we do * the sync and unload here? It is simpler to do it in every case, * assuming it doesn't cause problems. */ if (cm->cm_data != NULL) { if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) dir = BUS_DMASYNC_POSTREAD; else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT) dir = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); } cm->cm_targ->completed++; cm->cm_targ->outstanding--; TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link); ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED); #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (ccb->csio.bio != NULL) biotrack(ccb->csio.bio, __func__); #endif if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) { TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery); KASSERT(cm->cm_state == MPS_CM_STATE_BUSY, ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state)); cm->cm_flags &= ~MPS_CM_FLAGS_ON_RECOVERY; if (cm->cm_reply != NULL) mpssas_log_command(cm, MPS_RECOVERY, "completed timedout cm %p ccb %p during recovery " "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb, le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); else mpssas_log_command(cm, MPS_RECOVERY, "completed timedout cm %p ccb %p during recovery\n", cm, cm->cm_ccb); } else if (cm->cm_targ->tm != NULL) { if (cm->cm_reply != NULL) mpssas_log_command(cm, MPS_RECOVERY, "completed cm %p ccb %p during recovery " "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb, le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); else mpssas_log_command(cm, MPS_RECOVERY, "completed cm %p ccb %p during recovery\n", cm, cm->cm_ccb); } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) { mpssas_log_command(cm, MPS_RECOVERY, "reset completed cm %p ccb %p\n", cm, cm->cm_ccb); } if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { /* * We ran into an error after we tried to map the command, * so we're getting a callback without queueing the command * to the hardware. So we set the status here, and it will * be retained below. We'll go through the "fast path", * because there can be no reply when we haven't actually * gone out to the hardware. */ mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ); /* * Currently the only error included in the mask is * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of * chain frames. We need to freeze the queue until we get * a command that completed without this error, which will * hopefully have some chain frames attached that we can * use. 
If we wanted to get smarter about it, we would * only unfreeze the queue in this condition when we're * sure that we're getting some chain frames back. That's * probably unnecessary. */ if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) { xpt_freeze_simq(sassc->sim, 1); sassc->flags |= MPSSAS_QUEUE_FROZEN; mps_dprint(sc, MPS_XINFO, "Error sending command, " "freezing SIM queue\n"); } } /* * If this is a Start Stop Unit command and it was issued by the driver * during shutdown, decrement the refcount to account for all of the * commands that were sent. All SSU commands should be completed before * shutdown completes, meaning SSU_refcount will be 0 after SSU_started * is TRUE. */ if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) { mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n"); sc->SSU_refcount--; } /* Take the fast path to completion */ if (cm->cm_reply == NULL) { if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) { if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET); else { mpssas_set_ccbstatus(ccb, CAM_REQ_CMP); ccb->csio.scsi_status = SCSI_STATUS_OK; } if (sassc->flags & MPSSAS_QUEUE_FROZEN) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; sassc->flags &= ~MPSSAS_QUEUE_FROZEN; mps_dprint(sc, MPS_XINFO, "Unfreezing SIM queue\n"); } } /* * There are two scenarios where the status won't be * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is * set, the second is in the MPS_FLAGS_DIAGRESET above. */ if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) { /* * Freeze the dev queue so that commands are * executed in the correct order after error * recovery. */ ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1); } mps_free_command(sc, cm); xpt_done(ccb); return; } mpssas_log_command(cm, MPS_XINFO, "ioc %x scsi %x state %x xfer %u\n", le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); /* * If this is a Direct Drive I/O, reissue the I/O to the original IR * Volume if an error occurred (normal I/O retry). Use the original * CCB, but set a flag that this will be a retry so that it's sent to * the original volume. Free the command but reuse the CCB. */ if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) { mps_free_command(sc, cm); ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY; mpssas_action_scsiio(sassc, ccb); return; } else ccb->ccb_h.sim_priv.entries[0].field = 0; switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) { case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: csio->resid = cm->cm_length - le32toh(rep->TransferCount); /* FALLTHROUGH */ case MPI2_IOCSTATUS_SUCCESS: case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR) mpssas_log_command(cm, MPS_XINFO, "recovered error\n"); /* Completion failed at the transport level. */ if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS | MPI2_SCSI_STATE_TERMINATED)) { mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); break; } /* In a modern packetized environment, an autosense failure * implies that there's not much else that can be done to * recover the command. */ if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) { mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL); break; } /* * CAM doesn't care about SAS Response Info data, but if this is * the state check if TLR should be done. If not, clear the * TLR_bits for the target. 
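* A reason code of "invalid frame" is how a device that cannot handle Transport Layer Retries (TLR) reports it, so TLR is turned off for its subsequent I/O.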
*/ if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) && ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) == MPS_SCSI_RI_INVALID_FRAME)) { sc->mapping_table[target_id].TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR; } /* * Intentionally override the normal SCSI status reporting * for these two cases. These are likely to happen in a * multi-initiator environment, and we want to make sure that * CAM retries these commands rather than fail them. */ if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) || (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) { mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED); break; } /* Handle normal status and sense */ csio->scsi_status = rep->SCSIStatus; if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD) mpssas_set_ccbstatus(ccb, CAM_REQ_CMP); else mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR); if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) { int sense_len, returned_sense_len; returned_sense_len = min(le32toh(rep->SenseCount), sizeof(struct scsi_sense_data)); if (returned_sense_len < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - returned_sense_len; else ccb->csio.sense_resid = 0; sense_len = min(returned_sense_len, ccb->csio.sense_len - ccb->csio.sense_resid); bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data)); bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len); ccb->ccb_h.status |= CAM_AUTOSNS_VALID; } /* * Check if this is an INQUIRY command. If it's a VPD inquiry, * and it's page code 0 (Supported Page List), and there is * inquiry data, and this is for a sequential access device, and * the device is an SSP target, and TLR is supported by the * controller, turn the TLR_bits value ON if page 0x90 is * supported. */ if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) && (csio->cdb_io.cdb_bytes[1] & SI_EVPD) && (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) && ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) && (csio->data_ptr != NULL) && ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) && (sc->control_TLR) && (sc->mapping_table[target_id].device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)) { vpd_list = (struct scsi_vpd_supported_page_list *) csio->data_ptr; TLR_bits = &sc->mapping_table[target_id].TLR_bits; *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR; TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON; alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) + csio->cdb_io.cdb_bytes[4]; alloc_len -= csio->resid; for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) { if (vpd_list->list[i] == 0x90) { *TLR_bits = TLR_on; break; } } } /* * If this is a SATA direct-access end device, mark it so that * a SCSI StartStopUnit command will be sent to it when the * driver is being shutdown. */ if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) && ((csio->data_ptr[0] & 0x1f) == T_DIRECT) && (sc->mapping_table[target_id].device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) && ((sc->mapping_table[target_id].device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) == MPI2_SAS_DEVICE_INFO_END_DEVICE)) { target = &sassc->targets[target_id]; target->supports_SSU = TRUE; mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n", target_id); } break; case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* * If devinfo is 0 this will be a volume. In that case don't * tell CAM that the volume is not there. We want volumes to * be enumerated until they are deleted/removed, not just * failed. 
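* Reporting CAM_REQ_CMP here keeps the volume visible to CAM; real end devices get CAM_DEV_NOT_THERE below.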
*/ if (cm->cm_targ->devinfo == 0) mpssas_set_ccbstatus(ccb, CAM_REQ_CMP); else mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); break; case MPI2_IOCSTATUS_INVALID_SGL: mps_print_scsiio_cmd(sc, cm); mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR); break; case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: /* * This is one of the responses that comes back when an I/O * has been aborted. If it is because of a timeout that we * initiated, just set the status to CAM_CMD_TIMEOUT. * Otherwise set it to CAM_REQ_ABORTED. The effect on the * command is the same (it gets retried, subject to the * retry counter), the only difference is what gets printed * on the console. */ if (cm->cm_flags & MPS_CM_FLAGS_TIMEDOUT) mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT); else mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED); break; case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: /* resid is ignored for this condition */ csio->resid = 0; mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR); break; case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: /* * These can sometimes be transient transport-related * errors, and sometimes persistent drive-related errors. * We used to retry these without decrementing the retry * count by returning CAM_REQUEUE_REQ. Unfortunately, if * we hit a persistent drive problem that returns one of * these error codes, we would retry indefinitely. So, * return CAM_REQ_CMP_ERROR so that we decrement the retry * count and avoid infinite retries. We're taking the * potential risk of flagging false failures in the event * of a topology-related error (e.g. a SAS expander problem * causes a command addressed to a drive to fail), but * avoiding getting into an infinite retry loop. However, * if we get them while were moving a device, we should * fail the request as 'not there' because the device * is effectively gone. */ if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); else mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); mps_dprint(sc, MPS_INFO, "Controller reported %s tgt %u SMID %u loginfo %x%s\n", mps_describe_table(mps_iocstatus_string, le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK), target_id, cm->cm_desc.Default.SMID, le32toh(rep->IOCLogInfo), (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) ? " departing" : ""); mps_dprint(sc, MPS_XINFO, "SCSIStatus %x SCSIState %x xfercount %u\n", rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); break; case MPI2_IOCSTATUS_INVALID_FUNCTION: case MPI2_IOCSTATUS_INTERNAL_ERROR: case MPI2_IOCSTATUS_INVALID_VPID: case MPI2_IOCSTATUS_INVALID_FIELD: case MPI2_IOCSTATUS_INVALID_STATE: case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED: case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: default: mpssas_log_command(cm, MPS_XINFO, "completed ioc %x loginfo %x scsi %x state %x xfer %u\n", le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); csio->resid = cm->cm_length; mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); break; } mps_sc_failed_io_info(sc,csio,rep); if (sassc->flags & MPSSAS_QUEUE_FROZEN) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; sassc->flags &= ~MPSSAS_QUEUE_FROZEN; mps_dprint(sc, MPS_XINFO, "Command completed, " "unfreezing SIM queue\n"); } if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) { ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1); } /* * Check to see if we're removing the device. 
If so, and this is the * last command on the queue, proceed with the deferred removal of the * device. Note, for removing a volume, this won't trigger because * pending_remove_tm will be NULL. */ if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) { if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL && cm->cm_targ->pending_remove_tm != NULL) { mps_dprint(sc, MPS_INFO, "Last pending command complete: starting remove_device\n"); mps_map_command(sc, cm->cm_targ->pending_remove_tm); cm->cm_targ->pending_remove_tm = NULL; } } mps_free_command(sc, cm); xpt_done(ccb); } /* All Request reached here are Endian safe */ static void mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm, union ccb *ccb) { pMpi2SCSIIORequest_t pIO_req; struct mps_softc *sc = sassc->sc; uint64_t virtLBA; uint32_t physLBA, stripe_offset, stripe_unit; uint32_t io_size, column; uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB; /* * If this is a valid SCSI command (Read6, Read10, Read16, Write6, * Write10, or Write16), build a direct I/O message. Otherwise, the I/O * will be sent to the IR volume itself. Since Read6 and Write6 are a * bit different than the 10/16 CDBs, handle them separately. */ pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req; CDB = pIO_req->CDB.CDB32; /* * Handle 6 byte CDBs. */ if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) || (CDB[0] == WRITE_6))) { /* * Get the transfer size in blocks. */ io_size = (cm->cm_length >> sc->DD_block_exponent); /* * Get virtual LBA given in the CDB. */ virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) | ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3]; /* * Check that LBA range for I/O does not exceed volume's * MaxLBA. */ if ((virtLBA + (uint64_t)io_size - 1) <= sc->DD_max_lba) { /* * Check if the I/O crosses a stripe boundary. If not, * translate the virtual LBA to a physical LBA and set * the DevHandle for the PhysDisk to be used. If it * does cross a boundary, do normal I/O. To get the * right DevHandle to use, get the map number for the * column, then use that map number to look up the * DevHandle of the PhysDisk. */ stripe_offset = (uint32_t)virtLBA & (sc->DD_stripe_size - 1); if ((stripe_offset + io_size) <= sc->DD_stripe_size) { physLBA = (uint32_t)virtLBA >> sc->DD_stripe_exponent; stripe_unit = physLBA / sc->DD_num_phys_disks; column = physLBA % sc->DD_num_phys_disks; pIO_req->DevHandle = htole16(sc->DD_column_map[column].dev_handle); /* ???? Is this endian safe*/ cm->cm_desc.SCSIIO.DevHandle = pIO_req->DevHandle; physLBA = (stripe_unit << sc->DD_stripe_exponent) + stripe_offset; ptrLBA = &pIO_req->CDB.CDB32[1]; physLBA_byte = (uint8_t)(physLBA >> 16); *ptrLBA = physLBA_byte; ptrLBA = &pIO_req->CDB.CDB32[2]; physLBA_byte = (uint8_t)(physLBA >> 8); *ptrLBA = physLBA_byte; ptrLBA = &pIO_req->CDB.CDB32[3]; physLBA_byte = (uint8_t)physLBA; *ptrLBA = physLBA_byte; /* * Set flag that Direct Drive I/O is * being done. */ cm->cm_flags |= MPS_CM_FLAGS_DD_IO; } } return; } /* * Handle 10, 12 or 16 byte CDBs. */ if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) || (CDB[0] == WRITE_10) || (CDB[0] == READ_16) || (CDB[0] == WRITE_16) || (CDB[0] == READ_12) || (CDB[0] == WRITE_12))) { /* * For 16-byte CDB's, verify that the upper 4 bytes of the CDB * are 0. If not, this is accessing beyond 2TB so handle it in * the else section. 10-byte and 12-byte CDB's are OK. * FreeBSD sends very rare 12 byte READ/WRITE, but driver is * ready to accept 12byte CDB for Direct IOs. 
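* (READ_12/WRITE_12 carry a 4-byte LBA just like the 10-byte forms, so they can never address past the 32-bit boundary and need no extra check.)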
*/ if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) || (CDB[0] == READ_12 || CDB[0] == WRITE_12) || !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) { /* * Get the transfer size in blocks. */ io_size = (cm->cm_length >> sc->DD_block_exponent); /* * Get virtual LBA. Point to correct lower 4 bytes of * LBA in the CDB depending on command. */ lba_idx = ((CDB[0] == READ_12) || (CDB[0] == WRITE_12) || (CDB[0] == READ_10) || (CDB[0] == WRITE_10))? 2 : 6; virtLBA = ((uint64_t)CDB[lba_idx] << 24) | ((uint64_t)CDB[lba_idx + 1] << 16) | ((uint64_t)CDB[lba_idx + 2] << 8) | (uint64_t)CDB[lba_idx + 3]; /* * Check that LBA range for I/O does not exceed volume's * MaxLBA. */ if ((virtLBA + (uint64_t)io_size - 1) <= sc->DD_max_lba) { /* * Check if the I/O crosses a stripe boundary. * If not, translate the virtual LBA to a * physical LBA and set the DevHandle for the * PhysDisk to be used. If it does cross a * boundary, do normal I/O. To get the right * DevHandle to use, get the map number for the * column, then use that map number to look up * the DevHandle of the PhysDisk. */ stripe_offset = (uint32_t)virtLBA & (sc->DD_stripe_size - 1); if ((stripe_offset + io_size) <= sc->DD_stripe_size) { physLBA = (uint32_t)virtLBA >> sc->DD_stripe_exponent; stripe_unit = physLBA / sc->DD_num_phys_disks; column = physLBA % sc->DD_num_phys_disks; pIO_req->DevHandle = htole16(sc->DD_column_map[column]. dev_handle); cm->cm_desc.SCSIIO.DevHandle = pIO_req->DevHandle; physLBA = (stripe_unit << sc->DD_stripe_exponent) + stripe_offset; ptrLBA = &pIO_req->CDB.CDB32[lba_idx]; physLBA_byte = (uint8_t)(physLBA >> 24); *ptrLBA = physLBA_byte; ptrLBA = &pIO_req->CDB.CDB32[lba_idx + 1]; physLBA_byte = (uint8_t)(physLBA >> 16); *ptrLBA = physLBA_byte; ptrLBA = &pIO_req->CDB.CDB32[lba_idx + 2]; physLBA_byte = (uint8_t)(physLBA >> 8); *ptrLBA = physLBA_byte; ptrLBA = &pIO_req->CDB.CDB32[lba_idx + 3]; physLBA_byte = (uint8_t)physLBA; *ptrLBA = physLBA_byte; /* * Set flag that Direct Drive I/O is * being done. */ cm->cm_flags |= MPS_CM_FLAGS_DD_IO; } } } else { /* * 16-byte CDB and the upper 4 bytes of the CDB are not * 0. Get the transfer size in blocks. */ io_size = (cm->cm_length >> sc->DD_block_exponent); /* * Get virtual LBA. */ virtLBA = ((uint64_t)CDB[2] << 54) | ((uint64_t)CDB[3] << 48) | ((uint64_t)CDB[4] << 40) | ((uint64_t)CDB[5] << 32) | ((uint64_t)CDB[6] << 24) | ((uint64_t)CDB[7] << 16) | ((uint64_t)CDB[8] << 8) | (uint64_t)CDB[9]; /* * Check that LBA range for I/O does not exceed volume's * MaxLBA. */ if ((virtLBA + (uint64_t)io_size - 1) <= sc->DD_max_lba) { /* * Check if the I/O crosses a stripe boundary. * If not, translate the virtual LBA to a * physical LBA and set the DevHandle for the * PhysDisk to be used. If it does cross a * boundary, do normal I/O. To get the right * DevHandle to use, get the map number for the * column, then use that map number to look up * the DevHandle of the PhysDisk. */ stripe_offset = (uint32_t)virtLBA & (sc->DD_stripe_size - 1); if ((stripe_offset + io_size) <= sc->DD_stripe_size) { physLBA = (uint32_t)(virtLBA >> sc->DD_stripe_exponent); stripe_unit = physLBA / sc->DD_num_phys_disks; column = physLBA % sc->DD_num_phys_disks; pIO_req->DevHandle = htole16(sc->DD_column_map[column]. dev_handle); cm->cm_desc.SCSIIO.DevHandle = pIO_req->DevHandle; physLBA = (stripe_unit << sc->DD_stripe_exponent) + stripe_offset; /* * Set upper 4 bytes of LBA to 0. We * assume that the phys disks are less * than 2 TB's in size. Then, set the * lower 4 bytes. 
*/ pIO_req->CDB.CDB32[2] = 0; pIO_req->CDB.CDB32[3] = 0; pIO_req->CDB.CDB32[4] = 0; pIO_req->CDB.CDB32[5] = 0; ptrLBA = &pIO_req->CDB.CDB32[6]; physLBA_byte = (uint8_t)(physLBA >> 24); *ptrLBA = physLBA_byte; ptrLBA = &pIO_req->CDB.CDB32[7]; physLBA_byte = (uint8_t)(physLBA >> 16); *ptrLBA = physLBA_byte; ptrLBA = &pIO_req->CDB.CDB32[8]; physLBA_byte = (uint8_t)(physLBA >> 8); *ptrLBA = physLBA_byte; ptrLBA = &pIO_req->CDB.CDB32[9]; physLBA_byte = (uint8_t)physLBA; *ptrLBA = physLBA_byte; /* * Set flag that Direct Drive I/O is * being done. */ cm->cm_flags |= MPS_CM_FLAGS_DD_IO; } } } } } static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm) { MPI2_SMP_PASSTHROUGH_REPLY *rpl; MPI2_SMP_PASSTHROUGH_REQUEST *req; uint64_t sasaddr; union ccb *ccb; ccb = cm->cm_complete_data; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and SMP * commands require two S/G elements only. That should be handled * in the standard request size. */ if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n", __func__, cm->cm_flags); mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); goto bailout; } rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply; if (rpl == NULL) { mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__); mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); goto bailout; } req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req; sasaddr = le32toh(req->SASAddress.Low); sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32; if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS || rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) { mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n", __func__, le16toh(rpl->IOCStatus), rpl->SASStatus); mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); goto bailout; } mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address " "%#jx completed successfully\n", __func__, (uintmax_t)sasaddr); if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED) mpssas_set_ccbstatus(ccb, CAM_REQ_CMP); else mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR); bailout: /* * We sync in both directions because we had DMAs in the S/G list * in both directions. */ bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); mps_free_command(sc, cm); xpt_done(ccb); } static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr) { struct mps_command *cm; uint8_t *request, *response; MPI2_SMP_PASSTHROUGH_REQUEST *req; struct mps_softc *sc; int error; sc = sassc->sc; error = 0; /* * XXX We don't yet support physical addresses here. */ switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) { case CAM_DATA_PADDR: case CAM_DATA_SG_PADDR: mps_dprint(sc, MPS_ERROR, "%s: physical addresses not supported\n", __func__); mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID); xpt_done(ccb); return; case CAM_DATA_SG: /* * The chip does not support more than one buffer for the * request or response. */ if ((ccb->smpio.smp_request_sglist_cnt > 1) || (ccb->smpio.smp_response_sglist_cnt > 1)) { mps_dprint(sc, MPS_ERROR, "%s: multiple request or response " "buffer segments not supported for SMP\n", __func__); mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID); xpt_done(ccb); return; } /* * The CAM_SCATTER_VALID flag was originally implemented * for the XPT_SCSI_IO CCB, which only has one data pointer. * We have two. 
So, just take that flag to mean that we * might have S/G lists, and look at the S/G segment count * to figure out whether that is the case for each individual * buffer. */ if (ccb->smpio.smp_request_sglist_cnt != 0) { bus_dma_segment_t *req_sg; req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request; request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr; } else request = ccb->smpio.smp_request; if (ccb->smpio.smp_response_sglist_cnt != 0) { bus_dma_segment_t *rsp_sg; rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response; response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr; } else response = ccb->smpio.smp_response; break; case CAM_DATA_VADDR: request = ccb->smpio.smp_request; response = ccb->smpio.smp_response; break; default: mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID); xpt_done(ccb); return; } cm = mps_alloc_command(sc); if (cm == NULL) { mps_dprint(sc, MPS_ERROR, "%s: cannot allocate command\n", __func__); mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL); xpt_done(ccb); return; } req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req; bzero(req, sizeof(*req)); req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; /* Allow the chip to use any route to this SAS address. */ req->PhysicalPort = 0xff; req->RequestDataLength = htole16(ccb->smpio.smp_request_len); req->SGLFlags = MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI; mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS " "address %#jx\n", __func__, (uintmax_t)sasaddr); mpi_init_sge(cm, req, &req->SGL); /* * Set up a uio to pass into mps_map_command(). This allows us to * do one map command, and one busdma call in there. */ cm->cm_uio.uio_iov = cm->cm_iovec; cm->cm_uio.uio_iovcnt = 2; cm->cm_uio.uio_segflg = UIO_SYSSPACE; /* * The read/write flag isn't used by busdma, but set it just in * case. This isn't exactly accurate, either, since we're going in * both directions. */ cm->cm_uio.uio_rw = UIO_WRITE; cm->cm_iovec[0].iov_base = request; cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength); cm->cm_iovec[1].iov_base = response; cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len; cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len + cm->cm_iovec[1].iov_len; /* * Trigger a warning message in mps_data_cb() for the user if we * wind up exceeding two S/G segments. The chip expects one * segment for the request and another for the response. */ cm->cm_max_segs = 2; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_complete = mpssas_smpio_complete; cm->cm_complete_data = ccb; /* * Tell the mapping code that we're using a uio, and that this is * an SMP passthrough request. There is a little special-case * logic there (in mps_data_cb()) to handle the bidirectional * transfer. */ cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS | MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT; /* The chip data format is little endian. */ req->SASAddress.High = htole32(sasaddr >> 32); req->SASAddress.Low = htole32(sasaddr); /* * XXX Note that we don't have a timeout/abort mechanism here. * From the manual, it looks like task management requests only * work for SCSI IO and SATA passthrough requests. We may need to * have a mechanism to retry requests in the event of a chip reset * at least. Hopefully the chip will insure that any errors short * of that are relayed back to the driver. 
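/*
 * A small sketch of how the 64-bit SAS address travels in the SMP request:
 * mpssas_send_smpcmd() above splits it into two 32-bit words
 * (SASAddress.High/Low, stored little-endian via htole32()), and
 * mpssas_smpio_complete() reassembles it with le32toh().  The byte-order
 * conversion is omitted here so the snippet stands alone.
 */
#include <stdint.h>

struct sas_addr_words {
    uint32_t high;      /* upper 32 bits */
    uint32_t low;       /* lower 32 bits */
};

static void
sas_addr_split(uint64_t sasaddr, struct sas_addr_words *w)
{
    w->high = (uint32_t)(sasaddr >> 32);
    w->low = (uint32_t)sasaddr;
}

static uint64_t
sas_addr_join(const struct sas_addr_words *w)
{
    return (((uint64_t)w->high << 32) | w->low);
}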
*/ error = mps_map_command(sc, cm); if ((error != 0) && (error != EINPROGRESS)) { mps_dprint(sc, MPS_ERROR, "%s: error %d returned from mps_map_command()\n", __func__, error); goto bailout_error; } return; bailout_error: mps_free_command(sc, cm); mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL); xpt_done(ccb); return; } static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb) { struct mps_softc *sc; struct mpssas_target *targ; uint64_t sasaddr = 0; sc = sassc->sc; /* * Make sure the target exists. */ KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id)); targ = &sassc->targets[ccb->ccb_h.target_id]; if (targ->handle == 0x0) { mps_dprint(sc, MPS_ERROR, "%s: target %d does not exist!\n", __func__, ccb->ccb_h.target_id); mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT); xpt_done(ccb); return; } /* * If this device has an embedded SMP target, we'll talk to it * directly. * figure out what the expander's address is. */ if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0) sasaddr = targ->sasaddr; /* * If we don't have a SAS address for the expander yet, try * grabbing it from the page 0x83 information cached in the * transport layer for this target. LSI expanders report the * expander SAS address as the port-associated SAS address in * Inquiry VPD page 0x83. Maxim expanders don't report it in page * 0x83. * * XXX KDM disable this for now, but leave it commented out so that * it is obvious that this is another possible way to get the SAS * address. * * The parent handle method below is a little more reliable, and * the other benefit is that it works for devices other than SES * devices. So you can send a SMP request to a da(4) device and it * will get routed to the expander that device is attached to. * (Assuming the da(4) device doesn't contain an SMP target...) */ #if 0 if (sasaddr == 0) sasaddr = xpt_path_sas_addr(ccb->ccb_h.path); #endif /* * If we still don't have a SAS address for the expander, look for * the parent device of this device, which is probably the expander. 
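/*
 * mpssas_send_smpcmd() above describes the bidirectional SMP transfer as a
 * two-element uio: one iovec for the request frame and one for the response
 * frame, so a single mapping call covers both.  A stand-alone sketch of that
 * shape using the standard struct iovec; the kernel cm_uio/cm_iovec setup
 * follows the same pattern, with uio_resid as the combined length.
 */
#include <sys/uio.h>
#include <stddef.h>

struct smp_buffers {
    struct iovec iov[2];
    size_t resid;           /* combined request + response length */
};

static void
smp_fill_iovecs(struct smp_buffers *b, void *request, size_t request_len,
    void *response, size_t response_len)
{
    b->iov[0].iov_base = request;       /* segment 0: SMP request frame */
    b->iov[0].iov_len = request_len;
    b->iov[1].iov_base = response;      /* segment 1: SMP response frame */
    b->iov[1].iov_len = response_len;
    b->resid = request_len + response_len;
}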
*/ if (sasaddr == 0) { #ifdef OLD_MPS_PROBE struct mpssas_target *parent_target; #endif if (targ->parent_handle == 0x0) { mps_dprint(sc, MPS_ERROR, "%s: handle %d does not have a valid " "parent handle!\n", __func__, targ->handle); mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } #ifdef OLD_MPS_PROBE parent_target = mpssas_find_target_by_handle(sassc, 0, targ->parent_handle); if (parent_target == NULL) { mps_dprint(sc, MPS_ERROR, "%s: handle %d does not have a valid " "parent target!\n", __func__, targ->handle); mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } if ((parent_target->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) { mps_dprint(sc, MPS_ERROR, "%s: handle %d parent %d does not " "have an SMP target!\n", __func__, targ->handle, parent_target->handle); mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } sasaddr = parent_target->sasaddr; #else /* OLD_MPS_PROBE */ if ((targ->parent_devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) { mps_dprint(sc, MPS_ERROR, "%s: handle %d parent %d does not " "have an SMP target!\n", __func__, targ->handle, targ->parent_handle); mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } if (targ->parent_sasaddr == 0x0) { mps_dprint(sc, MPS_ERROR, "%s: handle %d parent handle %d does " "not have a valid SAS address!\n", __func__, targ->handle, targ->parent_handle); mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } sasaddr = targ->parent_sasaddr; #endif /* OLD_MPS_PROBE */ } if (sasaddr == 0) { mps_dprint(sc, MPS_INFO, "%s: unable to find SAS address for handle %d\n", __func__, targ->handle); mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } mpssas_send_smpcmd(sassc, ccb, sasaddr); return; bailout: xpt_done(ccb); } static void mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mps_softc *sc; struct mps_command *tm; struct mpssas_target *targ; MPS_FUNCTRACE(sassc->sc); mtx_assert(&sassc->sc->mps_mtx, MA_OWNED); KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id)); sc = sassc->sc; tm = mpssas_alloc_tm(sc); if (tm == NULL) { mps_dprint(sc, MPS_ERROR, "command alloc failure in mpssas_action_resetdev\n"); mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL); xpt_done(ccb); return; } targ = &sassc->targets[ccb->ccb_h.target_id]; req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; req->DevHandle = htole16(targ->handle); req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; /* SAS Hard Link Reset / SATA Link Reset */ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; tm->cm_data = NULL; tm->cm_complete = mpssas_resetdev_complete; tm->cm_complete_data = ccb; tm->cm_targ = targ; mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD); mps_map_command(sc, tm); } static void mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *resp; union ccb *ccb; MPS_FUNCTRACE(sc); mtx_assert(&sc->mps_mtx, MA_OWNED); resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; ccb = tm->cm_complete_data; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for reset of handle %#04x! 
" "This should not happen!\n", __func__, tm->cm_flags, req->DevHandle); mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); goto bailout; } mps_dprint(sc, MPS_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode)); if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) { mpssas_set_ccbstatus(ccb, CAM_REQ_CMP); mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, CAM_LUN_WILDCARD); } else mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); bailout: mpssas_free_tm(sc, tm); xpt_done(ccb); } static void mpssas_poll(struct cam_sim *sim) { struct mpssas_softc *sassc; sassc = cam_sim_softc(sim); if (sassc->sc->mps_debug & MPS_TRACE) { /* frequent debug messages during a panic just slow * everything down too much. */ mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__); sassc->sc->mps_debug &= ~MPS_TRACE; } mps_intr_locked(sassc->sc); } static void mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct mps_softc *sc; sc = (struct mps_softc *)callback_arg; switch (code) { case AC_ADVINFO_CHANGED: { struct mpssas_target *target; struct mpssas_softc *sassc; struct scsi_read_capacity_data_long rcap_buf; struct ccb_dev_advinfo cdai; struct mpssas_lun *lun; lun_id_t lunid; int found_lun; uintptr_t buftype; buftype = (uintptr_t)arg; found_lun = 0; sassc = sc->sassc; /* * We're only interested in read capacity data changes. */ if (buftype != CDAI_TYPE_RCAPLONG) break; /* * We should have a handle for this, but check to make sure. */ KASSERT(xpt_path_target_id(path) < sassc->maxtargets, ("Target %d out of bounds in mpssas_async\n", xpt_path_target_id(path))); target = &sassc->targets[xpt_path_target_id(path)]; if (target->handle == 0) break; lunid = xpt_path_lun_id(path); SLIST_FOREACH(lun, &target->luns, lun_link) { if (lun->lun_id == lunid) { found_lun = 1; break; } } if (found_lun == 0) { lun = malloc(sizeof(struct mpssas_lun), M_MPT2, M_NOWAIT | M_ZERO); if (lun == NULL) { mps_dprint(sc, MPS_ERROR, "Unable to alloc " "LUN for EEDP support.\n"); break; } lun->lun_id = lunid; SLIST_INSERT_HEAD(&target->luns, lun, lun_link); } bzero(&rcap_buf, sizeof(rcap_buf)); xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.ccb_h.flags = CAM_DIR_IN; cdai.buftype = CDAI_TYPE_RCAPLONG; cdai.flags = CDAI_FLAG_NONE; cdai.bufsiz = sizeof(rcap_buf); cdai.buf = (uint8_t *)&rcap_buf; xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP) && (rcap_buf.prot & SRC16_PROT_EN)) { switch (rcap_buf.prot & SRC16_P_TYPE) { case SRC16_PTYPE_1: case SRC16_PTYPE_3: lun->eedp_formatted = TRUE; lun->eedp_block_size = scsi_4btoul(rcap_buf.length); break; case SRC16_PTYPE_2: default: lun->eedp_formatted = FALSE; lun->eedp_block_size = 0; break; } } else { lun->eedp_formatted = FALSE; lun->eedp_block_size = 0; } break; } default: break; } } /* * Set the INRESET flag for this target so that no I/O will be sent to * the target until the reset has completed. If an I/O request does * happen, the devq will be frozen. The CCB holds the path which is * used to release the devq. The devq is released and the CCB is freed * when the TM completes. 
*/ void mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm, struct mpssas_target *target, lun_id_t lun_id) { union ccb *ccb; path_id_t path_id; ccb = xpt_alloc_ccb_nowait(); if (ccb) { path_id = cam_sim_path(sc->sassc->sim); if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id, target->tid, lun_id) != CAM_REQ_CMP) { xpt_free_ccb(ccb); } else { tm->cm_ccb = ccb; tm->cm_targ = target; target->flags |= MPSSAS_TARGET_INRESET; } } } int mpssas_startup(struct mps_softc *sc) { /* * Send the port enable message and set the wait_for_port_enable flag. * This flag helps to keep the simq frozen until all discovery events * are processed. */ sc->wait_for_port_enable = 1; mpssas_send_portenable(sc); return (0); } static int mpssas_send_portenable(struct mps_softc *sc) { MPI2_PORT_ENABLE_REQUEST *request; struct mps_command *cm; MPS_FUNCTRACE(sc); if ((cm = mps_alloc_command(sc)) == NULL) return (EBUSY); request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req; request->Function = MPI2_FUNCTION_PORT_ENABLE; request->MsgFlags = 0; request->VP_ID = 0; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_complete = mpssas_portenable_complete; cm->cm_data = NULL; cm->cm_sge = NULL; mps_map_command(sc, cm); mps_dprint(sc, MPS_XINFO, "mps_send_portenable finished cm %p req %p complete %p\n", cm, cm->cm_req, cm->cm_complete); return (0); } static void mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm) { MPI2_PORT_ENABLE_REPLY *reply; struct mpssas_softc *sassc; MPS_FUNCTRACE(sc); sassc = sc->sassc; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * port enable commands don't have S/G lists. */ if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! " "This should not happen!\n", __func__, cm->cm_flags); } reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply; if (reply == NULL) mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n"); else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) mps_dprint(sc, MPS_FAULT, "Portenable failed\n"); mps_free_command(sc, cm); /* * Get WarpDrive info after discovery is complete but before the scan * starts. At this point, all devices are ready to be exposed to the * OS. If devices should be hidden instead, take them out of the * 'targets' array before the scan. The devinfo for a disk will have * some info and a volume's will be 0. Use that to remove disks. */ mps_wd_config_pages(sc); /* * Done waiting for port enable to complete. Decrement the refcount. * If refcount is 0, discovery is complete and a rescan of the bus can * take place. Since the simq was explicitly frozen before port * enable, it must be explicitly released here to keep the * freeze/release count in sync. 
*/ sc->wait_for_port_enable = 0; sc->port_enable_complete = 1; wakeup(&sc->port_enable_complete); mpssas_startup_decrement(sassc); } int mpssas_check_id(struct mpssas_softc *sassc, int id) { struct mps_softc *sc = sassc->sc; char *ids; char *name; ids = &sc->exclude_ids[0]; while((name = strsep(&ids, ",")) != NULL) { if (name[0] == '\0') continue; if (strtol(name, NULL, 0) == (long)id) return (1); } return (0); } void mpssas_realloc_targets(struct mps_softc *sc, int maxtargets) { struct mpssas_softc *sassc; struct mpssas_lun *lun, *lun_tmp; struct mpssas_target *targ; int i; sassc = sc->sassc; /* * The number of targets is based on IOC Facts, so free all of * the allocated LUNs for each target and then the target buffer * itself. */ for (i=0; i< maxtargets; i++) { targ = &sassc->targets[i]; SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) { free(lun, M_MPT2); } } free(sassc->targets, M_MPT2); sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets, M_MPT2, M_WAITOK|M_ZERO); } Index: head/sys/dev/ocs_fc/ocs_cam.c =================================================================== --- head/sys/dev/ocs_fc/ocs_cam.c (revision 367043) +++ head/sys/dev/ocs_fc/ocs_cam.c (revision 367044) @@ -1,2840 +1,2863 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @defgroup scsi_api_target SCSI Target API * @defgroup scsi_api_initiator SCSI Initiator API * @defgroup cam_api Common Access Method (CAM) API * @defgroup cam_io CAM IO */ /** * @file * Provides CAM functionality. 
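/*
 * mpssas_check_id() above walks the comma-separated exclude_ids string with
 * strsep()/strtol().  A user-space sketch of the same parse; the input is
 * duplicated first because strsep() modifies the buffer it scans, whereas
 * the driver scans its own writable copy in the softc.
 */
#include <stdlib.h>
#include <string.h>

static int
id_is_excluded(const char *list, long id)
{
    char *copy, *ids, *name;
    int rc = 0;

    copy = ids = strdup(list);
    if (copy == NULL)
        return (0);
    while ((name = strsep(&ids, ",")) != NULL) {
        if (name[0] == '\0')
            continue;
        if (strtol(name, NULL, 0) == id) {
            rc = 1;
            break;
        }
    }
    free(copy);
    return (rc);
}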
*/ #include "ocs.h" #include "ocs_scsi.h" #include "ocs_device.h" /* Default IO timeout value for initiators is 30 seconds */ #define OCS_CAM_IO_TIMEOUT 30 typedef struct { ocs_scsi_sgl_t *sgl; uint32_t sgl_max; uint32_t sgl_count; int32_t rc; } ocs_dmamap_load_arg_t; static void ocs_action(struct cam_sim *, union ccb *); static void ocs_poll(struct cam_sim *); static ocs_tgt_resource_t *ocs_tgt_resource_get(ocs_fcport *, struct ccb_hdr *, uint32_t *); static int32_t ocs_tgt_resource_abort(struct ocs_softc *, ocs_tgt_resource_t *); static uint32_t ocs_abort_initiator_io(struct ocs_softc *ocs, union ccb *accb); static void ocs_abort_inot(struct ocs_softc *ocs, union ccb *ccb); static void ocs_abort_atio(struct ocs_softc *ocs, union ccb *ccb); static int32_t ocs_target_tmf_cb(ocs_io_t *, ocs_scsi_io_status_e, uint32_t, void *); static int32_t ocs_io_abort_cb(ocs_io_t *, ocs_scsi_io_status_e, uint32_t, void *); static int32_t ocs_task_set_full_or_busy(ocs_io_t *io); static int32_t ocs_initiator_tmf_cb(ocs_io_t *, ocs_scsi_io_status_e, ocs_scsi_cmd_resp_t *, uint32_t, void *); static uint32_t ocs_fcp_change_role(struct ocs_softc *ocs, ocs_fcport *fcp, uint32_t new_role); static void ocs_ldt(void *arg); static void ocs_ldt_task(void *arg, int pending); static void ocs_delete_target(ocs_t *ocs, ocs_fcport *fcp, int tgt); uint32_t ocs_add_new_tgt(ocs_node_t *node, ocs_fcport *fcp); uint32_t ocs_update_tgt(ocs_node_t *node, ocs_fcport *fcp, uint32_t tgt_id); int32_t ocs_tgt_find(ocs_fcport *fcp, ocs_node_t *node); static inline ocs_io_t *ocs_scsi_find_io(struct ocs_softc *ocs, uint32_t tag) { return ocs_io_get_instance(ocs, tag); } static inline void ocs_target_io_free(ocs_io_t *io) { io->tgt_io.state = OCS_CAM_IO_FREE; io->tgt_io.flags = 0; io->tgt_io.app = NULL; ocs_scsi_io_complete(io); if(io->ocs->io_in_use != 0) atomic_subtract_acq_32(&io->ocs->io_in_use, 1); } static int32_t ocs_attach_port(ocs_t *ocs, int chan) { struct cam_sim *sim = NULL; struct cam_path *path = NULL; uint32_t max_io = ocs_scsi_get_property(ocs, OCS_SCSI_MAX_IOS); ocs_fcport *fcp = FCPORT(ocs, chan); if (NULL == (sim = cam_sim_alloc(ocs_action, ocs_poll, device_get_name(ocs->dev), ocs, device_get_unit(ocs->dev), &ocs->sim_lock, max_io, max_io, ocs->devq))) { device_printf(ocs->dev, "Can't allocate SIM\n"); return 1; } mtx_lock(&ocs->sim_lock); if (CAM_SUCCESS != xpt_bus_register(sim, ocs->dev, chan)) { device_printf(ocs->dev, "Can't register bus %d\n", 0); mtx_unlock(&ocs->sim_lock); cam_sim_free(sim, FALSE); return 1; } mtx_unlock(&ocs->sim_lock); if (CAM_REQ_CMP != xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)) { device_printf(ocs->dev, "Can't create path\n"); xpt_bus_deregister(cam_sim_path(sim)); mtx_unlock(&ocs->sim_lock); cam_sim_free(sim, FALSE); return 1; } fcp->ocs = ocs; fcp->sim = sim; fcp->path = path; callout_init_mtx(&fcp->ldt, &ocs->sim_lock, 0); TASK_INIT(&fcp->ltask, 1, ocs_ldt_task, fcp); return 0; } static int32_t ocs_detach_port(ocs_t *ocs, int32_t chan) { ocs_fcport *fcp = NULL; struct cam_sim *sim = NULL; struct cam_path *path = NULL; fcp = FCPORT(ocs, chan); sim = fcp->sim; path = fcp->path; callout_drain(&fcp->ldt); ocs_ldt_task(fcp, 0); if (fcp->sim) { mtx_lock(&ocs->sim_lock); ocs_tgt_resource_abort(ocs, &fcp->targ_rsrc_wildcard); if (path) { xpt_async(AC_LOST_DEVICE, path, NULL); xpt_free_path(path); fcp->path = NULL; } xpt_bus_deregister(cam_sim_path(sim)); cam_sim_free(sim, FALSE); fcp->sim = NULL; mtx_unlock(&ocs->sim_lock); } return 0; } int32_t 
ocs_cam_attach(ocs_t *ocs) { struct cam_devq *devq = NULL; int i = 0; uint32_t max_io = ocs_scsi_get_property(ocs, OCS_SCSI_MAX_IOS); if (NULL == (devq = cam_simq_alloc(max_io))) { device_printf(ocs->dev, "Can't allocate SIMQ\n"); return -1; } ocs->devq = devq; if (mtx_initialized(&ocs->sim_lock) == 0) { mtx_init(&ocs->sim_lock, "ocs_sim_lock", NULL, MTX_DEF); } for (i = 0; i < (ocs->num_vports + 1); i++) { if (ocs_attach_port(ocs, i)) { ocs_log_err(ocs, "Attach port failed for chan: %d\n", i); goto detach_port; } } ocs->io_high_watermark = max_io; ocs->io_in_use = 0; return 0; detach_port: while (--i >= 0) { ocs_detach_port(ocs, i); } cam_simq_free(ocs->devq); if (mtx_initialized(&ocs->sim_lock)) mtx_destroy(&ocs->sim_lock); return 1; } int32_t ocs_cam_detach(ocs_t *ocs) { int i = 0; for (i = (ocs->num_vports); i >= 0; i--) { ocs_detach_port(ocs, i); } cam_simq_free(ocs->devq); if (mtx_initialized(&ocs->sim_lock)) mtx_destroy(&ocs->sim_lock); return 0; } /*************************************************************************** * Functions required by SCSI base driver API */ /** * @ingroup scsi_api_target * @brief Attach driver to the BSD SCSI layer (a.k.a CAM) * * Allocates + initializes CAM related resources and attaches to the CAM * * @param ocs the driver instance's software context * * @return 0 on success, non-zero otherwise */ int32_t ocs_scsi_tgt_new_device(ocs_t *ocs) { ocs->enable_task_set_full = ocs_scsi_get_property(ocs, OCS_SCSI_ENABLE_TASK_SET_FULL); ocs_log_debug(ocs, "task set full processing is %s\n", ocs->enable_task_set_full ? "enabled" : "disabled"); return 0; } /** * @ingroup scsi_api_target * @brief Tears down target members of ocs structure. * * Called by OS code when device is removed. * * @param ocs pointer to ocs * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_tgt_del_device(ocs_t *ocs) { return 0; } /** * @ingroup scsi_api_target * @brief accept new domain notification * * Called by base drive when new domain is discovered. A target-server * will use this call to prepare for new remote node notifications * arising from ocs_scsi_new_initiator(). * * The domain context has an element ocs_scsi_tgt_domain_t tgt_domain * which is declared by the target-server code and is used for target-server * private data. * * This function will only be called if the base-driver has been enabled for * target capability. * * Note that this call is made to target-server backends, * the ocs_scsi_ini_new_domain() function is called to initiator-client backends. * * @param domain pointer to domain * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_tgt_new_domain(ocs_domain_t *domain) { return 0; } /** * @ingroup scsi_api_target * @brief accept domain lost notification * * Called by base-driver when a domain goes away. A target-server will * use this call to clean up all domain scoped resources. * * Note that this call is made to target-server backends, * the ocs_scsi_ini_del_domain() function is called to initiator-client backends. * * @param domain pointer to domain * * @return returns 0 for success, a negative error code value for failure. */ void ocs_scsi_tgt_del_domain(ocs_domain_t *domain) { } /** * @ingroup scsi_api_target * @brief accept new sli port (sport) notification * * Called by base drive when new sport is discovered. A target-server * will use this call to prepare for new remote node notifications * arising from ocs_scsi_new_initiator(). 
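/*
 * ocs_cam_attach() above attaches one SIM per port and, on failure, detaches
 * the ports it already attached before giving up.  A minimal stand-alone
 * sketch of that attach-or-unwind control flow, with attach_one()/
 * detach_one() standing in for ocs_attach_port()/ocs_detach_port().
 */
static int
attach_all(int count, int (*attach_one)(int), void (*detach_one)(int))
{
    int i;

    for (i = 0; i < count; i++) {
        if (attach_one(i) != 0)
            goto unwind;
    }
    return (0);

unwind:
    while (--i >= 0)
        detach_one(i);
    return (-1);
}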
* * The domain context has an element ocs_scsi_tgt_sport_t tgt_sport * which is declared by the target-server code and is used for * target-server private data. * * This function will only be called if the base-driver has been enabled for * target capability. * * Note that this call is made to target-server backends, * the ocs_scsi_tgt_new_domain() is called to initiator-client backends. * * @param sport pointer to SLI port * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_tgt_new_sport(ocs_sport_t *sport) { ocs_t *ocs = sport->ocs; if(!sport->is_vport) { sport->tgt_data = FCPORT(ocs, 0); } return 0; } /** * @ingroup scsi_api_target * @brief accept SLI port gone notification * * Called by base-driver when a sport goes away. A target-server will * use this call to clean up all sport scoped resources. * * Note that this call is made to target-server backends, * the ocs_scsi_ini_del_sport() is called to initiator-client backends. * * @param sport pointer to SLI port * * @return returns 0 for success, a negative error code value for failure. */ void ocs_scsi_tgt_del_sport(ocs_sport_t *sport) { return; } /** * @ingroup scsi_api_target * @brief receive notification of a new SCSI initiator node * * Sent by base driver to notify a target-server of the presense of a new * remote initiator. The target-server may use this call to prepare for * inbound IO from this node. * * The ocs_node_t structure has and elment of type ocs_scsi_tgt_node_t named * tgt_node that is declared and used by a target-server for private * information. * * This function is only called if the target capability is enabled in driver. * * @param node pointer to new remote initiator node * * @return returns 0 for success, a negative error code value for failure. * * @note */ int32_t ocs_scsi_new_initiator(ocs_node_t *node) { ocs_t *ocs = node->ocs; struct ac_contract ac; struct ac_device_changed *adc; ocs_fcport *fcp = NULL; fcp = node->sport->tgt_data; if (fcp == NULL) { ocs_log_err(ocs, "FCP is NULL \n"); return 1; } /* * Update the IO watermark by decrementing it by the * number of IOs reserved for each initiator. */ atomic_subtract_acq_32(&ocs->io_high_watermark, OCS_RSVD_INI_IO); ac.contract_number = AC_CONTRACT_DEV_CHG; adc = (struct ac_device_changed *) ac.contract_data; adc->wwpn = ocs_node_get_wwpn(node); adc->port = node->rnode.fc_id; adc->target = node->instance_index; adc->arrived = 1; xpt_async(AC_CONTRACT, fcp->path, &ac); return 0; } /** * @ingroup scsi_api_target * @brief validate new initiator * * Sent by base driver to validate a remote initiatiator. The target-server * returns TRUE if this initiator should be accepted. * * This function is only called if the target capability is enabled in driver. * * @param node pointer to remote initiator node to validate * * @return TRUE if initiator should be accepted, FALSE if it should be rejected * * @note */ int32_t ocs_scsi_validate_initiator(ocs_node_t *node) { return 1; } /** * @ingroup scsi_api_target * @brief Delete a SCSI initiator node * * Sent by base driver to notify a target-server that a remote initiator * is now gone. The base driver will have terminated all outstanding IOs * and the target-server will receive appropriate completions. * * This function is only called if the base driver is enabled for * target capability. * * @param node pointer node being deleted * @param reason Reason why initiator is gone. 
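/*
 * ocs_scsi_new_initiator() above lowers io_high_watermark by the
 * per-initiator reservation when an initiator logs in, and
 * ocs_scsi_del_initiator() below raises it again when one goes away.  A
 * stand-alone sketch of that accounting using C11 atomics in place of the
 * kernel atomic_subtract_acq_32()/atomic_add_acq_32(); the reservation value
 * here is only a placeholder for OCS_RSVD_INI_IO.
 */
#include <stdatomic.h>
#include <stdint.h>

#define RSVD_INI_IO 8                   /* placeholder for OCS_RSVD_INI_IO */

static _Atomic uint32_t io_high_watermark;

static void
initiator_arrived(void)
{
    atomic_fetch_sub(&io_high_watermark, RSVD_INI_IO);
}

static void
initiator_departed(void)
{
    atomic_fetch_add(&io_high_watermark, RSVD_INI_IO);
}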
* * @return OCS_SCSI_CALL_COMPLETE to indicate that all work was completed * * @note */ int32_t ocs_scsi_del_initiator(ocs_node_t *node, ocs_scsi_del_initiator_reason_e reason) { ocs_t *ocs = node->ocs; struct ac_contract ac; struct ac_device_changed *adc; ocs_fcport *fcp = NULL; fcp = node->sport->tgt_data; if (fcp == NULL) { ocs_log_err(ocs, "FCP is NULL \n"); return 1; } ac.contract_number = AC_CONTRACT_DEV_CHG; adc = (struct ac_device_changed *) ac.contract_data; adc->wwpn = ocs_node_get_wwpn(node); adc->port = node->rnode.fc_id; adc->target = node->instance_index; adc->arrived = 0; xpt_async(AC_CONTRACT, fcp->path, &ac); if (reason == OCS_SCSI_INITIATOR_MISSING) { return OCS_SCSI_CALL_COMPLETE; } /* * Update the IO watermark by incrementing it by the * number of IOs reserved for each initiator. */ atomic_add_acq_32(&ocs->io_high_watermark, OCS_RSVD_INI_IO); return OCS_SCSI_CALL_COMPLETE; } /** * @ingroup scsi_api_target * @brief receive FCP SCSI Command * * Called by the base driver when a new SCSI command has been received. The * target-server will process the command, and issue data and/or response phase * requests to the base driver. * * The IO context (ocs_io_t) structure has and element of type * ocs_scsi_tgt_io_t named tgt_io that is declared and used by * a target-server for private information. * * @param io pointer to IO context * @param lun LUN for this IO * @param cdb pointer to SCSI CDB * @param cdb_len length of CDB in bytes * @param flags command flags * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_recv_cmd(ocs_io_t *io, uint64_t lun, uint8_t *cdb, uint32_t cdb_len, uint32_t flags) { ocs_t *ocs = io->ocs; struct ccb_accept_tio *atio = NULL; ocs_node_t *node = io->node; ocs_tgt_resource_t *trsrc = NULL; int32_t rc = -1; ocs_fcport *fcp = NULL; fcp = node->sport->tgt_data; if (fcp == NULL) { ocs_log_err(ocs, "FCP is NULL \n"); return 1; } atomic_add_acq_32(&ocs->io_in_use, 1); /* set target io timeout */ io->timeout = ocs->target_io_timer_sec; if (ocs->enable_task_set_full && (ocs->io_in_use >= ocs->io_high_watermark)) { return ocs_task_set_full_or_busy(io); } else { atomic_store_rel_32(&io->node->tgt_node.busy_sent, FALSE); } if ((lun < OCS_MAX_LUN) && fcp->targ_rsrc[lun].enabled) { trsrc = &fcp->targ_rsrc[lun]; } else if (fcp->targ_rsrc_wildcard.enabled) { trsrc = &fcp->targ_rsrc_wildcard; } if (trsrc) { atio = (struct ccb_accept_tio *)STAILQ_FIRST(&trsrc->atio); } if (atio) { STAILQ_REMOVE_HEAD(&trsrc->atio, sim_links.stqe); atio->ccb_h.status = CAM_CDB_RECVD; atio->ccb_h.target_lun = lun; atio->sense_len = 0; atio->init_id = node->instance_index; atio->tag_id = io->tag; atio->ccb_h.ccb_io_ptr = io; if (flags & OCS_SCSI_CMD_SIMPLE) atio->tag_action = MSG_SIMPLE_Q_TAG; else if (flags & OCS_SCSI_CMD_HEAD_OF_QUEUE) atio->tag_action = MSG_HEAD_OF_Q_TAG; else if (flags & OCS_SCSI_CMD_ORDERED) atio->tag_action = MSG_ORDERED_Q_TAG; + else if (flags & OCS_SCSI_CMD_ACA) + atio->tag_action = MSG_ACA_TASK; else - atio->tag_action = 0; + atio->tag_action = CAM_TAG_ACTION_NONE; + atio->priority = (flags & OCS_SCSI_PRIORITY_MASK) >> + OCS_SCSI_PRIORITY_SHIFT; atio->cdb_len = cdb_len; ocs_memcpy(atio->cdb_io.cdb_bytes, cdb, cdb_len); io->tgt_io.flags = 0; io->tgt_io.state = OCS_CAM_IO_COMMAND; io->tgt_io.lun = lun; xpt_done((union ccb *)atio); rc = 0; } else { device_printf( ocs->dev, "%s: no ATIO for LUN %lx (en=%s) OX_ID %#x\n", __func__, (unsigned long)lun, trsrc ? (trsrc->enabled ? 
"T" : "F") : "X", be16toh(io->init_task_tag)); io->tgt_io.state = OCS_CAM_IO_MAX; ocs_target_io_free(io); } return rc; } /** * @ingroup scsi_api_target * @brief receive FCP SCSI Command with first burst data. * * Receive a new FCP SCSI command from the base driver with first burst data. * * @param io pointer to IO context * @param lun LUN for this IO * @param cdb pointer to SCSI CDB * @param cdb_len length of CDB in bytes * @param flags command flags * @param first_burst_buffers first burst buffers * @param first_burst_buffer_count The number of bytes received in the first burst * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_recv_cmd_first_burst(ocs_io_t *io, uint64_t lun, uint8_t *cdb, uint32_t cdb_len, uint32_t flags, ocs_dma_t first_burst_buffers[], uint32_t first_burst_buffer_count) { return -1; } /** * @ingroup scsi_api_target * @brief receive a TMF command IO * * Called by the base driver when a SCSI TMF command has been received. The * target-server will process the command, aborting commands as needed, and post * a response using ocs_scsi_send_resp() * * The IO context (ocs_io_t) structure has and element of type ocs_scsi_tgt_io_t named * tgt_io that is declared and used by a target-server for private information. * * If the target-server walks the nodes active_ios linked list, and starts IO * abort processing, the code must be sure not to abort the IO passed into the * ocs_scsi_recv_tmf() command. * * @param tmfio pointer to IO context * @param lun logical unit value * @param cmd command request * @param abortio pointer to IO object to abort for TASK_ABORT (NULL for all other TMF) * @param flags flags * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_recv_tmf(ocs_io_t *tmfio, uint64_t lun, ocs_scsi_tmf_cmd_e cmd, ocs_io_t *abortio, uint32_t flags) { ocs_t *ocs = tmfio->ocs; ocs_node_t *node = tmfio->node; ocs_tgt_resource_t *trsrc = NULL; struct ccb_immediate_notify *inot = NULL; int32_t rc = -1; ocs_fcport *fcp = NULL; fcp = node->sport->tgt_data; if (fcp == NULL) { ocs_log_err(ocs, "FCP is NULL \n"); return 1; } if ((lun < OCS_MAX_LUN) && fcp->targ_rsrc[lun].enabled) { trsrc = &fcp->targ_rsrc[lun]; } else if (fcp->targ_rsrc_wildcard.enabled) { trsrc = &fcp->targ_rsrc_wildcard; } device_printf(tmfio->ocs->dev, "%s: io=%p cmd=%#x LU=%lx en=%s\n", __func__, tmfio, cmd, (unsigned long)lun, trsrc ? (trsrc->enabled ? "T" : "F") : "X"); if (trsrc) { inot = (struct ccb_immediate_notify *)STAILQ_FIRST(&trsrc->inot); } if (!inot) { device_printf( ocs->dev, "%s: no INOT for LUN %llx (en=%s) OX_ID %#x\n", __func__, (unsigned long long)lun, trsrc ? (trsrc->enabled ? 
"T" : "F") : "X", be16toh(tmfio->init_task_tag)); if (abortio) { ocs_scsi_io_complete(abortio); } ocs_scsi_io_complete(tmfio); goto ocs_scsi_recv_tmf_out; } tmfio->tgt_io.app = abortio; STAILQ_REMOVE_HEAD(&trsrc->inot, sim_links.stqe); inot->tag_id = tmfio->tag; inot->seq_id = tmfio->tag; if ((lun < OCS_MAX_LUN) && fcp->targ_rsrc[lun].enabled) { inot->initiator_id = node->instance_index; } else { inot->initiator_id = CAM_TARGET_WILDCARD; } inot->ccb_h.status = CAM_MESSAGE_RECV; inot->ccb_h.target_lun = lun; switch (cmd) { case OCS_SCSI_TMF_ABORT_TASK: inot->arg = MSG_ABORT_TASK; inot->seq_id = abortio->tag; device_printf(ocs->dev, "%s: ABTS IO.%#x st=%#x\n", __func__, abortio->tag, abortio->tgt_io.state); abortio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_RECV; abortio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_NOTIFY; break; case OCS_SCSI_TMF_QUERY_TASK_SET: device_printf(ocs->dev, "%s: OCS_SCSI_TMF_QUERY_TASK_SET not supported\n", __func__); STAILQ_INSERT_TAIL(&trsrc->inot, &inot->ccb_h, sim_links.stqe); ocs_scsi_io_complete(tmfio); goto ocs_scsi_recv_tmf_out; break; case OCS_SCSI_TMF_ABORT_TASK_SET: inot->arg = MSG_ABORT_TASK_SET; break; case OCS_SCSI_TMF_CLEAR_TASK_SET: inot->arg = MSG_CLEAR_TASK_SET; break; case OCS_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT: inot->arg = MSG_QUERY_ASYNC_EVENT; break; case OCS_SCSI_TMF_LOGICAL_UNIT_RESET: inot->arg = MSG_LOGICAL_UNIT_RESET; break; case OCS_SCSI_TMF_CLEAR_ACA: inot->arg = MSG_CLEAR_ACA; break; case OCS_SCSI_TMF_TARGET_RESET: inot->arg = MSG_TARGET_RESET; break; default: device_printf(ocs->dev, "%s: unsupported TMF %#x\n", __func__, cmd); STAILQ_INSERT_TAIL(&trsrc->inot, &inot->ccb_h, sim_links.stqe); goto ocs_scsi_recv_tmf_out; } rc = 0; xpt_print(inot->ccb_h.path, "%s: func=%#x stat=%#x id=%#x lun=%#x" " flags=%#x tag=%#x seq=%#x ini=%#x arg=%#x\n", __func__, inot->ccb_h.func_code, inot->ccb_h.status, inot->ccb_h.target_id, (unsigned int)inot->ccb_h.target_lun, inot->ccb_h.flags, inot->tag_id, inot->seq_id, inot->initiator_id, inot->arg); xpt_done((union ccb *)inot); if (abortio) { abortio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_DEV; rc = ocs_scsi_tgt_abort_io(abortio, ocs_io_abort_cb, tmfio); } ocs_scsi_recv_tmf_out: return rc; } /** * @ingroup scsi_api_initiator * @brief Initializes any initiator fields on the ocs structure. * * Called by OS initialization code when a new device is discovered. * * @param ocs pointer to ocs * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_ini_new_device(ocs_t *ocs) { return 0; } /** * @ingroup scsi_api_initiator * @brief Tears down initiator members of ocs structure. * * Called by OS code when device is removed. * * @param ocs pointer to ocs * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_ini_del_device(ocs_t *ocs) { return 0; } /** * @ingroup scsi_api_initiator * @brief accept new domain notification * * Called by base drive when new domain is discovered. An initiator-client * will accept this call to prepare for new remote node notifications * arising from ocs_scsi_new_target(). * * The domain context has the element ocs_scsi_ini_domain_t ini_domain * which is declared by the initiator-client code and is used for * initiator-client private data. * * This function will only be called if the base-driver has been enabled for * initiator capability. * * Note that this call is made to initiator-client backends, * the ocs_scsi_tgt_new_domain() function is called to target-server backends. 
* * @param domain pointer to domain * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_ini_new_domain(ocs_domain_t *domain) { return 0; } /** * @ingroup scsi_api_initiator * @brief accept domain lost notification * * Called by base-driver when a domain goes away. An initiator-client will * use this call to clean up all domain scoped resources. * * This function will only be called if the base-driver has been enabled for * initiator capability. * * Note that this call is made to initiator-client backends, * the ocs_scsi_tgt_del_domain() function is called to target-server backends. * * @param domain pointer to domain * * @return returns 0 for success, a negative error code value for failure. */ void ocs_scsi_ini_del_domain(ocs_domain_t *domain) { } /** * @ingroup scsi_api_initiator * @brief accept new sli port notification * * Called by base drive when new sli port (sport) is discovered. * A target-server will use this call to prepare for new remote node * notifications arising from ocs_scsi_new_initiator(). * * This function will only be called if the base-driver has been enabled for * target capability. * * Note that this call is made to target-server backends, * the ocs_scsi_ini_new_sport() function is called to initiator-client backends. * * @param sport pointer to sport * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_ini_new_sport(ocs_sport_t *sport) { ocs_t *ocs = sport->ocs; ocs_fcport *fcp = FCPORT(ocs, 0); if (!sport->is_vport) { sport->tgt_data = fcp; fcp->fc_id = sport->fc_id; } return 0; } /** * @ingroup scsi_api_initiator * @brief accept sli port gone notification * * Called by base-driver when a sport goes away. A target-server will * use this call to clean up all sport scoped resources. * * Note that this call is made to target-server backends, * the ocs_scsi_ini_del_sport() function is called to initiator-client backends. * * @param sport pointer to SLI port * * @return returns 0 for success, a negative error code value for failure. */ void ocs_scsi_ini_del_sport(ocs_sport_t *sport) { ocs_t *ocs = sport->ocs; ocs_fcport *fcp = FCPORT(ocs, 0); if (!sport->is_vport) { fcp->fc_id = 0; } } void ocs_scsi_sport_deleted(ocs_sport_t *sport) { ocs_t *ocs = sport->ocs; ocs_fcport *fcp = NULL; ocs_xport_stats_t value; if (!sport->is_vport) { return; } fcp = sport->tgt_data; ocs_xport_status(ocs->xport, OCS_XPORT_PORT_STATUS, &value); if (value.value == 0) { ocs_log_debug(ocs, "PORT offline,.. skipping\n"); return; } if ((fcp->role != KNOB_ROLE_NONE)) { if(fcp->vport->sport != NULL) { ocs_log_debug(ocs,"sport is not NULL, skipping\n"); return; } ocs_sport_vport_alloc(ocs->domain, fcp->vport); return; } } int32_t ocs_tgt_find(ocs_fcport *fcp, ocs_node_t *node) { ocs_fc_target_t *tgt = NULL; uint32_t i; for (i = 0; i < OCS_MAX_TARGETS; i++) { tgt = &fcp->tgt[i]; if (tgt->state == OCS_TGT_STATE_NONE) continue; if (ocs_node_get_wwpn(node) == tgt->wwpn) { return i; } } return -1; } /** * @ingroup scsi_api_initiator * @brief receive notification of a new SCSI target node * * Sent by base driver to notify an initiator-client of the presense of a new * remote target. The initiator-server may use this call to prepare for * inbound IO from this node. * * This function is only called if the base driver is enabled for * initiator capability. 
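/*
 * ocs_tgt_find() above is a linear scan of the fixed target table keyed by
 * WWPN, and ocs_add_new_tgt() below pairs it with a scan for the first free
 * slot.  A stand-alone sketch of both lookups over a simplified table;
 * ocs_fc_target_t is reduced to the two fields the lookups use, and the
 * table size is a placeholder for OCS_MAX_TARGETS.
 */
#include <stdint.h>

#define MAX_TARGETS 256

enum tgt_state { TGT_NONE, TGT_VALID, TGT_LOST };

struct tgt_entry {
    enum tgt_state state;
    uint64_t wwpn;
};

static int
tgt_find(const struct tgt_entry *tbl, uint64_t wwpn)
{
    int i;

    for (i = 0; i < MAX_TARGETS; i++) {
        if (tbl[i].state != TGT_NONE && tbl[i].wwpn == wwpn)
            return (i);
    }
    return (-1);
}

static int
tgt_free_slot(const struct tgt_entry *tbl)
{
    int i;

    for (i = 0; i < MAX_TARGETS; i++) {
        if (tbl[i].state == TGT_NONE)
            return (i);
    }
    return (-1);
}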
* * @param node pointer to new remote initiator node * * @return none * * @note */ uint32_t ocs_update_tgt(ocs_node_t *node, ocs_fcport *fcp, uint32_t tgt_id) { ocs_fc_target_t *tgt = NULL; tgt = &fcp->tgt[tgt_id]; tgt->node_id = node->instance_index; tgt->state = OCS_TGT_STATE_VALID; tgt->port_id = node->rnode.fc_id; tgt->wwpn = ocs_node_get_wwpn(node); tgt->wwnn = ocs_node_get_wwnn(node); return 0; } uint32_t ocs_add_new_tgt(ocs_node_t *node, ocs_fcport *fcp) { uint32_t i; struct ocs_softc *ocs = node->ocs; union ccb *ccb = NULL; for (i = 0; i < OCS_MAX_TARGETS; i++) { if (fcp->tgt[i].state == OCS_TGT_STATE_NONE) break; } if (NULL == (ccb = xpt_alloc_ccb_nowait())) { device_printf(ocs->dev, "%s: ccb allocation failed\n", __func__); return -1; } if (CAM_REQ_CMP != xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(fcp->sim), i, CAM_LUN_WILDCARD)) { device_printf( ocs->dev, "%s: target path creation failed\n", __func__); xpt_free_ccb(ccb); return -1; } ocs_update_tgt(node, fcp, i); xpt_rescan(ccb); return 0; } int32_t ocs_scsi_new_target(ocs_node_t *node) { ocs_fcport *fcp = NULL; int32_t i; fcp = node->sport->tgt_data; if (fcp == NULL) { printf("%s:FCP is NULL \n", __func__); return 0; } i = ocs_tgt_find(fcp, node); if (i < 0) { ocs_add_new_tgt(node, fcp); return 0; } ocs_update_tgt(node, fcp, i); return 0; } static void ocs_delete_target(ocs_t *ocs, ocs_fcport *fcp, int tgt) { struct cam_path *cpath = NULL; if (!fcp->sim) { device_printf(ocs->dev, "%s: calling with NULL sim\n", __func__); return; } if (CAM_REQ_CMP == xpt_create_path(&cpath, NULL, cam_sim_path(fcp->sim), tgt, CAM_LUN_WILDCARD)) { xpt_async(AC_LOST_DEVICE, cpath, NULL); xpt_free_path(cpath); } } /* * Device Lost Timer Function- when we have decided that a device was lost, * we wait a specific period of time prior to telling the OS about lost device. * * This timer function gets activated when the device was lost. * This function fires once a second and then scans the port database * for devices that are marked dead but still have a virtual target assigned. * We decrement a counter for that port database entry, and when it hits zero, * we tell the OS the device was lost. Timer will be stopped when the device * comes back active or removed from the OS. */ static void ocs_ldt(void *arg) { ocs_fcport *fcp = arg; taskqueue_enqueue(taskqueue_thread, &fcp->ltask); } static void ocs_ldt_task(void *arg, int pending) { ocs_fcport *fcp = arg; ocs_t *ocs = fcp->ocs; int i, more_to_do = 0; ocs_fc_target_t *tgt = NULL; for (i = 0; i < OCS_MAX_TARGETS; i++) { tgt = &fcp->tgt[i]; if (tgt->state != OCS_TGT_STATE_LOST) { continue; } if ((tgt->gone_timer != 0) && (ocs->attached)){ tgt->gone_timer -= 1; more_to_do++; continue; } if (tgt->is_target) { tgt->is_target = 0; ocs_delete_target(ocs, fcp, i); } tgt->state = OCS_TGT_STATE_NONE; } if (more_to_do) { callout_reset(&fcp->ldt, hz, ocs_ldt, fcp); } else { callout_deactivate(&fcp->ldt); } } /** * @ingroup scsi_api_initiator * @brief Delete a SCSI target node * * Sent by base driver to notify a initiator-client that a remote target * is now gone. The base driver will have terminated all outstanding IOs * and the initiator-client will receive appropriate completions. * * The ocs_node_t structure has and elment of type ocs_scsi_ini_node_t named * ini_node that is declared and used by a target-server for private * information. * * This function is only called if the base driver is enabled for * initiator capability. 
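/*
 * A stand-alone sketch of the device-lost timer in ocs_ldt_task() above,
 * simplified to its core: once a second every target marked lost has its
 * countdown decremented, a target whose countdown has expired is announced
 * as gone and its slot freed, and the caller re-arms the timer only while
 * lost targets remain.  announce_lost() is a placeholder for
 * ocs_delete_target(), and the "still attached" check is omitted here.
 */
struct lost_tgt {
    int lost;                   /* marked lost by ocs_scsi_del_target() */
    unsigned int gone_timer;    /* seconds left before announcing the loss */
};

/* Returns nonzero if the one-second timer should be re-armed. */
static int
lost_device_tick(struct lost_tgt *tbl, int count, void (*announce_lost)(int))
{
    int i, more_to_do = 0;

    for (i = 0; i < count; i++) {
        if (!tbl[i].lost)
            continue;
        if (tbl[i].gone_timer != 0) {
            tbl[i].gone_timer--;
            more_to_do++;
            continue;
        }
        announce_lost(i);
        tbl[i].lost = 0;
    }
    return (more_to_do);
}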
* * @param node pointer node being deleted * @param reason reason for deleting the target * * @return Returns OCS_SCSI_CALL_ASYNC if target delete is queued for async * completion and OCS_SCSI_CALL_COMPLETE if call completed or error. * * @note */ int32_t ocs_scsi_del_target(ocs_node_t *node, ocs_scsi_del_target_reason_e reason) { struct ocs_softc *ocs = node->ocs; ocs_fcport *fcp = NULL; ocs_fc_target_t *tgt = NULL; int32_t tgt_id; if (ocs == NULL) { ocs_log_err(ocs,"OCS is NULL \n"); return -1; } fcp = node->sport->tgt_data; if (fcp == NULL) { ocs_log_err(ocs,"FCP is NULL \n"); return -1; } tgt_id = ocs_tgt_find(fcp, node); if (tgt_id == -1) { ocs_log_err(ocs,"target is invalid\n"); return -1; } tgt = &fcp->tgt[tgt_id]; // IF in shutdown delete target. if(!ocs->attached) { ocs_delete_target(ocs, fcp, tgt_id); } else { tgt->state = OCS_TGT_STATE_LOST; tgt->gone_timer = 30; if (!callout_active(&fcp->ldt)) { callout_reset(&fcp->ldt, hz, ocs_ldt, fcp); } } return 0; } /** * @brief Initialize SCSI IO * * Initialize SCSI IO, this function is called once per IO during IO pool * allocation so that the target server may initialize any of its own private * data. * * @param io pointer to SCSI IO object * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_tgt_io_init(ocs_io_t *io) { return 0; } /** * @brief Uninitialize SCSI IO * * Uninitialize target server private data in a SCSI io object * * @param io pointer to SCSI IO object * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_tgt_io_exit(ocs_io_t *io) { return 0; } /** * @brief Initialize SCSI IO * * Initialize SCSI IO, this function is called once per IO during IO pool * allocation so that the initiator client may initialize any of its own private * data. * * @param io pointer to SCSI IO object * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_ini_io_init(ocs_io_t *io) { return 0; } /** * @brief Uninitialize SCSI IO * * Uninitialize initiator client private data in a SCSI io object * * @param io pointer to SCSI IO object * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_scsi_ini_io_exit(ocs_io_t *io) { return 0; } /* * End of functions required by SCSI base driver API ***************************************************************************/ static __inline void ocs_set_ccb_status(union ccb *ccb, cam_status status) { ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= status; } static int32_t ocs_task_set_full_or_busy_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg) { ocs_target_io_free(io); return 0; } /** * @brief send SCSI task set full or busy status * * A SCSI task set full or busy response is sent depending on whether * another IO is already active on the LUN. * * @param io pointer to IO context * * @return returns 0 for success, a negative error code value for failure. */ static int32_t ocs_task_set_full_or_busy(ocs_io_t *io) { ocs_scsi_cmd_resp_t rsp = { 0 }; ocs_t *ocs = io->ocs; /* * If there is another command for the LUN, then send task set full, * if this is the first one, then send the busy status. 
* * if 'busy sent' is FALSE, set it to TRUE and send BUSY * otherwise send FULL */ if (atomic_cmpset_acq_32(&io->node->tgt_node.busy_sent, FALSE, TRUE)) { rsp.scsi_status = SCSI_STATUS_BUSY; /* Busy */ printf("%s: busy [%s] tag=%x iiu=%d ihw=%d\n", __func__, io->node->display_name, io->tag, io->ocs->io_in_use, io->ocs->io_high_watermark); } else { rsp.scsi_status = SCSI_STATUS_TASK_SET_FULL; /* Task set full */ printf("%s: full tag=%x iiu=%d\n", __func__, io->tag, io->ocs->io_in_use); } /* Log a message here indicating a busy or task set full state */ if (OCS_LOG_ENABLE_Q_FULL_BUSY_MSG(ocs)) { /* Log Task Set Full */ if (rsp.scsi_status == SCSI_STATUS_TASK_SET_FULL) { /* Task Set Full Message */ ocs_log_info(ocs, "OCS CAM TASK SET FULL. Tasks >= %d\n", ocs->io_high_watermark); } else if (rsp.scsi_status == SCSI_STATUS_BUSY) { /* Log Busy Message */ ocs_log_info(ocs, "OCS CAM SCSI BUSY\n"); } } /* Send the response */ return ocs_scsi_send_resp(io, 0, &rsp, ocs_task_set_full_or_busy_cb, NULL); } /** * @ingroup cam_io * @brief Process target IO completions * * @param io * @param scsi_status did the IO complete successfully * @param flags * @param arg application specific pointer provided in the call to ocs_target_io() * * @todo */ static int32_t ocs_scsi_target_io_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg) { union ccb *ccb = arg; struct ccb_scsiio *csio = &ccb->csio; struct ocs_softc *ocs = csio->ccb_h.ccb_ocs_ptr; uint32_t cam_dir = ccb->ccb_h.flags & CAM_DIR_MASK; uint32_t io_is_done = (ccb->ccb_h.flags & CAM_SEND_STATUS) == CAM_SEND_STATUS; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; if (CAM_DIR_NONE != cam_dir) { bus_dmasync_op_t op; if (CAM_DIR_IN == cam_dir) { op = BUS_DMASYNC_POSTREAD; } else { op = BUS_DMASYNC_POSTWRITE; } /* Synchronize the DMA memory with the CPU and free the mapping */ bus_dmamap_sync(ocs->buf_dmat, io->tgt_io.dmap, op); if (io->tgt_io.flags & OCS_CAM_IO_F_DMAPPED) { bus_dmamap_unload(ocs->buf_dmat, io->tgt_io.dmap); } } if (io->tgt_io.sendresp) { io->tgt_io.sendresp = 0; ocs_scsi_cmd_resp_t resp = { 0 }; io->tgt_io.state = OCS_CAM_IO_RESP; resp.scsi_status = scsi_status; if (ccb->ccb_h.flags & CAM_SEND_SENSE) { resp.sense_data = (uint8_t *)&csio->sense_data; resp.sense_data_length = csio->sense_len; } resp.residual = io->exp_xfer_len - io->transferred; return ocs_scsi_send_resp(io, 0, &resp, ocs_scsi_target_io_cb, ccb); } switch (scsi_status) { case OCS_SCSI_STATUS_GOOD: ocs_set_ccb_status(ccb, CAM_REQ_CMP); break; case OCS_SCSI_STATUS_ABORTED: ocs_set_ccb_status(ccb, CAM_REQ_ABORTED); break; default: ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } if (io_is_done) { if ((io->tgt_io.flags & OCS_CAM_IO_F_ABORT_NOTIFY) == 0) { ocs_target_io_free(io); } } else { io->tgt_io.state = OCS_CAM_IO_DATA_DONE; /*device_printf(ocs->dev, "%s: CTIO state=%d tag=%#x\n", __func__, io->tgt_io.state, io->tag);*/ } xpt_done(ccb); return 0; } /** * @note 1. Since the CCB is assigned to the ocs_io_t on an XPT_CONT_TARGET_IO * action, if an initiator aborts a command prior to the SIM receiving * a CTIO, the IO's CCB will be NULL. */ static int32_t ocs_io_abort_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg) { struct ocs_softc *ocs = NULL; ocs_io_t *tmfio = arg; ocs_scsi_tmf_resp_e tmf_resp = OCS_SCSI_TMF_FUNCTION_COMPLETE; int32_t rc = 0; ocs = io->ocs; io->tgt_io.flags &= ~OCS_CAM_IO_F_ABORT_DEV; /* A good status indicates the IO was aborted and will be completed in * the IO's completion handler. Handle the other cases here. 
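/*
 * ocs_task_set_full_or_busy() above picks between BUSY and TASK SET FULL
 * with a single compare-and-swap on the per-initiator busy_sent flag: the
 * first command past the watermark wins the swap and reports BUSY, every
 * later one finds the flag already set and reports TASK SET FULL.  A
 * stand-alone sketch using C11 atomics in place of the kernel
 * atomic_cmpset_acq_32(); the status values are the standard SCSI codes.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define ST_BUSY             0x08
#define ST_TASK_SET_FULL    0x28

static int
full_or_busy_status(atomic_bool *busy_sent)
{
    bool expected = false;

    if (atomic_compare_exchange_strong(busy_sent, &expected, true))
        return (ST_BUSY);           /* first one over the watermark */
    return (ST_TASK_SET_FULL);      /* flag already set by an earlier command */
}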
*/ switch (scsi_status) { case OCS_SCSI_STATUS_GOOD: break; case OCS_SCSI_STATUS_NO_IO: break; default: device_printf(ocs->dev, "%s: unhandled status %d\n", __func__, scsi_status); tmf_resp = OCS_SCSI_TMF_FUNCTION_REJECTED; rc = -1; } ocs_scsi_send_tmf_resp(tmfio, tmf_resp, NULL, ocs_target_tmf_cb, NULL); return rc; } /** * @ingroup cam_io * @brief Process initiator IO completions * * @param io * @param scsi_status did the IO complete successfully * @param rsp pointer to response buffer * @param flags * @param arg application specific pointer provided in the call to ocs_target_io() * * @todo */ static int32_t ocs_scsi_initiator_io_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, ocs_scsi_cmd_resp_t *rsp, uint32_t flags, void *arg) { union ccb *ccb = arg; struct ccb_scsiio *csio = &ccb->csio; struct ocs_softc *ocs = csio->ccb_h.ccb_ocs_ptr; uint32_t cam_dir = ccb->ccb_h.flags & CAM_DIR_MASK; cam_status ccb_status= CAM_REQ_CMP_ERR; if (CAM_DIR_NONE != cam_dir) { bus_dmasync_op_t op; if (CAM_DIR_IN == cam_dir) { op = BUS_DMASYNC_POSTREAD; } else { op = BUS_DMASYNC_POSTWRITE; } /* Synchronize the DMA memory with the CPU and free the mapping */ bus_dmamap_sync(ocs->buf_dmat, io->tgt_io.dmap, op); if (io->tgt_io.flags & OCS_CAM_IO_F_DMAPPED) { bus_dmamap_unload(ocs->buf_dmat, io->tgt_io.dmap); } } if (scsi_status == OCS_SCSI_STATUS_CHECK_RESPONSE) { csio->scsi_status = rsp->scsi_status; if (SCSI_STATUS_OK != rsp->scsi_status) { ccb_status = CAM_SCSI_STATUS_ERROR; } csio->resid = rsp->residual; if (rsp->residual > 0) { uint32_t length = rsp->response_wire_length; /* underflow */ if (csio->dxfer_len == (length + csio->resid)) { ccb_status = CAM_REQ_CMP; } } else if (rsp->residual < 0) { ccb_status = CAM_DATA_RUN_ERR; } if ((rsp->sense_data_length) && !(ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR))) { uint32_t sense_len = 0; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; if (rsp->sense_data_length < csio->sense_len) { csio->sense_resid = csio->sense_len - rsp->sense_data_length; sense_len = rsp->sense_data_length; } else { csio->sense_resid = 0; sense_len = csio->sense_len; } ocs_memcpy(&csio->sense_data, rsp->sense_data, sense_len); } } else if (scsi_status != OCS_SCSI_STATUS_GOOD) { ccb_status = CAM_REQ_CMP_ERR; ocs_set_ccb_status(ccb, ccb_status); csio->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(csio->ccb_h.path, 1); } else { ccb_status = CAM_REQ_CMP; } ocs_set_ccb_status(ccb, ccb_status); ocs_scsi_io_free(io); csio->ccb_h.ccb_io_ptr = NULL; csio->ccb_h.ccb_ocs_ptr = NULL; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; xpt_done(ccb); return 0; } /** * @brief Load scatter-gather list entries into an IO * * This routine relies on the driver instance's software context pointer and * the IO object pointer having been already assigned to hooks in the CCB. * Although the routine does not return success/fail, callers can look at the * n_sge member to determine if the mapping failed (0 on failure). 
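/*
 * Two small calculations from ocs_scsi_initiator_io_cb() above, pulled out
 * for clarity as a stand-alone sketch: a positive residual only counts as a
 * clean underflow when the bytes that arrived plus the residual add back up
 * to the requested transfer length, and returned sense data is truncated to
 * the CCB's sense buffer with the shortfall reported in sense_resid.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
is_clean_underflow(uint32_t dxfer_len, uint32_t wire_len, int32_t resid)
{
    return (resid > 0 && dxfer_len == wire_len + (uint32_t)resid);
}

/* Returns the number of sense bytes to copy into the CCB. */
static uint32_t
sense_copy_len(uint32_t rsp_sense_len, uint32_t ccb_sense_len,
    uint32_t *sense_resid)
{
    if (rsp_sense_len < ccb_sense_len) {
        *sense_resid = ccb_sense_len - rsp_sense_len;
        return (rsp_sense_len);
    }
    *sense_resid = 0;
    return (ccb_sense_len);
}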
* * @param arg pointer to the CAM ccb for this IO * @param seg DMA address/length pairs * @param nseg number of DMA address/length pairs * @param error any errors while mapping the IO */ static void ocs_scsi_dmamap_load(void *arg, bus_dma_segment_t *seg, int nseg, int error) { ocs_dmamap_load_arg_t *sglarg = (ocs_dmamap_load_arg_t*) arg; if (error) { printf("%s: seg=%p nseg=%d error=%d\n", __func__, seg, nseg, error); sglarg->rc = -1; } else { uint32_t i = 0; uint32_t c = 0; if ((sglarg->sgl_count + nseg) > sglarg->sgl_max) { printf("%s: sgl_count=%d nseg=%d max=%d\n", __func__, sglarg->sgl_count, nseg, sglarg->sgl_max); sglarg->rc = -2; return; } for (i = 0, c = sglarg->sgl_count; i < nseg; i++, c++) { sglarg->sgl[c].addr = seg[i].ds_addr; sglarg->sgl[c].len = seg[i].ds_len; } sglarg->sgl_count = c; sglarg->rc = 0; } } /** * @brief Build a scatter-gather list from a CAM CCB * * @param ocs the driver instance's software context * @param ccb pointer to the CCB * @param io pointer to the previously allocated IO object * @param sgl pointer to SGL * @param sgl_max number of entries in sgl * * @return 0 on success, non-zero otherwise */ static int32_t ocs_build_scsi_sgl(struct ocs_softc *ocs, union ccb *ccb, ocs_io_t *io, ocs_scsi_sgl_t *sgl, uint32_t sgl_max) { ocs_dmamap_load_arg_t dmaarg; int32_t err = 0; if (!ocs || !ccb || !io || !sgl) { printf("%s: bad param o=%p c=%p i=%p s=%p\n", __func__, ocs, ccb, io, sgl); return -1; } io->tgt_io.flags &= ~OCS_CAM_IO_F_DMAPPED; dmaarg.sgl = sgl; dmaarg.sgl_count = 0; dmaarg.sgl_max = sgl_max; dmaarg.rc = 0; err = bus_dmamap_load_ccb(ocs->buf_dmat, io->tgt_io.dmap, ccb, ocs_scsi_dmamap_load, &dmaarg, 0); if (err || dmaarg.rc) { device_printf( ocs->dev, "%s: bus_dmamap_load_ccb error (%d %d)\n", __func__, err, dmaarg.rc); return -1; } io->tgt_io.flags |= OCS_CAM_IO_F_DMAPPED; return dmaarg.sgl_count; } /** * @ingroup cam_io * @brief Send a target IO * * @param ocs the driver instance's software context * @param ccb pointer to the CCB * * @return 0 on success, non-zero otherwise */ static int32_t ocs_target_io(struct ocs_softc *ocs, union ccb *ccb) { struct ccb_scsiio *csio = &ccb->csio; ocs_io_t *io = NULL; uint32_t cam_dir = ccb->ccb_h.flags & CAM_DIR_MASK; bool sendstatus = ccb->ccb_h.flags & CAM_SEND_STATUS; uint32_t xferlen = csio->dxfer_len; int32_t rc = 0; io = ocs_scsi_find_io(ocs, csio->tag_id); if (io == NULL) { ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); panic("bad tag value"); return 1; } /* Received an ABORT TASK for this IO */ if (io->tgt_io.flags & OCS_CAM_IO_F_ABORT_RECV) { /*device_printf(ocs->dev, "%s: XPT_CONT_TARGET_IO state=%d tag=%#x xid=%#x flags=%#x\n", __func__, io->tgt_io.state, io->tag, io->init_task_tag, io->tgt_io.flags);*/ io->tgt_io.flags |= OCS_CAM_IO_F_ABORT_CAM; if (ccb->ccb_h.flags & CAM_SEND_STATUS) { ocs_set_ccb_status(ccb, CAM_REQ_CMP); ocs_target_io_free(io); return 1; } ocs_set_ccb_status(ccb, CAM_REQ_ABORTED); return 1; } io->tgt_io.app = ccb; ocs_set_ccb_status(ccb, CAM_REQ_INPROG); ccb->ccb_h.status |= CAM_SIM_QUEUED; csio->ccb_h.ccb_ocs_ptr = ocs; csio->ccb_h.ccb_io_ptr = io; if ((sendstatus && (xferlen == 0))) { ocs_scsi_cmd_resp_t resp = { 0 }; ocs_assert(ccb->ccb_h.flags & CAM_SEND_STATUS, -1); io->tgt_io.state = OCS_CAM_IO_RESP; resp.scsi_status = csio->scsi_status; if (ccb->ccb_h.flags & CAM_SEND_SENSE) { resp.sense_data = (uint8_t *)&csio->sense_data; resp.sense_data_length = csio->sense_len; } resp.residual = io->exp_xfer_len - io->transferred; rc = ocs_scsi_send_resp(io, 0, &resp, ocs_scsi_target_io_cb, 
ccb); } else if (xferlen != 0) { ocs_scsi_sgl_t sgl[OCS_FC_MAX_SGL]; int32_t sgl_count = 0; io->tgt_io.state = OCS_CAM_IO_DATA; if (sendstatus) io->tgt_io.sendresp = 1; sgl_count = ocs_build_scsi_sgl(ocs, ccb, io, sgl, ARRAY_SIZE(sgl)); if (sgl_count > 0) { if (cam_dir == CAM_DIR_IN) { rc = ocs_scsi_send_rd_data(io, 0, NULL, sgl, sgl_count, csio->dxfer_len, ocs_scsi_target_io_cb, ccb); } else if (cam_dir == CAM_DIR_OUT) { rc = ocs_scsi_recv_wr_data(io, 0, NULL, sgl, sgl_count, csio->dxfer_len, ocs_scsi_target_io_cb, ccb); } else { device_printf(ocs->dev, "%s:" " unknown CAM direction %#x\n", __func__, cam_dir); ocs_set_ccb_status(ccb, CAM_REQ_INVALID); rc = 1; } } else { device_printf(ocs->dev, "%s: building SGL failed\n", __func__); ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); rc = 1; } } else { device_printf(ocs->dev, "%s: invalid request: xferlen and sendstatus" " are both 0\n", __func__); ocs_set_ccb_status(ccb, CAM_REQ_INVALID); rc = 1; } if (rc) { ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; io->tgt_io.state = OCS_CAM_IO_DATA_DONE; device_printf(ocs->dev, "%s: CTIO state=%d tag=%#x\n", __func__, io->tgt_io.state, io->tag); if ((sendstatus && (xferlen == 0))) { ocs_target_io_free(io); } } return rc; } static int32_t ocs_target_tmf_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg) { /*device_printf(io->ocs->dev, "%s: tag=%x io=%p s=%#x\n", __func__, io->tag, io, scsi_status);*/ ocs_scsi_io_complete(io); return 0; } /** * @ingroup cam_io * @brief Send an initiator IO * * @param ocs the driver instance's software context * @param ccb pointer to the CCB * * @return 0 on success, non-zero otherwise */ static int32_t ocs_initiator_io(struct ocs_softc *ocs, union ccb *ccb) { int32_t rc; struct ccb_scsiio *csio = &ccb->csio; struct ccb_hdr *ccb_h = &csio->ccb_h; ocs_node_t *node = NULL; ocs_io_t *io = NULL; ocs_scsi_sgl_t sgl[OCS_FC_MAX_SGL]; - int32_t sgl_count; + int32_t flags, sgl_count; ocs_fcport *fcp; fcp = FCPORT(ocs, cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path))); if (fcp->tgt[ccb_h->target_id].state == OCS_TGT_STATE_LOST) { device_printf(ocs->dev, "%s: device LOST %d\n", __func__, ccb_h->target_id); return CAM_REQUEUE_REQ; } if (fcp->tgt[ccb_h->target_id].state == OCS_TGT_STATE_NONE) { device_printf(ocs->dev, "%s: device not ready %d\n", __func__, ccb_h->target_id); return CAM_SEL_TIMEOUT; } node = ocs_node_get_instance(ocs, fcp->tgt[ccb_h->target_id].node_id); if (node == NULL) { device_printf(ocs->dev, "%s: no device %d\n", __func__, ccb_h->target_id); return CAM_SEL_TIMEOUT; } if (!node->targ) { device_printf(ocs->dev, "%s: not target device %d\n", __func__, ccb_h->target_id); return CAM_SEL_TIMEOUT; } io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_ORIGINATOR); if (io == NULL) { device_printf(ocs->dev, "%s: unable to alloc IO\n", __func__); return -1; } /* even though this is INI, use the target structure, as ocs_build_scsi_sgl * only references the tgt_io part of an ocs_io_t */ io->tgt_io.app = ccb; csio->ccb_h.ccb_ocs_ptr = ocs; csio->ccb_h.ccb_io_ptr = io; sgl_count = ocs_build_scsi_sgl(ocs, ccb, io, sgl, ARRAY_SIZE(sgl)); if (sgl_count < 0) { ocs_scsi_io_free(io); device_printf(ocs->dev, "%s: building SGL failed\n", __func__); return -1; } if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) { io->timeout = 0; } else if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) { io->timeout = OCS_CAM_IO_TIMEOUT; } else { io->timeout = ccb->ccb_h.timeout; } + switch (csio->tag_action) { + case MSG_HEAD_OF_Q_TAG: + flags = OCS_SCSI_CMD_HEAD_OF_QUEUE; + break; +
case MSG_ORDERED_Q_TAG: + flags = OCS_SCSI_CMD_ORDERED; + break; + case MSG_ACA_TASK: + flags = OCS_SCSI_CMD_ACA; + break; + case CAM_TAG_ACTION_NONE: + case MSG_SIMPLE_Q_TAG: + default: + flags = OCS_SCSI_CMD_SIMPLE; + break; + } + flags |= (csio->priority << OCS_SCSI_PRIORITY_SHIFT) & + OCS_SCSI_PRIORITY_MASK; + switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_NONE: rc = ocs_scsi_send_nodata_io(node, io, ccb_h->target_lun, ccb->ccb_h.flags & CAM_CDB_POINTER ? csio->cdb_io.cdb_ptr: csio->cdb_io.cdb_bytes, csio->cdb_len, - ocs_scsi_initiator_io_cb, ccb); + ocs_scsi_initiator_io_cb, ccb, flags); break; case CAM_DIR_IN: rc = ocs_scsi_send_rd_io(node, io, ccb_h->target_lun, ccb->ccb_h.flags & CAM_CDB_POINTER ? csio->cdb_io.cdb_ptr: csio->cdb_io.cdb_bytes, csio->cdb_len, NULL, sgl, sgl_count, csio->dxfer_len, - ocs_scsi_initiator_io_cb, ccb); + ocs_scsi_initiator_io_cb, ccb, flags); break; case CAM_DIR_OUT: rc = ocs_scsi_send_wr_io(node, io, ccb_h->target_lun, ccb->ccb_h.flags & CAM_CDB_POINTER ? csio->cdb_io.cdb_ptr: csio->cdb_io.cdb_bytes, csio->cdb_len, NULL, sgl, sgl_count, csio->dxfer_len, - ocs_scsi_initiator_io_cb, ccb); + ocs_scsi_initiator_io_cb, ccb, flags); break; default: panic("%s invalid data direction %08x\n", __func__, ccb->ccb_h.flags); break; } return rc; } static uint32_t ocs_fcp_change_role(struct ocs_softc *ocs, ocs_fcport *fcp, uint32_t new_role) { uint32_t rc = 0, was = 0, i = 0; ocs_vport_spec_t *vport = fcp->vport; for (was = 0, i = 0; i < (ocs->num_vports + 1); i++) { if (FCPORT(ocs, i)->role != KNOB_ROLE_NONE) was++; } // Physical port if ((was == 0) || (vport == NULL)) { fcp->role = new_role; if (vport == NULL) { ocs->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0; ocs->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0; } else { vport->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0; vport->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0; } rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE); if (rc) { ocs_log_debug(ocs, "port offline failed : %d\n", rc); } rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); if (rc) { ocs_log_debug(ocs, "port online failed : %d\n", rc); } return 0; } if ((fcp->role != KNOB_ROLE_NONE)){ fcp->role = new_role; vport->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0; vport->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0; /* New Sport will be created in sport deleted cb */ return ocs_sport_vport_del(ocs, ocs->domain, vport->wwpn, vport->wwnn); } fcp->role = new_role; vport->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0; vport->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 
1:0; if (fcp->role != KNOB_ROLE_NONE) { return ocs_sport_vport_alloc(ocs->domain, vport); } return (0); } /** * @ingroup cam_api * @brief Process CAM actions * * The driver supplies this routine to the CAM during initialization and * is the main entry point for processing CAM Control Blocks (CCBs) * * @param sim pointer to the SCSI Interface Module * @param ccb CAM control block * * @todo * - populate path inquiry data via info retrieved from SLI port */ static void ocs_action(struct cam_sim *sim, union ccb *ccb) { struct ocs_softc *ocs = (struct ocs_softc *)cam_sim_softc(sim); struct ccb_hdr *ccb_h = &ccb->ccb_h; int32_t rc, bus; bus = cam_sim_bus(sim); switch (ccb_h->func_code) { case XPT_SCSI_IO: if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } rc = ocs_initiator_io(ocs, ccb); if (0 == rc) { ocs_set_ccb_status(ccb, CAM_REQ_INPROG | CAM_SIM_QUEUED); break; } else { if (rc == CAM_REQUEUE_REQ) { cam_freeze_devq(ccb->ccb_h.path); cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 100, 0); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); break; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; if (rc > 0) { ocs_set_ccb_status(ccb, rc); } else { ocs_set_ccb_status(ccb, CAM_SEL_TIMEOUT); } } xpt_done(ccb); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; struct ccb_pathinq_settings_fc *fc = &cpi->xport_specific.fc; ocs_fcport *fcp = FCPORT(ocs, bus); uint64_t wwn = 0; ocs_xport_stats_t value; cpi->version_num = 1; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC; if (ocs->ocs_xport == OCS_XPORT_FC) { cpi->transport = XPORT_FC; } else { cpi->transport = XPORT_UNKNOWN; } cpi->transport_version = 0; /* Set the transport parameters of the SIM */ ocs_xport_status(ocs->xport, OCS_XPORT_LINK_SPEED, &value); fc->bitrate = value.value * 1000; /* speed in Mbps */ wwn = *((uint64_t *)ocs_scsi_get_property_ptr(ocs, OCS_SCSI_WWPN)); fc->wwpn = be64toh(wwn); wwn = *((uint64_t *)ocs_scsi_get_property_ptr(ocs, OCS_SCSI_WWNN)); fc->wwnn = be64toh(wwn); fc->port = fcp->fc_id; if (ocs->config_tgt) { cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; } cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; cpi->hba_misc |= PIM_EXTLUNS | PIM_NOSCAN; cpi->hba_inquiry = PI_TAG_ABLE; cpi->max_target = OCS_MAX_TARGETS; cpi->initiator_id = ocs->max_remote_nodes + 1; if (!ocs->enable_ini) { cpi->hba_misc |= PIM_NOINITIATOR; } cpi->max_lun = OCS_MAX_LUN; cpi->bus_id = cam_sim_bus(sim); /* Need to supply a base transfer speed prior to linking up * Worst case, this would be FC 1Gbps */ cpi->base_transfer_speed = 1 * 1000 * 1000; /* Calculate the max IO supported * Worst case would be an OS page per SGL entry */ cpi->maxio = PAGE_SIZE * (ocs_scsi_get_property(ocs, OCS_SCSI_MAX_SGL) - 1); strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Emulex", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; ocs_xport_stats_t value; ocs_fcport *fcp = FCPORT(ocs, bus); ocs_fc_target_t *tgt = NULL; if (ocs->ocs_xport != OCS_XPORT_FC) { ocs_set_ccb_status(ccb, CAM_REQ_INVALID); xpt_done(ccb); break; } if (cts->ccb_h.target_id > OCS_MAX_TARGETS) {
ocs_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); break; } tgt = &fcp->tgt[cts->ccb_h.target_id]; if (tgt->state == OCS_TGT_STATE_NONE) { ocs_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); break; } cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_FC; cts->transport_version = 2; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; /* speed in Mbps */ ocs_xport_status(ocs->xport, OCS_XPORT_LINK_SPEED, &value); fc->bitrate = value.value * 100; fc->wwpn = tgt->wwpn; fc->wwnn = tgt->wwnn; fc->port = tgt->port_id; fc->valid = CTS_FC_VALID_SPEED | CTS_FC_VALID_WWPN | CTS_FC_VALID_WWNN | CTS_FC_VALID_PORT; ocs_set_ccb_status(ccb, CAM_REQ_CMP); xpt_done(ccb); break; } case XPT_SET_TRAN_SETTINGS: ocs_set_ccb_status(ccb, CAM_REQ_CMP); xpt_done(ccb); break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, TRUE); xpt_done(ccb); break; case XPT_GET_SIM_KNOB: { struct ccb_sim_knob *knob = &ccb->knob; uint64_t wwn = 0; ocs_fcport *fcp = FCPORT(ocs, bus); if (ocs->ocs_xport != OCS_XPORT_FC) { ocs_set_ccb_status(ccb, CAM_REQ_INVALID); xpt_done(ccb); break; } if (bus == 0) { wwn = *((uint64_t *)ocs_scsi_get_property_ptr(ocs, OCS_SCSI_WWNN)); knob->xport_specific.fc.wwnn = be64toh(wwn); wwn = *((uint64_t *)ocs_scsi_get_property_ptr(ocs, OCS_SCSI_WWPN)); knob->xport_specific.fc.wwpn = be64toh(wwn); } else { knob->xport_specific.fc.wwnn = fcp->vport->wwnn; knob->xport_specific.fc.wwpn = fcp->vport->wwpn; } knob->xport_specific.fc.role = fcp->role; knob->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE; ocs_set_ccb_status(ccb, CAM_REQ_CMP); xpt_done(ccb); break; } case XPT_SET_SIM_KNOB: { struct ccb_sim_knob *knob = &ccb->knob; bool role_changed = FALSE; ocs_fcport *fcp = FCPORT(ocs, bus); if (ocs->ocs_xport != OCS_XPORT_FC) { ocs_set_ccb_status(ccb, CAM_REQ_INVALID); xpt_done(ccb); break; } if (knob->xport_specific.fc.valid & KNOB_VALID_ADDRESS) { device_printf(ocs->dev, "%s: XPT_SET_SIM_KNOB wwnn=%llx wwpn=%llx\n", __func__, (unsigned long long)knob->xport_specific.fc.wwnn, (unsigned long long)knob->xport_specific.fc.wwpn); } if (knob->xport_specific.fc.valid & KNOB_VALID_ROLE) { switch (knob->xport_specific.fc.role) { case KNOB_ROLE_NONE: if (fcp->role != KNOB_ROLE_NONE) { role_changed = TRUE; } break; case KNOB_ROLE_TARGET: if (fcp->role != KNOB_ROLE_TARGET) { role_changed = TRUE; } break; case KNOB_ROLE_INITIATOR: if (fcp->role != KNOB_ROLE_INITIATOR) { role_changed = TRUE; } break; case KNOB_ROLE_BOTH: if (fcp->role != KNOB_ROLE_BOTH) { role_changed = TRUE; } break; default: device_printf(ocs->dev, "%s: XPT_SET_SIM_KNOB unsupported role: %d\n", __func__, knob->xport_specific.fc.role); } if (role_changed) { device_printf(ocs->dev, "BUS:%d XPT_SET_SIM_KNOB old_role: %d new_role: %d\n", bus, fcp->role, knob->xport_specific.fc.role); ocs_fcp_change_role(ocs, fcp, knob->xport_specific.fc.role); } } ocs_set_ccb_status(ccb, CAM_REQ_CMP); xpt_done(ccb); break; } case XPT_ABORT: { union ccb *accb = ccb->cab.abort_ccb; switch (accb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: ocs_abort_atio(ocs, ccb); break; case XPT_IMMEDIATE_NOTIFY: ocs_abort_inot(ocs, ccb); break; case XPT_SCSI_IO: rc = ocs_abort_initiator_io(ocs, accb); if (rc) { ccb->ccb_h.status = CAM_UA_ABORT; } else { ccb->ccb_h.status = CAM_REQ_CMP; } break; default: printf("abort of unknown func %#x\n", accb->ccb_h.func_code); ccb->ccb_h.status = CAM_REQ_INVALID; break; } break; } case XPT_RESET_BUS: if (ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE) == 
0) { rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); if (rc) { ocs_log_debug(ocs, "Failed to bring port online" " : %d\n", rc); } ocs_set_ccb_status(ccb, CAM_REQ_CMP); } else { ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } xpt_done(ccb); break; case XPT_RESET_DEV: { ocs_node_t *node = NULL; ocs_io_t *io = NULL; int32_t rc = 0; ocs_fcport *fcp = FCPORT(ocs, bus); node = ocs_node_get_instance(ocs, fcp->tgt[ccb_h->target_id].node_id); if (node == NULL) { device_printf(ocs->dev, "%s: no device %d\n", __func__, ccb_h->target_id); ocs_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); break; } io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_ORIGINATOR); if (io == NULL) { device_printf(ocs->dev, "%s: unable to alloc IO\n", __func__); ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); xpt_done(ccb); break; } rc = ocs_scsi_send_tmf(node, io, NULL, ccb_h->target_lun, OCS_SCSI_TMF_LOGICAL_UNIT_RESET, NULL, 0, 0, /* sgl, sgl_count, length */ ocs_initiator_tmf_cb, NULL/*arg*/); if (rc) { ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } else { ocs_set_ccb_status(ccb, CAM_REQ_CMP); } if (node->fcp2device) { ocs_reset_crn(node, ccb_h->target_lun); } xpt_done(ccb); break; } case XPT_EN_LUN: /* target support */ { ocs_tgt_resource_t *trsrc = NULL; uint32_t status = 0; ocs_fcport *fcp = FCPORT(ocs, bus); device_printf(ocs->dev, "XPT_EN_LUN %sable %d:%d\n", ccb->cel.enable ? "en" : "dis", ccb->ccb_h.target_id, (unsigned int)ccb->ccb_h.target_lun); trsrc = ocs_tgt_resource_get(fcp, &ccb->ccb_h, &status); if (trsrc) { trsrc->enabled = ccb->cel.enable; /* Abort all ATIO/INOT on LUN disable */ if (trsrc->enabled == FALSE) { ocs_tgt_resource_abort(ocs, trsrc); } else { STAILQ_INIT(&trsrc->atio); STAILQ_INIT(&trsrc->inot); } status = CAM_REQ_CMP; } ocs_set_ccb_status(ccb, status); xpt_done(ccb); break; } /* * The flow of target IOs in CAM is: * - CAM supplies a number of CCBs to the driver used for received * commands. * - when the driver receives a command, it copies the relevant * information to the CCB and returns it to the CAM using xpt_done() * - after the target server processes the request, it creates * a new CCB containing information on how to continue the IO and * passes that to the driver * - the driver processes the "continue IO" (a.k.a CTIO) CCB * - once the IO completes, the driver returns the CTIO to the CAM * using xpt_done() */ case XPT_ACCEPT_TARGET_IO: /* used to inform upper layer of received CDB (a.k.a. ATIO) */ case XPT_IMMEDIATE_NOTIFY: /* used to inform upper layer of other event (a.k.a. INOT) */ { ocs_tgt_resource_t *trsrc = NULL; uint32_t status = 0; ocs_fcport *fcp = FCPORT(ocs, bus); /*printf("XPT_%s %p\n", ccb_h->func_code == XPT_ACCEPT_TARGET_IO ? "ACCEPT_TARGET_IO" : "IMMEDIATE_NOTIFY", ccb);*/ trsrc = ocs_tgt_resource_get(fcp, &ccb->ccb_h, &status); if (trsrc == NULL) { ocs_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); break; } if (XPT_ACCEPT_TARGET_IO == ccb->ccb_h.func_code) { struct ccb_accept_tio *atio = NULL; atio = (struct ccb_accept_tio *)ccb; atio->init_id = 0x0badbeef; atio->tag_id = 0xdeadc0de; STAILQ_INSERT_TAIL(&trsrc->atio, &ccb->ccb_h, sim_links.stqe); } else { STAILQ_INSERT_TAIL(&trsrc->inot, &ccb->ccb_h, sim_links.stqe); } ccb->ccb_h.ccb_io_ptr = NULL; ccb->ccb_h.ccb_ocs_ptr = ocs; ocs_set_ccb_status(ccb, CAM_REQ_INPROG); /* * These actions give resources to the target driver. * If we didn't return here, this function would call * xpt_done(), signaling to the upper layers that an * IO or other event had arrived. 
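* The queued ATIO and INOT CCBs are completed later, via xpt_done(), once a command or notification actually arrives from the remote initiator.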
*/ break; } case XPT_NOTIFY_ACKNOWLEDGE: { ocs_io_t *io = NULL; ocs_io_t *abortio = NULL; /* Get the IO reference for this tag */ io = ocs_scsi_find_io(ocs, ccb->cna2.tag_id); if (io == NULL) { device_printf(ocs->dev, "%s: XPT_NOTIFY_ACKNOWLEDGE no IO with tag %#x\n", __func__, ccb->cna2.tag_id); ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR); xpt_done(ccb); break; } abortio = io->tgt_io.app; if (abortio) { abortio->tgt_io.flags &= ~OCS_CAM_IO_F_ABORT_NOTIFY; device_printf(ocs->dev, "%s: XPT_NOTIFY_ACK state=%d tag=%#x xid=%#x" " flags=%#x\n", __func__, abortio->tgt_io.state, abortio->tag, abortio->init_task_tag, abortio->tgt_io.flags); /* TMF response was sent in abort callback */ } else { ocs_scsi_send_tmf_resp(io, OCS_SCSI_TMF_FUNCTION_COMPLETE, NULL, ocs_target_tmf_cb, NULL); } ocs_set_ccb_status(ccb, CAM_REQ_CMP); xpt_done(ccb); break; } case XPT_CONT_TARGET_IO: /* continue target IO, sending data/response (a.k.a. CTIO) */ if (ocs_target_io(ocs, ccb)) { device_printf(ocs->dev, "XPT_CONT_TARGET_IO failed flags=%x tag=%#x\n", ccb->ccb_h.flags, ccb->csio.tag_id); xpt_done(ccb); } break; default: device_printf(ocs->dev, "unhandled func_code = %#x\n", ccb_h->func_code); ccb_h->status = CAM_REQ_INVALID; xpt_done(ccb); break; } } /** * @ingroup cam_api * @brief Process events * * @param sim pointer to the SCSI Interface Module * */ static void ocs_poll(struct cam_sim *sim) { printf("%s\n", __func__); } static int32_t ocs_initiator_tmf_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, ocs_scsi_cmd_resp_t *rsp, uint32_t flags, void *arg) { int32_t rc = 0; switch (scsi_status) { case OCS_SCSI_STATUS_GOOD: case OCS_SCSI_STATUS_NO_IO: break; case OCS_SCSI_STATUS_CHECK_RESPONSE: if (rsp->response_data_length == 0) { ocs_log_test(io->ocs, "check response without data?!?\n"); rc = -1; break; } if (rsp->response_data[3] != 0) { ocs_log_test(io->ocs, "TMF status %08x\n", be32toh(*((uint32_t *)rsp->response_data))); rc = -1; break; } break; default: ocs_log_test(io->ocs, "status=%#x\n", scsi_status); rc = -1; } ocs_scsi_io_free(io); return rc; } /** * @brief lookup target resource structure * * Arbitrarily support * - wildcard target ID + LU * - 0 target ID + non-wildcard LU * * @param ocs the driver instance's software context * @param ccb_h pointer to the CCB header * @param status returned status value * * @return pointer to the target resource, NULL if none available (e.g. 
if LU * is not enabled) */ static ocs_tgt_resource_t *ocs_tgt_resource_get(ocs_fcport *fcp, struct ccb_hdr *ccb_h, uint32_t *status) { target_id_t tid = ccb_h->target_id; lun_id_t lun = ccb_h->target_lun; if (CAM_TARGET_WILDCARD == tid) { if (CAM_LUN_WILDCARD != lun) { *status = CAM_LUN_INVALID; return NULL; } return &fcp->targ_rsrc_wildcard; } else { if (lun < OCS_MAX_LUN) { return &fcp->targ_rsrc[lun]; } else { *status = CAM_LUN_INVALID; return NULL; } } } static int32_t ocs_tgt_resource_abort(struct ocs_softc *ocs, ocs_tgt_resource_t *trsrc) { union ccb *ccb = NULL; uint32_t count; count = 0; do { ccb = (union ccb *)STAILQ_FIRST(&trsrc->atio); if (ccb) { STAILQ_REMOVE_HEAD(&trsrc->atio, sim_links.stqe); ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); count++; } } while (ccb); count = 0; do { ccb = (union ccb *)STAILQ_FIRST(&trsrc->inot); if (ccb) { STAILQ_REMOVE_HEAD(&trsrc->inot, sim_links.stqe); ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); count++; } } while (ccb); return 0; } static void ocs_abort_atio(struct ocs_softc *ocs, union ccb *ccb) { ocs_io_t *aio = NULL; ocs_tgt_resource_t *trsrc = NULL; uint32_t status = CAM_REQ_INVALID; struct ccb_hdr *cur = NULL; union ccb *accb = ccb->cab.abort_ccb; int bus = cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path)); ocs_fcport *fcp = FCPORT(ocs, bus); trsrc = ocs_tgt_resource_get(fcp, &accb->ccb_h, &status); if (trsrc != NULL) { STAILQ_FOREACH(cur, &trsrc->atio, sim_links.stqe) { if (cur != &accb->ccb_h) continue; STAILQ_REMOVE(&trsrc->atio, cur, ccb_hdr, sim_links.stqe); accb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(accb); ocs_set_ccb_status(ccb, CAM_REQ_CMP); return; } } /* if the ATIO has a valid IO pointer, CAM is telling * the driver that the ATIO (which represents the entire * exchange) has been aborted. 
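* In that case, once the abort has actually been received for the exchange, the driver must also release the backing OCS IO (see ocs_target_io_free() below).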
*/ aio = accb->ccb_h.ccb_io_ptr; if (aio == NULL) { ccb->ccb_h.status = CAM_UA_ABORT; return; } device_printf(ocs->dev, "%s: XPT_ABORT ATIO state=%d tag=%#x" " xid=%#x flags=%#x\n", __func__, aio->tgt_io.state, aio->tag, aio->init_task_tag, aio->tgt_io.flags); /* Expectations are: * - abort task was received * - already aborted IO in the DEVICE * - already received NOTIFY ACKNOWLEDGE */ if ((aio->tgt_io.flags & OCS_CAM_IO_F_ABORT_RECV) == 0) { device_printf(ocs->dev, "%s: abort not received or io completed \n", __func__); ocs_set_ccb_status(ccb, CAM_REQ_CMP); return; } aio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_CAM; ocs_target_io_free(aio); ocs_set_ccb_status(ccb, CAM_REQ_CMP); return; } static void ocs_abort_inot(struct ocs_softc *ocs, union ccb *ccb) { ocs_tgt_resource_t *trsrc = NULL; uint32_t status = CAM_REQ_INVALID; struct ccb_hdr *cur = NULL; union ccb *accb = ccb->cab.abort_ccb; int bus = cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path)); ocs_fcport *fcp = FCPORT(ocs, bus); trsrc = ocs_tgt_resource_get(fcp, &accb->ccb_h, &status); if (trsrc) { STAILQ_FOREACH(cur, &trsrc->inot, sim_links.stqe) { if (cur != &accb->ccb_h) continue; STAILQ_REMOVE(&trsrc->inot, cur, ccb_hdr, sim_links.stqe); accb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(accb); ocs_set_ccb_status(ccb, CAM_REQ_CMP); return; } } ocs_set_ccb_status(ccb, CAM_UA_ABORT); return; } static uint32_t ocs_abort_initiator_io(struct ocs_softc *ocs, union ccb *accb) { ocs_node_t *node = NULL; ocs_io_t *io = NULL; int32_t rc = 0; struct ccb_scsiio *csio = &accb->csio; ocs_fcport *fcp = FCPORT(ocs, cam_sim_bus(xpt_path_sim((accb)->ccb_h.path))); node = ocs_node_get_instance(ocs, fcp->tgt[accb->ccb_h.target_id].node_id); if (node == NULL) { device_printf(ocs->dev, "%s: no device %d\n", __func__, accb->ccb_h.target_id); ocs_set_ccb_status(accb, CAM_DEV_NOT_THERE); xpt_done(accb); return (-1); } io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_ORIGINATOR); if (io == NULL) { device_printf(ocs->dev, "%s: unable to alloc IO\n", __func__); ocs_set_ccb_status(accb, CAM_REQ_CMP_ERR); xpt_done(accb); return (-1); } rc = ocs_scsi_send_tmf(node, io, (ocs_io_t *)csio->ccb_h.ccb_io_ptr, accb->ccb_h.target_lun, OCS_SCSI_TMF_ABORT_TASK, NULL, 0, 0, ocs_initiator_tmf_cb, NULL/*arg*/); return rc; } void ocs_scsi_ini_ddump(ocs_textbuf_t *textbuf, ocs_scsi_ddump_type_e type, void *obj) { switch(type) { case OCS_SCSI_DDUMP_DEVICE: { //ocs_t *ocs = obj; break; } case OCS_SCSI_DDUMP_DOMAIN: { //ocs_domain_t *domain = obj; break; } case OCS_SCSI_DDUMP_SPORT: { //ocs_sport_t *sport = obj; break; } case OCS_SCSI_DDUMP_NODE: { //ocs_node_t *node = obj; break; } case OCS_SCSI_DDUMP_IO: { //ocs_io_t *io = obj; break; } default: { break; } } } void ocs_scsi_tgt_ddump(ocs_textbuf_t *textbuf, ocs_scsi_ddump_type_e type, void *obj) { switch(type) { case OCS_SCSI_DDUMP_DEVICE: { //ocs_t *ocs = obj; break; } case OCS_SCSI_DDUMP_DOMAIN: { //ocs_domain_t *domain = obj; break; } case OCS_SCSI_DDUMP_SPORT: { //ocs_sport_t *sport = obj; break; } case OCS_SCSI_DDUMP_NODE: { //ocs_node_t *node = obj; break; } case OCS_SCSI_DDUMP_IO: { ocs_io_t *io = obj; char *state_str = NULL; switch (io->tgt_io.state) { case OCS_CAM_IO_FREE: state_str = "FREE"; break; case OCS_CAM_IO_COMMAND: state_str = "COMMAND"; break; case OCS_CAM_IO_DATA: state_str = "DATA"; break; case OCS_CAM_IO_DATA_DONE: state_str = "DATA_DONE"; break; case OCS_CAM_IO_RESP: state_str = "RESP"; break; default: state_str = "xxx BAD xxx"; } ocs_ddump_value(textbuf, "cam_st", "%s", state_str); if (io->tgt_io.app) { 
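/* tgt_io.app holds the CAM CCB associated with this target IO; dump its header flags and status. */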
ocs_ddump_value(textbuf, "cam_flags", "%#x", ((union ccb *)(io->tgt_io.app))->ccb_h.flags); ocs_ddump_value(textbuf, "cam_status", "%#x", ((union ccb *)(io->tgt_io.app))->ccb_h.status); } break; } default: { break; } } } int32_t ocs_scsi_get_block_vaddr(ocs_io_t *io, uint64_t blocknumber, ocs_scsi_vaddr_len_t addrlen[], uint32_t max_addrlen, void **dif_vaddr) { return -1; } uint32_t ocs_get_crn(ocs_node_t *node, uint8_t *crn, uint64_t lun) { uint32_t idx; struct ocs_lun_crn *lcrn = NULL; idx = lun % OCS_MAX_LUN; lcrn = node->ini_node.lun_crn[idx]; if (lcrn == NULL) { lcrn = ocs_malloc(node->ocs, sizeof(struct ocs_lun_crn), M_ZERO|M_NOWAIT); if (lcrn == NULL) { return (1); } lcrn->lun = lun; node->ini_node.lun_crn[idx] = lcrn; } if (lcrn->lun != lun) { return (1); } if (lcrn->crnseed == 0) lcrn->crnseed = 1; *crn = lcrn->crnseed++; return (0); } void ocs_del_crn(ocs_node_t *node) { uint32_t i; struct ocs_lun_crn *lcrn = NULL; for(i = 0; i < OCS_MAX_LUN; i++) { lcrn = node->ini_node.lun_crn[i]; if (lcrn) { ocs_free(node->ocs, lcrn, sizeof(*lcrn)); } } return; } void ocs_reset_crn(ocs_node_t *node, uint64_t lun) { uint32_t idx; struct ocs_lun_crn *lcrn = NULL; idx = lun % OCS_MAX_LUN; lcrn = node->ini_node.lun_crn[idx]; if (lcrn) lcrn->crnseed = 0; return; } Index: head/sys/dev/ocs_fc/ocs_scsi.c =================================================================== --- head/sys/dev/ocs_fc/ocs_scsi.c (revision 367043) +++ head/sys/dev/ocs_fc/ocs_scsi.c (revision 367044) @@ -1,2940 +1,2952 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * OCS Linux SCSI API base driver implementation. */ /** * @defgroup scsi_api_base SCSI Base Target/Initiator */ #include "ocs.h" #include "ocs_els.h" #include "ocs_scsi.h" #if defined(OCS_ENABLE_VPD_SUPPORT) #include "ocs_vpd.h" #endif #include "ocs_utils.h" #include "ocs_device.h" #define SCSI_IOFMT "[%04x][i:%0*x t:%0*x h:%04x]" #define SCSI_ITT_SIZE(ocs) ((ocs->ocs_xport == OCS_XPORT_FC) ? 
4 : 8) #define SCSI_IOFMT_ARGS(io) io->instance_index, SCSI_ITT_SIZE(io->ocs), io->init_task_tag, SCSI_ITT_SIZE(io->ocs), io->tgt_task_tag, io->hw_tag #define enable_tsend_auto_resp(ocs) ((ocs->ctrlmask & OCS_CTRLMASK_XPORT_DISABLE_AUTORSP_TSEND) == 0) #define enable_treceive_auto_resp(ocs) ((ocs->ctrlmask & OCS_CTRLMASK_XPORT_DISABLE_AUTORSP_TRECEIVE) == 0) #define scsi_io_printf(io, fmt, ...) ocs_log_info(io->ocs, "[%s]" SCSI_IOFMT fmt, \ io->node->display_name, SCSI_IOFMT_ARGS(io), ##__VA_ARGS__) #define scsi_io_trace(io, fmt, ...) \ do { \ if (OCS_LOG_ENABLE_SCSI_TRACE(io->ocs)) \ scsi_io_printf(io, fmt, ##__VA_ARGS__); \ } while (0) #define scsi_log(ocs, fmt, ...) \ do { \ if (OCS_LOG_ENABLE_SCSI_TRACE(ocs)) \ ocs_log_info(ocs, fmt, ##__VA_ARGS__); \ } while (0) static int32_t ocs_target_send_bls_resp(ocs_io_t *io, ocs_scsi_io_cb_t cb, void *arg); static int32_t ocs_scsi_abort_io_cb(struct ocs_hw_io_s *hio, ocs_remote_node_t *rnode, uint32_t len, int32_t status, uint32_t ext, void *arg); static void ocs_scsi_io_free_ovfl(ocs_io_t *io); static uint32_t ocs_scsi_count_sgls(ocs_hw_dif_info_t *hw_dif, ocs_scsi_sgl_t *sgl, uint32_t sgl_count); static int ocs_scsi_dif_guard_is_crc(uint8_t direction, ocs_hw_dif_info_t *dif_info); static ocs_scsi_io_status_e ocs_scsi_dif_check_unknown(ocs_io_t *io, uint32_t length, uint32_t check_length, int is_crc); static uint32_t ocs_scsi_dif_check_guard(ocs_hw_dif_info_t *dif_info, ocs_scsi_vaddr_len_t addrlen[], uint32_t addrlen_count, ocs_dif_t *dif, int is_crc); static uint32_t ocs_scsi_dif_check_app_tag(ocs_t *ocs, ocs_hw_dif_info_t *dif_info, uint16_t exp_app_tag, ocs_dif_t *dif); static uint32_t ocs_scsi_dif_check_ref_tag(ocs_t *ocs, ocs_hw_dif_info_t *dif_info, uint32_t exp_ref_tag, ocs_dif_t *dif); static int32_t ocs_scsi_convert_dif_info(ocs_t *ocs, ocs_scsi_dif_info_t *scsi_dif_info, ocs_hw_dif_info_t *hw_dif_info); static int32_t ocs_scsi_io_dispatch_hw_io(ocs_io_t *io, ocs_hw_io_t *hio); static int32_t ocs_scsi_io_dispatch_no_hw_io(ocs_io_t *io); static void _ocs_scsi_io_free(void *arg); /** * @ingroup scsi_api_base * @brief Returns a big-endian 32-bit value given a pointer. * * @param p Pointer to the 32-bit big-endian location. * * @return Returns the byte-swapped 32-bit value. */ static inline uint32_t ocs_fc_getbe32(void *p) { return ocs_be32toh(*((uint32_t*)p)); } /** * @ingroup scsi_api_base * @brief Enable IO allocation. * * @par Description * The SCSI and Transport IO allocation functions are enabled. If the allocation functions * are not enabled, then calls to ocs_scsi_io_alloc() (and ocs_els_io_alloc() for FC) will * fail. * * @param node Pointer to node object. * * @return None. */ void ocs_scsi_io_alloc_enable(ocs_node_t *node) { ocs_assert(node != NULL); ocs_lock(&node->active_ios_lock); node->io_alloc_enabled = TRUE; ocs_unlock(&node->active_ios_lock); } /** * @ingroup scsi_api_base * @brief Disable IO allocation * * @par Description * The SCSI and Transport IO allocation functions are disabled. If the allocation functions * are not enabled, then calls to ocs_scsi_io_alloc() (and ocs_els_io_alloc() for FC) will * fail. * * @param node Pointer to node object * * @return None. */ void ocs_scsi_io_alloc_disable(ocs_node_t *node) { ocs_assert(node != NULL); ocs_lock(&node->active_ios_lock); node->io_alloc_enabled = FALSE; ocs_unlock(&node->active_ios_lock); } /** * @ingroup scsi_api_base * @brief Allocate a SCSI IO context. * * @par Description * A SCSI IO context is allocated and associated with a @c node. 
This function * is called by an initiator-client when issuing SCSI commands to remote * target devices. On completion, ocs_scsi_io_free() is called. * @n @n * The returned ocs_io_t structure has an element of type ocs_scsi_ini_io_t named * "ini_io" that is declared and used by an initiator-client for private information. * * @param node Pointer to the associated node structure. * @param role Role for IO (originator/responder). * * @return Returns the pointer to the IO context, or NULL. * */ ocs_io_t * ocs_scsi_io_alloc(ocs_node_t *node, ocs_scsi_io_role_e role) { ocs_t *ocs; ocs_xport_t *xport; ocs_io_t *io; ocs_assert(node, NULL); ocs_assert(node->ocs, NULL); ocs = node->ocs; ocs_assert(ocs->xport, NULL); xport = ocs->xport; ocs_lock(&node->active_ios_lock); if (!node->io_alloc_enabled) { ocs_unlock(&node->active_ios_lock); return NULL; } io = ocs_io_alloc(ocs); if (io == NULL) { ocs_atomic_add_return(&xport->io_alloc_failed_count, 1); ocs_unlock(&node->active_ios_lock); return NULL; } /* initialize refcount */ ocs_ref_init(&io->ref, _ocs_scsi_io_free, io); if (io->hio != NULL) { ocs_log_err(node->ocs, "assertion failed: io->hio is not NULL\n"); ocs_unlock(&node->active_ios_lock); return NULL; } /* set generic fields */ io->ocs = ocs; io->node = node; /* set type and name */ io->io_type = OCS_IO_TYPE_IO; io->display_name = "scsi_io"; switch (role) { case OCS_SCSI_IO_ROLE_ORIGINATOR: io->cmd_ini = TRUE; io->cmd_tgt = FALSE; break; case OCS_SCSI_IO_ROLE_RESPONDER: io->cmd_ini = FALSE; io->cmd_tgt = TRUE; break; } /* Add to node's active_ios list */ ocs_list_add_tail(&node->active_ios, io); ocs_unlock(&node->active_ios_lock); return io; } /** * @ingroup scsi_api_base * @brief Free a SCSI IO context (internal). * * @par Description * The IO context previously allocated using ocs_scsi_io_alloc() * is freed. This is called from within the transport layer, * when the reference count goes to zero. * * @param arg Pointer to the IO context. * * @return None. */ static void _ocs_scsi_io_free(void *arg) { ocs_io_t *io = (ocs_io_t *)arg; ocs_t *ocs = io->ocs; ocs_node_t *node = io->node; int send_empty_event; ocs_assert(io != NULL); scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name); ocs_assert(ocs_io_busy(io)); ocs_lock(&node->active_ios_lock); ocs_list_remove(&node->active_ios, io); send_empty_event = (!node->io_alloc_enabled) && ocs_list_empty(&node->active_ios); ocs_unlock(&node->active_ios_lock); if (send_empty_event) { ocs_node_post_event(node, OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY, NULL); } io->node = NULL; ocs_io_free(ocs, io); } /** * @ingroup scsi_api_base * @brief Free a SCSI IO context. * * @par Description * The IO context previously allocated using ocs_scsi_io_alloc() is freed. * * @param io Pointer to the IO context. * * @return None. */ void ocs_scsi_io_free(ocs_io_t *io) { scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name); ocs_assert(ocs_ref_read_count(&io->ref) > 0); ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_scsi_io_alloc() */ } static int32_t ocs_scsi_send_io(ocs_hw_io_type_e type, ocs_node_t *node, ocs_io_t *io, uint64_t lun, ocs_scsi_tmf_cmd_e tmf, uint8_t *cdb, uint32_t cdb_len, ocs_scsi_dif_info_t *dif_info, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, uint32_t first_burst, - ocs_scsi_rsp_io_cb_t cb, void *arg); + ocs_scsi_rsp_io_cb_t cb, void *arg, uint32_t flags); /** * @brief Target response completion callback. * * @par Description * Function is called upon the completion of a target IO request. 
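* The SLI4 completion status is translated into an ocs_scsi_io_status_e value (covering DIF guard/app/ref tag errors, local rejects, and timeouts) before the target server's callback is invoked.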
* * @param hio Pointer to the HW IO structure. * @param rnode Remote node associated with the IO that is completing. * @param length Length of the response payload. * @param status Completion status. * @param ext_status Extended completion status. * @param app Application-specific data (generally a pointer to the IO context). * * @return None. */ static void ocs_target_io_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *app) { ocs_io_t *io = app; ocs_t *ocs; ocs_scsi_io_status_e scsi_status = OCS_SCSI_STATUS_GOOD; uint16_t additional_length; uint8_t edir; uint8_t tdpv; ocs_hw_dif_info_t *dif_info = &io->hw_dif; int is_crc; ocs_assert(io); scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status); ocs = io->ocs; ocs_assert(ocs); ocs_scsi_io_free_ovfl(io); io->transferred += length; /* Call target server completion */ if (io->scsi_tgt_cb) { ocs_scsi_io_cb_t cb = io->scsi_tgt_cb; uint32_t flags = 0; /* Clear the callback before invoking the callback */ io->scsi_tgt_cb = NULL; /* if status was good, and auto-good-response was set, then callback * target-server with IO_CMPL_RSP_SENT, otherwise send IO_CMPL */ if ((status == 0) && (io->auto_resp)) flags |= OCS_SCSI_IO_CMPL_RSP_SENT; else flags |= OCS_SCSI_IO_CMPL; switch (status) { case SLI4_FC_WCQE_STATUS_SUCCESS: scsi_status = OCS_SCSI_STATUS_GOOD; break; case SLI4_FC_WCQE_STATUS_DI_ERROR: if (ext_status & SLI4_FC_DI_ERROR_GE) { scsi_status = OCS_SCSI_STATUS_DIF_GUARD_ERROR; } else if (ext_status & SLI4_FC_DI_ERROR_AE) { scsi_status = OCS_SCSI_STATUS_DIF_APP_TAG_ERROR; } else if (ext_status & SLI4_FC_DI_ERROR_RE) { scsi_status = OCS_SCSI_STATUS_DIF_REF_TAG_ERROR; } else { additional_length = ((ext_status >> 16) & 0xFFFF); /* Capture the EDIR and TDPV bits as 0 or 1 for easier printing. */ edir = !!(ext_status & SLI4_FC_DI_ERROR_EDIR); tdpv = !!(ext_status & SLI4_FC_DI_ERROR_TDPV); is_crc = ocs_scsi_dif_guard_is_crc(edir, dif_info); if (edir == 0) { /* For reads, we have everything in memory. Start checking from beginning. */ scsi_status = ocs_scsi_dif_check_unknown(io, 0, io->wire_len, is_crc); } else { /* For writes, use the additional length to determine where to look for the error. * The additional_length field is set to 0 if it is not supported. * The additional length field is valid if: * . additional_length is not zero * . Total Data Placed is valid * . Error Direction is RX (1) * . 
Operation is a pass thru (CRC or CKSUM on IN, and CRC or CHKSUM on OUT) (all pass-thru cases except raw) */ if ((additional_length != 0) && (tdpv != 0) && (dif_info->dif == SLI4_DIF_PASS_THROUGH) && (dif_info->dif_oper != OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW) ) { scsi_status = ocs_scsi_dif_check_unknown(io, length, additional_length, is_crc); } else { /* If we can't do additional checking, then fall-back to guard error */ scsi_status = OCS_SCSI_STATUS_DIF_GUARD_ERROR; } } } break; case SLI4_FC_WCQE_STATUS_LOCAL_REJECT: switch (ext_status) { case SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET: case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED: scsi_status = OCS_SCSI_STATUS_ABORTED; break; case SLI4_FC_LOCAL_REJECT_INVALID_RPI: scsi_status = OCS_SCSI_STATUS_NEXUS_LOST; break; case SLI4_FC_LOCAL_REJECT_NO_XRI: scsi_status = OCS_SCSI_STATUS_NO_IO; break; default: /* TODO: we have seen 0x0d (TX_DMA_FAILED error) */ scsi_status = OCS_SCSI_STATUS_ERROR; break; } break; case SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT: /* target IO timed out */ scsi_status = OCS_SCSI_STATUS_TIMEDOUT_AND_ABORTED; break; case SLI4_FC_WCQE_STATUS_SHUTDOWN: /* Target IO cancelled by HW */ scsi_status = OCS_SCSI_STATUS_SHUTDOWN; break; default: scsi_status = OCS_SCSI_STATUS_ERROR; break; } cb(io, scsi_status, flags, io->scsi_tgt_cb_arg); } ocs_scsi_check_pending(ocs); } /** * @brief Determine if an IO is using CRC for DIF guard format. * * @param direction IO direction: 1 for write, 0 for read. * @param dif_info Pointer to HW DIF info data. * * @return Returns TRUE if using CRC, FALSE if not. */ static int ocs_scsi_dif_guard_is_crc(uint8_t direction, ocs_hw_dif_info_t *dif_info) { int is_crc; if (direction) { /* For writes, check if operation is "OUT_CRC" or not */ switch(dif_info->dif_oper) { case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC: case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC: case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC: is_crc = TRUE; break; default: is_crc = FALSE; break; } } else { /* For reads, check if operation is "IN_CRC" or not */ switch(dif_info->dif_oper) { case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF: case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC: case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM: is_crc = TRUE; break; default: is_crc = FALSE; break; } } return is_crc; } /** * @brief Check a block and DIF data, computing the appropriate SCSI status * * @par Description * This function is used to check blocks and DIF when given an unknown DIF * status using the following logic: * * Given the address of the last good block, and a length of bytes that includes * the block with the DIF error, find the bad block. If a block is found with an * app_tag or ref_tag error, then return the appropriate error. No block is expected * to have a block guard error since hardware "fixes" the crc. So if no block in the * range of blocks has an error, then it is presumed to be a BLOCK GUARD error. * * @param io Pointer to the IO object. * @param length Length of bytes covering the good blocks. * @param check_length Length of bytes that covers the bad block. * @param is_crc True if guard is using CRC format. * * @return Returns SCSI status. 
*/ static ocs_scsi_io_status_e ocs_scsi_dif_check_unknown(ocs_io_t *io, uint32_t length, uint32_t check_length, int is_crc) { uint32_t i; ocs_t *ocs = io->ocs; ocs_hw_dif_info_t *dif_info = &io->hw_dif; ocs_scsi_io_status_e scsi_status = OCS_SCSI_STATUS_DIF_GUARD_ERROR; uint32_t blocksize; /* data block size */ uint64_t first_check_block; /* first block following total data placed */ uint64_t last_check_block; /* last block to check */ uint32_t check_count; /* count of blocks to check */ ocs_scsi_vaddr_len_t addrlen[4]; /* address-length pairs returned from target */ int32_t addrlen_count; /* count of address-length pairs */ ocs_dif_t *dif; /* pointer to DIF block returned from target */ ocs_scsi_dif_info_t scsi_dif_info = io->scsi_dif_info; blocksize = ocs_hw_dif_mem_blocksize(&io->hw_dif, TRUE); first_check_block = length / blocksize; last_check_block = ((length + check_length) / blocksize); check_count = last_check_block - first_check_block; ocs_log_debug(ocs, "blocksize %d first check_block %" PRId64 " last_check_block %" PRId64 " check_count %d\n", blocksize, first_check_block, last_check_block, check_count); for (i = first_check_block; i < last_check_block; i++) { addrlen_count = ocs_scsi_get_block_vaddr(io, (scsi_dif_info.lba + i), addrlen, ARRAY_SIZE(addrlen), (void**) &dif); if (addrlen_count < 0) { ocs_log_test(ocs, "ocs_scsi_get_block_vaddr() failed: %d\n", addrlen_count); scsi_status = OCS_SCSI_STATUS_DIF_UNKNOWN_ERROR; break; } if (! ocs_scsi_dif_check_guard(dif_info, addrlen, addrlen_count, dif, is_crc)) { ocs_log_debug(ocs, "block guard check error, lba %" PRId64 "\n", scsi_dif_info.lba + i); scsi_status = OCS_SCSI_STATUS_DIF_GUARD_ERROR; break; } if (! ocs_scsi_dif_check_app_tag(ocs, dif_info, scsi_dif_info.app_tag, dif)) { ocs_log_debug(ocs, "app tag check error, lba %" PRId64 "\n", scsi_dif_info.lba + i); scsi_status = OCS_SCSI_STATUS_DIF_APP_TAG_ERROR; break; } if (! ocs_scsi_dif_check_ref_tag(ocs, dif_info, (scsi_dif_info.ref_tag + i), dif)) { ocs_log_debug(ocs, "ref tag check error, lba %" PRId64 "\n", scsi_dif_info.lba + i); scsi_status = OCS_SCSI_STATUS_DIF_REF_TAG_ERROR; break; } } return scsi_status; } /** * @brief Check the block guard of block data * * @par Description * Using the dif_info for the transfer, check the block guard value. * * @param dif_info Pointer to HW DIF info data. * @param addrlen Array of address length pairs. * @param addrlen_count Number of entries in the addrlen[] array. * @param dif Pointer to the DIF data block being checked. * @param is_crc True if guard is using CRC format. * * @return Returns TRUE if block guard check is ok. */ static uint32_t ocs_scsi_dif_check_guard(ocs_hw_dif_info_t *dif_info, ocs_scsi_vaddr_len_t addrlen[], uint32_t addrlen_count, ocs_dif_t *dif, int is_crc) { uint16_t crc = dif_info->dif_seed; uint32_t i; uint16_t checksum; if ((dif == NULL) || !dif_info->check_guard) { return TRUE; } if (is_crc) { for (i = 0; i < addrlen_count; i++) { crc = ocs_scsi_dif_calc_crc(addrlen[i].vaddr, addrlen[i].length, crc); } return (crc == ocs_be16toh(dif->crc)); } else { checksum = ocs_scsi_dif_calc_checksum(addrlen, addrlen_count); return (checksum == dif->crc); } } /** * @brief Check the app tag of dif data * * @par Description * Using the dif_info for the transfer, check the app tag. * * @param ocs Pointer to the ocs structure for logging. * @param dif_info Pointer to HW DIF info data. * @param exp_app_tag The value the app tag is expected to be. * @param dif Pointer to the DIF data block being checked. 
* * @return Returns TRUE if app tag check is ok. */ static uint32_t ocs_scsi_dif_check_app_tag(ocs_t *ocs, ocs_hw_dif_info_t *dif_info, uint16_t exp_app_tag, ocs_dif_t *dif) { if ((dif == NULL) || !dif_info->check_app_tag) { return TRUE; } ocs_log_debug(ocs, "expected app tag 0x%x, actual 0x%x\n", exp_app_tag, ocs_be16toh(dif->app_tag)); return (exp_app_tag == ocs_be16toh(dif->app_tag)); } /** * @brief Check the ref tag of dif data * * @par Description * Using the dif_info for the transfer, check the app tag. * * @param ocs Pointer to the ocs structure for logging. * @param dif_info Pointer to HW DIF info data. * @param exp_ref_tag The value the ref tag is expected to be. * @param dif Pointer to the DIF data block being checked. * * @return Returns TRUE if ref tag check is ok. */ static uint32_t ocs_scsi_dif_check_ref_tag(ocs_t *ocs, ocs_hw_dif_info_t *dif_info, uint32_t exp_ref_tag, ocs_dif_t *dif) { if ((dif == NULL) || !dif_info->check_ref_tag) { return TRUE; } if (exp_ref_tag != ocs_be32toh(dif->ref_tag)) { ocs_log_debug(ocs, "expected ref tag 0x%x, actual 0x%x\n", exp_ref_tag, ocs_be32toh(dif->ref_tag)); return FALSE; } else { return TRUE; } } /** * @brief Return count of SGE's required for request * * @par Description * An accurate count of SGEs is computed and returned. * * @param hw_dif Pointer to HW dif information. * @param sgl Pointer to SGL from back end. * @param sgl_count Count of SGEs in SGL. * * @return Count of SGEs. */ static uint32_t ocs_scsi_count_sgls(ocs_hw_dif_info_t *hw_dif, ocs_scsi_sgl_t *sgl, uint32_t sgl_count) { uint32_t count = 0; uint32_t i; /* Convert DIF Information */ if (hw_dif->dif_oper != OCS_HW_DIF_OPER_DISABLED) { /* If we're not DIF separate, then emit a seed SGE */ if (!hw_dif->dif_separate) { count++; } for (i = 0; i < sgl_count; i++) { /* If DIF is enabled, and DIF is separate, then append a SEED then DIF SGE */ if (hw_dif->dif_separate) { count += 2; } count++; } } else { count = sgl_count; } return count; } static int32_t ocs_scsi_build_sgls(ocs_hw_t *hw, ocs_hw_io_t *hio, ocs_hw_dif_info_t *hw_dif, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, ocs_hw_io_type_e type) { int32_t rc; uint32_t i; ocs_t *ocs = hw->os; uint32_t blocksize = 0; uint32_t blockcount; ocs_assert(hio, -1); /* Initialize HW SGL */ rc = ocs_hw_io_init_sges(hw, hio, type); if (rc) { ocs_log_err(ocs, "ocs_hw_io_init_sges failed: %d\n", rc); return -1; } /* Convert DIF Information */ if (hw_dif->dif_oper != OCS_HW_DIF_OPER_DISABLED) { /* If we're not DIF separate, then emit a seed SGE */ if (!hw_dif->dif_separate) { rc = ocs_hw_io_add_seed_sge(hw, hio, hw_dif); if (rc) { return rc; } } /* if we are doing DIF separate, then figure out the block size so that we * can update the ref tag in the DIF seed SGE. 
Also verify that the * sgl lengths are all multiples of the blocksize */ if (hw_dif->dif_separate) { switch(hw_dif->blk_size) { case OCS_HW_DIF_BK_SIZE_512: blocksize = 512; break; case OCS_HW_DIF_BK_SIZE_1024: blocksize = 1024; break; case OCS_HW_DIF_BK_SIZE_2048: blocksize = 2048; break; case OCS_HW_DIF_BK_SIZE_4096: blocksize = 4096; break; case OCS_HW_DIF_BK_SIZE_520: blocksize = 520; break; case OCS_HW_DIF_BK_SIZE_4104: blocksize = 4104; break; default: ocs_log_test(hw->os, "Invalid hw_dif blocksize %d\n", hw_dif->blk_size); return -1; } for (i = 0; i < sgl_count; i++) { if ((sgl[i].len % blocksize) != 0) { ocs_log_test(hw->os, "sgl[%d] len of %ld is not a multiple of blocksize\n", i, sgl[i].len); return -1; } } } for (i = 0; i < sgl_count; i++) { ocs_assert(sgl[i].addr, -1); ocs_assert(sgl[i].len, -1); /* If DIF is enabled, and DIF is separate, then append a SEED then DIF SGE */ if (hw_dif->dif_separate) { rc = ocs_hw_io_add_seed_sge(hw, hio, hw_dif); if (rc) { return rc; } rc = ocs_hw_io_add_dif_sge(hw, hio, sgl[i].dif_addr); if (rc) { return rc; } /* Update the ref_tag for the next DIF seed SGE */ blockcount = sgl[i].len / blocksize; if (hw_dif->dif_oper == OCS_HW_DIF_OPER_INSERT) { hw_dif->ref_tag_repl += blockcount; } else { hw_dif->ref_tag_cmp += blockcount; } } /* Add data SGE */ rc = ocs_hw_io_add_sge(hw, hio, sgl[i].addr, sgl[i].len); if (rc) { ocs_log_err(ocs, "ocs_hw_io_add_sge failed: count=%d rc=%d\n", sgl_count, rc); return rc; } } } else { for (i = 0; i < sgl_count; i++) { ocs_assert(sgl[i].addr, -1); ocs_assert(sgl[i].len, -1); /* Add data SGE */ rc = ocs_hw_io_add_sge(hw, hio, sgl[i].addr, sgl[i].len); if (rc) { ocs_log_err(ocs, "ocs_hw_io_add_sge failed: count=%d rc=%d\n", sgl_count, rc); return rc; } } } return 0; } /** * @ingroup scsi_api_base * @brief Convert SCSI API T10 DIF information into the FC HW format. * * @param ocs Pointer to the ocs structure for logging. * @param scsi_dif_info Pointer to the SCSI API T10 DIF fields. * @param hw_dif_info Pointer to the FC HW API T10 DIF fields. * * @return Returns 0 on success, or a negative error code value on failure.
*/ static int32_t ocs_scsi_convert_dif_info(ocs_t *ocs, ocs_scsi_dif_info_t *scsi_dif_info, ocs_hw_dif_info_t *hw_dif_info) { uint32_t dif_seed; ocs_memset(hw_dif_info, 0, sizeof(ocs_hw_dif_info_t)); if (scsi_dif_info == NULL) { hw_dif_info->dif_oper = OCS_HW_DIF_OPER_DISABLED; hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_NA; return 0; } /* Convert the DIF operation */ switch(scsi_dif_info->dif_oper) { case OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CRC: hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC; hw_dif_info->dif = SLI4_DIF_INSERT; break; case OCS_SCSI_DIF_OPER_IN_CRC_OUT_NODIF: hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF; hw_dif_info->dif = SLI4_DIF_STRIP; break; case OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CHKSUM: hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM; hw_dif_info->dif = SLI4_DIF_INSERT; break; case OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_NODIF: hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF; hw_dif_info->dif = SLI4_DIF_STRIP; break; case OCS_SCSI_DIF_OPER_IN_CRC_OUT_CRC: hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC; hw_dif_info->dif = SLI4_DIF_PASS_THROUGH; break; case OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CHKSUM: hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM; hw_dif_info->dif = SLI4_DIF_PASS_THROUGH; break; case OCS_SCSI_DIF_OPER_IN_CRC_OUT_CHKSUM: hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM; hw_dif_info->dif = SLI4_DIF_PASS_THROUGH; break; case OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CRC: hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC; hw_dif_info->dif = SLI4_DIF_PASS_THROUGH; break; case OCS_SCSI_DIF_OPER_IN_RAW_OUT_RAW: hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW; hw_dif_info->dif = SLI4_DIF_PASS_THROUGH; break; default: ocs_log_test(ocs, "unhandled SCSI DIF operation %d\n", scsi_dif_info->dif_oper); return -1; } switch(scsi_dif_info->blk_size) { case OCS_SCSI_DIF_BK_SIZE_512: hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_512; break; case OCS_SCSI_DIF_BK_SIZE_1024: hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_1024; break; case OCS_SCSI_DIF_BK_SIZE_2048: hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_2048; break; case OCS_SCSI_DIF_BK_SIZE_4096: hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_4096; break; case OCS_SCSI_DIF_BK_SIZE_520: hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_520; break; case OCS_SCSI_DIF_BK_SIZE_4104: hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_4104; break; default: ocs_log_test(ocs, "unhandled SCSI DIF block size %d\n", scsi_dif_info->blk_size); return -1; } /* If the operation is an INSERT the tags provided are the ones that should be * inserted, otherwise they're the ones to be checked against. */ if (hw_dif_info->dif == SLI4_DIF_INSERT ) { hw_dif_info->ref_tag_repl = scsi_dif_info->ref_tag; hw_dif_info->app_tag_repl = scsi_dif_info->app_tag; } else { hw_dif_info->ref_tag_cmp = scsi_dif_info->ref_tag; hw_dif_info->app_tag_cmp = scsi_dif_info->app_tag; } hw_dif_info->check_ref_tag = scsi_dif_info->check_ref_tag; hw_dif_info->check_app_tag = scsi_dif_info->check_app_tag; hw_dif_info->check_guard = scsi_dif_info->check_guard; hw_dif_info->auto_incr_ref_tag = 1; hw_dif_info->dif_separate = scsi_dif_info->dif_separate; hw_dif_info->disable_app_ffff = scsi_dif_info->disable_app_ffff; hw_dif_info->disable_app_ref_ffff = scsi_dif_info->disable_app_ref_ffff; ocs_hw_get(&ocs->hw, OCS_HW_DIF_SEED, &dif_seed); hw_dif_info->dif_seed = dif_seed; return 0; } /** * @ingroup scsi_api_base * @brief This function logs the SGLs for an IO. * * @param io Pointer to the IO context. 
*/ static void ocs_log_sgl(ocs_io_t *io) { ocs_hw_io_t *hio = io->hio; sli4_sge_t *data = NULL; uint32_t *dword = NULL; uint32_t i; uint32_t n_sge; scsi_io_trace(io, "def_sgl at 0x%x 0x%08x\n", ocs_addr32_hi(hio->def_sgl.phys), ocs_addr32_lo(hio->def_sgl.phys)); n_sge = (hio->sgl == &hio->def_sgl ? hio->n_sge : hio->def_sgl_count); for (i = 0, data = hio->def_sgl.virt; i < n_sge; i++, data++) { dword = (uint32_t*)data; scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n", i, dword[0], dword[1], dword[2], dword[3]); if (dword[2] & (1U << 31)) { break; } } if (hio->ovfl_sgl != NULL && hio->sgl == hio->ovfl_sgl) { scsi_io_trace(io, "Overflow at 0x%x 0x%08x\n", ocs_addr32_hi(hio->ovfl_sgl->phys), ocs_addr32_lo(hio->ovfl_sgl->phys)); for (i = 0, data = hio->ovfl_sgl->virt; i < hio->n_sge; i++, data++) { dword = (uint32_t*)data; scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n", i, dword[0], dword[1], dword[2], dword[3]); if (dword[2] & (1U << 31)) { break; } } } } /** * @brief Check pending error asynchronous callback function. * * @par Description * Invoke the HW callback function for a given IO. This function is called * from the NOP mailbox completion context. * * @param hw Pointer to HW object. * @param status Completion status. * @param mqe Mailbox completion queue entry. * @param arg General purpose argument. * * @return Returns 0. */ static int32_t ocs_scsi_check_pending_async_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) { ocs_io_t *io = arg; if (io != NULL) { if (io->hw_cb != NULL) { ocs_hw_done_t cb = io->hw_cb; io->hw_cb = NULL; cb(io->hio, NULL, 0, SLI4_FC_WCQE_STATUS_DISPATCH_ERROR, 0, io); } } return 0; } /** * @brief Check for pending IOs to dispatch. * * @par Description * If there are IOs on the pending list, and a HW IO is available, then * dispatch the IOs. * * @param ocs Pointer to the OCS structure. * * @return None. */ void ocs_scsi_check_pending(ocs_t *ocs) { ocs_xport_t *xport = ocs->xport; ocs_io_t *io; ocs_hw_io_t *hio; int32_t status; int count = 0; int dispatch; /* Guard against recursion */ if (ocs_atomic_add_return(&xport->io_pending_recursing, 1)) { /* This function is already running. Decrement and return. */ ocs_atomic_sub_return(&xport->io_pending_recursing, 1); return; } do { ocs_lock(&xport->io_pending_lock); status = 0; hio = NULL; io = ocs_list_remove_head(&xport->io_pending_list); if (io != NULL) { if (io->io_type == OCS_IO_TYPE_ABORT) { hio = NULL; } else { hio = ocs_hw_io_alloc(&ocs->hw); if (hio == NULL) { /* * No HW IO available. * Put IO back on the front of pending list */ ocs_list_add_head(&xport->io_pending_list, io); io = NULL; } else { hio->eq = io->hw_priv; } } } /* Must drop the lock before dispatching the IO */ ocs_unlock(&xport->io_pending_lock); if (io != NULL) { count++; /* * We pulled an IO off the pending list, * and either got an HW IO or don't need one */ ocs_atomic_sub_return(&xport->io_pending_count, 1); if (hio == NULL) { status = ocs_scsi_io_dispatch_no_hw_io(io); } else { status = ocs_scsi_io_dispatch_hw_io(io, hio); } if (status) { /* * Invoke the HW callback, but do so in the separate execution context, * provided by the NOP mailbox completion processing context by using * ocs_hw_async_call() */ if (ocs_hw_async_call(&ocs->hw, ocs_scsi_check_pending_async_cb, io)) { ocs_log_test(ocs, "call to ocs_hw_async_call() failed\n"); } } } } while (io != NULL); /* * If nothing was removed from the list, * we might be in a case where we need to abort an * active IO and the abort is on the pending list. 
* Look for an abort we can dispatch. */ if (count == 0 ) { dispatch = 0; ocs_lock(&xport->io_pending_lock); ocs_list_foreach(&xport->io_pending_list, io) { if (io->io_type == OCS_IO_TYPE_ABORT) { if (io->io_to_abort->hio != NULL) { /* This IO has a HW IO, so it is active. Dispatch the abort. */ dispatch = 1; } else { /* Leave this abort on the pending list and keep looking */ dispatch = 0; } } if (dispatch) { ocs_list_remove(&xport->io_pending_list, io); ocs_atomic_sub_return(&xport->io_pending_count, 1); break; } } ocs_unlock(&xport->io_pending_lock); if (dispatch) { status = ocs_scsi_io_dispatch_no_hw_io(io); if (status) { if (ocs_hw_async_call(&ocs->hw, ocs_scsi_check_pending_async_cb, io)) { ocs_log_test(ocs, "call to ocs_hw_async_call() failed\n"); } } } } ocs_atomic_sub_return(&xport->io_pending_recursing, 1); return; } /** * @brief Attempt to dispatch a non-abort IO * * @par Description * An IO is dispatched: * - if the pending list is not empty, add IO to pending list * and call a function to process the pending list. * - if pending list is empty, try to allocate a HW IO. If none * is available, place this IO at the tail of the pending IO * list. * - if HW IO is available, attach this IO to the HW IO and * submit it. * * @param io Pointer to IO structure. * @param cb Callback function. * * @return Returns 0 on success, a negative error code value on failure. */ int32_t ocs_scsi_io_dispatch(ocs_io_t *io, void *cb) { ocs_hw_io_t *hio; ocs_t *ocs = io->ocs; ocs_xport_t *xport = ocs->xport; ocs_assert(io->cmd_tgt || io->cmd_ini, -1); ocs_assert((io->io_type != OCS_IO_TYPE_ABORT), -1); io->hw_cb = cb; /* * if this IO already has a HW IO, then this is not the first phase of * the IO. Send it to the HW. */ if (io->hio != NULL) { return ocs_scsi_io_dispatch_hw_io(io, io->hio); } /* * We don't already have a HW IO associated with the IO. First check * the pending list. If not empty, add IO to the tail and process the * pending list. */ ocs_lock(&xport->io_pending_lock); if (!ocs_list_empty(&xport->io_pending_list)) { /* * If this is a low latency request, then put it at the front of the IO pending * queue, otherwise put it at the end of the queue. */ if (io->low_latency) { ocs_list_add_head(&xport->io_pending_list, io); } else { ocs_list_add_tail(&xport->io_pending_list, io); } ocs_unlock(&xport->io_pending_lock); ocs_atomic_add_return(&xport->io_pending_count, 1); ocs_atomic_add_return(&xport->io_total_pending, 1); /* process pending list */ ocs_scsi_check_pending(ocs); return 0; } ocs_unlock(&xport->io_pending_lock); /* * We don't have a HW IO associated with the IO and there's nothing * on the pending list. Attempt to allocate a HW IO and dispatch it. */ hio = ocs_hw_io_alloc(&io->ocs->hw); if (hio == NULL) { /* Couldn't get a HW IO. Save this IO on the pending list */ ocs_lock(&xport->io_pending_lock); ocs_list_add_tail(&xport->io_pending_list, io); ocs_unlock(&xport->io_pending_lock); ocs_atomic_add_return(&xport->io_total_pending, 1); ocs_atomic_add_return(&xport->io_pending_count, 1); return 0; } /* We successfully allocated a HW IO; dispatch to HW */ return ocs_scsi_io_dispatch_hw_io(io, hio); } /** * @brief Attempt to dispatch an Abort IO. * * @par Description * An Abort IO is dispatched: * - if the pending list is not empty, add IO to pending list * and call a function to process the pending list. * - if pending list is empty, send abort to the HW. * * @param io Pointer to IO structure. * @param cb Callback function.
* * @return Returns 0 on success, a negative error code value on failure. */ int32_t ocs_scsi_io_dispatch_abort(ocs_io_t *io, void *cb) { ocs_t *ocs = io->ocs; ocs_xport_t *xport = ocs->xport; ocs_assert((io->io_type == OCS_IO_TYPE_ABORT), -1); io->hw_cb = cb; /* * For aborts, we don't need a HW IO, but we still want to pass through * the pending list to preserve ordering. Thus, if the pending list is * not empty, add this abort to the pending list and process the pending list. */ ocs_lock(&xport->io_pending_lock); if (!ocs_list_empty(&xport->io_pending_list)) { ocs_list_add_tail(&xport->io_pending_list, io); ocs_unlock(&xport->io_pending_lock); ocs_atomic_add_return(&xport->io_pending_count, 1); ocs_atomic_add_return(&xport->io_total_pending, 1); /* process pending list */ ocs_scsi_check_pending(ocs); return 0; } ocs_unlock(&xport->io_pending_lock); /* nothing on pending list, dispatch abort */ return ocs_scsi_io_dispatch_no_hw_io(io); } /** * @brief Dispatch IO * * @par Description * An IO and its associated HW IO is dispatched to the HW. * * @param io Pointer to IO structure. * @param hio Pointer to HW IO structure from which IO will be * dispatched. * * @return Returns 0 on success, a negative error code value on failure. */ static int32_t ocs_scsi_io_dispatch_hw_io(ocs_io_t *io, ocs_hw_io_t *hio) { int32_t rc; ocs_t *ocs = io->ocs; /* Got a HW IO; update ini/tgt_task_tag with HW IO info and dispatch */ io->hio = hio; if (io->cmd_tgt) { io->tgt_task_tag = hio->indicator; } else if (io->cmd_ini) { io->init_task_tag = hio->indicator; } io->hw_tag = hio->reqtag; hio->eq = io->hw_priv; /* Copy WQ steering */ switch(io->wq_steering) { case OCS_SCSI_WQ_STEERING_CLASS >> OCS_SCSI_WQ_STEERING_SHIFT: hio->wq_steering = OCS_HW_WQ_STEERING_CLASS; break; case OCS_SCSI_WQ_STEERING_REQUEST >> OCS_SCSI_WQ_STEERING_SHIFT: hio->wq_steering = OCS_HW_WQ_STEERING_REQUEST; break; case OCS_SCSI_WQ_STEERING_CPU >> OCS_SCSI_WQ_STEERING_SHIFT: hio->wq_steering = OCS_HW_WQ_STEERING_CPU; break; } switch (io->io_type) { case OCS_IO_TYPE_IO: { uint32_t max_sgl; uint32_t total_count; uint32_t host_allocated; ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &max_sgl); ocs_hw_get(&ocs->hw, OCS_HW_SGL_CHAINING_HOST_ALLOCATED, &host_allocated); /* * If the requested SGL is larger than the default size, then we can allocate * an overflow SGL. */ total_count = ocs_scsi_count_sgls(&io->hw_dif, io->sgl, io->sgl_count); /* * Lancer requires us to allocate the chained memory area, but * Skyhawk must use the SGL list associated with another XRI. 
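 *
 * For example (illustrative numbers only): with max_sgl = 64 and a
 * request needing total_count = 70 SGEs, count = 70 - 64 + 1 = 7
 * entries are allocated for the chained segment, the six extra data
 * SGEs plus one link SGE.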
*/ if (host_allocated && total_count > max_sgl) { /* Compute count needed, the number extra plus 1 for the link sge */ uint32_t count = total_count - max_sgl + 1; rc = ocs_dma_alloc(ocs, &io->ovfl_sgl, count*sizeof(sli4_sge_t), 64); if (rc) { ocs_log_err(ocs, "ocs_dma_alloc overflow sgl failed\n"); break; } rc = ocs_hw_io_register_sgl(&ocs->hw, io->hio, &io->ovfl_sgl, count); if (rc) { ocs_scsi_io_free_ovfl(io); ocs_log_err(ocs, "ocs_hw_io_register_sgl() failed\n"); break; } /* EVT: update chained_io_count */ io->node->chained_io_count++; } rc = ocs_scsi_build_sgls(&ocs->hw, io->hio, &io->hw_dif, io->sgl, io->sgl_count, io->hio_type); if (rc) { ocs_scsi_io_free_ovfl(io); break; } if (OCS_LOG_ENABLE_SCSI_TRACE(ocs)) { ocs_log_sgl(io); } if (io->app_id) { io->iparam.fcp_tgt.app_id = io->app_id; } rc = ocs_hw_io_send(&io->ocs->hw, io->hio_type, io->hio, io->wire_len, &io->iparam, &io->node->rnode, io->hw_cb, io); break; } case OCS_IO_TYPE_ELS: case OCS_IO_TYPE_CT: { rc = ocs_hw_srrs_send(&ocs->hw, io->hio_type, io->hio, &io->els_req, io->wire_len, &io->els_rsp, &io->node->rnode, &io->iparam, io->hw_cb, io); break; } case OCS_IO_TYPE_CT_RESP: { rc = ocs_hw_srrs_send(&ocs->hw, io->hio_type, io->hio, &io->els_rsp, io->wire_len, NULL, &io->node->rnode, &io->iparam, io->hw_cb, io); break; } case OCS_IO_TYPE_BLS_RESP: { /* no need to update tgt_task_tag for BLS response since the RX_ID * will be specified by the payload, not the XRI */ rc = ocs_hw_srrs_send(&ocs->hw, io->hio_type, io->hio, NULL, 0, NULL, &io->node->rnode, &io->iparam, io->hw_cb, io); break; } default: scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type); rc = -1; break; } return rc; } /** * @brief Dispatch IO * * @par Description * An IO that does require a HW IO is dispatched to the HW. * * @param io Pointer to IO structure. * * @return Returns 0 on success, or a negative error code value on failure. */ static int32_t ocs_scsi_io_dispatch_no_hw_io(ocs_io_t *io) { int32_t rc; switch (io->io_type) { case OCS_IO_TYPE_ABORT: { ocs_hw_io_t *hio_to_abort = NULL; ocs_assert(io->io_to_abort, -1); hio_to_abort = io->io_to_abort->hio; if (hio_to_abort == NULL) { /* * If "IO to abort" does not have an associated HW IO, immediately * make callback with success. The command must have been sent to * the backend, but the data phase has not yet started, so we don't * have a HW IO. * * Note: since the backend shims should be taking a reference * on io_to_abort, it should not be possible to have been completed * and freed by the backend before the abort got here. */ scsi_io_printf(io, "IO: " SCSI_IOFMT " not active\n", SCSI_IOFMT_ARGS(io->io_to_abort)); ((ocs_hw_done_t)io->hw_cb)(io->hio, NULL, 0, SLI4_FC_WCQE_STATUS_SUCCESS, 0, io); rc = 0; } else { /* HW IO is valid, abort it */ scsi_io_printf(io, "aborting " SCSI_IOFMT "\n", SCSI_IOFMT_ARGS(io->io_to_abort)); rc = ocs_hw_io_abort(&io->ocs->hw, hio_to_abort, io->send_abts, io->hw_cb, io); if (rc) { int status = SLI4_FC_WCQE_STATUS_SUCCESS; if ((rc != OCS_HW_RTN_IO_NOT_ACTIVE) && (rc != OCS_HW_RTN_IO_ABORT_IN_PROGRESS)) { status = -1; scsi_io_printf(io, "Failed to abort IO: " SCSI_IOFMT " status=%d\n", SCSI_IOFMT_ARGS(io->io_to_abort), rc); } ((ocs_hw_done_t)io->hw_cb)(io->hio, NULL, 0, status, 0, io); rc = 0; } } break; } default: scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type); rc = -1; break; } return rc; } /** * @ingroup scsi_api_base * @brief Send read/write data. 
* * @par Description * This call is made by a target-server to initiate a SCSI read or write data phase, transferring * data between the target to the remote initiator. The payload is specified by the * scatter-gather list @c sgl of length @c sgl_count. The @c wire_len argument * specifies the payload length (independent of the scatter-gather list cumulative length). * @n @n * The @c flags argument has one bit, OCS_SCSI_LAST_DATAPHASE, which is a hint to the base * driver that it may use auto SCSI response features if the hardware supports it. * @n @n * Upon completion, the callback function @b cb is called with flags indicating that the * IO has completed (OCS_SCSI_IO_COMPL) and another data phase or response may be sent; * that the IO has completed and no response needs to be sent (OCS_SCSI_IO_COMPL_NO_RSP); * or that the IO was aborted (OCS_SCSI_IO_ABORTED). * * @param io Pointer to the IO context. * @param flags Flags controlling the sending of data. * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF. * @param sgl Pointer to the payload scatter-gather list. * @param sgl_count Count of the scatter-gather list elements. * @param xwire_len Length of the payload on wire, in bytes. * @param type HW IO type. * @param enable_ar Enable auto-response if true. * @param cb Completion callback. * @param arg Application-supplied callback data. * * @return Returns 0 on success, or a negative error code value on failure. */ static inline int32_t ocs_scsi_xfer_data(ocs_io_t *io, uint32_t flags, ocs_scsi_dif_info_t *dif_info, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t xwire_len, ocs_hw_io_type_e type, int enable_ar, ocs_scsi_io_cb_t cb, void *arg) { int32_t rc; ocs_t *ocs; uint32_t disable_ar_tgt_dif = FALSE; size_t residual = 0; if ((dif_info != NULL) && (dif_info->dif_oper == OCS_SCSI_DIF_OPER_DISABLED)) { dif_info = NULL; } ocs_assert(io, -1); if (dif_info != NULL) { ocs_hw_get(&io->ocs->hw, OCS_HW_DISABLE_AR_TGT_DIF, &disable_ar_tgt_dif); if (disable_ar_tgt_dif) { enable_ar = FALSE; } } io->sgl_count = sgl_count; /* If needed, copy SGL */ if (sgl && (sgl != io->sgl)) { ocs_assert(sgl_count <= io->sgl_allocated, -1); ocs_memcpy(io->sgl, sgl, sgl_count*sizeof(*io->sgl)); } ocs = io->ocs; ocs_assert(ocs, -1); ocs_assert(io->node, -1); scsi_io_trace(io, "%s wire_len %d\n", (type == OCS_HW_IO_TARGET_READ) ? 
"send" : "recv", xwire_len); ocs_assert(sgl, -1); ocs_assert(sgl_count > 0, -1); ocs_assert(io->exp_xfer_len > io->transferred, -1); io->hio_type = type; io->scsi_tgt_cb = cb; io->scsi_tgt_cb_arg = arg; rc = ocs_scsi_convert_dif_info(ocs, dif_info, &io->hw_dif); if (rc) { return rc; } /* If DIF is used, then save lba for error recovery */ if (dif_info) { io->scsi_dif_info = *dif_info; } io->wire_len = MIN(xwire_len, io->exp_xfer_len - io->transferred); residual = (xwire_len - io->wire_len); ocs_memset(&io->iparam, 0, sizeof(io->iparam)); io->iparam.fcp_tgt.ox_id = io->init_task_tag; io->iparam.fcp_tgt.offset = io->transferred; io->iparam.fcp_tgt.dif_oper = io->hw_dif.dif; io->iparam.fcp_tgt.blk_size = io->hw_dif.blk_size; io->iparam.fcp_tgt.cs_ctl = io->cs_ctl; io->iparam.fcp_tgt.timeout = io->timeout; /* if this is the last data phase and there is no residual, enable * auto-good-response */ if (enable_ar && (flags & OCS_SCSI_LAST_DATAPHASE) && (residual == 0) && ((io->transferred + io->wire_len) == io->exp_xfer_len) && (!(flags & OCS_SCSI_NO_AUTO_RESPONSE))) { io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE; io->auto_resp = TRUE; } else { io->auto_resp = FALSE; } /* save this transfer length */ io->xfer_req = io->wire_len; /* Adjust the transferred count to account for overrun * when the residual is calculated in ocs_scsi_send_resp */ io->transferred += residual; /* Adjust the SGL size if there is overrun */ if (residual) { ocs_scsi_sgl_t *sgl_ptr = &io->sgl[sgl_count-1]; while (residual) { size_t len = sgl_ptr->len; if ( len > residual) { sgl_ptr->len = len - residual; residual = 0; } else { sgl_ptr->len = 0; residual -= len; io->sgl_count--; } sgl_ptr--; } } /* Set latency and WQ steering */ io->low_latency = (flags & OCS_SCSI_LOW_LATENCY) != 0; io->wq_steering = (flags & OCS_SCSI_WQ_STEERING_MASK) >> OCS_SCSI_WQ_STEERING_SHIFT; io->wq_class = (flags & OCS_SCSI_WQ_CLASS_MASK) >> OCS_SCSI_WQ_CLASS_SHIFT; return ocs_scsi_io_dispatch(io, ocs_target_io_cb); } int32_t ocs_scsi_send_rd_data(ocs_io_t *io, uint32_t flags, ocs_scsi_dif_info_t *dif_info, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t len, ocs_scsi_io_cb_t cb, void *arg) { return ocs_scsi_xfer_data(io, flags, dif_info, sgl, sgl_count, len, OCS_HW_IO_TARGET_READ, enable_tsend_auto_resp(io->ocs), cb, arg); } int32_t ocs_scsi_recv_wr_data(ocs_io_t *io, uint32_t flags, ocs_scsi_dif_info_t *dif_info, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t len, ocs_scsi_io_cb_t cb, void *arg) { return ocs_scsi_xfer_data(io, flags, dif_info, sgl, sgl_count, len, OCS_HW_IO_TARGET_WRITE, enable_treceive_auto_resp(io->ocs), cb, arg); } /** * @ingroup scsi_api_base * @brief Free overflow SGL. * * @par Description * Free the overflow SGL if it is present. * * @param io Pointer to IO object. * * @return None. */ static void ocs_scsi_io_free_ovfl(ocs_io_t *io) { if (io->ovfl_sgl.size) { ocs_dma_free(io->ocs, &io->ovfl_sgl); } } /** * @ingroup scsi_api_base * @brief Send response data. * * @par Description * This function is used by a target-server to send the SCSI response data to a remote * initiator node. The target-server populates the @c ocs_scsi_cmd_resp_t * argument with scsi status, status qualifier, sense data, and response data, as * needed. * @n @n * Upon completion, the callback function @c cb is invoked. The target-server will generally * clean up its IO context resources and call ocs_scsi_io_complete(). * * @param io Pointer to the IO context. * @param flags Flags to control sending of the SCSI response. 
* @param rsp Pointer to the response data populated by the caller. * @param cb Completion callback. * @param arg Application-specified completion callback argument. * @return Returns 0 on success, or a negative error code value on failure. */ int32_t ocs_scsi_send_resp(ocs_io_t *io, uint32_t flags, ocs_scsi_cmd_resp_t *rsp, ocs_scsi_io_cb_t cb, void *arg) { ocs_t *ocs; int32_t residual; int auto_resp = TRUE; /* Always try auto resp */ uint8_t scsi_status = 0; uint16_t scsi_status_qualifier = 0; uint8_t *sense_data = NULL; uint32_t sense_data_length = 0; ocs_assert(io, -1); ocs = io->ocs; ocs_assert(ocs, -1); ocs_assert(io->node, -1); ocs_scsi_convert_dif_info(ocs, NULL, &io->hw_dif); if (rsp) { scsi_status = rsp->scsi_status; scsi_status_qualifier = rsp->scsi_status_qualifier; sense_data = rsp->sense_data; sense_data_length = rsp->sense_data_length; residual = rsp->residual; } else { residual = io->exp_xfer_len - io->transferred; } io->wire_len = 0; io->hio_type = OCS_HW_IO_TARGET_RSP; io->scsi_tgt_cb = cb; io->scsi_tgt_cb_arg = arg; ocs_memset(&io->iparam, 0, sizeof(io->iparam)); io->iparam.fcp_tgt.ox_id = io->init_task_tag; io->iparam.fcp_tgt.offset = 0; io->iparam.fcp_tgt.cs_ctl = io->cs_ctl; io->iparam.fcp_tgt.timeout = io->timeout; /* Set low latency queueing request */ io->low_latency = (flags & OCS_SCSI_LOW_LATENCY) != 0; io->wq_steering = (flags & OCS_SCSI_WQ_STEERING_MASK) >> OCS_SCSI_WQ_STEERING_SHIFT; io->wq_class = (flags & OCS_SCSI_WQ_CLASS_MASK) >> OCS_SCSI_WQ_CLASS_SHIFT; if ((scsi_status != 0) || residual || sense_data_length) { fcp_rsp_iu_t *fcprsp = io->rspbuf.virt; if (!fcprsp) { ocs_log_err(ocs, "NULL response buffer\n"); return -1; } auto_resp = FALSE; ocs_memset(fcprsp, 0, sizeof(*fcprsp)); io->wire_len += (sizeof(*fcprsp) - sizeof(fcprsp->data)); fcprsp->scsi_status = scsi_status; *((uint16_t*)fcprsp->status_qualifier) = ocs_htobe16(scsi_status_qualifier); /* set residual status if necessary */ if (residual != 0) { /* FCP: if data transferred is less than the amount expected, then this is an * underflow. If data transferred would have been greater than the amount expected * then this is an overflow */ if (residual > 0) { fcprsp->flags |= FCP_RESID_UNDER; *((uint32_t *)fcprsp->fcp_resid) = ocs_htobe32(residual); } else { fcprsp->flags |= FCP_RESID_OVER; *((uint32_t *)fcprsp->fcp_resid) = ocs_htobe32(-residual); } } if (sense_data && sense_data_length) { ocs_assert(sense_data_length <= sizeof(fcprsp->data), -1); fcprsp->flags |= FCP_SNS_LEN_VALID; ocs_memcpy(fcprsp->data, sense_data, sense_data_length); *((uint32_t*)fcprsp->fcp_sns_len) = ocs_htobe32(sense_data_length); io->wire_len += sense_data_length; } io->sgl[0].addr = io->rspbuf.phys; io->sgl[0].dif_addr = 0; io->sgl[0].len = io->wire_len; io->sgl_count = 1; } if (auto_resp) { io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE; } return ocs_scsi_io_dispatch(io, ocs_target_io_cb); } /** * @ingroup scsi_api_base * @brief Send TMF response data. * * @par Description * This function is used by a target-server to send SCSI TMF response data to a remote * initiator node. * Upon completion, the callback function @c cb is invoked. The target-server will generally * clean up its IO context resources and call ocs_scsi_io_complete(). * * @param io Pointer to the IO context. * @param rspcode TMF response code. * @param addl_rsp_info Additional TMF response information (may be NULL for zero data). * @param cb Completion callback. * @param arg Application-specified completion callback argument. 
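 *
 * @par Example
 * A minimal sketch of completing a TMF from a hypothetical target-server
 * handler (my_tmf_done is a placeholder name, not part of this API):
 * @code
 * static int32_t my_tmf_done(ocs_io_t *io, ocs_scsi_io_status_e status,
 *                            uint32_t flags, void *arg)
 * {
 *         ocs_scsi_io_complete(io);
 *         return 0;
 * }
 *
 * ocs_scsi_send_tmf_resp(io, OCS_SCSI_TMF_FUNCTION_COMPLETE, NULL,
 *                        my_tmf_done, NULL);
 * @endcode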
* * @return Returns 0 on success, or a negative error code value on failure. */ int32_t ocs_scsi_send_tmf_resp(ocs_io_t *io, ocs_scsi_tmf_resp_e rspcode, uint8_t addl_rsp_info[3], ocs_scsi_io_cb_t cb, void *arg) { int32_t rc = -1; ocs_t *ocs = NULL; fcp_rsp_iu_t *fcprsp = NULL; fcp_rsp_info_t *rspinfo = NULL; uint8_t fcp_rspcode; ocs_assert(io, -1); ocs_assert(io->ocs, -1); ocs_assert(io->node, -1); ocs = io->ocs; io->wire_len = 0; ocs_scsi_convert_dif_info(ocs, NULL, &io->hw_dif); switch(rspcode) { case OCS_SCSI_TMF_FUNCTION_COMPLETE: fcp_rspcode = FCP_TMF_COMPLETE; break; case OCS_SCSI_TMF_FUNCTION_SUCCEEDED: case OCS_SCSI_TMF_FUNCTION_IO_NOT_FOUND: fcp_rspcode = FCP_TMF_SUCCEEDED; break; case OCS_SCSI_TMF_FUNCTION_REJECTED: fcp_rspcode = FCP_TMF_REJECTED; break; case OCS_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER: fcp_rspcode = FCP_TMF_INCORRECT_LUN; break; case OCS_SCSI_TMF_SERVICE_DELIVERY: fcp_rspcode = FCP_TMF_FAILED; break; default: fcp_rspcode = FCP_TMF_REJECTED; break; } io->hio_type = OCS_HW_IO_TARGET_RSP; io->scsi_tgt_cb = cb; io->scsi_tgt_cb_arg = arg; if (io->tmf_cmd == OCS_SCSI_TMF_ABORT_TASK) { rc = ocs_target_send_bls_resp(io, cb, arg); return rc; } /* populate the FCP TMF response */ fcprsp = io->rspbuf.virt; ocs_memset(fcprsp, 0, sizeof(*fcprsp)); fcprsp->flags |= FCP_RSP_LEN_VALID; rspinfo = (fcp_rsp_info_t*) fcprsp->data; if (addl_rsp_info != NULL) { ocs_memcpy(rspinfo->addl_rsp_info, addl_rsp_info, sizeof(rspinfo->addl_rsp_info)); } rspinfo->rsp_code = fcp_rspcode; io->wire_len = sizeof(*fcprsp) - sizeof(fcprsp->data) + sizeof(*rspinfo); *((uint32_t*)fcprsp->fcp_rsp_len) = ocs_htobe32(sizeof(*rspinfo)); io->sgl[0].addr = io->rspbuf.phys; io->sgl[0].dif_addr = 0; io->sgl[0].len = io->wire_len; io->sgl_count = 1; ocs_memset(&io->iparam, 0, sizeof(io->iparam)); io->iparam.fcp_tgt.ox_id = io->init_task_tag; io->iparam.fcp_tgt.offset = 0; io->iparam.fcp_tgt.cs_ctl = io->cs_ctl; io->iparam.fcp_tgt.timeout = io->timeout; rc = ocs_scsi_io_dispatch(io, ocs_target_io_cb); return rc; } /** * @brief Process target abort callback. * * @par Description * Accepts HW abort requests. * * @param hio HW IO context. * @param rnode Remote node. * @param length Length of response data. * @param status Completion status. * @param ext_status Extended completion status. * @param app Application-specified callback data. * * @return Returns 0 on success, or a negative error code value on failure. 
*/ static int32_t ocs_target_abort_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *app) { ocs_io_t *io = app; ocs_t *ocs; ocs_scsi_io_status_e scsi_status; ocs_assert(io, -1); ocs_assert(io->ocs, -1); ocs = io->ocs; if (io->abort_cb) { ocs_scsi_io_cb_t abort_cb = io->abort_cb; void *abort_cb_arg = io->abort_cb_arg; io->abort_cb = NULL; io->abort_cb_arg = NULL; switch (status) { case SLI4_FC_WCQE_STATUS_SUCCESS: scsi_status = OCS_SCSI_STATUS_GOOD; break; case SLI4_FC_WCQE_STATUS_LOCAL_REJECT: switch (ext_status) { case SLI4_FC_LOCAL_REJECT_NO_XRI: scsi_status = OCS_SCSI_STATUS_NO_IO; break; case SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS: scsi_status = OCS_SCSI_STATUS_ABORT_IN_PROGRESS; break; default: /* TODO: we have seen 0x15 (abort in progress) */ scsi_status = OCS_SCSI_STATUS_ERROR; break; } break; case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE: scsi_status = OCS_SCSI_STATUS_CHECK_RESPONSE; break; default: scsi_status = OCS_SCSI_STATUS_ERROR; break; } /* invoke callback */ abort_cb(io->io_to_abort, scsi_status, 0, abort_cb_arg); } ocs_assert(io != io->io_to_abort, -1); /* done with IO to abort */ ocs_ref_put(&io->io_to_abort->ref); /* ocs_ref_get(): ocs_scsi_tgt_abort_io() */ ocs_io_free(ocs, io); ocs_scsi_check_pending(ocs); return 0; } /** * @ingroup scsi_api_base * @brief Abort a target IO. * * @par Description * This routine is called from a SCSI target-server. It initiates an abort of a * previously-issued target data phase or response request. * * @param io IO context. * @param cb SCSI target server callback. * @param arg SCSI target server supplied callback argument. * * @return Returns 0 on success, or a non-zero value on failure. */ int32_t ocs_scsi_tgt_abort_io(ocs_io_t *io, ocs_scsi_io_cb_t cb, void *arg) { ocs_t *ocs; ocs_xport_t *xport; int32_t rc; ocs_io_t *abort_io = NULL; ocs_assert(io, -1); ocs_assert(io->node, -1); ocs_assert(io->ocs, -1); ocs = io->ocs; xport = ocs->xport; /* take a reference on IO being aborted */ if ((ocs_ref_get_unless_zero(&io->ref) == 0)) { /* command no longer active */ scsi_io_printf(io, "command no longer active\n"); return -1; } /* * allocate a new IO to send the abort request. Use ocs_io_alloc() directly, as * we need an IO object that will not fail allocation due to allocations being * disabled (in ocs_scsi_io_alloc()) */ abort_io = ocs_io_alloc(ocs); if (abort_io == NULL) { ocs_atomic_add_return(&xport->io_alloc_failed_count, 1); ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */ return -1; } /* Save the target server callback and argument */ ocs_assert(abort_io->hio == NULL, -1); /* set generic fields */ abort_io->cmd_tgt = TRUE; abort_io->node = io->node; /* set type and abort-specific fields */ abort_io->io_type = OCS_IO_TYPE_ABORT; abort_io->display_name = "tgt_abort"; abort_io->io_to_abort = io; abort_io->send_abts = FALSE; abort_io->abort_cb = cb; abort_io->abort_cb_arg = arg; /* now dispatch IO */ rc = ocs_scsi_io_dispatch_abort(abort_io, ocs_target_abort_cb); if (rc) { ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */ } return rc; } /** * @brief Process target BLS response callback. * * @par Description * Accepts HW abort requests. * * @param hio HW IO context. * @param rnode Remote node. * @param length Length of response data. * @param status Completion status. * @param ext_status Extended completion status. * @param app Application-specified callback data. * * @return Returns 0 on success, or a negative error code value on failure. 
*/ static int32_t ocs_target_bls_resp_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *app) { ocs_io_t *io = app; ocs_t *ocs; ocs_scsi_io_status_e bls_status; ocs_assert(io, -1); ocs_assert(io->ocs, -1); ocs = io->ocs; /* BLS isn't really a "SCSI" concept, but use SCSI status */ if (status) { io_error_log(io, "s=%#x x=%#x\n", status, ext_status); bls_status = OCS_SCSI_STATUS_ERROR; } else { bls_status = OCS_SCSI_STATUS_GOOD; } if (io->bls_cb) { ocs_scsi_io_cb_t bls_cb = io->bls_cb; void *bls_cb_arg = io->bls_cb_arg; io->bls_cb = NULL; io->bls_cb_arg = NULL; /* invoke callback */ bls_cb(io, bls_status, 0, bls_cb_arg); } ocs_scsi_check_pending(ocs); return 0; } /** * @brief Complete abort request. * * @par Description * An abort request is completed by posting a BA_ACC for the IO that requested the abort. * * @param io Pointer to the IO context. * @param cb Callback function to invoke upon completion. * @param arg Application-specified completion callback argument. * * @return Returns 0 on success, or a negative error code value on failure. */ static int32_t ocs_target_send_bls_resp(ocs_io_t *io, ocs_scsi_io_cb_t cb, void *arg) { int32_t rc; fc_ba_acc_payload_t *acc; ocs_assert(io, -1); /* fill out IO structure with everything needed to send BA_ACC */ ocs_memset(&io->iparam, 0, sizeof(io->iparam)); io->iparam.bls.ox_id = io->init_task_tag; io->iparam.bls.rx_id = io->abort_rx_id; acc = (void *)io->iparam.bls.payload; ocs_memset(io->iparam.bls.payload, 0, sizeof(io->iparam.bls.payload)); acc->ox_id = io->iparam.bls.ox_id; acc->rx_id = io->iparam.bls.rx_id; acc->high_seq_cnt = UINT16_MAX; /* generic io fields have already been populated */ /* set type and BLS-specific fields */ io->io_type = OCS_IO_TYPE_BLS_RESP; io->display_name = "bls_rsp"; io->hio_type = OCS_HW_BLS_ACC; io->bls_cb = cb; io->bls_cb_arg = arg; /* dispatch IO */ rc = ocs_scsi_io_dispatch(io, ocs_target_bls_resp_cb); return rc; } /** * @ingroup scsi_api_base * @brief Notify the base driver that the IO is complete. * * @par Description * This function is called by a target-server to notify the base driver that an IO * has completed, allowing for the base driver to free resources. * @n * @n @b Note: This function is not called by initiator-clients. * * @param io Pointer to IO context. * * @return None. */ void ocs_scsi_io_complete(ocs_io_t *io) { ocs_assert(io); if (!ocs_io_busy(io)) { ocs_log_test(io->ocs, "Got completion for non-busy io with tag 0x%x\n", io->tag); return; } scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name); ocs_assert(ocs_ref_read_count(&io->ref) > 0); ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_scsi_io_alloc() */ } /** * @brief Handle initiator IO completion. * * @par Description * This callback is made upon completion of an initiator operation (initiator read/write command). * * @param hio HW IO context. * @param rnode Remote node. * @param length Length of completion data. * @param status Completion status. * @param ext_status Extended completion status. * @param app Application-specified callback data. * * @return None. 
*/ static void ocs_initiator_io_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *app) { ocs_io_t *io = app; ocs_t *ocs; ocs_scsi_io_status_e scsi_status; ocs_assert(io); ocs_assert(io->scsi_ini_cb); scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status); ocs = io->ocs; ocs_assert(ocs); ocs_scsi_io_free_ovfl(io); /* Call target server completion */ if (io->scsi_ini_cb) { fcp_rsp_iu_t *fcprsp = io->rspbuf.virt; ocs_scsi_cmd_resp_t rsp; ocs_scsi_rsp_io_cb_t cb = io->scsi_ini_cb; uint32_t flags = 0; uint8_t *pd = fcprsp->data; /* Clear the callback before invoking the callback */ io->scsi_ini_cb = NULL; ocs_memset(&rsp, 0, sizeof(rsp)); /* Unless status is FCP_RSP_FAILURE, fcprsp is not filled in */ switch (status) { case SLI4_FC_WCQE_STATUS_SUCCESS: scsi_status = OCS_SCSI_STATUS_GOOD; break; case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE: scsi_status = OCS_SCSI_STATUS_CHECK_RESPONSE; rsp.scsi_status = fcprsp->scsi_status; rsp.scsi_status_qualifier = ocs_be16toh(*((uint16_t*)fcprsp->status_qualifier)); if (fcprsp->flags & FCP_RSP_LEN_VALID) { rsp.response_data = pd; rsp.response_data_length = ocs_fc_getbe32(fcprsp->fcp_rsp_len); pd += rsp.response_data_length; } if (fcprsp->flags & FCP_SNS_LEN_VALID) { uint32_t sns_len = ocs_fc_getbe32(fcprsp->fcp_sns_len); rsp.sense_data = pd; rsp.sense_data_length = sns_len; pd += sns_len; } /* Set residual */ if (fcprsp->flags & FCP_RESID_OVER) { rsp.residual = -ocs_fc_getbe32(fcprsp->fcp_resid); rsp.response_wire_length = length; } else if (fcprsp->flags & FCP_RESID_UNDER) { rsp.residual = ocs_fc_getbe32(fcprsp->fcp_resid); rsp.response_wire_length = length; } /* * Note: The FCP_RSP_FAILURE can be returned for initiator IOs when the total data * placed does not match the requested length even if the status is good. If * the status is all zeroes, then we have to assume that a frame(s) were * dropped and change the status to LOCAL_REJECT/OUT_OF_ORDER_DATA */ if (length != io->wire_len) { uint32_t rsp_len = ext_status; uint8_t *rsp_bytes = io->rspbuf.virt; uint32_t i; uint8_t all_zeroes = (rsp_len > 0); /* Check if the rsp is zero */ for (i = 0; i < rsp_len; i++) { if (rsp_bytes[i] != 0) { all_zeroes = FALSE; break; } } if (all_zeroes) { scsi_status = OCS_SCSI_STATUS_ERROR; ocs_log_test(io->ocs, "[%s]" SCSI_IOFMT "local reject=0x%02x\n", io->node->display_name, SCSI_IOFMT_ARGS(io), SLI4_FC_LOCAL_REJECT_OUT_OF_ORDER_DATA); } } break; case SLI4_FC_WCQE_STATUS_LOCAL_REJECT: if (ext_status == SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT) { scsi_status = OCS_SCSI_STATUS_COMMAND_TIMEOUT; } else { scsi_status = OCS_SCSI_STATUS_ERROR; } break; case SLI4_FC_WCQE_STATUS_DI_ERROR: if (ext_status & 0x01) { scsi_status = OCS_SCSI_STATUS_DIF_GUARD_ERROR; } else if (ext_status & 0x02) { scsi_status = OCS_SCSI_STATUS_DIF_APP_TAG_ERROR; } else if (ext_status & 0x04) { scsi_status = OCS_SCSI_STATUS_DIF_REF_TAG_ERROR; } else { scsi_status = OCS_SCSI_STATUS_DIF_UNKNOWN_ERROR; } break; default: scsi_status = OCS_SCSI_STATUS_ERROR; break; } cb(io, scsi_status, &rsp, flags, io->scsi_ini_cb_arg); } ocs_scsi_check_pending(ocs); } /** * @ingroup scsi_api_base * @brief Initiate initiator read IO. * * @par Description * This call is made by an initiator-client to send a SCSI read command. The payload * for the command is given by a scatter-gather list @c sgl for @c sgl_count * entries. * @n @n * Upon completion, the callback @b cb is invoked and passed request status. 
* If the command completed successfully, the callback is given SCSI response data. * * @param node Pointer to the node. * @param io Pointer to the IO context. * @param lun LUN value. * @param cdb Pointer to the CDB. * @param cdb_len Length of the CDB. * @param dif_info Pointer to the T10 DIF fields, or NULL if no DIF. * @param sgl Pointer to the scatter-gather list. * @param sgl_count Count of the scatter-gather list elements. * @param wire_len Length of the payload. * @param cb Completion callback. * @param arg Application-specified completion callback argument. * * @return Returns 0 on success, or a negative error code value on failure. */ int32_t ocs_scsi_send_rd_io(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len, ocs_scsi_dif_info_t *dif_info, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, - ocs_scsi_rsp_io_cb_t cb, void *arg) + ocs_scsi_rsp_io_cb_t cb, void *arg, uint32_t flags) { int32_t rc; rc = ocs_scsi_send_io(OCS_HW_IO_INITIATOR_READ, node, io, lun, 0, cdb, cdb_len, dif_info, sgl, sgl_count, - wire_len, 0, cb, arg); + wire_len, 0, cb, arg, flags); return rc; } /** * @ingroup scsi_api_base * @brief Initiate initiator write IO. * * @par Description * This call is made by an initiator-client to send a SCSI write command. The payload * for the command is given by a scatter-gather list @c sgl for @c sgl_count * entries. * @n @n * Upon completion, the callback @c cb is invoked and passed request status. If the command * completed successfully, the callback is given SCSI response data. * * @param node Pointer to the node. * @param io Pointer to IO context. * @param lun LUN value. * @param cdb Pointer to the CDB. * @param cdb_len Length of the CDB. * @param dif_info Pointer to the T10 DIF fields, or NULL if no DIF. * @param sgl Pointer to the scatter-gather list. * @param sgl_count Count of the scatter-gather list elements. * @param wire_len Length of the payload. * @param cb Completion callback. * @param arg Application-specified completion callback argument. * * @return Returns 0 on success, or a negative error code value on failure. */ int32_t ocs_scsi_send_wr_io(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len, ocs_scsi_dif_info_t *dif_info, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, - ocs_scsi_rsp_io_cb_t cb, void *arg) + ocs_scsi_rsp_io_cb_t cb, void *arg, uint32_t flags) { int32_t rc; rc = ocs_scsi_send_io(OCS_HW_IO_INITIATOR_WRITE, node, io, lun, 0, cdb, cdb_len, dif_info, sgl, sgl_count, - wire_len, 0, cb, arg); + wire_len, 0, cb, arg, flags); return rc; } /** * @ingroup scsi_api_base * @brief Initiate initiator write IO. * * @par Description * This call is made by an initiator-client to send a SCSI write command. The payload * for the command is given by a scatter-gather list @c sgl for @c sgl_count * entries. * @n @n * Upon completion, the callback @c cb is invoked and passed request status. If the command * completed successfully, the callback is given SCSI response data. * * @param node Pointer to the node. * @param io Pointer to IO context. * @param lun LUN value. * @param cdb Pointer to the CDB. * @param cdb_len Length of the CDB. * @param dif_info Pointer to the T10 DIF fields, or NULL if no DIF. * @param sgl Pointer to the scatter-gather list. * @param sgl_count Count of the scatter-gather list elements. * @param wire_len Length of the payload. * @param first_burst Number of first burst bytes to send. * @param cb Completion callback. 
* @param arg Application-specified completion callback argument. * * @return Returns 0 on success, or a negative error code value on failure. */ int32_t ocs_scsi_send_wr_io_first_burst(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len, ocs_scsi_dif_info_t *dif_info, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, uint32_t first_burst, - ocs_scsi_rsp_io_cb_t cb, void *arg) + ocs_scsi_rsp_io_cb_t cb, void *arg, uint32_t flags) { int32_t rc; rc = ocs_scsi_send_io(OCS_HW_IO_INITIATOR_WRITE, node, io, lun, 0, cdb, cdb_len, dif_info, sgl, sgl_count, - wire_len, 0, cb, arg); + wire_len, 0, cb, arg, flags); return rc; } /** * @ingroup scsi_api_base * @brief Initiate initiator SCSI command with no data. * * @par Description * This call is made by an initiator-client to send a SCSI command with no data. * @n @n * Upon completion, the callback @c cb is invoked and passed request status. If the command * completed successfully, the callback is given SCSI response data. * * @param node Pointer to the node. * @param io Pointer to the IO context. * @param lun LUN value. * @param cdb Pointer to the CDB. * @param cdb_len Length of the CDB. * @param cb Completion callback. * @param arg Application-specified completion callback argument. * * @return Returns 0 on success, or a negative error code value on failure. */ int32_t ocs_scsi_send_nodata_io(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len, - ocs_scsi_rsp_io_cb_t cb, void *arg) + ocs_scsi_rsp_io_cb_t cb, void *arg, uint32_t flags) { int32_t rc; - rc = ocs_scsi_send_io(OCS_HW_IO_INITIATOR_NODATA, node, io, lun, 0, cdb, cdb_len, NULL, NULL, 0, 0, 0, cb, arg); + rc = ocs_scsi_send_io(OCS_HW_IO_INITIATOR_NODATA, node, io, lun, 0, cdb, cdb_len, NULL, NULL, 0, 0, 0, cb, arg, flags); return rc; } /** * @ingroup scsi_api_base * @brief Initiate initiator task management operation. * * @par Description * This command is used to send a SCSI task management function command. If the command * requires it (QUERY_TASK_SET for example), a payload may be associated with the command. * If no payload is required, then @c sgl_count may be zero and @c sgl is ignored. * @n @n * Upon completion @c cb is invoked with status and SCSI response data. * * @param node Pointer to the node. * @param io Pointer to the IO context. * @param io_to_abort Pointer to the IO context to abort in the * case of OCS_SCSI_TMF_ABORT_TASK. Note: this can point to the * same the same ocs_io_t as @c io, provided that @c io does not * have any outstanding work requests. * @param lun LUN value. * @param tmf Task management command. * @param sgl Pointer to the scatter-gather list. * @param sgl_count Count of the scatter-gather list elements. * @param len Length of the payload. * @param cb Completion callback. * @param arg Application-specified completion callback argument. * * @return Returns 0 on success, or a negative error code value on failure. 
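 *
 * @par Example
 * A minimal sketch of issuing a LUN reset from a hypothetical
 * initiator-client (my_tmf_cb and my_arg are placeholders):
 * @code
 * rc = ocs_scsi_send_tmf(node, io, NULL, lun,
 *                        OCS_SCSI_TMF_LOGICAL_UNIT_RESET,
 *                        NULL, 0, 0, my_tmf_cb, my_arg);
 * @endcode
 * For OCS_SCSI_TMF_ABORT_TASK, @c io_to_abort must reference the IO being
 * aborted rather than being NULL.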
*/ int32_t ocs_scsi_send_tmf(ocs_node_t *node, ocs_io_t *io, ocs_io_t *io_to_abort, uint64_t lun, ocs_scsi_tmf_cmd_e tmf, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t len, ocs_scsi_rsp_io_cb_t cb, void *arg) { int32_t rc; ocs_assert(io, -1); if (tmf == OCS_SCSI_TMF_ABORT_TASK) { ocs_assert(io_to_abort, -1); /* take a reference on IO being aborted */ if ((ocs_ref_get_unless_zero(&io_to_abort->ref) == 0)) { /* command no longer active */ scsi_io_printf(io, "command no longer active\n"); return -1; } /* generic io fields have already been populated */ /* abort-specific fields */ io->io_type = OCS_IO_TYPE_ABORT; io->display_name = "abort_task"; io->io_to_abort = io_to_abort; io->send_abts = TRUE; io->scsi_ini_cb = cb; io->scsi_ini_cb_arg = arg; /* now dispatch IO */ rc = ocs_scsi_io_dispatch_abort(io, ocs_scsi_abort_io_cb); if (rc) { scsi_io_printf(io, "Failed to dispatch abort\n"); ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */ } } else { io->display_name = "tmf"; rc = ocs_scsi_send_io(OCS_HW_IO_INITIATOR_READ, node, io, lun, tmf, NULL, 0, NULL, - sgl, sgl_count, len, 0, cb, arg); + sgl, sgl_count, len, 0, cb, arg, 0); } return rc; } /** * @ingroup scsi_api_base * @brief Send an FCP IO. * * @par Description * An FCP read/write IO command, with optional task management flags, is sent to @c node. * * @param type HW IO type to send. * @param node Pointer to the node destination of the IO. * @param io Pointer to the IO context. * @param lun LUN value. * @param tmf Task management command. * @param cdb Pointer to the SCSI CDB. * @param cdb_len Length of the CDB, in bytes. * @param dif_info Pointer to the T10 DIF fields, or NULL if no DIF. * @param sgl Pointer to the scatter-gather list. * @param sgl_count Number of SGL entries in SGL. * @param wire_len Payload length, in bytes, of data on wire. * @param first_burst Number of first burst bytes to send. * @param cb Completion callback. * @param arg Application-specified completion callback argument. * * @return Returns 0 on success, or a negative error code value on failure. */ /* tc: could elminiate LUN, as it's part of the IO structure */ static int32_t ocs_scsi_send_io(ocs_hw_io_type_e type, ocs_node_t *node, ocs_io_t *io, uint64_t lun, ocs_scsi_tmf_cmd_e tmf, uint8_t *cdb, uint32_t cdb_len, ocs_scsi_dif_info_t *dif_info, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, uint32_t first_burst, - ocs_scsi_rsp_io_cb_t cb, void *arg) + ocs_scsi_rsp_io_cb_t cb, void *arg, uint32_t flags) { int32_t rc; ocs_t *ocs; fcp_cmnd_iu_t *cmnd; uint32_t cmnd_bytes = 0; uint32_t *fcp_dl; uint8_t tmf_flags = 0; ocs_assert(io->node, -1); ocs_assert(io->node == node, -1); ocs_assert(io, -1); ocs = io->ocs; ocs_assert(cb, -1); io->sgl_count = sgl_count; /* Copy SGL if needed */ if (sgl != io->sgl) { ocs_assert(sgl_count <= io->sgl_allocated, -1); ocs_memcpy(io->sgl, sgl, sizeof(*io->sgl) * sgl_count); } /* save initiator and target task tags for debugging */ io->tgt_task_tag = 0xffff; io->wire_len = wire_len; io->hio_type = type; if (OCS_LOG_ENABLE_SCSI_TRACE(ocs)) { char buf[80]; ocs_textbuf_t txtbuf; uint32_t i; ocs_textbuf_init(ocs, &txtbuf, buf, sizeof(buf)); ocs_textbuf_printf(&txtbuf, "cdb%d: ", cdb_len); for (i = 0; i < cdb_len; i ++) { ocs_textbuf_printf(&txtbuf, "%02X%s", cdb[i], (i == (cdb_len-1)) ? "" : " "); } scsi_io_printf(io, "%s len %d, %s\n", (io->hio_type == OCS_HW_IO_INITIATOR_READ) ? "read" : (io->hio_type == OCS_HW_IO_INITIATOR_WRITE) ? 
"write" : "", io->wire_len, ocs_textbuf_get_buffer(&txtbuf)); } ocs_assert(io->cmdbuf.virt, -1); cmnd = io->cmdbuf.virt; ocs_assert(sizeof(*cmnd) <= io->cmdbuf.size, -1); ocs_memset(cmnd, 0, sizeof(*cmnd)); /* Default FCP_CMND IU doesn't include additional CDB bytes but does include FCP_DL */ cmnd_bytes = sizeof(fcp_cmnd_iu_t) - sizeof(cmnd->fcp_cdb_and_dl) + sizeof(uint32_t); fcp_dl = (uint32_t*)(&(cmnd->fcp_cdb_and_dl)); if (cdb) { if (cdb_len <= 16) { ocs_memcpy(cmnd->fcp_cdb, cdb, cdb_len); } else { uint32_t addl_cdb_bytes; ocs_memcpy(cmnd->fcp_cdb, cdb, 16); addl_cdb_bytes = cdb_len - 16; ocs_memcpy(cmnd->fcp_cdb_and_dl, &(cdb[16]), addl_cdb_bytes); /* additional_fcp_cdb_length is in words, not bytes */ cmnd->additional_fcp_cdb_length = (addl_cdb_bytes + 3) / 4; fcp_dl += cmnd->additional_fcp_cdb_length; /* Round up additional CDB bytes */ cmnd_bytes += (addl_cdb_bytes + 3) & ~0x3; } } be64enc(cmnd->fcp_lun, CAM_EXTLUN_BYTE_SWIZZLE(lun)); if (node->fcp2device) { if(ocs_get_crn(node, &cmnd->command_reference_number, lun)) { return -1; } } + if (flags & OCS_SCSI_CMD_HEAD_OF_QUEUE) + cmnd->task_attribute = FCP_TASK_ATTR_HEAD_OF_QUEUE; + else if (flags & OCS_SCSI_CMD_ORDERED) + cmnd->task_attribute = FCP_TASK_ATTR_ORDERED; + else if (flags & OCS_SCSI_CMD_UNTAGGED) + cmnd->task_attribute = FCP_TASK_ATTR_UNTAGGED; + else if (flags & OCS_SCSI_CMD_ACA) + cmnd->task_attribute = FCP_TASK_ATTR_ACA; + else + cmnd->task_attribute = FCP_TASK_ATTR_SIMPLE; + cmnd->command_priority = (flags & OCS_SCSI_PRIORITY_MASK) >> + OCS_SCSI_PRIORITY_SHIFT; switch (tmf) { case OCS_SCSI_TMF_QUERY_TASK_SET: tmf_flags = FCP_QUERY_TASK_SET; break; case OCS_SCSI_TMF_ABORT_TASK_SET: tmf_flags = FCP_ABORT_TASK_SET; break; case OCS_SCSI_TMF_CLEAR_TASK_SET: tmf_flags = FCP_CLEAR_TASK_SET; break; case OCS_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT: tmf_flags = FCP_QUERY_ASYNCHRONOUS_EVENT; break; case OCS_SCSI_TMF_LOGICAL_UNIT_RESET: tmf_flags = FCP_LOGICAL_UNIT_RESET; break; case OCS_SCSI_TMF_CLEAR_ACA: tmf_flags = FCP_CLEAR_ACA; break; case OCS_SCSI_TMF_TARGET_RESET: tmf_flags = FCP_TARGET_RESET; break; default: tmf_flags = 0; } cmnd->task_management_flags = tmf_flags; *fcp_dl = ocs_htobe32(io->wire_len); switch (io->hio_type) { case OCS_HW_IO_INITIATOR_READ: cmnd->rddata = 1; break; case OCS_HW_IO_INITIATOR_WRITE: cmnd->wrdata = 1; break; case OCS_HW_IO_INITIATOR_NODATA: /* sets neither */ break; default: ocs_log_test(ocs, "bad IO type %d\n", io->hio_type); return -1; } rc = ocs_scsi_convert_dif_info(ocs, dif_info, &io->hw_dif); if (rc) { return rc; } io->scsi_ini_cb = cb; io->scsi_ini_cb_arg = arg; /* set command and response buffers in the iparam */ io->iparam.fcp_ini.cmnd = &io->cmdbuf; io->iparam.fcp_ini.cmnd_size = cmnd_bytes; io->iparam.fcp_ini.rsp = &io->rspbuf; io->iparam.fcp_ini.flags = 0; io->iparam.fcp_ini.dif_oper = io->hw_dif.dif; io->iparam.fcp_ini.blk_size = io->hw_dif.blk_size; io->iparam.fcp_ini.timeout = io->timeout; io->iparam.fcp_ini.first_burst = first_burst; return ocs_scsi_io_dispatch(io, ocs_initiator_io_cb); } /** * @ingroup scsi_api_base * @brief Callback for an aborted IO. * * @par Description * Callback function invoked upon completion of an IO abort request. * * @param hio HW IO context. * @param rnode Remote node. * @param len Response length. * @param status Completion status. * @param ext_status Extended completion status. * @param arg Application-specific callback, usually IO context. * @return Returns 0 on success, or a negative error code value on failure. 
*/ static int32_t ocs_scsi_abort_io_cb(struct ocs_hw_io_s *hio, ocs_remote_node_t *rnode, uint32_t len, int32_t status, uint32_t ext_status, void *arg) { ocs_io_t *io = arg; ocs_t *ocs; ocs_scsi_io_status_e scsi_status = OCS_SCSI_STATUS_GOOD; ocs_assert(io, -1); ocs_assert(ocs_io_busy(io), -1); ocs_assert(io->ocs, -1); ocs_assert(io->io_to_abort, -1); ocs = io->ocs; ocs_log_debug(ocs, "status %d ext %d\n", status, ext_status); /* done with IO to abort */ ocs_ref_put(&io->io_to_abort->ref); /* ocs_ref_get(): ocs_scsi_send_tmf() */ ocs_scsi_io_free_ovfl(io); switch (status) { case SLI4_FC_WCQE_STATUS_SUCCESS: scsi_status = OCS_SCSI_STATUS_GOOD; break; case SLI4_FC_WCQE_STATUS_LOCAL_REJECT: if (ext_status == SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED) { scsi_status = OCS_SCSI_STATUS_ABORTED; } else if (ext_status == SLI4_FC_LOCAL_REJECT_NO_XRI) { scsi_status = OCS_SCSI_STATUS_NO_IO; } else if (ext_status == SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS) { scsi_status = OCS_SCSI_STATUS_ABORT_IN_PROGRESS; } else { ocs_log_test(ocs, "Unhandled local reject 0x%x/0x%x\n", status, ext_status); scsi_status = OCS_SCSI_STATUS_ERROR; } break; default: scsi_status = OCS_SCSI_STATUS_ERROR; break; } if (io->scsi_ini_cb) { (*io->scsi_ini_cb)(io, scsi_status, NULL, 0, io->scsi_ini_cb_arg); } else { ocs_scsi_io_free(io); } ocs_scsi_check_pending(ocs); return 0; } /** * @ingroup scsi_api_base * @brief Return SCSI API integer valued property. * * @par Description * This function is called by a target-server or initiator-client to * retrieve an integer valued property. * * @param ocs Pointer to the ocs. * @param prop Property value to return. * * @return Returns a value, or 0 if invalid property was requested. */ uint32_t ocs_scsi_get_property(ocs_t *ocs, ocs_scsi_property_e prop) { ocs_xport_t *xport = ocs->xport; uint32_t val; switch (prop) { case OCS_SCSI_MAX_SGE: if (0 == ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &val)) { return val; } break; case OCS_SCSI_MAX_SGL: if (ocs->ctrlmask & OCS_CTRLMASK_TEST_CHAINED_SGLS) { /* * If chain SGL test-mode is enabled, the number of HW SGEs * has been limited; report back original max. */ return (OCS_FC_MAX_SGL); } if (0 == ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &val)) { return val; } break; case OCS_SCSI_MAX_IOS: return ocs_io_pool_allocated(xport->io_pool); case OCS_SCSI_DIF_CAPABLE: if (0 == ocs_hw_get(&ocs->hw, OCS_HW_DIF_CAPABLE, &val)) { return val; } break; case OCS_SCSI_MAX_FIRST_BURST: return 0; case OCS_SCSI_DIF_MULTI_SEPARATE: if (ocs_hw_get(&ocs->hw, OCS_HW_DIF_MULTI_SEPARATE, &val) == 0) { return val; } break; case OCS_SCSI_ENABLE_TASK_SET_FULL: /* Return FALSE if we are send frame capable */ if (ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &val) == 0) { return ! val; } break; default: break; } ocs_log_debug(ocs, "invalid property request %d\n", prop); return 0; } /** * @ingroup scsi_api_base * @brief Return a property pointer. * * @par Description * This function is called by a target-server or initiator-client to * retrieve a pointer to the requested property. * * @param ocs Pointer to the ocs. * @param prop Property value to return. * * @return Returns pointer to the requested property, or NULL otherwise. 
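 *
 * @par Example
 * Illustrative calls only:
 * @code
 * void *wwpn = ocs_scsi_get_property_ptr(ocs, OCS_SCSI_WWPN);
 * uint32_t max_ios = ocs_scsi_get_property(ocs, OCS_SCSI_MAX_IOS);
 * @endcode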
*/ void *ocs_scsi_get_property_ptr(ocs_t *ocs, ocs_scsi_property_e prop) { void *rc = NULL; switch (prop) { case OCS_SCSI_WWNN: rc = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_NODE); break; case OCS_SCSI_WWPN: rc = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_PORT); break; case OCS_SCSI_PORTNUM: rc = ocs_hw_get_ptr(&ocs->hw, OCS_HW_PORTNUM); break; case OCS_SCSI_BIOS_VERSION_STRING: rc = ocs_hw_get_ptr(&ocs->hw, OCS_HW_BIOS_VERSION_STRING); break; #if defined(OCS_ENABLE_VPD_SUPPORT) case OCS_SCSI_SERIALNUMBER: { uint8_t *pvpd; uint32_t vpd_len; if (ocs_hw_get(&ocs->hw, OCS_HW_VPD_LEN, &vpd_len)) { ocs_log_test(ocs, "Can't get VPD length\n"); rc = "\012sn-unknown"; break; } pvpd = ocs_hw_get_ptr(&ocs->hw, OCS_HW_VPD); if (pvpd) { rc = ocs_find_vpd(pvpd, vpd_len, "SN"); } if (rc == NULL || ocs_strlen(rc) == 0) { /* Note: VPD is missing, using wwnn for serial number */ scsi_log(ocs, "Note: VPD is missing, using wwnn for serial number\n"); /* Use the last 32 bits of the WWN */ if ((ocs == NULL) || (ocs->domain == NULL) || (ocs->domain->sport == NULL)) { rc = "\011(Unknown)"; } else { rc = &ocs->domain->sport->wwnn_str[8]; } } break; } case OCS_SCSI_PARTNUMBER: { uint8_t *pvpd; uint32_t vpd_len; if (ocs_hw_get(&ocs->hw, OCS_HW_VPD_LEN, &vpd_len)) { ocs_log_test(ocs, "Can't get VPD length\n"); rc = "\012pn-unknown"; break; } pvpd = ocs_hw_get_ptr(&ocs->hw, OCS_HW_VPD); if (pvpd) { rc = ocs_find_vpd(pvpd, vpd_len, "PN"); if (rc == NULL) { rc = "\012pn-unknown"; } } else { rc = "\012pn-unknown"; } break; } #endif default: break; } if (rc == NULL) { ocs_log_debug(ocs, "invalid property request %d\n", prop); } return rc; } /** * @ingroup scsi_api_base * @brief Notify that delete initiator is complete. * * @par Description * Sent by the target-server to notify the base driver that the work started from * ocs_scsi_del_initiator() is now complete and that it is safe for the node to * release the rest of its resources. * * @param node Pointer to the node. * * @return None. */ void ocs_scsi_del_initiator_complete(ocs_node_t *node) { /* Notify the node to resume */ ocs_node_post_event(node, OCS_EVT_NODE_DEL_INI_COMPLETE, NULL); } /** * @ingroup scsi_api_base * @brief Notify that delete target is complete. * * @par Description * Sent by the initiator-client to notify the base driver that the work started from * ocs_scsi_del_target() is now complete and that it is safe for the node to * release the rest of its resources. * * @param node Pointer to the node. * * @return None. */ void ocs_scsi_del_target_complete(ocs_node_t *node) { /* Notify the node to resume */ ocs_node_post_event(node, OCS_EVT_NODE_DEL_TGT_COMPLETE, NULL); } /** * @brief Update transferred count * * @par Description * Updates io->transferred, as required when using first burst, when the amount * of first burst data processed differs from the amount of first burst * data received. * * @param io Pointer to the io object. * @param transferred Number of bytes transferred out of first burst buffers. * * @return None. */ void ocs_scsi_update_first_burst_transferred(ocs_io_t *io, uint32_t transferred) { io->transferred = transferred; } /** * @brief Register bounce callback for multi-threading. * * @par Description * Register the back end bounce function. * * @param ocs Pointer to device object. * @param fctn Function pointer of bounce function. * * @return None. 
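 *
 * @par Example
 * Sketch of a matching bounce function (my_bounce is a placeholder). The
 * simplest form just runs the deferred work inline; a real back end would
 * typically queue it to a thread selected from the s_id/d_id/ox_id values:
 * @code
 * static void my_bounce(void (*work)(void *arg), void *arg,
 *                       uint32_t s_id, uint32_t d_id, uint32_t ox_id)
 * {
 *         work(arg);
 * }
 *
 * ocs_scsi_register_bounce(ocs, my_bounce);
 * @endcode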
*/ void ocs_scsi_register_bounce(ocs_t *ocs, void(*fctn)(void(*fctn)(void *arg), void *arg, uint32_t s_id, uint32_t d_id, uint32_t ox_id)) { ocs_hw_rtn_e rc; rc = ocs_hw_callback(&ocs->hw, OCS_HW_CB_BOUNCE, fctn, NULL); if (rc) { ocs_log_test(ocs, "ocs_hw_callback(OCS_HW_CB_BOUNCE) failed: %d\n", rc); } } Index: head/sys/dev/ocs_fc/ocs_scsi.h =================================================================== --- head/sys/dev/ocs_fc/ocs_scsi.h (revision 367043) +++ head/sys/dev/ocs_fc/ocs_scsi.h (revision 367044) @@ -1,447 +1,450 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * OCS SCSI API declarations * */ #if !defined(__OCS_SCSI_H__) #define __OCS_SCSI_H__ #include "ocs_ddump.h" #include "ocs_mgmt.h" #include "ocs_utils.h" /* ocs_scsi_rcv_cmd() ocs_scsi_rcv_tmf() flags */ #define OCS_SCSI_CMD_DIR_IN (1U << 0) #define OCS_SCSI_CMD_DIR_OUT (1U << 1) #define OCS_SCSI_CMD_SIMPLE (1U << 2) #define OCS_SCSI_CMD_HEAD_OF_QUEUE (1U << 3) #define OCS_SCSI_CMD_ORDERED (1U << 4) #define OCS_SCSI_CMD_UNTAGGED (1U << 5) #define OCS_SCSI_CMD_ACA (1U << 6) #define OCS_SCSI_FIRST_BURST_ERR (1U << 7) #define OCS_SCSI_FIRST_BURST_ABORTED (1U << 8) +#define OCS_SCSI_PRIORITY_MASK 0xf0000 +#define OCS_SCSI_PRIORITY_SHIFT 16 /* ocs_scsi_send_rd_data/recv_wr_data/send_resp flags */ #define OCS_SCSI_LAST_DATAPHASE (1U << 0) #define OCS_SCSI_NO_AUTO_RESPONSE (1U << 1) #define OCS_SCSI_LOW_LATENCY (1U << 2) #define OCS_SCSI_WQ_STEERING_SHIFT (16) #define OCS_SCSI_WQ_STEERING_MASK (0xf << OCS_SCSI_WQ_STEERING_SHIFT) #define OCS_SCSI_WQ_STEERING_CLASS (0 << OCS_SCSI_WQ_STEERING_SHIFT) #define OCS_SCSI_WQ_STEERING_REQUEST (1 << OCS_SCSI_WQ_STEERING_SHIFT) #define OCS_SCSI_WQ_STEERING_CPU (2 << OCS_SCSI_WQ_STEERING_SHIFT) #define OCS_SCSI_WQ_CLASS_SHIFT (20) #define OCS_SCSI_WQ_CLASS_MASK (0xf << OCS_SCSI_WQ_CLASS_SHIFT) #define OCS_SCSI_WQ_CLASS(x) ((x & OCS_SCSI_WQ_CLASS_MASK) << OCS_SCSI_WQ_CLASS_SHIFT) #define OCS_SCSI_WQ_CLASS_LOW_LATENCY (1) /*! 
* @defgroup scsi_api_base SCSI Base Target/Initiator * @defgroup scsi_api_target SCSI Target * @defgroup scsi_api_initiator SCSI Initiator */ /** * @brief SCSI command response. * * This structure is used by target-servers to specify SCSI status and * sense data. In this case all but the @b residual element are used. For * initiator-clients, this structure is used by the SCSI API to convey the * response data for issued commands, including the residual element. */ typedef struct { uint8_t scsi_status; /**< SCSI status */ uint16_t scsi_status_qualifier; /**< SCSI status qualifier */ uint8_t *response_data; /**< pointer to response data buffer */ uint32_t response_data_length; /**< length of response data buffer (bytes) */ uint8_t *sense_data; /**< pointer to sense data buffer */ uint32_t sense_data_length; /**< length of sense data buffer (bytes) */ int32_t residual; /**< command residual (not used for target), positive value * indicates an underflow, negative value indicates overflow */ uint32_t response_wire_length; /**< Command response length received in wcqe */ } ocs_scsi_cmd_resp_t; /* Status values returned by IO callbacks */ typedef enum { OCS_SCSI_STATUS_GOOD = 0, OCS_SCSI_STATUS_ABORTED, OCS_SCSI_STATUS_ERROR, OCS_SCSI_STATUS_DIF_GUARD_ERROR, OCS_SCSI_STATUS_DIF_REF_TAG_ERROR, OCS_SCSI_STATUS_DIF_APP_TAG_ERROR, OCS_SCSI_STATUS_DIF_UNKNOWN_ERROR, OCS_SCSI_STATUS_PROTOCOL_CRC_ERROR, OCS_SCSI_STATUS_NO_IO, OCS_SCSI_STATUS_ABORT_IN_PROGRESS, OCS_SCSI_STATUS_CHECK_RESPONSE, OCS_SCSI_STATUS_COMMAND_TIMEOUT, OCS_SCSI_STATUS_TIMEDOUT_AND_ABORTED, OCS_SCSI_STATUS_SHUTDOWN, OCS_SCSI_STATUS_NEXUS_LOST, } ocs_scsi_io_status_e; /* SCSI command status */ #define SCSI_STATUS_GOOD 0x00 #define SCSI_STATUS_CHECK_CONDITION 0x02 #define SCSI_STATUS_CONDITION_MET 0x04 #define SCSI_STATUS_BUSY 0x08 #define SCSI_STATUS_RESERVATION_CONFLICT 0x18 #define SCSI_STATUS_TASK_SET_FULL 0x28 #define SCSI_STATUS_ACA_ACTIVE 0x30 #define SCSI_STATUS_TASK_ABORTED 0x40 /* Callback used by send_rd_data(), recv_wr_data(), send_resp() */ typedef int32_t (*ocs_scsi_io_cb_t)(ocs_io_t *io, ocs_scsi_io_status_e status, uint32_t flags, void *arg); /* Callback used by send_rd_io(), send_wr_io() */ typedef int32_t (*ocs_scsi_rsp_io_cb_t)(ocs_io_t *io, ocs_scsi_io_status_e status, ocs_scsi_cmd_resp_t *rsp, uint32_t flags, void *arg); /* ocs_scsi_cb_t flags */ #define OCS_SCSI_IO_CMPL (1U << 0) /* IO completed */ #define OCS_SCSI_IO_CMPL_RSP_SENT (1U << 1) /* IO completed, response sent */ #define OCS_SCSI_IO_ABORTED (1U << 2) /* IO was aborted */ /* ocs_scsi_recv_tmf() request values */ typedef enum { OCS_SCSI_TMF_ABORT_TASK = 1, OCS_SCSI_TMF_QUERY_TASK_SET, OCS_SCSI_TMF_ABORT_TASK_SET, OCS_SCSI_TMF_CLEAR_TASK_SET, OCS_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT, OCS_SCSI_TMF_LOGICAL_UNIT_RESET, OCS_SCSI_TMF_CLEAR_ACA, OCS_SCSI_TMF_TARGET_RESET, } ocs_scsi_tmf_cmd_e; /* ocs_scsi_send_tmf_resp() response values */ typedef enum { OCS_SCSI_TMF_FUNCTION_COMPLETE = 1, OCS_SCSI_TMF_FUNCTION_SUCCEEDED, OCS_SCSI_TMF_FUNCTION_IO_NOT_FOUND, OCS_SCSI_TMF_FUNCTION_REJECTED, OCS_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER, OCS_SCSI_TMF_SERVICE_DELIVERY, } ocs_scsi_tmf_resp_e; /** * @brief property names for ocs_scsi_get_property() functions */ typedef enum { OCS_SCSI_MAX_SGE, OCS_SCSI_MAX_SGL, OCS_SCSI_WWNN, OCS_SCSI_WWPN, OCS_SCSI_SERIALNUMBER, OCS_SCSI_PARTNUMBER, OCS_SCSI_PORTNUM, OCS_SCSI_BIOS_VERSION_STRING, OCS_SCSI_MAX_IOS, OCS_SCSI_DIF_CAPABLE, OCS_SCSI_DIF_MULTI_SEPARATE, OCS_SCSI_MAX_FIRST_BURST, OCS_SCSI_ENABLE_TASK_SET_FULL, } 
ocs_scsi_property_e; #define DIF_SIZE 8 /** * @brief T10 DIF operations * * WARNING: do not reorder or insert to this list without making appropriate changes in ocs_dif.c */ typedef enum { OCS_SCSI_DIF_OPER_DISABLED, OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CRC, OCS_SCSI_DIF_OPER_IN_CRC_OUT_NODIF, OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CHKSUM, OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_NODIF, OCS_SCSI_DIF_OPER_IN_CRC_OUT_CRC, OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CHKSUM, OCS_SCSI_DIF_OPER_IN_CRC_OUT_CHKSUM, OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CRC, OCS_SCSI_DIF_OPER_IN_RAW_OUT_RAW, } ocs_scsi_dif_oper_e; #define OCS_SCSI_DIF_OPER_PASS_THRU OCS_SCSI_DIF_OPER_IN_CRC_OUT_CRC #define OCS_SCSI_DIF_OPER_STRIP OCS_SCSI_DIF_OPER_IN_CRC_OUT_NODIF #define OCS_SCSI_DIF_OPER_INSERT OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CRC /** * @brief T10 DIF block sizes */ typedef enum { OCS_SCSI_DIF_BK_SIZE_512, OCS_SCSI_DIF_BK_SIZE_1024, OCS_SCSI_DIF_BK_SIZE_2048, OCS_SCSI_DIF_BK_SIZE_4096, OCS_SCSI_DIF_BK_SIZE_520, OCS_SCSI_DIF_BK_SIZE_4104 } ocs_scsi_dif_blk_size_e; /** * @brief generic scatter-gather list structure */ typedef struct ocs_scsi_sgl_s { uintptr_t addr; /**< physical address */ uintptr_t dif_addr; /**< address of DIF segment, zero if DIF is interleaved */ size_t len; /**< length */ } ocs_scsi_sgl_t; /** * @brief T10 DIF information passed to the transport */ typedef struct ocs_scsi_dif_info_s { ocs_scsi_dif_oper_e dif_oper; ocs_scsi_dif_blk_size_e blk_size; uint32_t ref_tag; uint32_t app_tag:16, check_ref_tag:1, check_app_tag:1, check_guard:1, dif_separate:1, /* If the APP TAG is 0xFFFF, disable checking the REF TAG and CRC fields */ disable_app_ffff:1, /* if the APP TAG is 0xFFFF and REF TAG is 0xFFFF_FFFF, disable checking the received CRC field. */ disable_app_ref_ffff:1, :10; uint64_t lba; } ocs_scsi_dif_info_t; /* Return values for calls from base driver to target-server/initiator-client */ #define OCS_SCSI_CALL_COMPLETE 0 /* All work is done */ #define OCS_SCSI_CALL_ASYNC 1 /* Work will be completed asynchronously */ /* Calls from target/initiator to base driver */ typedef enum { OCS_SCSI_IO_ROLE_ORIGINATOR, OCS_SCSI_IO_ROLE_RESPONDER, } ocs_scsi_io_role_e; extern void ocs_scsi_io_alloc_enable(ocs_node_t *node); extern void ocs_scsi_io_alloc_disable(ocs_node_t *node); extern ocs_io_t *ocs_scsi_io_alloc(ocs_node_t *node, ocs_scsi_io_role_e role); extern void ocs_scsi_io_free(ocs_io_t *io); extern ocs_io_t *ocs_io_get_instance(ocs_t *ocs, uint32_t index); extern void ocs_scsi_register_bounce(ocs_t *ocs, void(*fctn)(void(*fctn)(void *arg), void *arg, uint32_t s_id, uint32_t d_id, uint32_t ox_id)); /* Calls from base driver to target-server */ extern int32_t ocs_scsi_tgt_driver_init(void); extern int32_t ocs_scsi_tgt_driver_exit(void); extern int32_t ocs_scsi_tgt_io_init(ocs_io_t *io); extern int32_t ocs_scsi_tgt_io_exit(ocs_io_t *io); extern int32_t ocs_scsi_tgt_new_device(ocs_t *ocs); extern int32_t ocs_scsi_tgt_del_device(ocs_t *ocs); extern int32_t ocs_scsi_tgt_new_domain(ocs_domain_t *domain); extern void ocs_scsi_tgt_del_domain(ocs_domain_t *domain); extern int32_t ocs_scsi_tgt_new_sport(ocs_sport_t *sport); extern void ocs_scsi_tgt_del_sport(ocs_sport_t *sport); extern void ocs_scsi_sport_deleted(ocs_sport_t *sport); extern int32_t ocs_scsi_validate_initiator(ocs_node_t *node); extern int32_t ocs_scsi_new_initiator(ocs_node_t *node); typedef enum { OCS_SCSI_INITIATOR_DELETED, OCS_SCSI_INITIATOR_MISSING, } ocs_scsi_del_initiator_reason_e; extern int32_t ocs_scsi_del_initiator(ocs_node_t *node, ocs_scsi_del_initiator_reason_e reason); 
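/*
 * Illustrative note: with the flags argument added to the initiator-side
 * ocs_scsi_send_*_io() calls declared below, a caller may combine one of the
 * OCS_SCSI_CMD_* task-attribute bits with a command priority encoded through
 * OCS_SCSI_PRIORITY_SHIFT/OCS_SCSI_PRIORITY_MASK, for example:
 *
 *	uint32_t flags = OCS_SCSI_CMD_ORDERED |
 *	    ((2 << OCS_SCSI_PRIORITY_SHIFT) & OCS_SCSI_PRIORITY_MASK);
 *
 *	ocs_scsi_send_rd_io(node, io, lun, cdb, cdb_len, NULL,
 *	    sgl, sgl_count, wire_len, cb, arg, flags);
 */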
extern int32_t ocs_scsi_recv_cmd(ocs_io_t *io, uint64_t lun, uint8_t *cdb, uint32_t cdb_len, uint32_t flags); extern int32_t ocs_scsi_recv_cmd_first_burst(ocs_io_t *io, uint64_t lun, uint8_t *cdb, uint32_t cdb_len, uint32_t flags, ocs_dma_t first_burst_buffers[], uint32_t first_burst_bytes); extern int32_t ocs_scsi_recv_tmf(ocs_io_t *tmfio, uint64_t lun, ocs_scsi_tmf_cmd_e cmd, ocs_io_t *abortio, uint32_t flags); extern ocs_sport_t *ocs_sport_get_instance(ocs_domain_t *domain, uint32_t index); extern ocs_domain_t *ocs_domain_get_instance(ocs_t *ocs, uint32_t index); /* Calls from target-server to base driver */ extern int32_t ocs_scsi_send_rd_data(ocs_io_t *io, uint32_t flags, ocs_scsi_dif_info_t *dif_info, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, ocs_scsi_io_cb_t cb, void *arg); extern int32_t ocs_scsi_recv_wr_data(ocs_io_t *io, uint32_t flags, ocs_scsi_dif_info_t *dif_info, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, ocs_scsi_io_cb_t cb, void *arg); extern int32_t ocs_scsi_send_resp(ocs_io_t *io, uint32_t flags, ocs_scsi_cmd_resp_t *rsp, ocs_scsi_io_cb_t cb, void *arg); extern int32_t ocs_scsi_send_tmf_resp(ocs_io_t *io, ocs_scsi_tmf_resp_e rspcode, uint8_t addl_rsp_info[3], ocs_scsi_io_cb_t cb, void *arg); extern int32_t ocs_scsi_tgt_abort_io(ocs_io_t *io, ocs_scsi_io_cb_t cb, void *arg); extern void ocs_scsi_io_complete(ocs_io_t *io); extern uint32_t ocs_scsi_get_property(ocs_t *ocs, ocs_scsi_property_e prop); extern void *ocs_scsi_get_property_ptr(ocs_t *ocs, ocs_scsi_property_e prop); extern void ocs_scsi_del_initiator_complete(ocs_node_t *node); extern void ocs_scsi_update_first_burst_transferred(ocs_io_t *io, uint32_t transferred); /* Calls from base driver to initiator-client */ extern int32_t ocs_scsi_ini_driver_init(void); extern int32_t ocs_scsi_ini_driver_exit(void); extern int32_t ocs_scsi_ini_io_init(ocs_io_t *io); extern int32_t ocs_scsi_ini_io_exit(ocs_io_t *io); extern int32_t ocs_scsi_ini_new_device(ocs_t *ocs); extern int32_t ocs_scsi_ini_del_device(ocs_t *ocs); extern int32_t ocs_scsi_ini_new_domain(ocs_domain_t *domain); extern void ocs_scsi_ini_del_domain(ocs_domain_t *domain); extern int32_t ocs_scsi_ini_new_sport(ocs_sport_t *sport); extern void ocs_scsi_ini_del_sport(ocs_sport_t *sport); extern int32_t ocs_scsi_new_target(ocs_node_t *node); typedef enum { OCS_SCSI_TARGET_DELETED, OCS_SCSI_TARGET_MISSING, } ocs_scsi_del_target_reason_e; extern int32_t ocs_scsi_del_target(ocs_node_t *node, ocs_scsi_del_target_reason_e reason); /* Calls from the initiator-client to the base driver */ extern int32_t ocs_scsi_send_rd_io(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len, ocs_scsi_dif_info_t *dif_info, - ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, ocs_scsi_rsp_io_cb_t cb, void *arg); + ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, ocs_scsi_rsp_io_cb_t cb, void *arg, uint32_t flags); extern int32_t ocs_scsi_send_wr_io(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len, ocs_scsi_dif_info_t *dif_info, - ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, ocs_scsi_rsp_io_cb_t cb, void *arg); + ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, ocs_scsi_rsp_io_cb_t cb, void *arg, uint32_t flags); extern int32_t ocs_scsi_send_wr_io_first_burst(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len, ocs_scsi_dif_info_t *dif_info, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, uint32_t first_burst, - ocs_scsi_rsp_io_cb_t 
cb, void *arg); + ocs_scsi_rsp_io_cb_t cb, void *arg, uint32_t flags); extern int32_t ocs_scsi_send_tmf(ocs_node_t *node, ocs_io_t *io, ocs_io_t *io_to_abort, uint64_t lun, ocs_scsi_tmf_cmd_e tmf, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t len, ocs_scsi_rsp_io_cb_t cb, void *arg); -extern int32_t ocs_scsi_send_nodata_io(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len, ocs_scsi_rsp_io_cb_t cb, void *arg); +extern int32_t ocs_scsi_send_nodata_io(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len, + ocs_scsi_rsp_io_cb_t cb, void *arg, uint32_t flags); extern void ocs_scsi_del_target_complete(ocs_node_t *node); typedef enum { OCS_SCSI_DDUMP_DEVICE, OCS_SCSI_DDUMP_DOMAIN, OCS_SCSI_DDUMP_SPORT, OCS_SCSI_DDUMP_NODE, OCS_SCSI_DDUMP_IO, } ocs_scsi_ddump_type_e; /* base driver to target/initiator */ struct ocs_scsi_vaddr_len_s { void *vaddr; uint32_t length; } ; extern int32_t ocs_scsi_get_block_vaddr(ocs_io_t *io, uint64_t blocknumber, ocs_scsi_vaddr_len_t addrlen[], uint32_t max_addrlen, void **dif_vaddr); extern void ocs_scsi_ini_ddump(ocs_textbuf_t *textbuf, ocs_scsi_ddump_type_e type, void *obj); extern void ocs_scsi_tgt_ddump(ocs_textbuf_t *textbuf, ocs_scsi_ddump_type_e type, void *obj); /* Calls within base driver */ extern int32_t ocs_scsi_io_dispatch(ocs_io_t *io, void *cb); extern int32_t ocs_scsi_io_dispatch_abort(ocs_io_t *io, void *cb); extern void ocs_scsi_check_pending(ocs_t *ocs); extern uint32_t ocs_scsi_dif_blocksize(ocs_scsi_dif_info_t *dif_info); extern int32_t ocs_scsi_dif_set_blocksize(ocs_scsi_dif_info_t *dif_info, uint32_t blocksize); extern int32_t ocs_scsi_dif_mem_blocksize(ocs_scsi_dif_info_t *dif_info, int wiretomem); extern int32_t ocs_scsi_dif_wire_blocksize(ocs_scsi_dif_info_t *dif_info, int wiretomem); uint32_t ocs_get_crn(ocs_node_t *node, uint8_t *crn, uint64_t lun); void ocs_del_crn(ocs_node_t *node); void ocs_reset_crn(ocs_node_t *node, uint64_t lun); /** * @brief Notification from base driver that domain is in force-free path. * * @par Description Domain is forcefully going away. Cleanup any resources associated with it. * * @param domain Pointer to domain being free'd. * * @return None. */ static inline void ocs_scsi_notify_domain_force_free(ocs_domain_t *domain) { /* Nothing to do */ return; } /** * @brief Notification from base driver that sport is in force-free path. * * @par Description Sport is forcefully going away. Cleanup any resources associated with it. * * @param sport Pointer to sport being free'd. * * @return None. */ static inline void ocs_scsi_notify_sport_force_free(ocs_sport_t *sport) { /* Nothing to do */ return; } /** * @brief Notification from base driver that node is in force-free path. * * @par Description Node is forcefully going away. Cleanup any resources associated with it. * * @param node Pointer to node being free'd. * * @return None. */ static inline void ocs_scsi_notify_node_force_free(ocs_node_t *node) { /* Nothing to do */ return; } #endif /* __OCS_SCSI_H__ */ Index: head/sys/dev/ocs_fc/ocs_unsol.c =================================================================== --- head/sys/dev/ocs_fc/ocs_unsol.c (revision 367043) +++ head/sys/dev/ocs_fc/ocs_unsol.c (revision 367044) @@ -1,1395 +1,1396 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. 
Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * Code to handle unsolicited received FC frames. */ /*! * @defgroup unsol Unsolicited Frame Handling */ #include "ocs.h" #include "ocs_els.h" #include "ocs_fabric.h" #include "ocs_device.h" #define frame_printf(ocs, hdr, fmt, ...) \ do { \ char s_id_text[16]; \ ocs_node_fcid_display(fc_be24toh((hdr)->s_id), s_id_text, sizeof(s_id_text)); \ ocs_log_debug(ocs, "[%06x.%s] %02x/%04x/%04x: " fmt, fc_be24toh((hdr)->d_id), s_id_text, \ (hdr)->r_ctl, ocs_be16toh((hdr)->ox_id), ocs_be16toh((hdr)->rx_id), ##__VA_ARGS__); \ } while(0) static int32_t ocs_unsol_process(ocs_t *ocs, ocs_hw_sequence_t *seq); static int32_t ocs_dispatch_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq); static int32_t ocs_dispatch_fcp_cmd_auto_xfer_rdy(ocs_node_t *node, ocs_hw_sequence_t *seq); static int32_t ocs_dispatch_fcp_data(ocs_node_t *node, ocs_hw_sequence_t *seq); static int32_t ocs_domain_dispatch_frame(void *arg, ocs_hw_sequence_t *seq); static int32_t ocs_node_dispatch_frame(void *arg, ocs_hw_sequence_t *seq); static int32_t ocs_fc_tmf_rejected_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg); static ocs_hw_sequence_t *ocs_frame_next(ocs_list_t *pend_list, ocs_lock_t *list_lock); static uint8_t ocs_node_frames_held(void *arg); static uint8_t ocs_domain_frames_held(void *arg); static int32_t ocs_purge_pending(ocs_t *ocs, ocs_list_t *pend_list, ocs_lock_t *list_lock); static int32_t ocs_sframe_send_task_set_full_or_busy(ocs_node_t *node, ocs_hw_sequence_t *seq); #define OCS_MAX_FRAMES_BEFORE_YEILDING 10000 /** * @brief Process the RQ circular buffer and process the incoming frames. * * @param mythread Pointer to thread object. * * @return Returns 0 on success, or a non-zero value on failure. 
*/ int32_t ocs_unsol_rq_thread(ocs_thread_t *mythread) { ocs_xport_rq_thread_info_t *thread_data = mythread->arg; ocs_t *ocs = thread_data->ocs; ocs_hw_sequence_t *seq; uint32_t yield_count = OCS_MAX_FRAMES_BEFORE_YEILDING; ocs_log_debug(ocs, "%s running\n", mythread->name); while (!ocs_thread_terminate_requested(mythread)) { seq = ocs_cbuf_get(thread_data->seq_cbuf, 100000); if (seq == NULL) { /* Prevent soft lockups by yielding the CPU */ ocs_thread_yield(&thread_data->thread); yield_count = OCS_MAX_FRAMES_BEFORE_YEILDING; continue; } /* Note: Always returns 0 */ ocs_unsol_process((ocs_t*)seq->hw->os, seq); /* We have to prevent CPU soft lockups, so just yield the CPU after x frames. */ if (--yield_count == 0) { ocs_thread_yield(&thread_data->thread); yield_count = OCS_MAX_FRAMES_BEFORE_YEILDING; } } ocs_log_debug(ocs, "%s exiting\n", mythread->name); thread_data->thread_started = FALSE; return 0; } /** * @ingroup unsol * @brief Callback function when aborting a port owned XRI * exchanges. * * @return Returns 0. */ static int32_t ocs_unsol_abort_cb (ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t len, int32_t status, uint32_t ext, void *arg) { ocs_t *ocs = arg; ocs_assert(hio, -1); ocs_assert(arg, -1); ocs_log_debug(ocs, "xri=0x%x tag=0x%x\n", hio->indicator, hio->reqtag); ocs_hw_io_free(&ocs->hw, hio); return 0; } /** * @ingroup unsol * @brief Abort either a RQ Pair auto XFER RDY XRI. * @return Returns None. */ static void ocs_port_owned_abort(ocs_t *ocs, ocs_hw_io_t *hio) { ocs_hw_rtn_e hw_rc; hw_rc = ocs_hw_io_abort(&ocs->hw, hio, FALSE, ocs_unsol_abort_cb, ocs); if((hw_rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) || (hw_rc == OCS_HW_RTN_IO_PORT_OWNED_ALREADY_ABORTED)) { ocs_log_debug(ocs, "already aborted XRI 0x%x\n", hio->indicator); } else if(hw_rc != OCS_HW_RTN_SUCCESS) { ocs_log_debug(ocs, "Error aborting XRI 0x%x status %d\n", hio->indicator, hw_rc); } } /** * @ingroup unsol * @brief Handle unsolicited FC frames. * *
 * @par Description
* This function is called from the HW with unsolicited FC frames (FCP, ELS, BLS, etc.). * * @param arg Application-specified callback data. * @param seq Header/payload sequence buffers. * * @return Returns 0 on success; or a negative error value on failure. */ int32_t ocs_unsolicited_cb(void *arg, ocs_hw_sequence_t *seq) { ocs_t *ocs = arg; ocs_xport_t *xport = ocs->xport; int32_t rc; CPUTRACE(""); if (ocs->rq_threads == 0) { rc = ocs_unsol_process(ocs, seq); } else { /* use the ox_id to dispatch this IO to a thread */ fc_header_t *hdr = seq->header->dma.virt; uint32_t ox_id = ocs_be16toh(hdr->ox_id); uint32_t thr_index = ox_id % ocs->rq_threads; rc = ocs_cbuf_put(xport->rq_thread_info[thr_index].seq_cbuf, seq); } if (rc) { ocs_hw_sequence_free(&ocs->hw, seq); } return 0; } /** * @ingroup unsol * @brief Handle unsolicited FC frames. * *
 * @par Description
* This function is called either from ocs_unsolicited_cb() or ocs_unsol_rq_thread(). * * @param ocs Pointer to the ocs structure. * @param seq Header/payload sequence buffers. * * @return Returns 0 on success, or a negative error value on failure. */ static int32_t ocs_unsol_process(ocs_t *ocs, ocs_hw_sequence_t *seq) { ocs_xport_fcfi_t *xport_fcfi = NULL; ocs_domain_t *domain; uint8_t seq_fcfi = seq->fcfi; /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */ if (ocs->hw.workaround.override_fcfi) { if (ocs->hw.first_domain_idx > -1) { seq_fcfi = ocs->hw.first_domain_idx; } } /* Range check seq->fcfi */ if (seq_fcfi < ARRAY_SIZE(ocs->xport->fcfi)) { xport_fcfi = &ocs->xport->fcfi[seq_fcfi]; } /* If the transport FCFI entry is NULL, then drop the frame */ if (xport_fcfi == NULL) { ocs_log_test(ocs, "FCFI %d is not valid, dropping frame\n", seq->fcfi); if (seq->hio != NULL) { ocs_port_owned_abort(ocs, seq->hio); } ocs_hw_sequence_free(&ocs->hw, seq); return 0; } domain = ocs_hw_domain_get(&ocs->hw, seq_fcfi); /* * If we are holding frames or the domain is not yet registered or * there's already frames on the pending list, * then add the new frame to pending list */ if (domain == NULL || xport_fcfi->hold_frames || !ocs_list_empty(&xport_fcfi->pend_frames)) { ocs_lock(&xport_fcfi->pend_frames_lock); ocs_list_add_tail(&xport_fcfi->pend_frames, seq); ocs_unlock(&xport_fcfi->pend_frames_lock); if (domain != NULL) { /* immediately process pending frames */ ocs_domain_process_pending(domain); } } else { /* * We are not holding frames and pending list is empty, just process frame. * A non-zero return means the frame was not handled - so cleanup */ if (ocs_domain_dispatch_frame(domain, seq)) { if (seq->hio != NULL) { ocs_port_owned_abort(ocs, seq->hio); } ocs_hw_sequence_free(&ocs->hw, seq); } } return 0; } /** * @ingroup unsol * @brief Process pending frames queued to the given node. * *
 * @par Description
* Frames that are queued for the \c node are dispatched and returned * to the RQ. * * @param node Node of the queued frames that are to be dispatched. * * @return Returns 0 on success, or a negative error value on failure. */ int32_t ocs_process_node_pending(ocs_node_t *node) { ocs_t *ocs = node->ocs; ocs_hw_sequence_t *seq = NULL; uint32_t pend_frames_processed = 0; for (;;) { /* need to check for hold frames condition after each frame processed * because any given frame could cause a transition to a state that * holds frames */ if (ocs_node_frames_held(node)) { break; } /* Get next frame/sequence */ ocs_lock(&node->pend_frames_lock); seq = ocs_list_remove_head(&node->pend_frames); if (seq == NULL) { pend_frames_processed = node->pend_frames_processed; node->pend_frames_processed = 0; ocs_unlock(&node->pend_frames_lock); break; } node->pend_frames_processed++; ocs_unlock(&node->pend_frames_lock); /* now dispatch frame(s) to dispatch function */ if (ocs_node_dispatch_frame(node, seq)) { if (seq->hio != NULL) { ocs_port_owned_abort(ocs, seq->hio); } ocs_hw_sequence_free(&ocs->hw, seq); } } if (pend_frames_processed != 0) { ocs_log_debug(ocs, "%u node frames held and processed\n", pend_frames_processed); } return 0; } /** * @ingroup unsol * @brief Process pending frames queued to the given domain. * *
 * @par Description
* Frames that are queued for the \c domain are dispatched and * returned to the RQ. * * @param domain Domain of the queued frames that are to be * dispatched. * * @return Returns 0 on success, or a negative error value on failure. */ int32_t ocs_domain_process_pending(ocs_domain_t *domain) { ocs_t *ocs = domain->ocs; ocs_xport_fcfi_t *xport_fcfi; ocs_hw_sequence_t *seq = NULL; uint32_t pend_frames_processed = 0; ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, -1); xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator]; for (;;) { /* need to check for hold frames condition after each frame processed * because any given frame could cause a transition to a state that * holds frames */ if (ocs_domain_frames_held(domain)) { break; } /* Get next frame/sequence */ ocs_lock(&xport_fcfi->pend_frames_lock); seq = ocs_list_remove_head(&xport_fcfi->pend_frames); if (seq == NULL) { pend_frames_processed = xport_fcfi->pend_frames_processed; xport_fcfi->pend_frames_processed = 0; ocs_unlock(&xport_fcfi->pend_frames_lock); break; } xport_fcfi->pend_frames_processed++; ocs_unlock(&xport_fcfi->pend_frames_lock); /* now dispatch frame(s) to dispatch function */ if (ocs_domain_dispatch_frame(domain, seq)) { if (seq->hio != NULL) { ocs_port_owned_abort(ocs, seq->hio); } ocs_hw_sequence_free(&ocs->hw, seq); } } if (pend_frames_processed != 0) { ocs_log_debug(ocs, "%u domain frames held and processed\n", pend_frames_processed); } return 0; } /** * @ingroup unsol * @brief Purge given pending list * *
 * @par Description
* Frames that are queued on the given pending list are * discarded and returned to the RQ. * * @param ocs Pointer to ocs object. * @param pend_list Pending list to be purged. * @param list_lock Lock that protects pending list. * * @return Returns 0 on success, or a negative error value on failure. */ static int32_t ocs_purge_pending(ocs_t *ocs, ocs_list_t *pend_list, ocs_lock_t *list_lock) { ocs_hw_sequence_t *frame; for (;;) { frame = ocs_frame_next(pend_list, list_lock); if (frame == NULL) { break; } frame_printf(ocs, (fc_header_t*) frame->header->dma.virt, "Discarding held frame\n"); if (frame->hio != NULL) { ocs_port_owned_abort(ocs, frame->hio); } ocs_hw_sequence_free(&ocs->hw, frame); } return 0; } /** * @ingroup unsol * @brief Purge node's pending (queued) frames. * *
 * @par Description
* Frames that are queued for the \c node are discarded and returned * to the RQ. * * @param node Node of the queued frames that are to be discarded. * * @return Returns 0 on success, or a negative error value on failure. */ int32_t ocs_node_purge_pending(ocs_node_t *node) { return ocs_purge_pending(node->ocs, &node->pend_frames, &node->pend_frames_lock); } /** * @ingroup unsol * @brief Purge xport's pending (queued) frames. * *
 * @par Description
* Frames that are queued for the \c xport are discarded and * returned to the RQ. * * @param domain Pointer to domain object. * * @return Returns 0 on success; or a negative error value on failure. */ int32_t ocs_domain_purge_pending(ocs_domain_t *domain) { ocs_t *ocs = domain->ocs; ocs_xport_fcfi_t *xport_fcfi; ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, -1); xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator]; return ocs_purge_pending(domain->ocs, &xport_fcfi->pend_frames, &xport_fcfi->pend_frames_lock); } /** * @ingroup unsol * @brief Check if node's pending frames are held. * * @param arg Node for which the pending frame hold condition is * checked. * * @return Returns 1 if node is holding pending frames, or 0 * if not. */ static uint8_t ocs_node_frames_held(void *arg) { ocs_node_t *node = (ocs_node_t *)arg; return node->hold_frames; } /** * @ingroup unsol * @brief Check if domain's pending frames are held. * * @param arg Domain for which the pending frame hold condition is * checked. * * @return Returns 1 if domain is holding pending frames, or 0 * if not. */ static uint8_t ocs_domain_frames_held(void *arg) { ocs_domain_t *domain = (ocs_domain_t *)arg; ocs_t *ocs = domain->ocs; ocs_xport_fcfi_t *xport_fcfi; ocs_assert(domain != NULL, 1); ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, 1); xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator]; return xport_fcfi->hold_frames; } /** * @ingroup unsol * @brief Globally (at xport level) hold unsolicited frames. * *
 * @par Description
* This function places a hold on processing unsolicited FC * frames queued to the xport pending list. * * @param domain Pointer to domain object. * * @return Returns None. */ void ocs_domain_hold_frames(ocs_domain_t *domain) { ocs_t *ocs = domain->ocs; ocs_xport_fcfi_t *xport_fcfi; ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI); xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator]; if (!xport_fcfi->hold_frames) { ocs_log_debug(domain->ocs, "hold frames set for FCFI %d\n", domain->fcf_indicator); xport_fcfi->hold_frames = 1; } } /** * @ingroup unsol * @brief Clear hold on unsolicited frames. * *
 * @par Description
* This function clears the hold on processing unsolicited FC * frames queued to the domain pending list. * * @param domain Pointer to domain object. * * @return Returns None. */ void ocs_domain_accept_frames(ocs_domain_t *domain) { ocs_t *ocs = domain->ocs; ocs_xport_fcfi_t *xport_fcfi; ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI); xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator]; if (xport_fcfi->hold_frames == 1) { ocs_log_debug(domain->ocs, "hold frames cleared for FCFI %d\n", domain->fcf_indicator); } xport_fcfi->hold_frames = 0; ocs_domain_process_pending(domain); } /** * @ingroup unsol * @brief Dispatch unsolicited FC frame. * *
 * @par Description
* This function processes an unsolicited FC frame queued at the * domain level. * * @param arg Pointer to ocs object. * @param seq Header/payload sequence buffers. * * @return Returns 0 if frame processed and RX buffers cleaned * up appropriately, -1 if frame not handled. */ static __inline int32_t ocs_domain_dispatch_frame(void *arg, ocs_hw_sequence_t *seq) { ocs_domain_t *domain = (ocs_domain_t *)arg; ocs_t *ocs = domain->ocs; fc_header_t *hdr; uint32_t s_id; uint32_t d_id; ocs_node_t *node = NULL; ocs_sport_t *sport = NULL; ocs_assert(seq->header, -1); ocs_assert(seq->header->dma.virt, -1); ocs_assert(seq->payload->dma.virt, -1); hdr = seq->header->dma.virt; /* extract the s_id and d_id */ s_id = fc_be24toh(hdr->s_id); d_id = fc_be24toh(hdr->d_id); sport = domain->sport; if (sport == NULL) { frame_printf(ocs, hdr, "phy sport for FC ID 0x%06x is NULL, dropping frame\n", d_id); return -1; } if (sport->fc_id != d_id) { /* Not a physical port IO lookup sport associated with the npiv port */ sport = ocs_sport_find(domain, d_id); /* Look up without lock */ if (sport == NULL) { if (hdr->type == FC_TYPE_FCP) { /* Drop frame */ ocs_log_warn(ocs, "unsolicited FCP frame with invalid d_id x%x, dropping\n", d_id); return -1; } else { /* p2p will use this case */ sport = domain->sport; } } } /* Lookup the node given the remote s_id */ node = ocs_node_find(sport, s_id); /* If not found, then create a new node */ if (node == NULL) { /* If this is solicited data or control based on R_CTL and there is no node context, * then we can drop the frame */ if ((hdr->r_ctl == FC_RCTL_FC4_DATA) && ( (hdr->info == FC_RCTL_INFO_SOL_DATA) || (hdr->info == FC_RCTL_INFO_SOL_CTRL))) { ocs_log_debug(ocs, "solicited data/ctrl frame without node, dropping\n"); return -1; } node = ocs_node_alloc(sport, s_id, FALSE, FALSE); if (node == NULL) { ocs_log_err(ocs, "ocs_node_alloc() failed\n"); return -1; } /* don't send PLOGI on ocs_d_init entry */ ocs_node_init_device(node, FALSE); } if (node->hold_frames || !ocs_list_empty((&node->pend_frames))) { /* TODO: info log level frame_printf(ocs, hdr, "Holding frame\n"); */ /* add frame to node's pending list */ ocs_lock(&node->pend_frames_lock); ocs_list_add_tail(&node->pend_frames, seq); ocs_unlock(&node->pend_frames_lock); return 0; } /* now dispatch frame to the node frame handler */ return ocs_node_dispatch_frame(node, seq); } /** * @ingroup unsol * @brief Dispatch a frame. * *
 * @par Description
* A frame is dispatched from the \c node to the handler. * * @param arg Node that originated the frame. * @param seq Header/payload sequence buffers. * * @return Returns 0 if frame processed and RX buffers cleaned * up appropriately, -1 if frame not handled. */ static int32_t ocs_node_dispatch_frame(void *arg, ocs_hw_sequence_t *seq) { fc_header_t *hdr = seq->header->dma.virt; uint32_t port_id; ocs_node_t *node = (ocs_node_t *)arg; int32_t rc = -1; int32_t sit_set = 0; port_id = fc_be24toh(hdr->s_id); ocs_assert(port_id == node->rnode.fc_id, -1); if (fc_be24toh(hdr->f_ctl) & FC_FCTL_END_SEQUENCE) { /*if SIT is set */ if (fc_be24toh(hdr->f_ctl) & FC_FCTL_SEQUENCE_INITIATIVE) { sit_set = 1; } switch (hdr->r_ctl) { case FC_RCTL_ELS: if (sit_set) { rc = ocs_node_recv_els_frame(node, seq); } break; case FC_RCTL_BLS: if (sit_set) { rc = ocs_node_recv_abts_frame(node, seq); }else { rc = ocs_node_recv_bls_no_sit(node, seq); } break; case FC_RCTL_FC4_DATA: switch(hdr->type) { case FC_TYPE_FCP: if (hdr->info == FC_RCTL_INFO_UNSOL_CMD) { if (node->fcp_enabled) { if (sit_set) { rc = ocs_dispatch_fcp_cmd(node, seq); }else { /* send the auto xfer ready command */ rc = ocs_dispatch_fcp_cmd_auto_xfer_rdy(node, seq); } } else { rc = ocs_node_recv_fcp_cmd(node, seq); } } else if (hdr->info == FC_RCTL_INFO_SOL_DATA) { if (sit_set) { rc = ocs_dispatch_fcp_data(node, seq); } } break; case FC_TYPE_GS: if (sit_set) { rc = ocs_node_recv_ct_frame(node, seq); } break; default: break; } break; } } else { node_printf(node, "Dropping frame hdr = %08x %08x %08x %08x %08x %08x\n", ocs_htobe32(((uint32_t *)hdr)[0]), ocs_htobe32(((uint32_t *)hdr)[1]), ocs_htobe32(((uint32_t *)hdr)[2]), ocs_htobe32(((uint32_t *)hdr)[3]), ocs_htobe32(((uint32_t *)hdr)[4]), ocs_htobe32(((uint32_t *)hdr)[5])); } return rc; } /** * @ingroup unsol * @brief Dispatch unsolicited FCP frames (RQ Pair). * *
 * @par Description
* Dispatch unsolicited FCP frames (called from the device node state machine). * * @param io Pointer to the IO context. * @param task_management_flags Task management flags from the FCP_CMND frame. * @param node Node that originated the frame. * @param lun 32-bit LUN from FCP_CMND frame. * * @return Returns None. */ static void ocs_dispatch_unsolicited_tmf(ocs_io_t *io, uint8_t task_management_flags, ocs_node_t *node, uint64_t lun) { uint32_t i; struct { uint32_t mask; ocs_scsi_tmf_cmd_e cmd; } tmflist[] = { {FCP_QUERY_TASK_SET, OCS_SCSI_TMF_QUERY_TASK_SET}, {FCP_ABORT_TASK_SET, OCS_SCSI_TMF_ABORT_TASK_SET}, {FCP_CLEAR_TASK_SET, OCS_SCSI_TMF_CLEAR_TASK_SET}, {FCP_QUERY_ASYNCHRONOUS_EVENT, OCS_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT}, {FCP_LOGICAL_UNIT_RESET, OCS_SCSI_TMF_LOGICAL_UNIT_RESET}, {FCP_TARGET_RESET, OCS_SCSI_TMF_TARGET_RESET}, {FCP_CLEAR_ACA, OCS_SCSI_TMF_CLEAR_ACA}}; io->exp_xfer_len = 0; /* BUG 32235 */ for (i = 0; i < ARRAY_SIZE(tmflist); i ++) { if (tmflist[i].mask & task_management_flags) { io->tmf_cmd = tmflist[i].cmd; ocs_scsi_recv_tmf(io, lun, tmflist[i].cmd, NULL, 0); break; } } if (i == ARRAY_SIZE(tmflist)) { /* Not handled */ node_printf(node, "TMF x%x rejected\n", task_management_flags); ocs_scsi_send_tmf_resp(io, OCS_SCSI_TMF_FUNCTION_REJECTED, NULL, ocs_fc_tmf_rejected_cb, NULL); } } static int32_t ocs_validate_fcp_cmd(ocs_t *ocs, ocs_hw_sequence_t *seq) { size_t exp_payload_len = 0; fcp_cmnd_iu_t *cmnd = seq->payload->dma.virt; exp_payload_len = sizeof(fcp_cmnd_iu_t) - 16 + cmnd->additional_fcp_cdb_length; /* * If we received less than FCP_CMND_IU bytes, assume that the frame is * corrupted in some way and drop it. This was seen when jamming the FCTL * fill bytes field. */ if (seq->payload->dma.len < exp_payload_len) { fc_header_t *fchdr = seq->header->dma.virt; ocs_log_debug(ocs, "dropping ox_id %04x with payload length (%zd) less than expected (%zd)\n", ocs_be16toh(fchdr->ox_id), seq->payload->dma.len, exp_payload_len); return -1; } return 0; } static void ocs_populate_io_fcp_cmd(ocs_io_t *io, fcp_cmnd_iu_t *cmnd, fc_header_t *fchdr, uint8_t sit) { uint32_t *fcp_dl; io->init_task_tag = ocs_be16toh(fchdr->ox_id); /* note, tgt_task_tag, hw_tag set when HW io is allocated */ fcp_dl = (uint32_t*)(&(cmnd->fcp_cdb_and_dl)); fcp_dl += cmnd->additional_fcp_cdb_length; io->exp_xfer_len = ocs_be32toh(*fcp_dl); io->transferred = 0; /* The upper 7 bits of CS_CTL is the frame priority thru the SAN. * Our assertion here is, the priority given to a frame containing * the FCP cmd should be the priority given to ALL frames contained * in that IO. Thus we need to save the incoming CS_CTL here. */ if (fc_be24toh(fchdr->f_ctl) & FC_FCTL_PRIORITY_ENABLE) { io->cs_ctl = fchdr->cs_ctl; } else { io->cs_ctl = 0; } io->seq_init = sit; } static uint32_t ocs_get_flags_fcp_cmd(fcp_cmnd_iu_t *cmnd) { uint32_t flags = 0; switch (cmnd->task_attribute) { case FCP_TASK_ATTR_SIMPLE: flags |= OCS_SCSI_CMD_SIMPLE; break; case FCP_TASK_ATTR_HEAD_OF_QUEUE: flags |= OCS_SCSI_CMD_HEAD_OF_QUEUE; break; case FCP_TASK_ATTR_ORDERED: flags |= OCS_SCSI_CMD_ORDERED; break; case FCP_TASK_ATTR_ACA: flags |= OCS_SCSI_CMD_ACA; break; case FCP_TASK_ATTR_UNTAGGED: flags |= OCS_SCSI_CMD_UNTAGGED; break; } + flags |= (uint32_t)cmnd->command_priority << OCS_SCSI_PRIORITY_SHIFT; if (cmnd->wrdata) flags |= OCS_SCSI_CMD_DIR_IN; if (cmnd->rddata) flags |= OCS_SCSI_CMD_DIR_OUT; return flags; } /** * @ingroup unsol * @brief Dispatch unsolicited FCP_CMND frame. * *
 * @par Description
* Dispatch unsolicited FCP_CMND frame. RQ Pair mode - always * used for RQ Pair mode since first burst is not supported. * * @param node Node that originated the frame. * @param seq Header/payload sequence buffers. * * @return Returns 0 if frame processed and RX buffers cleaned * up appropriately, -1 if frame not handled and RX buffers need * to be returned. */ static int32_t ocs_dispatch_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq) { ocs_t *ocs = node->ocs; fc_header_t *fchdr = seq->header->dma.virt; fcp_cmnd_iu_t *cmnd = NULL; ocs_io_t *io = NULL; fc_vm_header_t *vhdr; uint8_t df_ctl; uint64_t lun = UINT64_MAX; int32_t rc = 0; ocs_assert(seq->payload, -1); cmnd = seq->payload->dma.virt; /* perform FCP_CMND validation check(s) */ if (ocs_validate_fcp_cmd(ocs, seq)) { return -1; } lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(cmnd->fcp_lun)); if (lun == UINT64_MAX) { return -1; } io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_RESPONDER); if (io == NULL) { uint32_t send_frame_capable; /* If we have SEND_FRAME capability, then use it to send task set full or busy */ rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable); if ((rc == 0) && send_frame_capable) { rc = ocs_sframe_send_task_set_full_or_busy(node, seq); if (rc) { ocs_log_test(ocs, "ocs_sframe_send_task_set_full_or_busy failed: %d\n", rc); } return rc; } ocs_log_err(ocs, "IO allocation failed ox_id %04x\n", ocs_be16toh(fchdr->ox_id)); return -1; } io->hw_priv = seq->hw_priv; /* Check if the CMD has vmheader. */ io->app_id = 0; df_ctl = fchdr->df_ctl; if (df_ctl & FC_DFCTL_DEVICE_HDR_16_MASK) { uint32_t vmhdr_offset = 0; /* Presence of VMID. Get the vm header offset. */ if (df_ctl & FC_DFCTL_ESP_HDR_MASK) { vmhdr_offset += FC_DFCTL_ESP_HDR_SIZE; ocs_log_err(ocs, "ESP Header present. Fix ESP Size.\n"); } if (df_ctl & FC_DFCTL_NETWORK_HDR_MASK) { vmhdr_offset += FC_DFCTL_NETWORK_HDR_SIZE; } vhdr = (fc_vm_header_t *) ((char *)fchdr + sizeof(fc_header_t) + vmhdr_offset); io->app_id = ocs_be32toh(vhdr->src_vmid); } /* RQ pair, if we got here, SIT=1 */ ocs_populate_io_fcp_cmd(io, cmnd, fchdr, TRUE); if (cmnd->task_management_flags) { ocs_dispatch_unsolicited_tmf(io, cmnd->task_management_flags, node, lun); } else { uint32_t flags = ocs_get_flags_fcp_cmd(cmnd); /* can return failure for things like task set full and UAs, * no need to treat as a dropped frame if rc != 0 */ ocs_scsi_recv_cmd(io, lun, cmnd->fcp_cdb, sizeof(cmnd->fcp_cdb) + (cmnd->additional_fcp_cdb_length * sizeof(uint32_t)), flags); } /* successfully processed, now return RX buffer to the chip */ ocs_hw_sequence_free(&ocs->hw, seq); return 0; } /** * @ingroup unsol * @brief Dispatch unsolicited FCP_CMND frame (auto xfer rdy). * *
 * @par Description
* Dispatch unsolicited FCP_CMND frame that is assisted with auto xfer ready. * * @param node Node that originated the frame. * @param seq Header/payload sequence buffers. * * @return Returns 0 if frame processed and RX buffers cleaned * up appropriately, -1 if frame not handled and RX buffers need * to be returned. */ static int32_t ocs_dispatch_fcp_cmd_auto_xfer_rdy(ocs_node_t *node, ocs_hw_sequence_t *seq) { ocs_t *ocs = node->ocs; fc_header_t *fchdr = seq->header->dma.virt; fcp_cmnd_iu_t *cmnd = NULL; ocs_io_t *io = NULL; uint64_t lun = UINT64_MAX; int32_t rc = 0; ocs_assert(seq->payload, -1); cmnd = seq->payload->dma.virt; /* perform FCP_CMND validation check(s) */ if (ocs_validate_fcp_cmd(ocs, seq)) { return -1; } /* make sure first burst or auto xfer_rdy is enabled */ if (!seq->auto_xrdy) { node_printf(node, "IO is not Auto Xfr Rdy assisted, dropping FCP_CMND\n"); return -1; } lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(cmnd->fcp_lun)); /* TODO should there be a check here for an error? Why do any of the * below if the LUN decode failed? */ io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_RESPONDER); if (io == NULL) { uint32_t send_frame_capable; /* If we have SEND_FRAME capability, then use it to send task set full or busy */ rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable); if ((rc == 0) && send_frame_capable) { rc = ocs_sframe_send_task_set_full_or_busy(node, seq); if (rc) { ocs_log_test(ocs, "ocs_sframe_send_task_set_full_or_busy failed: %d\n", rc); } return rc; } ocs_log_err(ocs, "IO allocation failed ox_id %04x\n", ocs_be16toh(fchdr->ox_id)); return -1; } io->hw_priv = seq->hw_priv; /* RQ pair, if we got here, SIT=0 */ ocs_populate_io_fcp_cmd(io, cmnd, fchdr, FALSE); if (cmnd->task_management_flags) { /* first burst command better not be a TMF */ ocs_log_err(ocs, "TMF flags set 0x%x\n", cmnd->task_management_flags); ocs_scsi_io_free(io); return -1; } else { uint32_t flags = ocs_get_flags_fcp_cmd(cmnd); /* activate HW IO */ ocs_hw_io_activate_port_owned(&ocs->hw, seq->hio); io->hio = seq->hio; seq->hio->ul_io = io; io->tgt_task_tag = seq->hio->indicator; /* Note: Data buffers are received in another call */ ocs_scsi_recv_cmd_first_burst(io, lun, cmnd->fcp_cdb, sizeof(cmnd->fcp_cdb) + (cmnd->additional_fcp_cdb_length * sizeof(uint32_t)), flags, NULL, 0); } /* FCP_CMND processed, return RX buffer to the chip */ ocs_hw_sequence_free(&ocs->hw, seq); return 0; } /** * @ingroup unsol * @brief Dispatch FCP data frames for auto xfer ready. * *
 * @par Description
* Dispatch unsolicited FCP data frames (auto xfer ready) * containing sequence initiative transferred (SIT=1). * * @param node Node that originated the frame. * @param seq Header/payload sequence buffers. * * @return Returns 0 if frame processed and RX buffers cleaned * up appropriately, -1 if frame not handled. */ static int32_t ocs_dispatch_fcp_data(ocs_node_t *node, ocs_hw_sequence_t *seq) { ocs_t *ocs = node->ocs; ocs_hw_t *hw = &ocs->hw; ocs_hw_io_t *hio = seq->hio; ocs_io_t *io; ocs_dma_t fburst[1]; ocs_assert(seq->payload, -1); ocs_assert(hio, -1); io = hio->ul_io; if (io == NULL) { ocs_log_err(ocs, "data received for NULL io, xri=0x%x\n", hio->indicator); return -1; } /* * We only support data completions for auto xfer ready. Make sure * this is a port owned XRI. */ if (!ocs_hw_is_io_port_owned(hw, seq->hio)) { ocs_log_err(ocs, "data received for host owned XRI, xri=0x%x\n", hio->indicator); return -1; } /* For error statuses, pass the error to the target back end */ if (seq->status != OCS_HW_UNSOL_SUCCESS) { ocs_log_err(ocs, "data with status 0x%x received, xri=0x%x\n", seq->status, hio->indicator); /* * In this case, there is an existing, in-use HW IO that * first may need to be aborted. Then, the backend will be * notified of the error while waiting for the data. */ ocs_port_owned_abort(ocs, seq->hio); /* * HW IO has already been allocated and is waiting for data. * Need to tell backend that an error has occurred. */ ocs_scsi_recv_cmd_first_burst(io, 0, NULL, 0, OCS_SCSI_FIRST_BURST_ERR, NULL, 0); return -1; } /* sequence initiative has been transferred */ io->seq_init = 1; /* convert the array of pointers to the correct type, to send to backend */ fburst[0] = seq->payload->dma; /* the amount of first burst data was saved as "acculated sequence length" */ io->transferred = seq->payload->dma.len; if (ocs_scsi_recv_cmd_first_burst(io, 0, NULL, 0, 0, fburst, io->transferred)) { ocs_log_err(ocs, "error passing first burst, xri=0x%x, oxid=0x%x\n", hio->indicator, io->init_task_tag); } /* Free the header and all the accumulated payload buffers */ ocs_hw_sequence_free(&ocs->hw, seq); return 0; } /** * @ingroup unsol * @brief Handle the callback for the TMF FUNCTION_REJECTED response. * *
 * @par Description
* Handle the callback of a send TMF FUNCTION_REJECTED response request. * * @param io Pointer to the IO context. * @param scsi_status Status of the response. * @param flags Callback flags. * @param arg Callback argument. * * @return Returns 0 on success, or a negative error value on failure. */ static int32_t ocs_fc_tmf_rejected_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg) { ocs_scsi_io_free(io); return 0; } /** * @brief Return next FC frame on node->pend_frames list * * The next FC frame on the node->pend_frames list is returned, or NULL * if the list is empty. * * @param pend_list Pending list to be purged. * @param list_lock Lock that protects pending list. * * @return Returns pointer to the next FC frame, or NULL if the pending frame list * is empty. */ static ocs_hw_sequence_t * ocs_frame_next(ocs_list_t *pend_list, ocs_lock_t *list_lock) { ocs_hw_sequence_t *frame = NULL; ocs_lock(list_lock); frame = ocs_list_remove_head(pend_list); ocs_unlock(list_lock); return frame; } /** * @brief Process send fcp response frame callback * * The function is called when the send FCP response posting has completed. Regardless * of the outcome, the sequence is freed. * * @param arg Pointer to originator frame sequence. * @param cqe Pointer to completion queue entry. * @param status Status of operation. * * @return None. */ static void ocs_sframe_common_send_cb(void *arg, uint8_t *cqe, int32_t status) { ocs_hw_send_frame_context_t *ctx = arg; ocs_hw_t *hw = ctx->hw; /* Free WQ completion callback */ ocs_hw_reqtag_free(hw, ctx->wqcb); /* Free sequence */ ocs_hw_sequence_free(hw, ctx->seq); } /** * @brief Send a frame, common code * * A frame is sent using SEND_FRAME, the R_CTL/F_CTL/TYPE may be specified, the payload is * sent as a single frame. * * Memory resources are allocated from RQ buffers contained in the passed in sequence data. * * @param node Pointer to node object. * @param seq Pointer to sequence object. * @param r_ctl R_CTL value to place in FC header. * @param info INFO value to place in FC header. * @param f_ctl F_CTL value to place in FC header. * @param type TYPE value to place in FC header. * @param payload Pointer to payload data * @param payload_len Length of payload in bytes. * * @return Returns 0 on success, or a negative error code value on failure. 
*/ static int32_t ocs_sframe_common_send(ocs_node_t *node, ocs_hw_sequence_t *seq, uint8_t r_ctl, uint8_t info, uint32_t f_ctl, uint8_t type, void *payload, uint32_t payload_len) { ocs_t *ocs = node->ocs; ocs_hw_t *hw = &ocs->hw; ocs_hw_rtn_e rc = 0; fc_header_t *behdr = seq->header->dma.virt; fc_header_le_t hdr; uint32_t s_id = fc_be24toh(behdr->s_id); uint32_t d_id = fc_be24toh(behdr->d_id); uint16_t ox_id = ocs_be16toh(behdr->ox_id); uint16_t rx_id = ocs_be16toh(behdr->rx_id); ocs_hw_send_frame_context_t *ctx; uint32_t heap_size = seq->payload->dma.size; uintptr_t heap_phys_base = seq->payload->dma.phys; uint8_t *heap_virt_base = seq->payload->dma.virt; uint32_t heap_offset = 0; /* Build the FC header reusing the RQ header DMA buffer */ ocs_memset(&hdr, 0, sizeof(hdr)); hdr.d_id = s_id; /* send it back to whomever sent it to us */ hdr.r_ctl = r_ctl; hdr.info = info; hdr.s_id = d_id; hdr.cs_ctl = 0; hdr.f_ctl = f_ctl; hdr.type = type; hdr.seq_cnt = 0; hdr.df_ctl = 0; /* * send_frame_seq_id is an atomic, we just let it increment, while storing only * the low 8 bits to hdr->seq_id */ hdr.seq_id = (uint8_t) ocs_atomic_add_return(&hw->send_frame_seq_id, 1); hdr.rx_id = rx_id; hdr.ox_id = ox_id; hdr.parameter = 0; /* Allocate and fill in the send frame request context */ ctx = (void*)(heap_virt_base + heap_offset); heap_offset += sizeof(*ctx); ocs_assert(heap_offset < heap_size, -1); ocs_memset(ctx, 0, sizeof(*ctx)); /* Save sequence */ ctx->seq = seq; /* Allocate a response payload DMA buffer from the heap */ ctx->payload.phys = heap_phys_base + heap_offset; ctx->payload.virt = heap_virt_base + heap_offset; ctx->payload.size = payload_len; ctx->payload.len = payload_len; heap_offset += payload_len; ocs_assert(heap_offset <= heap_size, -1); /* Copy the payload in */ ocs_memcpy(ctx->payload.virt, payload, payload_len); /* Send */ rc = ocs_hw_send_frame(&ocs->hw, (void*)&hdr, FC_SOFI3, FC_EOFT, &ctx->payload, ctx, ocs_sframe_common_send_cb, ctx); if (rc) { ocs_log_test(ocs, "ocs_hw_send_frame failed: %d\n", rc); } return rc ? -1 : 0; } /** * @brief Send FCP response using SEND_FRAME * * The FCP response is send using the SEND_FRAME function. * * @param node Pointer to node object. * @param seq Pointer to inbound sequence. * @param rsp Pointer to response data. * @param rsp_len Length of response data, in bytes. * * @return Returns 0 on success, or a negative error code value on failure. */ static int32_t ocs_sframe_send_fcp_rsp(ocs_node_t *node, ocs_hw_sequence_t *seq, void *rsp, uint32_t rsp_len) { return ocs_sframe_common_send(node, seq, FC_RCTL_FC4_DATA, FC_RCTL_INFO_CMD_STATUS, FC_FCTL_EXCHANGE_RESPONDER | FC_FCTL_LAST_SEQUENCE | FC_FCTL_END_SEQUENCE | FC_FCTL_SEQUENCE_INITIATIVE, FC_TYPE_FCP, rsp, rsp_len); } /** * @brief Send task set full response * * Return a task set full or busy response using send frame. * * @param node Pointer to node object. * @param seq Pointer to originator frame sequence. * * @return Returns 0 on success, or a negative error code value on failure. 
*/ static int32_t ocs_sframe_send_task_set_full_or_busy(ocs_node_t *node, ocs_hw_sequence_t *seq) { fcp_rsp_iu_t fcprsp; fcp_cmnd_iu_t *fcpcmd = seq->payload->dma.virt; uint32_t *fcp_dl_ptr; uint32_t fcp_dl; int32_t rc = 0; /* extract FCP_DL from FCP command*/ fcp_dl_ptr = (uint32_t*)(&(fcpcmd->fcp_cdb_and_dl)); fcp_dl_ptr += fcpcmd->additional_fcp_cdb_length; fcp_dl = ocs_be32toh(*fcp_dl_ptr); /* construct task set full or busy response */ ocs_memset(&fcprsp, 0, sizeof(fcprsp)); ocs_lock(&node->active_ios_lock); fcprsp.scsi_status = ocs_list_empty(&node->active_ios) ? SCSI_STATUS_BUSY : SCSI_STATUS_TASK_SET_FULL; ocs_unlock(&node->active_ios_lock); *((uint32_t*)&fcprsp.fcp_resid) = fcp_dl; /* send it using send_frame */ rc = ocs_sframe_send_fcp_rsp(node, seq, &fcprsp, sizeof(fcprsp) - sizeof(fcprsp.data)); if (rc) { ocs_log_test(node->ocs, "ocs_sframe_send_fcp_rsp failed: %d\n", rc); } return rc; } /** * @brief Send BA_ACC using sent frame * * A BA_ACC is sent using SEND_FRAME * * @param node Pointer to node object. * @param seq Pointer to originator frame sequence. * * @return Returns 0 on success, or a negative error code value on failure. */ int32_t ocs_sframe_send_bls_acc(ocs_node_t *node, ocs_hw_sequence_t *seq) { fc_header_t *behdr = seq->header->dma.virt; uint16_t ox_id = ocs_be16toh(behdr->ox_id); uint16_t rx_id = ocs_be16toh(behdr->rx_id); fc_ba_acc_payload_t acc = {0}; acc.ox_id = ocs_htobe16(ox_id); acc.rx_id = ocs_htobe16(rx_id); acc.low_seq_cnt = UINT16_MAX; acc.high_seq_cnt = UINT16_MAX; return ocs_sframe_common_send(node, seq, FC_RCTL_BLS, FC_RCTL_INFO_UNSOL_DATA, FC_FCTL_EXCHANGE_RESPONDER | FC_FCTL_LAST_SEQUENCE | FC_FCTL_END_SEQUENCE, FC_TYPE_BASIC_LINK, &acc, sizeof(acc)); }
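/*
 * Illustrative sketch (not part of this change): an initiator-client READ
 * issued through the updated ocs_scsi_send_rd_io() prototype from
 * ocs_scsi.h above, which now takes a trailing flags argument.  The CDB,
 * the completion callback, and the flags value of 0 are hypothetical
 * examples only.
 */
#if 0
static int32_t
example_rd_done(ocs_io_t *io, ocs_scsi_io_status_e status, ocs_scsi_cmd_resp_t *rsp,
		uint32_t flags, void *arg)
{
	/* Data (or an error) for the READ has arrived; release the IO. */
	ocs_scsi_io_free(io);
	return 0;
}

static int32_t
example_issue_read10(ocs_node_t *node, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len)
{
	/* READ(10) of a single block at LBA 0 (hypothetical CDB). */
	uint8_t cdb[10] = { 0x28, 0, 0, 0, 0, 0, 0, 0, 1, 0 };
	ocs_io_t *io;

	io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_ORIGINATOR);
	if (io == NULL)
		return -1;

	/* No DIF and no special handling: NULL dif_info, flags = 0. */
	return ocs_scsi_send_rd_io(node, io, 0 /* lun */, cdb, sizeof(cdb), NULL,
	    sgl, sgl_count, wire_len, example_rd_done, NULL, 0 /* flags */);
}
#endif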