diff --git a/sys/cam/cam_ccb.h b/sys/cam/cam_ccb.h index 3a01cde1a442..f0150d680d2f 100644 --- a/sys/cam/cam_ccb.h +++ b/sys/cam/cam_ccb.h @@ -1,1559 +1,1558 @@ /*- * Data structures and definitions for CAM Control Blocks (CCBs). * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_CCB_H #define _CAM_CAM_CCB_H 1 #include #include #include #include #ifndef _KERNEL #include #endif #include #include #include #include #include /* General allocation length definitions for CCB structures */ #define IOCDBLEN CAM_MAX_CDBLEN /* Space for CDB bytes/pointer */ #define VUHBALEN 14 /* Vendor Unique HBA length */ #define SIM_IDLEN 16 /* ASCII string len for SIM ID */ #define HBA_IDLEN 16 /* ASCII string len for HBA ID */ #define DEV_IDLEN 16 /* ASCII string len for device names */ #define CCB_PERIPH_PRIV_SIZE 2 /* size of peripheral private area */ #define CCB_SIM_PRIV_SIZE 2 /* size of sim private area */ /* Struct definitions for CAM control blocks */ /* Common CCB header */ /* CCB memory allocation flags */ typedef enum { CAM_CCB_FROM_UMA = 0x00000001,/* CCB from a periph UMA zone */ } ccb_alloc_flags; /* CAM CCB flags */ typedef enum { CAM_CDB_POINTER = 0x00000001,/* The CDB field is a pointer */ CAM_unused1 = 0x00000002, CAM_unused2 = 0x00000004, CAM_NEGOTIATE = 0x00000008,/* * Perform transport negotiation * with this command. 
*/ CAM_DATA_ISPHYS = 0x00000010,/* Data type with physical addrs */ CAM_DIS_AUTOSENSE = 0x00000020,/* Disable autosense feature */ CAM_DIR_BOTH = 0x00000000,/* Data direction (00:IN/OUT) */ CAM_DIR_IN = 0x00000040,/* Data direction (01:DATA IN) */ CAM_DIR_OUT = 0x00000080,/* Data direction (10:DATA OUT) */ CAM_DIR_NONE = 0x000000C0,/* Data direction (11:no data) */ CAM_DIR_MASK = 0x000000C0,/* Data direction Mask */ CAM_DATA_VADDR = 0x00000000,/* Data type (000:Virtual) */ CAM_DATA_PADDR = 0x00000010,/* Data type (001:Physical) */ CAM_DATA_SG = 0x00040000,/* Data type (010:sglist) */ CAM_DATA_SG_PADDR = 0x00040010,/* Data type (011:sglist phys) */ CAM_DATA_BIO = 0x00200000,/* Data type (100:bio) */ CAM_DATA_MASK = 0x00240010,/* Data type mask */ CAM_unused3 = 0x00000100, CAM_unused4 = 0x00000200, CAM_DEV_QFRZDIS = 0x00000400,/* Disable DEV Q freezing */ CAM_DEV_QFREEZE = 0x00000800,/* Freeze DEV Q on execution */ CAM_HIGH_POWER = 0x00001000,/* Command takes a lot of power */ CAM_SENSE_PTR = 0x00002000,/* Sense data is a pointer */ CAM_SENSE_PHYS = 0x00004000,/* Sense pointer is physical addr*/ CAM_TAG_ACTION_VALID = 0x00008000,/* Use the tag action in this ccb*/ CAM_PASS_ERR_RECOVER = 0x00010000,/* Pass driver does err. recovery*/ CAM_DIS_DISCONNECT = 0x00020000,/* Disable disconnect */ CAM_unused5 = 0x00080000, CAM_unused6 = 0x00100000, CAM_CDB_PHYS = 0x00400000,/* CDB poiner is physical */ CAM_unused7 = 0x00800000, /* Phase cognizant mode flags */ CAM_unused8 = 0x01000000, CAM_unused9 = 0x02000000, CAM_unused10 = 0x04000000, CAM_unused11 = 0x08000000, CAM_unused12 = 0x10000000, CAM_unused13 = 0x20000000, CAM_unused14 = 0x40000000, /* Host target Mode flags */ CAM_SEND_SENSE = 0x08000000,/* Send sense data with status */ CAM_unused15 = 0x10000000, CAM_unused16 = 0x20000000, CAM_SEND_STATUS = 0x40000000,/* Send status after data phase */ CAM_UNLOCKED = 0x80000000 /* Call callback without lock. */ } ccb_flags; typedef enum { CAM_USER_DATA_ADDR = 0x00000002,/* Userspace data pointers */ CAM_SG_FORMAT_IOVEC = 0x00000004,/* iovec instead of busdma S/G*/ CAM_UNMAPPED_BUF = 0x00000008 /* use unmapped I/O */ } ccb_xflags; /* XPT Opcodes for xpt_action */ typedef enum { /* Function code flags are bits greater than 0xff */ XPT_FC_QUEUED = 0x100, /* Non-immediate function code */ XPT_FC_USER_CCB = 0x200, XPT_FC_XPT_ONLY = 0x400, /* Only for the transport layer device */ XPT_FC_DEV_QUEUED = 0x800 | XPT_FC_QUEUED, /* Passes through the device queues */ /* Common function commands: 0x00->0x0F */ XPT_NOOP = 0x00, /* Execute Nothing */ XPT_SCSI_IO = 0x01 | XPT_FC_DEV_QUEUED, /* Execute the requested I/O operation */ XPT_GDEV_TYPE = 0x02, /* Get type information for specified device */ XPT_GDEVLIST = 0x03, /* Get a list of peripheral devices */ XPT_PATH_INQ = 0x04, /* Path routing inquiry */ XPT_REL_SIMQ = 0x05, /* Release a frozen device queue */ XPT_SASYNC_CB = 0x06, /* Set Asynchronous Callback Parameters */ XPT_SDEV_TYPE = 0x07, /* Set device type information */ XPT_SCAN_BUS = 0x08 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* (Re)Scan the SCSI Bus */ XPT_DEV_MATCH = 0x09 | XPT_FC_XPT_ONLY, /* Get EDT entries matching the given pattern */ XPT_DEBUG = 0x0a, /* Turn on debugging for a bus, target or lun */ XPT_PATH_STATS = 0x0b, /* Path statistics (error counts, etc.) */ XPT_GDEV_STATS = 0x0c, /* Device statistics (error counts, etc.) 
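 *
 * Illustrative sketch (editorial addition, not part of the upstream
 * comment): a function code such as this one is placed in ccb_h.func_code
 * and the CCB is handed to xpt_action(); immediate (non-queued) codes like
 * XPT_GDEV_STATS complete synchronously, e.g.:
 *
 *	struct ccb_getdevstats cgds;
 *
 *	memset(&cgds, 0, sizeof(cgds));
 *	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
 *	cgds.ccb_h.func_code = XPT_GDEV_STATS;
 *	xpt_action((union ccb *)&cgds);
 *
 * "periph" is a caller-supplied struct cam_periph pointer.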
*/ XPT_DEV_ADVINFO = 0x0e, /* Get/Set Device advanced information */ XPT_ASYNC = 0x0f | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Asynchronous event */ /* SCSI Control Functions: 0x10->0x1F */ XPT_ABORT = 0x10, /* Abort the specified CCB */ XPT_RESET_BUS = 0x11 | XPT_FC_XPT_ONLY, /* Reset the specified SCSI bus */ XPT_RESET_DEV = 0x12 | XPT_FC_DEV_QUEUED, /* Bus Device Reset the specified SCSI device */ XPT_TERM_IO = 0x13, /* Terminate the I/O process */ XPT_SCAN_LUN = 0x14 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Logical Unit */ XPT_GET_TRAN_SETTINGS = 0x15, /* * Get default/user transfer settings * for the target */ XPT_SET_TRAN_SETTINGS = 0x16, /* * Set transfer rate/width * negotiation settings */ XPT_CALC_GEOMETRY = 0x17, /* * Calculate the geometry parameters for * a device give the sector size and * volume size. */ XPT_ATA_IO = 0x18 | XPT_FC_DEV_QUEUED, /* Execute the requested ATA I/O operation */ XPT_GET_SIM_KNOB_OLD = 0x18, /* Compat only */ XPT_SET_SIM_KNOB = 0x19, /* * Set SIM specific knob values. */ XPT_GET_SIM_KNOB = 0x1a, /* * Get SIM specific knob values. */ XPT_SMP_IO = 0x1b | XPT_FC_DEV_QUEUED, /* Serial Management Protocol */ XPT_NVME_IO = 0x1c | XPT_FC_DEV_QUEUED, /* Execute the requested NVMe I/O operation */ XPT_MMC_IO = 0x1d | XPT_FC_DEV_QUEUED, /* Placeholder for MMC / SD / SDIO I/O stuff */ XPT_SCAN_TGT = 0x1e | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Target */ XPT_NVME_ADMIN = 0x1f | XPT_FC_DEV_QUEUED, /* Execute the requested NVMe Admin operation */ /* HBA engine commands 0x20->0x2F */ XPT_ENG_INQ = 0x20 | XPT_FC_XPT_ONLY, /* HBA engine feature inquiry */ XPT_ENG_EXEC = 0x21 | XPT_FC_DEV_QUEUED, /* HBA execute engine request */ /* Target mode commands: 0x30->0x3F */ XPT_EN_LUN = 0x30, /* Enable LUN as a target */ XPT_TARGET_IO = 0x31 | XPT_FC_DEV_QUEUED, /* Execute target I/O request */ XPT_ACCEPT_TARGET_IO = 0x32 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Accept Host Target Mode CDB */ XPT_CONT_TARGET_IO = 0x33 | XPT_FC_DEV_QUEUED, /* Continue Host Target I/O Connection */ XPT_IMMED_NOTIFY = 0x34 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event (obsolete) */ XPT_NOTIFY_ACK = 0x35, /* Acknowledgement of event (obsolete) */ XPT_IMMEDIATE_NOTIFY = 0x36 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event */ XPT_NOTIFY_ACKNOWLEDGE = 0x37 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Acknowledgement of event */ XPT_REPROBE_LUN = 0x38 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Query device capacity and notify GEOM */ XPT_MMC_SET_TRAN_SETTINGS = 0x40 | XPT_FC_DEV_QUEUED, XPT_MMC_GET_TRAN_SETTINGS = 0x41 | XPT_FC_DEV_QUEUED, /* Vendor Unique codes: 0x80->0x8F */ XPT_VUNIQUE = 0x80 } xpt_opcode; #define XPT_FC_GROUP_MASK 0xF0 #define XPT_FC_GROUP(op) ((op) & XPT_FC_GROUP_MASK) #define XPT_FC_GROUP_COMMON 0x00 #define XPT_FC_GROUP_SCSI_CONTROL 0x10 #define XPT_FC_GROUP_HBA_ENGINE 0x20 #define XPT_FC_GROUP_TMODE 0x30 #define XPT_FC_GROUP_VENDOR_UNIQUE 0x80 #define XPT_FC_IS_DEV_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_DEV_QUEUED) == XPT_FC_DEV_QUEUED) #define XPT_FC_IS_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_QUEUED) != 0) typedef enum { PROTO_UNKNOWN, PROTO_UNSPECIFIED, PROTO_SCSI, /* Small Computer System Interface */ PROTO_ATA, /* AT Attachment */ PROTO_ATAPI, /* AT Attachment Packetized Interface */ PROTO_SATAPM, /* SATA Port Multiplier */ PROTO_SEMB, /* SATA Enclosure Management Bridge */ PROTO_NVME, /* NVME */ PROTO_MMCSD, /* MMC, SD, SDIO */ } cam_proto; typedef enum { 
XPORT_UNKNOWN, XPORT_UNSPECIFIED, XPORT_SPI, /* SCSI Parallel Interface */ XPORT_FC, /* Fiber Channel */ XPORT_SSA, /* Serial Storage Architecture */ XPORT_USB, /* Universal Serial Bus */ XPORT_PPB, /* Parallel Port Bus */ XPORT_ATA, /* AT Attachment */ XPORT_SAS, /* Serial Attached SCSI */ XPORT_SATA, /* Serial AT Attachment */ XPORT_ISCSI, /* iSCSI */ XPORT_SRP, /* SCSI RDMA Protocol */ XPORT_NVME, /* NVMe over PCIe */ XPORT_MMCSD, /* MMC, SD, SDIO card */ } cam_xport; #define XPORT_IS_NVME(t) ((t) == XPORT_NVME) #define XPORT_IS_ATA(t) ((t) == XPORT_ATA || (t) == XPORT_SATA) #define XPORT_IS_SCSI(t) ((t) != XPORT_UNKNOWN && \ (t) != XPORT_UNSPECIFIED && \ !XPORT_IS_ATA(t) && !XPORT_IS_NVME(t)) #define XPORT_DEVSTAT_TYPE(t) (XPORT_IS_ATA(t) ? DEVSTAT_TYPE_IF_IDE : \ XPORT_IS_SCSI(t) ? DEVSTAT_TYPE_IF_SCSI : \ DEVSTAT_TYPE_IF_OTHER) #define PROTO_VERSION_UNKNOWN (UINT_MAX - 1) #define PROTO_VERSION_UNSPECIFIED UINT_MAX #define XPORT_VERSION_UNKNOWN (UINT_MAX - 1) #define XPORT_VERSION_UNSPECIFIED UINT_MAX typedef union { LIST_ENTRY(ccb_hdr) le; SLIST_ENTRY(ccb_hdr) sle; TAILQ_ENTRY(ccb_hdr) tqe; STAILQ_ENTRY(ccb_hdr) stqe; } camq_entry; typedef union { void *ptr; u_long field; u_int8_t bytes[sizeof(uintptr_t)]; } ccb_priv_entry; typedef union { ccb_priv_entry entries[CCB_PERIPH_PRIV_SIZE]; u_int8_t bytes[CCB_PERIPH_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_ppriv_area; typedef union { ccb_priv_entry entries[CCB_SIM_PRIV_SIZE]; u_int8_t bytes[CCB_SIM_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_spriv_area; typedef struct { struct timeval *etime; uintptr_t sim_data; uintptr_t periph_data; } ccb_qos_area; struct ccb_hdr { cam_pinfo pinfo; /* Info for priority scheduling */ camq_entry xpt_links; /* For chaining in the XPT layer */ camq_entry sim_links; /* For chaining in the SIM layer */ camq_entry periph_links; /* For chaining in the type driver */ #if BYTE_ORDER == LITTLE_ENDIAN u_int16_t retry_count; u_int16_t alloc_flags; /* ccb_alloc_flags */ #else u_int16_t alloc_flags; /* ccb_alloc_flags */ u_int16_t retry_count; #endif void (*cbfcnp)(struct cam_periph *, union ccb *); /* Callback on completion function */ xpt_opcode func_code; /* XPT function code */ u_int32_t status; /* Status returned by CAM subsystem */ struct cam_path *path; /* Compiled path for this ccb */ path_id_t path_id; /* Path ID for the request */ target_id_t target_id; /* Target device ID */ lun_id_t target_lun; /* Target LUN number */ u_int32_t flags; /* ccb_flags */ u_int32_t xflags; /* Extended flags */ ccb_ppriv_area periph_priv; ccb_spriv_area sim_priv; ccb_qos_area qos; u_int32_t timeout; /* Hard timeout value in mseconds */ struct timeval softtimeout; /* Soft timeout value in sec + usec */ }; /* Get Device Information CCB */ struct ccb_getdev { struct ccb_hdr ccb_h; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; u_int8_t serial_num[252]; u_int8_t inq_flags; u_int8_t serial_num_len; void *padding[2]; }; /* Device Statistics CCB */ struct ccb_getdevstats { struct ccb_hdr ccb_h; int dev_openings; /* Space left for more work on device*/ int dev_active; /* Transactions running on the device */ int allocated; /* CCBs allocated for the device */ int queued; /* CCBs queued to be sent to the device */ int held; /* * CCBs held by peripheral drivers * for this device */ int maxtags; /* * Boundary conditions for number of * tagged operations */ int mintags; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { CAM_GDEVLIST_LAST_DEVICE, CAM_GDEVLIST_LIST_CHANGED, 
CAM_GDEVLIST_MORE_DEVS, CAM_GDEVLIST_ERROR } ccb_getdevlist_status_e; struct ccb_getdevlist { struct ccb_hdr ccb_h; char periph_name[DEV_IDLEN]; u_int32_t unit_number; unsigned int generation; u_int32_t index; ccb_getdevlist_status_e status; }; typedef enum { PERIPH_MATCH_NONE = 0x000, PERIPH_MATCH_PATH = 0x001, PERIPH_MATCH_TARGET = 0x002, PERIPH_MATCH_LUN = 0x004, PERIPH_MATCH_NAME = 0x008, PERIPH_MATCH_UNIT = 0x010, PERIPH_MATCH_ANY = 0x01f } periph_pattern_flags; struct periph_match_pattern { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; periph_pattern_flags flags; }; typedef enum { DEV_MATCH_NONE = 0x000, DEV_MATCH_PATH = 0x001, DEV_MATCH_TARGET = 0x002, DEV_MATCH_LUN = 0x004, DEV_MATCH_INQUIRY = 0x008, DEV_MATCH_DEVID = 0x010, DEV_MATCH_ANY = 0x00f } dev_pattern_flags; struct device_id_match_pattern { uint8_t id_len; uint8_t id[256]; }; struct device_match_pattern { path_id_t path_id; target_id_t target_id; lun_id_t target_lun; dev_pattern_flags flags; union { struct scsi_static_inquiry_pattern inq_pat; struct device_id_match_pattern devid_pat; } data; }; typedef enum { BUS_MATCH_NONE = 0x000, BUS_MATCH_PATH = 0x001, BUS_MATCH_NAME = 0x002, BUS_MATCH_UNIT = 0x004, BUS_MATCH_BUS_ID = 0x008, BUS_MATCH_ANY = 0x00f } bus_pattern_flags; struct bus_match_pattern { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; bus_pattern_flags flags; }; union match_pattern { struct periph_match_pattern periph_pattern; struct device_match_pattern device_pattern; struct bus_match_pattern bus_pattern; }; typedef enum { DEV_MATCH_PERIPH, DEV_MATCH_DEVICE, DEV_MATCH_BUS } dev_match_type; struct dev_match_pattern { dev_match_type type; union match_pattern pattern; }; struct periph_match_result { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; }; typedef enum { DEV_RESULT_NOFLAG = 0x00, DEV_RESULT_UNCONFIGURED = 0x01 } dev_result_flags; struct device_match_result { path_id_t path_id; target_id_t target_id; lun_id_t target_lun; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; dev_result_flags flags; }; struct bus_match_result { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; }; union match_result { struct periph_match_result periph_result; struct device_match_result device_result; struct bus_match_result bus_result; }; struct dev_match_result { dev_match_type type; union match_result result; }; typedef enum { CAM_DEV_MATCH_LAST, CAM_DEV_MATCH_MORE, CAM_DEV_MATCH_LIST_CHANGED, CAM_DEV_MATCH_SIZE_ERROR, CAM_DEV_MATCH_ERROR } ccb_dev_match_status; typedef enum { CAM_DEV_POS_NONE = 0x000, CAM_DEV_POS_BUS = 0x001, CAM_DEV_POS_TARGET = 0x002, CAM_DEV_POS_DEVICE = 0x004, CAM_DEV_POS_PERIPH = 0x008, CAM_DEV_POS_PDPTR = 0x010, CAM_DEV_POS_TYPEMASK = 0xf00, CAM_DEV_POS_EDT = 0x100, CAM_DEV_POS_PDRV = 0x200 } dev_pos_type; struct ccb_dm_cookie { void *bus; void *target; void *device; void *periph; void *pdrv; }; struct ccb_dev_position { u_int generations[4]; #define CAM_BUS_GENERATION 0x00 #define CAM_TARGET_GENERATION 0x01 #define CAM_DEV_GENERATION 0x02 #define CAM_PERIPH_GENERATION 0x03 dev_pos_type position_type; struct ccb_dm_cookie cookie; }; struct ccb_dev_match { struct ccb_hdr ccb_h; ccb_dev_match_status status; u_int32_t num_patterns; u_int32_t pattern_buf_len; struct dev_match_pattern *patterns; u_int32_t num_matches; u_int32_t match_buf_len; struct dev_match_result 
*matches; struct ccb_dev_position pos; }; /* * Definitions for the path inquiry CCB fields. */ #define CAM_VERSION 0x19 /* Hex value for current version */ typedef enum { PI_MDP_ABLE = 0x80, /* Supports MDP message */ PI_WIDE_32 = 0x40, /* Supports 32 bit wide SCSI */ PI_WIDE_16 = 0x20, /* Supports 16 bit wide SCSI */ PI_SDTR_ABLE = 0x10, /* Supports SDTR message */ PI_LINKED_CDB = 0x08, /* Supports linked CDBs */ PI_SATAPM = 0x04, /* Supports SATA PM */ PI_TAG_ABLE = 0x02, /* Supports tag queue messages */ PI_SOFT_RST = 0x01 /* Supports soft reset alternative */ } pi_inqflag; typedef enum { PIT_PROCESSOR = 0x80, /* Target mode processor mode */ PIT_PHASE = 0x40, /* Target mode phase cog. mode */ PIT_DISCONNECT = 0x20, /* Disconnects supported in target mode */ PIT_TERM_IO = 0x10, /* Terminate I/O message supported in TM */ PIT_GRP_6 = 0x08, /* Group 6 commands supported */ PIT_GRP_7 = 0x04 /* Group 7 commands supported */ } pi_tmflag; typedef enum { PIM_ATA_EXT = 0x200,/* ATA requests can understand ata_ext requests */ PIM_EXTLUNS = 0x100,/* 64bit extended LUNs supported */ PIM_SCANHILO = 0x80, /* Bus scans from high ID to low ID */ PIM_NOREMOVE = 0x40, /* Removeable devices not included in scan */ PIM_NOINITIATOR = 0x20, /* Initiator role not supported. */ PIM_NOBUSRESET = 0x10, /* User has disabled initial BUS RESET */ PIM_NO_6_BYTE = 0x08, /* Do not send 6-byte commands */ PIM_SEQSCAN = 0x04, /* Do bus scans sequentially, not in parallel */ PIM_UNMAPPED = 0x02, PIM_NOSCAN = 0x01 /* SIM does its own scanning */ } pi_miscflag; /* Path Inquiry CCB */ struct ccb_pathinq_settings_spi { u_int8_t ppr_options; }; struct ccb_pathinq_settings_fc { u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int32_t port; /* 24 bit port id, if known */ u_int32_t bitrate; /* Mbps */ }; struct ccb_pathinq_settings_sas { u_int32_t bitrate; /* Mbps */ }; #define NVME_DEV_NAME_LEN 52 struct ccb_pathinq_settings_nvme { uint32_t nsid; /* Namespace ID for this path */ uint32_t domain; uint8_t bus; uint8_t slot; uint8_t function; uint8_t extra; char dev_name[NVME_DEV_NAME_LEN]; /* nvme controller dev name for this device */ }; _Static_assert(sizeof(struct ccb_pathinq_settings_nvme) == 64, "ccb_pathinq_settings_nvme too big"); #define PATHINQ_SETTINGS_SIZE 128 struct ccb_pathinq { struct ccb_hdr ccb_h; u_int8_t version_num; /* Version number for the SIM/HBA */ u_int8_t hba_inquiry; /* Mimic of INQ byte 7 for the HBA */ u_int16_t target_sprt; /* Flags for target mode support */ u_int32_t hba_misc; /* Misc HBA features */ u_int16_t hba_eng_cnt; /* HBA engine count */ /* Vendor Unique capabilities */ u_int8_t vuhba_flags[VUHBALEN]; u_int32_t max_target; /* Maximum supported Target */ u_int32_t max_lun; /* Maximum supported Lun */ u_int32_t async_flags; /* Installed Async handlers */ path_id_t hpath_id; /* Highest Path ID in the subsystem */ target_id_t initiator_id; /* ID of the HBA on the SCSI bus */ char sim_vid[SIM_IDLEN]; /* Vendor ID of the SIM */ char hba_vid[HBA_IDLEN]; /* Vendor ID of the HBA */ char dev_name[DEV_IDLEN];/* Device name for SIM */ u_int32_t unit_number; /* Unit number for SIM */ u_int32_t bus_id; /* Bus ID for SIM */ u_int32_t base_transfer_speed;/* Base bus speed in KB/sec */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { struct ccb_pathinq_settings_spi spi; struct ccb_pathinq_settings_fc fc; struct ccb_pathinq_settings_sas sas; struct ccb_pathinq_settings_nvme nvme; char 
ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE]; } xport_specific; u_int maxio; /* Max supported I/O size, in bytes. */ u_int16_t hba_vendor; /* HBA vendor ID */ u_int16_t hba_device; /* HBA device ID */ u_int16_t hba_subvendor; /* HBA subvendor ID */ u_int16_t hba_subdevice; /* HBA subdevice ID */ }; /* Path Statistics CCB */ struct ccb_pathstats { struct ccb_hdr ccb_h; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { SMP_FLAG_NONE = 0x00, SMP_FLAG_REQ_SG = 0x01, SMP_FLAG_RSP_SG = 0x02 } ccb_smp_pass_flags; /* * Serial Management Protocol CCB * XXX Currently the semantics for this CCB are that it is executed either * by the addressed device, or that device's parent (i.e. an expander for * any device on an expander) if the addressed device doesn't support SMP. * Later, once we have the ability to probe SMP-only devices and put them * in CAM's topology, the CCB will only be executed by the addressed device * if possible. */ struct ccb_smpio { struct ccb_hdr ccb_h; uint8_t *smp_request; int smp_request_len; uint16_t smp_request_sglist_cnt; uint8_t *smp_response; int smp_response_len; uint16_t smp_response_sglist_cnt; ccb_smp_pass_flags flags; }; typedef union { u_int8_t *sense_ptr; /* * Pointer to storage * for sense information */ /* Storage Area for sense information */ struct scsi_sense_data sense_buf; } sense_t; typedef union { u_int8_t *cdb_ptr; /* Pointer to the CDB bytes to send */ /* Area for the CDB send */ u_int8_t cdb_bytes[IOCDBLEN]; } cdb_t; /* * SCSI I/O Request CCB used for the XPT_SCSI_IO and XPT_CONT_TARGET_IO * function codes. */ struct ccb_scsiio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ u_int8_t *req_map; /* Ptr to mapping info */ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ /* Autosense storage */ struct scsi_sense_data sense_data; u_int8_t sense_len; /* Number of bytes to autosense */ u_int8_t cdb_len; /* Number of bytes for the CDB */ u_int16_t sglist_cnt; /* Number of SG list entries */ u_int8_t scsi_status; /* Returned SCSI status */ u_int8_t sense_resid; /* Autosense resid length: 2's comp */ u_int32_t resid; /* Transfer residual length: 2's comp */ cdb_t cdb_io; /* Union for CDB bytes/pointer */ u_int8_t *msg_ptr; /* Pointer to the message buffer */ u_int16_t msg_len; /* Number of bytes for the Message */ u_int8_t tag_action; /* What to do for tag queueing */ /* * The tag action should be either the define below (to send a * non-tagged transaction) or one of the defined scsi tag messages * from scsi_message.h. */ #define CAM_TAG_ACTION_NONE 0x00 uint8_t priority; /* Command priority for SIMPLE tag */ u_int tag_id; /* tag id from initator (target mode) */ u_int init_id; /* initiator id of who selected */ #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) struct bio *bio; /* Associated bio */ #endif }; static __inline uint8_t * scsiio_cdb_ptr(struct ccb_scsiio *ccb) { return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ? ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes); } /* * ATA I/O Request CCB used for the XPT_ATA_IO function code. 
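 *
 * Illustrative sketch (editorial addition, not part of the upstream
 * comment): the header fields are normally filled with cam_fill_ataio(),
 * defined later in this file, after which the command registers are built
 * with a helper from ata_all.h, e.g.:
 *
 *	cam_fill_ataio(&ccb->ataio, 1, mydone, CAM_DIR_IN, 0,
 *	    (u_int8_t *)buf, len, 30 * 1000);
 *	ata_36bit_cmd(&ccb->ataio, ATA_READ_DMA, 0, lba, count);
 *
 * "mydone", "buf", "len", "lba" and "count" are caller-supplied names used
 * only for this example.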
*/ struct ccb_ataio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct ata_cmd cmd; /* ATA command register set */ struct ata_res res; /* ATA result register set */ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ u_int32_t resid; /* Transfer residual length: 2's comp */ u_int8_t ata_flags; /* Flags for the rest of the buffer */ #define ATA_FLAG_AUX 0x1 #define ATA_FLAG_ICC 0x2 uint8_t icc; /* Isochronous Command Completion */ uint32_t aux; uint32_t unused; }; /* * MMC I/O Request CCB used for the XPT_MMC_IO function code. */ struct ccb_mmcio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct mmc_command cmd; struct mmc_command stop; }; struct ccb_accept_tio { struct ccb_hdr ccb_h; cdb_t cdb_io; /* Union for CDB bytes/pointer */ u_int8_t cdb_len; /* Number of bytes for the CDB */ u_int8_t tag_action; /* What to do for tag queueing */ u_int8_t sense_len; /* Number of bytes of Sense Data */ uint8_t priority; /* Command priority for SIMPLE tag */ u_int tag_id; /* tag id from initator (target mode) */ u_int init_id; /* initiator id of who selected */ struct scsi_sense_data sense_data; }; static __inline uint8_t * atio_cdb_ptr(struct ccb_accept_tio *ccb) { return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ? ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes); } /* Release SIM Queue */ struct ccb_relsim { struct ccb_hdr ccb_h; u_int32_t release_flags; #define RELSIM_ADJUST_OPENINGS 0x01 #define RELSIM_RELEASE_AFTER_TIMEOUT 0x02 #define RELSIM_RELEASE_AFTER_CMDCMPLT 0x04 #define RELSIM_RELEASE_AFTER_QEMPTY 0x08 u_int32_t openings; u_int32_t release_timeout; /* Abstract argument. */ u_int32_t qfrozen_cnt; }; /* * NVMe I/O Request CCB used for the XPT_NVME_IO and XPT_NVME_ADMIN function codes. */ struct ccb_nvmeio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct nvme_command cmd; /* NVME command, per NVME standard */ struct nvme_completion cpl; /* NVME completion, per NVME standard */ uint8_t *data_ptr; /* Ptr to the data buf/SG list */ uint32_t dxfer_len; /* Data transfer length */ uint16_t sglist_cnt; /* Number of SG list entries */ uint16_t unused; /* padding for removed uint32_t */ }; /* * Definitions for the asynchronous callback CCB fields. */ typedef enum { AC_UNIT_ATTENTION = 0x4000,/* Device reported UNIT ATTENTION */ AC_ADVINFO_CHANGED = 0x2000,/* Advance info might have changes */ AC_CONTRACT = 0x1000,/* A contractual callback */ AC_GETDEV_CHANGED = 0x800,/* Getdev info might have changed */ AC_INQ_CHANGED = 0x400,/* Inquiry info might have changed */ AC_TRANSFER_NEG = 0x200,/* New transfer settings in effect */ AC_LOST_DEVICE = 0x100,/* A device went away */ AC_FOUND_DEVICE = 0x080,/* A new device was found */ AC_PATH_DEREGISTERED = 0x040,/* A path has de-registered */ AC_PATH_REGISTERED = 0x020,/* A new path has been registered */ AC_SENT_BDR = 0x010,/* A BDR message was sent to target */ AC_SCSI_AEN = 0x008,/* A SCSI AEN has been received */ AC_UNSOL_RESEL = 0x002,/* Unsolicited reselection occurred */ AC_BUS_RESET = 0x001 /* A SCSI bus reset occurred */ } ac_code; typedef void ac_callback_t (void *softc, u_int32_t code, struct cam_path *path, void *args); /* * Generic Asynchronous callbacks. * * Generic arguments passed bac which are then interpreted between a per-system * contract number. 
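 *
 * Illustrative sketch (editorial addition): a consumer registers a handler
 * of the ac_callback_t type above (typically via xpt_register_async() or an
 * XPT_SASYNC_CB CCB) and dispatches on the async code, e.g.:
 *
 *	static void
 *	mydrv_async(void *softc, u_int32_t code, struct cam_path *path,
 *	    void *args)
 *	{
 *		if (code == AC_CONTRACT) {
 *			struct ac_contract *ct = args;
 *
 *			if (ct->contract_number == AC_CONTRACT_DEV_CHG)
 *				handle((struct ac_device_changed *)
 *				    ct->contract_data);
 *		}
 *	}
 *
 * "mydrv_async" and "handle" are caller-supplied names for this example.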
*/ #define AC_CONTRACT_DATA_MAX (128 - sizeof (u_int64_t)) struct ac_contract { u_int64_t contract_number; u_int8_t contract_data[AC_CONTRACT_DATA_MAX]; }; #define AC_CONTRACT_DEV_CHG 1 struct ac_device_changed { u_int64_t wwpn; u_int32_t port; target_id_t target; u_int8_t arrived; }; /* Set Asynchronous Callback CCB */ struct ccb_setasync { struct ccb_hdr ccb_h; u_int32_t event_enable; /* Async Event enables */ ac_callback_t *callback; void *callback_arg; }; /* Set Device Type CCB */ struct ccb_setdev { struct ccb_hdr ccb_h; u_int8_t dev_type; /* Value for dev type field in EDT */ }; /* SCSI Control Functions */ /* Abort XPT request CCB */ struct ccb_abort { struct ccb_hdr ccb_h; union ccb *abort_ccb; /* Pointer to CCB to abort */ }; /* Reset SCSI Bus CCB */ struct ccb_resetbus { struct ccb_hdr ccb_h; }; /* Reset SCSI Device CCB */ struct ccb_resetdev { struct ccb_hdr ccb_h; }; /* Terminate I/O Process Request CCB */ struct ccb_termio { struct ccb_hdr ccb_h; union ccb *termio_ccb; /* Pointer to CCB to terminate */ }; typedef enum { CTS_TYPE_CURRENT_SETTINGS, CTS_TYPE_USER_SETTINGS } cts_type; struct ccb_trans_settings_scsi { u_int valid; /* Which fields to honor */ #define CTS_SCSI_VALID_TQ 0x01 u_int flags; #define CTS_SCSI_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_ata { u_int valid; /* Which fields to honor */ #define CTS_ATA_VALID_TQ 0x01 u_int flags; #define CTS_ATA_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_spi { u_int valid; /* Which fields to honor */ #define CTS_SPI_VALID_SYNC_RATE 0x01 #define CTS_SPI_VALID_SYNC_OFFSET 0x02 #define CTS_SPI_VALID_BUS_WIDTH 0x04 #define CTS_SPI_VALID_DISC 0x08 #define CTS_SPI_VALID_PPR_OPTIONS 0x10 u_int flags; #define CTS_SPI_FLAGS_DISC_ENB 0x01 u_int sync_period; u_int sync_offset; u_int bus_width; u_int ppr_options; }; struct ccb_trans_settings_fc { u_int valid; /* Which fields to honor */ #define CTS_FC_VALID_WWNN 0x8000 #define CTS_FC_VALID_WWPN 0x4000 #define CTS_FC_VALID_PORT 0x2000 #define CTS_FC_VALID_SPEED 0x1000 u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int32_t port; /* 24 bit port id, if known */ u_int32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_sas { u_int valid; /* Which fields to honor */ #define CTS_SAS_VALID_SPEED 0x1000 u_int32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_pata { u_int valid; /* Which fields to honor */ #define CTS_ATA_VALID_MODE 0x01 #define CTS_ATA_VALID_BYTECOUNT 0x02 #define CTS_ATA_VALID_ATAPI 0x20 #define CTS_ATA_VALID_CAPS 0x40 int mode; /* Mode */ u_int bytecount; /* Length of PIO transaction */ u_int atapi; /* Length of ATAPI CDB */ u_int caps; /* Device and host SATA caps. */ #define CTS_ATA_CAPS_H 0x0000ffff #define CTS_ATA_CAPS_H_DMA48 0x00000001 /* 48-bit DMA */ #define CTS_ATA_CAPS_D 0xffff0000 }; struct ccb_trans_settings_sata { u_int valid; /* Which fields to honor */ #define CTS_SATA_VALID_MODE 0x01 #define CTS_SATA_VALID_BYTECOUNT 0x02 #define CTS_SATA_VALID_REVISION 0x04 #define CTS_SATA_VALID_PM 0x08 #define CTS_SATA_VALID_TAGS 0x10 #define CTS_SATA_VALID_ATAPI 0x20 #define CTS_SATA_VALID_CAPS 0x40 int mode; /* Legacy PATA mode */ u_int bytecount; /* Length of PIO transaction */ int revision; /* SATA revision */ u_int pm_present; /* PM is present (XPT->SIM) */ u_int tags; /* Number of allowed tags */ u_int atapi; /* Length of ATAPI CDB */ u_int caps; /* Device and host SATA caps. 
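 *
 * Editorial note (not part of the upstream comment): per the masks below,
 * the low 16 bits carry host capabilities and the high 16 bits device
 * capabilities, so a caller might test, e.g.:
 *
 *	if ((cts.xport_specific.sata.caps & CTS_SATA_CAPS_H_PMREQ) != 0)
 *		pm_request_ok = 1;
 *
 * where "pm_request_ok" is a caller-side flag used only for illustration.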
*/ #define CTS_SATA_CAPS_H 0x0000ffff #define CTS_SATA_CAPS_H_PMREQ 0x00000001 #define CTS_SATA_CAPS_H_APST 0x00000002 #define CTS_SATA_CAPS_H_DMAAA 0x00000010 /* Auto-activation */ #define CTS_SATA_CAPS_H_AN 0x00000020 /* Async. notification */ #define CTS_SATA_CAPS_D 0xffff0000 #define CTS_SATA_CAPS_D_PMREQ 0x00010000 #define CTS_SATA_CAPS_D_APST 0x00020000 }; struct ccb_trans_settings_nvme { u_int valid; /* Which fields to honor */ #define CTS_NVME_VALID_SPEC 0x01 #define CTS_NVME_VALID_CAPS 0x02 #define CTS_NVME_VALID_LINK 0x04 uint32_t spec; /* NVMe spec implemented -- same as vs register */ uint32_t max_xfer; /* Max transfer size (0 -> unlimited */ uint32_t caps; uint8_t lanes; /* Number of PCIe lanes */ uint8_t speed; /* PCIe generation for each lane */ uint8_t max_lanes; /* Number of PCIe lanes */ uint8_t max_speed; /* PCIe generation for each lane */ }; #include struct ccb_trans_settings_mmc { struct mmc_ios ios; #define MMC_CLK (1 << 1) #define MMC_VDD (1 << 2) #define MMC_CS (1 << 3) #define MMC_BW (1 << 4) #define MMC_PM (1 << 5) #define MMC_BT (1 << 6) #define MMC_BM (1 << 7) #define MMC_VCCQ (1 << 8) uint32_t ios_valid; /* The folowing is used only for GET_TRAN_SETTINGS */ uint32_t host_ocr; int host_f_min; int host_f_max; /* Copied from sys/dev/mmc/bridge.h */ #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can do 4-bit data transfers */ #define MMC_CAP_8_BIT_DATA (1 << 1) /* Can do 8-bit data transfers */ #define MMC_CAP_HSPEED (1 << 2) /* Can do High Speed transfers */ #define MMC_CAP_BOOT_NOACC (1 << 4) /* Cannot access boot partitions */ #define MMC_CAP_WAIT_WHILE_BUSY (1 << 5) /* Host waits for busy responses */ #define MMC_CAP_UHS_SDR12 (1 << 6) /* Can do UHS SDR12 */ #define MMC_CAP_UHS_SDR25 (1 << 7) /* Can do UHS SDR25 */ #define MMC_CAP_UHS_SDR50 (1 << 8) /* Can do UHS SDR50 */ #define MMC_CAP_UHS_SDR104 (1 << 9) /* Can do UHS SDR104 */ #define MMC_CAP_UHS_DDR50 (1 << 10) /* Can do UHS DDR50 */ #define MMC_CAP_MMC_DDR52_120 (1 << 11) /* Can do eMMC DDR52 at 1.2 V */ #define MMC_CAP_MMC_DDR52_180 (1 << 12) /* Can do eMMC DDR52 at 1.8 V */ #define MMC_CAP_MMC_DDR52 (MMC_CAP_MMC_DDR52_120 | MMC_CAP_MMC_DDR52_180) #define MMC_CAP_MMC_HS200_120 (1 << 13) /* Can do eMMC HS200 at 1.2 V */ #define MMC_CAP_MMC_HS200_180 (1 << 14) /* Can do eMMC HS200 at 1.8 V */ #define MMC_CAP_MMC_HS200 (MMC_CAP_MMC_HS200_120| MMC_CAP_MMC_HS200_180) #define MMC_CAP_MMC_HS400_120 (1 << 15) /* Can do eMMC HS400 at 1.2 V */ #define MMC_CAP_MMC_HS400_180 (1 << 16) /* Can do eMMC HS400 at 1.8 V */ #define MMC_CAP_MMC_HS400 (MMC_CAP_MMC_HS400_120 | MMC_CAP_MMC_HS400_180) #define MMC_CAP_MMC_HSX00_120 (MMC_CAP_MMC_HS200_120 | MMC_CAP_MMC_HS400_120) #define MMC_CAP_MMC_ENH_STROBE (1 << 17) /* Can do eMMC Enhanced Strobe */ #define MMC_CAP_SIGNALING_120 (1 << 18) /* Can do signaling at 1.2 V */ #define MMC_CAP_SIGNALING_180 (1 << 19) /* Can do signaling at 1.8 V */ #define MMC_CAP_SIGNALING_330 (1 << 20) /* Can do signaling at 3.3 V */ #define MMC_CAP_DRIVER_TYPE_A (1 << 21) /* Can do Driver Type A */ #define MMC_CAP_DRIVER_TYPE_C (1 << 22) /* Can do Driver Type C */ #define MMC_CAP_DRIVER_TYPE_D (1 << 23) /* Can do Driver Type D */ uint32_t host_caps; uint32_t host_max_data; }; /* Get/Set transfer rate/width/disconnection/tag queueing settings */ struct ccb_trans_settings { struct ccb_hdr ccb_h; cts_type type; /* Current or User settings */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_ata 
ata; struct ccb_trans_settings_scsi scsi; struct ccb_trans_settings_nvme nvme; struct ccb_trans_settings_mmc mmc; } proto_specific; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_spi spi; struct ccb_trans_settings_fc fc; struct ccb_trans_settings_sas sas; struct ccb_trans_settings_pata ata; struct ccb_trans_settings_sata sata; struct ccb_trans_settings_nvme nvme; } xport_specific; }; /* * Calculate the geometry parameters for a device * give the block size and volume size in blocks. */ struct ccb_calc_geometry { struct ccb_hdr ccb_h; u_int32_t block_size; u_int64_t volume_size; u_int32_t cylinders; u_int8_t heads; u_int8_t secs_per_track; }; /* * Set or get SIM (and transport) specific knobs */ #define KNOB_VALID_ADDRESS 0x1 #define KNOB_VALID_ROLE 0x2 #define KNOB_ROLE_NONE 0x0 #define KNOB_ROLE_INITIATOR 0x1 #define KNOB_ROLE_TARGET 0x2 #define KNOB_ROLE_BOTH 0x3 struct ccb_sim_knob_settings_spi { u_int valid; u_int initiator_id; u_int role; }; struct ccb_sim_knob_settings_fc { u_int valid; u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int role; }; struct ccb_sim_knob_settings_sas { u_int valid; u_int64_t wwnn; /* world wide node name */ u_int role; }; #define KNOB_SETTINGS_SIZE 128 struct ccb_sim_knob { struct ccb_hdr ccb_h; union { u_int valid; /* Which fields to honor */ struct ccb_sim_knob_settings_spi spi; struct ccb_sim_knob_settings_fc fc; struct ccb_sim_knob_settings_sas sas; char pad[KNOB_SETTINGS_SIZE]; } xport_specific; }; /* * Rescan the given bus, or bus/target/lun */ struct ccb_rescan { struct ccb_hdr ccb_h; cam_flags flags; }; /* * Turn on debugging for the given bus, bus/target, or bus/target/lun. */ struct ccb_debug { struct ccb_hdr ccb_h; cam_debug_flags flags; }; /* Target mode structures. */ struct ccb_en_lun { struct ccb_hdr ccb_h; u_int16_t grp6_len; /* Group 6 VU CDB length */ u_int16_t grp7_len; /* Group 7 VU CDB length */ u_int8_t enable; }; /* old, barely used immediate notify, binary compatibility */ struct ccb_immed_notify { struct ccb_hdr ccb_h; struct scsi_sense_data sense_data; u_int8_t sense_len; /* Number of bytes in sense buffer */ u_int8_t initiator_id; /* Id of initiator that selected */ u_int8_t message_args[7]; /* Message Arguments */ }; struct ccb_notify_ack { struct ccb_hdr ccb_h; u_int16_t seq_id; /* Sequence identifier */ u_int8_t event; /* Event flags */ }; struct ccb_immediate_notify { struct ccb_hdr ccb_h; u_int tag_id; /* Tag for immediate notify */ u_int seq_id; /* Tag for target of notify */ u_int initiator_id; /* Initiator Identifier */ u_int arg; /* Function specific */ }; struct ccb_notify_acknowledge { struct ccb_hdr ccb_h; u_int tag_id; /* Tag for immediate notify */ u_int seq_id; /* Tar for target of notify */ u_int initiator_id; /* Initiator Identifier */ u_int arg; /* Response information */ /* * Lower byte of arg is one of RESPONSE CODE values defined below * (subset of response codes from SPL-4 and FCP-4 specifications), * upper 3 bytes is code-specific ADDITIONAL RESPONSE INFORMATION. */ #define CAM_RSP_TMF_COMPLETE 0x00 #define CAM_RSP_TMF_REJECTED 0x04 #define CAM_RSP_TMF_FAILED 0x05 #define CAM_RSP_TMF_SUCCEEDED 0x08 #define CAM_RSP_TMF_INCORRECT_LUN 0x09 }; /* HBA engine structures. 
*/ typedef enum { EIT_BUFFER, /* Engine type: buffer memory */ EIT_LOSSLESS, /* Engine type: lossless compression */ EIT_LOSSY, /* Engine type: lossy compression */ EIT_ENCRYPT /* Engine type: encryption */ } ei_type; typedef enum { EAD_VUNIQUE, /* Engine algorithm ID: vendor unique */ EAD_LZ1V1, /* Engine algorithm ID: LZ1 var.1 */ EAD_LZ2V1, /* Engine algorithm ID: LZ2 var.1 */ EAD_LZ2V2 /* Engine algorithm ID: LZ2 var.2 */ } ei_algo; struct ccb_eng_inq { struct ccb_hdr ccb_h; u_int16_t eng_num; /* The engine number for this inquiry */ ei_type eng_type; /* Returned engine type */ ei_algo eng_algo; /* Returned engine algorithm type */ u_int32_t eng_memeory; /* Returned engine memory size */ }; struct ccb_eng_exec { /* This structure must match SCSIIO size */ struct ccb_hdr ccb_h; u_int8_t *pdrv_ptr; /* Ptr used by the peripheral driver */ u_int8_t *req_map; /* Ptr for mapping info on the req. */ u_int8_t *data_ptr; /* Pointer to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ u_int8_t *engdata_ptr; /* Pointer to the engine buffer data */ u_int16_t sglist_cnt; /* Num of scatter gather list entries */ u_int32_t dmax_len; /* Destination data maximum length */ u_int32_t dest_len; /* Destination data length */ int32_t src_resid; /* Source residual length: 2's comp */ u_int32_t timeout; /* Timeout value */ u_int16_t eng_num; /* Engine number for this request */ u_int16_t vu_flags; /* Vendor Unique flags */ }; /* * Definitions for the timeout field in the SCSI I/O CCB. */ #define CAM_TIME_DEFAULT 0x00000000 /* Use SIM default value */ #define CAM_TIME_INFINITY 0xFFFFFFFF /* Infinite timeout */ #define CAM_SUCCESS 0 /* For signaling general success */ -#define CAM_FAILURE 1 /* For signaling general failure */ #define CAM_FALSE 0 #define CAM_TRUE 1 #define XPT_CCB_INVALID -1 /* for signaling a bad CCB to free */ /* * CCB for working with advanced device information. This operates in a fashion * similar to XPT_GDEV_TYPE. Specify the target in ccb_h, the buffer * type requested, and provide a buffer size/buffer to write to. If the * buffer is too small, provsiz will be larger than bufsiz. */ struct ccb_dev_advinfo { struct ccb_hdr ccb_h; uint32_t flags; #define CDAI_FLAG_NONE 0x0 /* No flags set */ #define CDAI_FLAG_STORE 0x1 /* If set, action becomes store */ uint32_t buftype; /* IN: Type of data being requested */ /* NB: buftype is interpreted on a per-transport basis */ #define CDAI_TYPE_SCSI_DEVID 1 #define CDAI_TYPE_SERIAL_NUM 2 #define CDAI_TYPE_PHYS_PATH 3 #define CDAI_TYPE_RCAPLONG 4 #define CDAI_TYPE_EXT_INQ 5 #define CDAI_TYPE_NVME_CNTRL 6 /* NVMe Identify Controller data */ #define CDAI_TYPE_NVME_NS 7 /* NVMe Identify Namespace data */ #define CDAI_TYPE_MMC_PARAMS 8 /* MMC/SD ident */ off_t bufsiz; /* IN: Size of external buffer */ #define CAM_SCSI_DEVID_MAXLEN 65536 /* length in buffer is an uint16_t */ off_t provsiz; /* OUT: Size required/used */ uint8_t *buf; /* IN/OUT: Buffer for requested data */ }; /* * CCB for sending async events */ struct ccb_async { struct ccb_hdr ccb_h; uint32_t async_code; off_t async_arg_size; void *async_arg_ptr; }; /* * Union of all CCB types for kernel space allocation. This union should * never be used for manipulating CCBs - its only use is for the allocation * and deallocation of raw CCB space and is the return type of xpt_ccb_alloc * and the argument to xpt_ccb_free. 
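 *
 * Illustrative sketch (editorial addition): in practice a peripheral driver
 * obtains a ready-to-use CCB through cam_periph_getccb() and then works with
 * the member that matches the function code, e.g. for a synchronous SCSI
 * TEST UNIT READY:
 *
 *	union ccb *ccb;
 *	int error;
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	scsi_test_unit_ready(&ccb->csio, 5, NULL, MSG_SIMPLE_Q_TAG,
 *	    SSD_FULL_SIZE, 60 * 1000);
 *	error = cam_periph_runccb(ccb, NULL, 0, 0, NULL);
 *	xpt_release_ccb(ccb);
 *
 * "periph" is caller-supplied; the helpers come from the CAM peripheral
 * driver API and scsi_all.h, and the retry count and timeout shown are
 * arbitrary example values.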
*/ union ccb { struct ccb_hdr ccb_h; /* For convenience */ struct ccb_scsiio csio; struct ccb_getdev cgd; struct ccb_getdevlist cgdl; struct ccb_pathinq cpi; struct ccb_relsim crs; struct ccb_setasync csa; struct ccb_setdev csd; struct ccb_pathstats cpis; struct ccb_getdevstats cgds; struct ccb_dev_match cdm; struct ccb_trans_settings cts; struct ccb_calc_geometry ccg; struct ccb_sim_knob knob; struct ccb_abort cab; struct ccb_resetbus crb; struct ccb_resetdev crd; struct ccb_termio tio; struct ccb_accept_tio atio; struct ccb_scsiio ctio; struct ccb_en_lun cel; struct ccb_immed_notify cin; struct ccb_notify_ack cna; struct ccb_immediate_notify cin1; struct ccb_notify_acknowledge cna2; struct ccb_eng_inq cei; struct ccb_eng_exec cee; struct ccb_smpio smpio; struct ccb_rescan crcn; struct ccb_debug cdbg; struct ccb_ataio ataio; struct ccb_dev_advinfo cdai; struct ccb_async casync; struct ccb_nvmeio nvmeio; struct ccb_mmcio mmcio; }; #define CCB_CLEAR_ALL_EXCEPT_HDR(ccbp) \ bzero((char *)(ccbp) + sizeof((ccbp)->ccb_h), \ sizeof(*(ccbp)) - sizeof((ccbp)->ccb_h)) __BEGIN_DECLS static __inline void cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int8_t cdb_len, u_int32_t timeout) { csio->ccb_h.func_code = XPT_SCSI_IO; csio->ccb_h.flags = flags; csio->ccb_h.xflags = 0; csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->sense_len = sense_len; csio->cdb_len = cdb_len; csio->tag_action = tag_action; csio->priority = 0; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) csio->bio = NULL; #endif } static __inline void cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int tag_id, u_int init_id, u_int scsi_status, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { csio->ccb_h.func_code = XPT_CONT_TARGET_IO; csio->ccb_h.flags = flags; csio->ccb_h.xflags = 0; csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->scsi_status = scsi_status; csio->tag_action = tag_action; csio->priority = 0; csio->tag_id = tag_id; csio->init_id = init_id; } static __inline void cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action __unused, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { ataio->ccb_h.func_code = XPT_ATA_IO; ataio->ccb_h.flags = flags; ataio->ccb_h.retry_count = retries; ataio->ccb_h.cbfcnp = cbfcnp; ataio->ccb_h.timeout = timeout; ataio->data_ptr = data_ptr; ataio->dxfer_len = dxfer_len; ataio->ata_flags = 0; } static __inline void cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint8_t *smp_request, int smp_request_len, uint8_t *smp_response, int smp_response_len, uint32_t timeout) { #ifdef _KERNEL KASSERT((flags & CAM_DIR_MASK) == CAM_DIR_BOTH, ("direction != CAM_DIR_BOTH")); KASSERT((smp_request != NULL) && (smp_response != NULL), ("need valid request and response buffers")); KASSERT((smp_request_len != 0) && (smp_response_len != 0), ("need non-zero request and response lengths")); #endif /*_KERNEL*/ smpio->ccb_h.func_code = XPT_SMP_IO; smpio->ccb_h.flags = 
flags; smpio->ccb_h.retry_count = retries; smpio->ccb_h.cbfcnp = cbfcnp; smpio->ccb_h.timeout = timeout; smpio->smp_request = smp_request; smpio->smp_request_len = smp_request_len; smpio->smp_response = smp_response; smpio->smp_response_len = smp_response_len; } static __inline void cam_fill_mmcio(struct ccb_mmcio *mmcio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint32_t mmc_opcode, uint32_t mmc_arg, uint32_t mmc_flags, struct mmc_data *mmc_d, uint32_t timeout) { mmcio->ccb_h.func_code = XPT_MMC_IO; mmcio->ccb_h.flags = flags; mmcio->ccb_h.retry_count = retries; mmcio->ccb_h.cbfcnp = cbfcnp; mmcio->ccb_h.timeout = timeout; mmcio->cmd.opcode = mmc_opcode; mmcio->cmd.arg = mmc_arg; mmcio->cmd.flags = mmc_flags; mmcio->stop.opcode = 0; mmcio->stop.arg = 0; mmcio->stop.flags = 0; if (mmc_d != NULL) { mmcio->cmd.data = mmc_d; } else mmcio->cmd.data = NULL; mmcio->cmd.resp[0] = 0; mmcio->cmd.resp[1] = 0; mmcio->cmd.resp[2] = 0; mmcio->cmd.resp[3] = 0; } static __inline void cam_set_ccbstatus(union ccb *ccb, cam_status status) { ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= status; } static __inline cam_status cam_ccb_status(union ccb *ccb) { return ((cam_status)(ccb->ccb_h.status & CAM_STATUS_MASK)); } void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended); static __inline void cam_fill_nvmeio(struct ccb_nvmeio *nvmeio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { nvmeio->ccb_h.func_code = XPT_NVME_IO; nvmeio->ccb_h.flags = flags; nvmeio->ccb_h.retry_count = retries; nvmeio->ccb_h.cbfcnp = cbfcnp; nvmeio->ccb_h.timeout = timeout; nvmeio->data_ptr = data_ptr; nvmeio->dxfer_len = dxfer_len; } static __inline void cam_fill_nvmeadmin(struct ccb_nvmeio *nvmeio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { nvmeio->ccb_h.func_code = XPT_NVME_ADMIN; nvmeio->ccb_h.flags = flags; nvmeio->ccb_h.retry_count = retries; nvmeio->ccb_h.cbfcnp = cbfcnp; nvmeio->ccb_h.timeout = timeout; nvmeio->data_ptr = data_ptr; nvmeio->dxfer_len = dxfer_len; } __END_DECLS #endif /* _CAM_CAM_CCB_H */ diff --git a/sys/cam/cam_xpt.c b/sys/cam/cam_xpt.c index ee216451bd6d..016f0e6a38be 100644 --- a/sys/cam/cam_xpt.c +++ b/sys/cam/cam_xpt.c @@ -1,5658 +1,5658 @@ /*- * Implementation of the Common Access Method Transport (XPT) layer. * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_printf.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for xpt_print below */ #include "opt_cam.h" /* Wild guess based on not wanting to grow the stack too much */ #define XPT_PRINT_MAXLEN 512 #ifdef PRINTF_BUFR_SIZE #define XPT_PRINT_LEN PRINTF_BUFR_SIZE #else #define XPT_PRINT_LEN 128 #endif _Static_assert(XPT_PRINT_LEN <= XPT_PRINT_MAXLEN, "XPT_PRINT_LEN is too large"); /* * This is the maximum number of high powered commands (e.g. start unit) * that can be outstanding at a particular time. */ #ifndef CAM_MAX_HIGHPOWER #define CAM_MAX_HIGHPOWER 4 #endif /* Datastructures internal to the xpt layer */ MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers"); MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices"); MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs"); MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths"); struct xpt_softc { uint32_t xpt_generation; /* number of high powered commands that can go through right now */ struct mtx xpt_highpower_lock; STAILQ_HEAD(highpowerlist, cam_ed) highpowerq; int num_highpower; /* queue for handling async rescan requests. */ TAILQ_HEAD(, ccb_hdr) ccb_scanq; int buses_to_config; int buses_config_done; int announce_nosbuf; /* * Registered buses * * N.B., "busses" is an archaic spelling of "buses". In new code * "buses" is preferred. 
*/ TAILQ_HEAD(,cam_eb) xpt_busses; u_int bus_generation; int boot_delay; struct callout boot_callout; struct task boot_task; struct root_hold_token xpt_rootmount; struct mtx xpt_topo_lock; struct taskqueue *xpt_taskq; }; typedef enum { DM_RET_COPY = 0x01, DM_RET_FLAG_MASK = 0x0f, DM_RET_NONE = 0x00, DM_RET_STOP = 0x10, DM_RET_DESCEND = 0x20, DM_RET_ERROR = 0x30, DM_RET_ACTION_MASK = 0xf0 } dev_match_ret; typedef enum { XPT_DEPTH_BUS, XPT_DEPTH_TARGET, XPT_DEPTH_DEVICE, XPT_DEPTH_PERIPH } xpt_traverse_depth; struct xpt_traverse_config { xpt_traverse_depth depth; void *tr_func; void *tr_arg; }; typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg); typedef int xpt_targetfunc_t (struct cam_et *target, void *arg); typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg); typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg); typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg); /* Transport layer configuration information */ static struct xpt_softc xsoftc; MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF); SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN, &xsoftc.boot_delay, 0, "Bus registration wait time"); SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD, &xsoftc.xpt_generation, 0, "CAM peripheral generation count"); SYSCTL_INT(_kern_cam, OID_AUTO, announce_nosbuf, CTLFLAG_RWTUN, &xsoftc.announce_nosbuf, 0, "Don't use sbuf for announcements"); struct cam_doneq { struct mtx_padalign cam_doneq_mtx; STAILQ_HEAD(, ccb_hdr) cam_doneq; int cam_doneq_sleep; }; static struct cam_doneq cam_doneqs[MAXCPU]; static u_int __read_mostly cam_num_doneqs; static struct proc *cam_proc; static struct cam_doneq cam_async; SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN, &cam_num_doneqs, 0, "Number of completion queues/threads"); struct cam_periph *xpt_periph; static periph_init_t xpt_periph_init; static struct periph_driver xpt_driver = { xpt_periph_init, "xpt", TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(xpt, xpt_driver); static d_open_t xptopen; static d_close_t xptclose; static d_ioctl_t xptioctl; static d_ioctl_t xptdoioctl; static struct cdevsw xpt_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = xptopen, .d_close = xptclose, .d_ioctl = xptioctl, .d_name = "xpt", }; /* Storage for debugging datastructures */ struct cam_path *cam_dpath; u_int32_t __read_mostly cam_dflags = CAM_DEBUG_FLAGS; SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN, &cam_dflags, 0, "Enabled debug flags"); u_int32_t cam_debug_delay = CAM_DEBUG_DELAY; SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN, &cam_debug_delay, 0, "Delay in us after each debug message"); /* Our boot-time initialization hook */ static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *); static moduledata_t cam_moduledata = { "cam", cam_module_event_handler, NULL }; static int xpt_init(void *); DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND); MODULE_VERSION(cam, 1); static void xpt_async_bcast(struct async_list *async_head, u_int32_t async_code, struct cam_path *path, void *async_arg); static path_id_t xptnextfreepathid(void); static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus); static union ccb *xpt_get_ccb(struct cam_periph *periph); static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph); static void xpt_run_allocq(struct cam_periph *periph, int sleep); static void xpt_run_allocq_task(void *context, int 
pending); static void xpt_run_devq(struct cam_devq *devq); static callout_func_t xpt_release_devq_timeout; static void xpt_acquire_bus(struct cam_eb *bus); static void xpt_release_bus(struct cam_eb *bus); static uint32_t xpt_freeze_devq_device(struct cam_ed *dev, u_int count); static int xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue); static struct cam_et* xpt_alloc_target(struct cam_eb *bus, target_id_t target_id); static void xpt_acquire_target(struct cam_et *target); static void xpt_release_target(struct cam_et *target); static struct cam_eb* xpt_find_bus(path_id_t path_id); static struct cam_et* xpt_find_target(struct cam_eb *bus, target_id_t target_id); static struct cam_ed* xpt_find_device(struct cam_et *target, lun_id_t lun_id); static void xpt_config(void *arg); static void xpt_hold_boot_locked(void); static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo, u_int32_t new_priority); static xpt_devicefunc_t xptpassannouncefunc; static void xptaction(struct cam_sim *sim, union ccb *work_ccb); static void xptpoll(struct cam_sim *sim); static void camisr_runqueue(void); static void xpt_done_process(struct ccb_hdr *ccb_h); static void xpt_done_td(void *); static void xpt_async_td(void *); static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_eb *bus); static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_ed *device); static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_periph *periph); static xpt_busfunc_t xptedtbusfunc; static xpt_targetfunc_t xptedttargetfunc; static xpt_devicefunc_t xptedtdevicefunc; static xpt_periphfunc_t xptedtperiphfunc; static xpt_pdrvfunc_t xptplistpdrvfunc; static xpt_periphfunc_t xptplistperiphfunc; static int xptedtmatch(struct ccb_dev_match *cdm); static int xptperiphlistmatch(struct ccb_dev_match *cdm); static int xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg); static int xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, xpt_targetfunc_t *tr_func, void *arg); static int xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, xpt_devicefunc_t *tr_func, void *arg); static int xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg); static int xptpdrvtraverse(struct periph_driver **start_pdrv, xpt_pdrvfunc_t *tr_func, void *arg); static int xptpdperiphtraverse(struct periph_driver **pdrv, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg); static xpt_busfunc_t xptdefbusfunc; static xpt_targetfunc_t xptdeftargetfunc; static xpt_devicefunc_t xptdefdevicefunc; static xpt_periphfunc_t xptdefperiphfunc; static void xpt_finishconfig_task(void *context, int pending); static void xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg); static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); static xpt_devicefunc_t xptsetasyncfunc; static xpt_busfunc_t xptsetasyncbusfunc; static cam_status xptregister(struct cam_periph *periph, void *arg); static __inline int xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev) { int retval; mtx_assert(&devq->send_mtx, MA_OWNED); if ((dev->ccbq.queue.entries > 0) && (dev->ccbq.dev_openings > 0) && (dev->ccbq.queue.qfrozen_cnt == 0)) { /* * The priority of a device waiting for controller * 
resources is that of the highest priority CCB * enqueued. */ retval = xpt_schedule_dev(&devq->send_queue, &dev->devq_entry, CAMQ_GET_PRIO(&dev->ccbq.queue)); } else { retval = 0; } return (retval); } static __inline int device_is_queued(struct cam_ed *device) { return (device->devq_entry.index != CAM_UNQUEUED_INDEX); } static void xpt_periph_init(void) { make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0"); } static int xptopen(struct cdev *dev, int flags, int fmt, struct thread *td) { /* * Only allow read-write access. */ if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) return(EPERM); /* * We don't allow nonblocking access. */ if ((flags & O_NONBLOCK) != 0) { printf("%s: can't do nonblocking access\n", devtoname(dev)); return(ENODEV); } return(0); } static int xptclose(struct cdev *dev, int flag, int fmt, struct thread *td) { return(0); } /* * Don't automatically grab the xpt softc lock here even though this is going * through the xpt device. The xpt device is really just a back door for * accessing other devices and SIMs, so the right thing to do is to grab * the appropriate SIM lock once the bus/SIM is located. */ static int xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { int error; if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) { error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl); } return (error); } static int xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { int error; error = 0; switch(cmd) { /* * For the transport layer CAMIOCOMMAND ioctl, we really only want * to accept CCB types that don't quite make sense to send through a * passthrough driver. XPT_PATH_INQ is an exception to this, as stated * in the CAM spec. */ case CAMIOCOMMAND: { union ccb *ccb; union ccb *inccb; struct cam_eb *bus; inccb = (union ccb *)addr; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (inccb->ccb_h.func_code == XPT_SCSI_IO) inccb->csio.bio = NULL; #endif if (inccb->ccb_h.flags & CAM_UNLOCKED) return (EINVAL); bus = xpt_find_bus(inccb->ccb_h.path_id); if (bus == NULL) return (EINVAL); switch (inccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_RESET_BUS: if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD || inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { xpt_release_bus(bus); return (EINVAL); } break; case XPT_SCAN_TGT: if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD || inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { xpt_release_bus(bus); return (EINVAL); } break; default: break; } switch(inccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_RESET_BUS: case XPT_PATH_INQ: case XPT_ENG_INQ: case XPT_SCAN_LUN: case XPT_SCAN_TGT: ccb = xpt_alloc_ccb(); /* * Create a path using the bus, target, and lun the * user passed in. */ if (xpt_create_path(&ccb->ccb_h.path, NULL, inccb->ccb_h.path_id, inccb->ccb_h.target_id, inccb->ccb_h.target_lun) != CAM_REQ_CMP){ error = EINVAL; xpt_free_ccb(ccb); break; } /* Ensure all of our fields are correct */ xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, inccb->ccb_h.pinfo.priority); xpt_merge_ccb(ccb, inccb); xpt_path_lock(ccb->ccb_h.path); cam_periph_runccb(ccb, NULL, 0, 0, NULL); xpt_path_unlock(ccb->ccb_h.path); bcopy(ccb, inccb, sizeof(union ccb)); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); break; case XPT_DEBUG: { union ccb ccb; /* * This is an immediate CCB, so it's okay to * allocate it on the stack. */ memset(&ccb, 0, sizeof(ccb)); /* * Create a path using the bus, target, and lun the * user passed in. 
*/ if (xpt_create_path(&ccb.ccb_h.path, NULL, inccb->ccb_h.path_id, inccb->ccb_h.target_id, inccb->ccb_h.target_lun) != CAM_REQ_CMP){ error = EINVAL; break; } /* Ensure all of our fields are correct */ xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, inccb->ccb_h.pinfo.priority); xpt_merge_ccb(&ccb, inccb); xpt_action(&ccb); bcopy(&ccb, inccb, sizeof(union ccb)); xpt_free_path(ccb.ccb_h.path); break; } case XPT_DEV_MATCH: { struct cam_periph_map_info mapinfo; struct cam_path *old_path; /* * We can't deal with physical addresses for this * type of transaction. */ if ((inccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) { error = EINVAL; break; } /* * Save this in case the caller had it set to * something in particular. */ old_path = inccb->ccb_h.path; /* * We really don't need a path for the matching * code. The path is needed because of the * debugging statements in xpt_action(). They * assume that the CCB has a valid path. */ inccb->ccb_h.path = xpt_periph->path; bzero(&mapinfo, sizeof(mapinfo)); /* * Map the pattern and match buffers into kernel * virtual address space. */ error = cam_periph_mapmem(inccb, &mapinfo, maxphys); if (error) { inccb->ccb_h.path = old_path; break; } /* * This is an immediate CCB, we can send it on directly. */ xpt_action(inccb); /* * Map the buffers back into user space. */ cam_periph_unmapmem(inccb, &mapinfo); inccb->ccb_h.path = old_path; error = 0; break; } default: error = ENOTSUP; break; } xpt_release_bus(bus); break; } /* * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input, * with the periphal driver name and unit name filled in. The other * fields don't really matter as input. The passthrough driver name * ("pass"), and unit number are passed back in the ccb. The current * device generation number, and the index into the device peripheral * driver list, and the status are also passed back. Note that * since we do everything in one pass, unlike the XPT_GDEVLIST ccb, * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is * (or rather should be) impossible for the device peripheral driver * list to change since we look at the whole thing in one pass, and * we do it with lock protection. * */ case CAMGETPASSTHRU: { union ccb *ccb; struct cam_periph *periph; struct periph_driver **p_drv; char *name; u_int unit; int base_periph_found; ccb = (union ccb *)addr; unit = ccb->cgdl.unit_number; name = ccb->cgdl.periph_name; base_periph_found = 0; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (ccb->ccb_h.func_code == XPT_SCSI_IO) ccb->csio.bio = NULL; #endif /* * Sanity check -- make sure we don't get a null peripheral * driver name. */ if (*ccb->cgdl.periph_name == '\0') { error = EINVAL; break; } /* Keep the list from changing while we traverse it */ xpt_lock_buses(); /* first find our driver in the list of drivers */ for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) if (strcmp((*p_drv)->driver_name, name) == 0) break; if (*p_drv == NULL) { xpt_unlock_buses(); ccb->ccb_h.status = CAM_REQ_CMP_ERR; ccb->cgdl.status = CAM_GDEVLIST_ERROR; *ccb->cgdl.periph_name = '\0'; ccb->cgdl.unit_number = 0; error = ENOENT; break; } /* * Run through every peripheral instance of this driver * and check to see whether it matches the unit passed * in by the user. If it does, get out of the loops and * find the passthrough driver associated with that * peripheral driver. 
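		 *
		 * For example, a caller asking for the pass device that
		 * shadows da1 sends cgdl.periph_name = "da" and
		 * cgdl.unit_number = 1; the answer ("pass" and its unit
		 * number) is copied back into those same two fields below.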
*/ for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL; periph = TAILQ_NEXT(periph, unit_links)) { if (periph->unit_number == unit) break; } /* * If we found the peripheral driver that the user passed * in, go through all of the peripheral drivers for that * particular device and look for a passthrough driver. */ if (periph != NULL) { struct cam_ed *device; int i; base_periph_found = 1; device = periph->path->device; for (i = 0, periph = SLIST_FIRST(&device->periphs); periph != NULL; periph = SLIST_NEXT(periph, periph_links), i++) { /* * Check to see whether we have a * passthrough device or not. */ if (strcmp(periph->periph_name, "pass") == 0) { /* * Fill in the getdevlist fields. */ strlcpy(ccb->cgdl.periph_name, periph->periph_name, sizeof(ccb->cgdl.periph_name)); ccb->cgdl.unit_number = periph->unit_number; if (SLIST_NEXT(periph, periph_links)) ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS; else ccb->cgdl.status = CAM_GDEVLIST_LAST_DEVICE; ccb->cgdl.generation = device->generation; ccb->cgdl.index = i; /* * Fill in some CCB header fields * that the user may want. */ ccb->ccb_h.path_id = periph->path->bus->path_id; ccb->ccb_h.target_id = periph->path->target->target_id; ccb->ccb_h.target_lun = periph->path->device->lun_id; ccb->ccb_h.status = CAM_REQ_CMP; break; } } } /* * If the periph is null here, one of two things has * happened. The first possibility is that we couldn't * find the unit number of the particular peripheral driver * that the user is asking about. e.g. the user asks for * the passthrough driver for "da11". We find the list of * "da" peripherals all right, but there is no unit 11. * The other possibility is that we went through the list * of peripheral drivers attached to the device structure, * but didn't find one with the name "pass". Either way, * we return ENOENT, since we couldn't find something. */ if (periph == NULL) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; ccb->cgdl.status = CAM_GDEVLIST_ERROR; *ccb->cgdl.periph_name = '\0'; ccb->cgdl.unit_number = 0; error = ENOENT; /* * It is unfortunate that this is even necessary, * but there are many, many clueless users out there. * If this is true, the user is looking for the * passthrough driver, but doesn't have one in his * kernel. 
*/ if (base_periph_found == 1) { printf("xptioctl: pass driver is not in the " "kernel\n"); printf("xptioctl: put \"device pass\" in " "your kernel config file\n"); } } xpt_unlock_buses(); break; } default: error = ENOTTY; break; } return(error); } static int cam_module_event_handler(module_t mod, int what, void *arg) { int error; switch (what) { case MOD_LOAD: if ((error = xpt_init(NULL)) != 0) return (error); break; case MOD_UNLOAD: return EBUSY; default: return EOPNOTSUPP; } return 0; } static struct xpt_proto * xpt_proto_find(cam_proto proto) { struct xpt_proto **pp; SET_FOREACH(pp, cam_xpt_proto_set) { if ((*pp)->proto == proto) return *pp; } return NULL; } static void xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb) { if (done_ccb->ccb_h.ppriv_ptr1 == NULL) { xpt_free_path(done_ccb->ccb_h.path); xpt_free_ccb(done_ccb); } else { done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1; (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb); } xpt_release_boot(); } /* thread to handle bus rescans */ static void xpt_scanner_thread(void *dummy) { union ccb *ccb; struct mtx *mtx; struct cam_ed *device; xpt_lock_buses(); for (;;) { if (TAILQ_EMPTY(&xsoftc.ccb_scanq)) msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO, "-", 0); if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) { TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); xpt_unlock_buses(); /* * We need to lock the device's mutex which we use as * the path mutex. We can't do it directly because the * cam_path in the ccb may wind up going away because * the path lock may be dropped and the path retired in * the completion callback. We do this directly to keep * the reference counts in cam_path sane. We also have * to copy the device pointer because ccb_h.path may * be freed in the callback. */ mtx = xpt_path_mtx(ccb->ccb_h.path); device = ccb->ccb_h.path->device; xpt_acquire_device(device); mtx_lock(mtx); xpt_action(ccb); mtx_unlock(mtx); xpt_release_device(device); xpt_lock_buses(); } } } void xpt_rescan(union ccb *ccb) { struct ccb_hdr *hdr; /* Prepare request */ if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD && ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_BUS; else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_TGT; else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_LUN; else { xpt_print(ccb->ccb_h.path, "illegal scan path\n"); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code, xpt_action_name(ccb->ccb_h.func_code))); ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp; ccb->ccb_h.cbfcnp = xpt_rescan_done; xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT); /* Don't make duplicate entries for the same paths. 
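	 *
	 * For example, if a second rescan request for the same
	 * bus:target:lun arrives while the first is still sitting on
	 * ccb_scanq, the loop below notices the duplicate via
	 * xpt_path_comp() and simply drops the new request.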
	 */
	xpt_lock_buses();
	if (ccb->ccb_h.ppriv_ptr1 == NULL) {
		TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
			if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
				wakeup(&xsoftc.ccb_scanq);
				xpt_unlock_buses();
				xpt_print(ccb->ccb_h.path, "rescan already queued\n");
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
				return;
			}
		}
	}
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	xpt_hold_boot_locked();
	wakeup(&xsoftc.ccb_scanq);
	xpt_unlock_buses();
}

/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;
	int error, i;

	TAILQ_INIT(&xsoftc.xpt_busses);
	TAILQ_INIT(&xsoftc.ccb_scanq);
	STAILQ_INIT(&xsoftc.highpowerq);
	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

	mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
	xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);

#ifdef CAM_BOOT_DELAY
	/*
	 * Override this value at compile time to assist our users
	 * who don't use loader to boot a kernel.
	 */
	xsoftc.boot_delay = CAM_BOOT_DELAY;
#endif

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe buses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*mtx*/NULL,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	if (xpt_sim == NULL)
		return (ENOMEM);

-	if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
-		printf("xpt_init: xpt_bus_register failed with status %#x,"
-		       " failing attach\n", status);
+	if ((error = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
+		printf("xpt_init: xpt_bus_register failed with errno %d,"
+		       " failing attach\n", error);
		return (EINVAL);
	}

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}
	xpt_path_lock(path);
	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, xpt_sim);
	xpt_path_unlock(path);
	xpt_free_path(path);

	if (cam_num_doneqs < 1)
		cam_num_doneqs = 1 + mp_ncpus / 6;
	else if (cam_num_doneqs > MAXCPU)
		cam_num_doneqs = MAXCPU;
	for (i = 0; i < cam_num_doneqs; i++) {
		mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
		    MTX_DEF);
		STAILQ_INIT(&cam_doneqs[i].cam_doneq);
		error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
		    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
		if (error != 0) {
			cam_num_doneqs = i;
			break;
		}
	}
	if (cam_num_doneqs < 1) {
		printf("xpt_init: Cannot init completion queues "
		       "- failing attach\n");
		return (ENOMEM);
	}

	mtx_init(&cam_async.cam_doneq_mtx, "CAM async", NULL, MTX_DEF);
	STAILQ_INIT(&cam_async.cam_doneq);
	if (kproc_kthread_add(xpt_async_td, &cam_async,
		&cam_proc, NULL, 0, 0, "cam", "async") != 0) {
		printf("xpt_init: Cannot init async thread "
		       "- failing attach\n");
		return (ENOMEM);
	}

	/*
	 * Register a callback for when interrupts are enabled.
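	 * config_intrhook_oneshot() defers xpt_config() until interrupts
	 * (and therefore SIM completions) are available, so the initial
	 * bus scans issued from there can actually make progress.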
*/ config_intrhook_oneshot(xpt_config, NULL); return (0); } static cam_status xptregister(struct cam_periph *periph, void *arg) { struct cam_sim *xpt_sim; if (periph == NULL) { printf("xptregister: periph was NULL!!\n"); return(CAM_REQ_CMP_ERR); } xpt_sim = (struct cam_sim *)arg; xpt_sim->softc = periph; xpt_periph = periph; periph->softc = NULL; return(CAM_REQ_CMP); } int32_t xpt_add_periph(struct cam_periph *periph) { struct cam_ed *device; int32_t status; TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph); device = periph->path->device; status = CAM_REQ_CMP; if (device != NULL) { mtx_lock(&device->target->bus->eb_mtx); device->generation++; SLIST_INSERT_HEAD(&device->periphs, periph, periph_links); mtx_unlock(&device->target->bus->eb_mtx); atomic_add_32(&xsoftc.xpt_generation, 1); } return (status); } void xpt_remove_periph(struct cam_periph *periph) { struct cam_ed *device; device = periph->path->device; if (device != NULL) { mtx_lock(&device->target->bus->eb_mtx); device->generation++; SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links); mtx_unlock(&device->target->bus->eb_mtx); atomic_add_32(&xsoftc.xpt_generation, 1); } } void xpt_announce_periph(struct cam_periph *periph, char *announce_string) { struct cam_path *path = periph->path; struct xpt_proto *proto; cam_periph_assert(periph, MA_OWNED); periph->flags |= CAM_PERIPH_ANNOUNCED; printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n", periph->periph_name, periph->unit_number, path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id, path->bus->path_id, path->target->target_id, (uintmax_t)path->device->lun_id); printf("%s%d: ", periph->periph_name, periph->unit_number); proto = xpt_proto_find(path->device->protocol); if (proto) proto->ops->announce(path->device); else printf("%s%d: Unknown protocol device %d\n", periph->periph_name, periph->unit_number, path->device->protocol); if (path->device->serial_num_len > 0) { /* Don't wrap the screen - print only the first 60 chars */ printf("%s%d: Serial Number %.60s\n", periph->periph_name, periph->unit_number, path->device->serial_num); } /* Announce transport details. */ path->bus->xport->ops->announce(periph); /* Announce command queueing. */ if (path->device->inq_flags & SID_CmdQue || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { printf("%s%d: Command Queueing enabled\n", periph->periph_name, periph->unit_number); } /* Announce caller's details if they've passed in. 
*/ if (announce_string != NULL) printf("%s%d: %s\n", periph->periph_name, periph->unit_number, announce_string); } void xpt_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb, char *announce_string) { struct cam_path *path = periph->path; struct xpt_proto *proto; cam_periph_assert(periph, MA_OWNED); periph->flags |= CAM_PERIPH_ANNOUNCED; /* Fall back to the non-sbuf method if necessary */ if (xsoftc.announce_nosbuf != 0) { xpt_announce_periph(periph, announce_string); return; } proto = xpt_proto_find(path->device->protocol); if (((proto != NULL) && (proto->ops->announce_sbuf == NULL)) || (path->bus->xport->ops->announce_sbuf == NULL)) { xpt_announce_periph(periph, announce_string); return; } sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n", periph->periph_name, periph->unit_number, path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id, path->bus->path_id, path->target->target_id, (uintmax_t)path->device->lun_id); sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number); if (proto) proto->ops->announce_sbuf(path->device, sb); else sbuf_printf(sb, "%s%d: Unknown protocol device %d\n", periph->periph_name, periph->unit_number, path->device->protocol); if (path->device->serial_num_len > 0) { /* Don't wrap the screen - print only the first 60 chars */ sbuf_printf(sb, "%s%d: Serial Number %.60s\n", periph->periph_name, periph->unit_number, path->device->serial_num); } /* Announce transport details. */ path->bus->xport->ops->announce_sbuf(periph, sb); /* Announce command queueing. */ if (path->device->inq_flags & SID_CmdQue || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { sbuf_printf(sb, "%s%d: Command Queueing enabled\n", periph->periph_name, periph->unit_number); } /* Announce caller's details if they've passed in. 
*/ if (announce_string != NULL) sbuf_printf(sb, "%s%d: %s\n", periph->periph_name, periph->unit_number, announce_string); } void xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string) { if (quirks != 0) { printf("%s%d: quirks=0x%b\n", periph->periph_name, periph->unit_number, quirks, bit_string); } } void xpt_announce_quirks_sbuf(struct cam_periph *periph, struct sbuf *sb, int quirks, char *bit_string) { if (xsoftc.announce_nosbuf != 0) { xpt_announce_quirks(periph, quirks, bit_string); return; } if (quirks != 0) { sbuf_printf(sb, "%s%d: quirks=0x%b\n", periph->periph_name, periph->unit_number, quirks, bit_string); } } void xpt_denounce_periph(struct cam_periph *periph) { struct cam_path *path = periph->path; struct xpt_proto *proto; cam_periph_assert(periph, MA_OWNED); printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n", periph->periph_name, periph->unit_number, path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id, path->bus->path_id, path->target->target_id, (uintmax_t)path->device->lun_id); printf("%s%d: ", periph->periph_name, periph->unit_number); proto = xpt_proto_find(path->device->protocol); if (proto) proto->ops->denounce(path->device); else printf("%s%d: Unknown protocol device %d\n", periph->periph_name, periph->unit_number, path->device->protocol); if (path->device->serial_num_len > 0) printf(" s/n %.60s", path->device->serial_num); printf(" detached\n"); } void xpt_denounce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb) { struct cam_path *path = periph->path; struct xpt_proto *proto; cam_periph_assert(periph, MA_OWNED); /* Fall back to the non-sbuf method if necessary */ if (xsoftc.announce_nosbuf != 0) { xpt_denounce_periph(periph); return; } proto = xpt_proto_find(path->device->protocol); if ((proto != NULL) && (proto->ops->denounce_sbuf == NULL)) { xpt_denounce_periph(periph); return; } sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n", periph->periph_name, periph->unit_number, path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id, path->bus->path_id, path->target->target_id, (uintmax_t)path->device->lun_id); sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number); if (proto) proto->ops->denounce_sbuf(path->device, sb); else sbuf_printf(sb, "%s%d: Unknown protocol device %d\n", periph->periph_name, periph->unit_number, path->device->protocol); if (path->device->serial_num_len > 0) sbuf_printf(sb, " s/n %.60s", path->device->serial_num); sbuf_printf(sb, " detached\n"); } int xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path) { int ret = -1, l, o; struct ccb_dev_advinfo cdai; struct scsi_vpd_device_id *did; struct scsi_vpd_id_descriptor *idd; xpt_path_assert(path, MA_OWNED); memset(&cdai, 0, sizeof(cdai)); xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.flags = CDAI_FLAG_NONE; cdai.bufsiz = len; cdai.buf = buf; if (!strcmp(attr, "GEOM::ident")) cdai.buftype = CDAI_TYPE_SERIAL_NUM; else if (!strcmp(attr, "GEOM::physpath")) cdai.buftype = CDAI_TYPE_PHYS_PATH; else if (strcmp(attr, "GEOM::lunid") == 0 || strcmp(attr, "GEOM::lunname") == 0) { cdai.buftype = CDAI_TYPE_SCSI_DEVID; cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN; cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT); if (cdai.buf == NULL) { ret = ENOMEM; goto out; } } else goto out; xpt_action((union ccb *)&cdai); /* can only be synchronous */ if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if 
(cdai.provsiz == 0) goto out; switch(cdai.buftype) { case CDAI_TYPE_SCSI_DEVID: did = (struct scsi_vpd_device_id *)cdai.buf; if (strcmp(attr, "GEOM::lunid") == 0) { idd = scsi_get_devid(did, cdai.provsiz, scsi_devid_is_lun_naa); if (idd == NULL) idd = scsi_get_devid(did, cdai.provsiz, scsi_devid_is_lun_eui64); if (idd == NULL) idd = scsi_get_devid(did, cdai.provsiz, scsi_devid_is_lun_uuid); if (idd == NULL) idd = scsi_get_devid(did, cdai.provsiz, scsi_devid_is_lun_md5); } else idd = NULL; if (idd == NULL) idd = scsi_get_devid(did, cdai.provsiz, scsi_devid_is_lun_t10); if (idd == NULL) idd = scsi_get_devid(did, cdai.provsiz, scsi_devid_is_lun_name); if (idd == NULL) break; ret = 0; if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) { if (idd->length < len) { for (l = 0; l < idd->length; l++) buf[l] = idd->identifier[l] ? idd->identifier[l] : ' '; buf[l] = 0; } else ret = EFAULT; break; } if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) { l = strnlen(idd->identifier, idd->length); if (l < len) { bcopy(idd->identifier, buf, l); buf[l] = 0; } else ret = EFAULT; break; } if ((idd->id_type & SVPD_ID_TYPE_MASK) == SVPD_ID_TYPE_UUID && idd->identifier[0] == 0x10) { if ((idd->length - 2) * 2 + 4 >= len) { ret = EFAULT; break; } for (l = 2, o = 0; l < idd->length; l++) { if (l == 6 || l == 8 || l == 10 || l == 12) o += sprintf(buf + o, "-"); o += sprintf(buf + o, "%02x", idd->identifier[l]); } break; } if (idd->length * 2 < len) { for (l = 0; l < idd->length; l++) sprintf(buf + l * 2, "%02x", idd->identifier[l]); } else ret = EFAULT; break; default: if (cdai.provsiz < len) { cdai.buf[cdai.provsiz] = 0; ret = 0; } else ret = EFAULT; break; } out: if ((char *)cdai.buf != buf) free(cdai.buf, M_CAMXPT); return ret; } static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_eb *bus) { dev_match_ret retval; u_int i; retval = DM_RET_NONE; /* * If we aren't given something to match against, that's an error. */ if (bus == NULL) return(DM_RET_ERROR); /* * If there are no match entries, then this bus matches no * matter what. */ if ((patterns == NULL) || (num_patterns == 0)) return(DM_RET_DESCEND | DM_RET_COPY); for (i = 0; i < num_patterns; i++) { struct bus_match_pattern *cur_pattern; /* * If the pattern in question isn't for a bus node, we * aren't interested. However, we do indicate to the * calling routine that we should continue descending the * tree, since the user wants to match against lower-level * EDT elements. */ if (patterns[i].type != DEV_MATCH_BUS) { if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_DESCEND; continue; } cur_pattern = &patterns[i].pattern.bus_pattern; /* * If they want to match any bus node, we give them any * device node. */ if (cur_pattern->flags == BUS_MATCH_ANY) { /* set the copy flag */ retval |= DM_RET_COPY; /* * If we've already decided on an action, go ahead * and return. */ if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE) return(retval); } /* * Not sure why someone would do this... 
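		 *
		 * A pattern with no flag bits set can never match
		 * anything, so it is simply skipped.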
*/ if (cur_pattern->flags == BUS_MATCH_NONE) continue; if (((cur_pattern->flags & BUS_MATCH_PATH) != 0) && (cur_pattern->path_id != bus->path_id)) continue; if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0) && (cur_pattern->bus_id != bus->sim->bus_id)) continue; if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0) && (cur_pattern->unit_number != bus->sim->unit_number)) continue; if (((cur_pattern->flags & BUS_MATCH_NAME) != 0) && (strncmp(cur_pattern->dev_name, bus->sim->sim_name, DEV_IDLEN) != 0)) continue; /* * If we get to this point, the user definitely wants * information on this bus. So tell the caller to copy the * data out. */ retval |= DM_RET_COPY; /* * If the return action has been set to descend, then we * know that we've already seen a non-bus matching * expression, therefore we need to further descend the tree. * This won't change by continuing around the loop, so we * go ahead and return. If we haven't seen a non-bus * matching expression, we keep going around the loop until * we exhaust the matching expressions. We'll set the stop * flag once we fall out of the loop. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) return(retval); } /* * If the return action hasn't been set to descend yet, that means * we haven't seen anything other than bus matching patterns. So * tell the caller to stop descending the tree -- the user doesn't * want to match against lower level tree elements. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_STOP; return(retval); } static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_ed *device) { dev_match_ret retval; u_int i; retval = DM_RET_NONE; /* * If we aren't given something to match against, that's an error. */ if (device == NULL) return(DM_RET_ERROR); /* * If there are no match entries, then this device matches no * matter what. */ if ((patterns == NULL) || (num_patterns == 0)) return(DM_RET_DESCEND | DM_RET_COPY); for (i = 0; i < num_patterns; i++) { struct device_match_pattern *cur_pattern; struct scsi_vpd_device_id *device_id_page; /* * If the pattern in question isn't for a device node, we * aren't interested. */ if (patterns[i].type != DEV_MATCH_DEVICE) { if ((patterns[i].type == DEV_MATCH_PERIPH) && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)) retval |= DM_RET_DESCEND; continue; } cur_pattern = &patterns[i].pattern.device_pattern; /* Error out if mutually exclusive options are specified. */ if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) return(DM_RET_ERROR); /* * If they want to match any device node, we give them any * device node. */ if (cur_pattern->flags == DEV_MATCH_ANY) goto copy_dev_node; /* * Not sure why someone would do this... 
*/ if (cur_pattern->flags == DEV_MATCH_NONE) continue; if (((cur_pattern->flags & DEV_MATCH_PATH) != 0) && (cur_pattern->path_id != device->target->bus->path_id)) continue; if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0) && (cur_pattern->target_id != device->target->target_id)) continue; if (((cur_pattern->flags & DEV_MATCH_LUN) != 0) && (cur_pattern->target_lun != device->lun_id)) continue; if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0) && (cam_quirkmatch((caddr_t)&device->inq_data, (caddr_t)&cur_pattern->data.inq_pat, 1, sizeof(cur_pattern->data.inq_pat), scsi_static_inquiry_match) == NULL)) continue; device_id_page = (struct scsi_vpd_device_id *)device->device_id; if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0) && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN || scsi_devid_match((uint8_t *)device_id_page->desc_list, device->device_id_len - SVPD_DEVICE_ID_HDR_LEN, cur_pattern->data.devid_pat.id, cur_pattern->data.devid_pat.id_len) != 0)) continue; copy_dev_node: /* * If we get to this point, the user definitely wants * information on this device. So tell the caller to copy * the data out. */ retval |= DM_RET_COPY; /* * If the return action has been set to descend, then we * know that we've already seen a peripheral matching * expression, therefore we need to further descend the tree. * This won't change by continuing around the loop, so we * go ahead and return. If we haven't seen a peripheral * matching expression, we keep going around the loop until * we exhaust the matching expressions. We'll set the stop * flag once we fall out of the loop. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) return(retval); } /* * If the return action hasn't been set to descend yet, that means * we haven't seen any peripheral matching patterns. So tell the * caller to stop descending the tree -- the user doesn't want to * match against lower level tree elements. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_STOP; return(retval); } /* * Match a single peripheral against any number of match patterns. */ static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_periph *periph) { dev_match_ret retval; u_int i; /* * If we aren't given something to match against, that's an error. */ if (periph == NULL) return(DM_RET_ERROR); /* * If there are no match entries, then this peripheral matches no * matter what. */ if ((patterns == NULL) || (num_patterns == 0)) return(DM_RET_STOP | DM_RET_COPY); /* * There aren't any nodes below a peripheral node, so there's no * reason to descend the tree any further. */ retval = DM_RET_STOP; for (i = 0; i < num_patterns; i++) { struct periph_match_pattern *cur_pattern; /* * If the pattern in question isn't for a peripheral, we * aren't interested. */ if (patterns[i].type != DEV_MATCH_PERIPH) continue; cur_pattern = &patterns[i].pattern.periph_pattern; /* * If they want to match on anything, then we will do so. */ if (cur_pattern->flags == PERIPH_MATCH_ANY) { /* set the copy flag */ retval |= DM_RET_COPY; /* * We've already set the return action to stop, * since there are no nodes below peripherals in * the tree. */ return(retval); } /* * Not sure why someone would do this... */ if (cur_pattern->flags == PERIPH_MATCH_NONE) continue; if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0) && (cur_pattern->path_id != periph->path->bus->path_id)) continue; /* * For the target and lun id's, we have to make sure the * target and lun pointers aren't NULL. 
The xpt peripheral * has a wildcard target and device. */ if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0) && ((periph->path->target == NULL) ||(cur_pattern->target_id != periph->path->target->target_id))) continue; if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0) && ((periph->path->device == NULL) || (cur_pattern->target_lun != periph->path->device->lun_id))) continue; if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0) && (cur_pattern->unit_number != periph->unit_number)) continue; if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0) && (strncmp(cur_pattern->periph_name, periph->periph_name, DEV_IDLEN) != 0)) continue; /* * If we get to this point, the user definitely wants * information on this peripheral. So tell the caller to * copy the data out. */ retval |= DM_RET_COPY; /* * The return action has already been set to stop, since * peripherals don't have any nodes below them in the EDT. */ return(retval); } /* * If we get to this point, the peripheral that was passed in * doesn't match any of the patterns. */ return(retval); } static int xptedtbusfunc(struct cam_eb *bus, void *arg) { struct ccb_dev_match *cdm; struct cam_et *target; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; /* * If our position is for something deeper in the tree, that means * that we've already seen this node. So, we keep going down. */ if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target != NULL)) retval = DM_RET_DESCEND; else retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus); /* * If we got an error, bail out of the search. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this bus out. */ if (retval & DM_RET_COPY) { int spaceleft, j; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. */ if (spaceleft < sizeof(struct dev_match_result)) { bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_EDT | CAM_DEV_POS_BUS; cdm->pos.cookie.bus = bus; cdm->pos.generations[CAM_BUS_GENERATION]= xsoftc.bus_generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_BUS; cdm->matches[j].result.bus_result.path_id = bus->path_id; cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id; cdm->matches[j].result.bus_result.unit_number = bus->sim->unit_number; strlcpy(cdm->matches[j].result.bus_result.dev_name, bus->sim->sim_name, sizeof(cdm->matches[j].result.bus_result.dev_name)); } /* * If the user is only interested in buses, there's no * reason to descend to the next level in the tree. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) return(1); /* * If there is a target generation recorded, check it to * make sure the target list hasn't changed. 
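	 *
	 * The cookie and generation saved in cdm->pos are what let a
	 * caller resume a traversal that previously returned
	 * CAM_DEV_MATCH_MORE; if the generation no longer matches, the
	 * list changed underneath the caller and it has to start over.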
*/ mtx_lock(&bus->eb_mtx); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target != NULL)) { if ((cdm->pos.generations[CAM_TARGET_GENERATION] != bus->generation)) { mtx_unlock(&bus->eb_mtx); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return (0); } target = (struct cam_et *)cdm->pos.cookie.target; target->refcount++; } else target = NULL; mtx_unlock(&bus->eb_mtx); return (xpttargettraverse(bus, target, xptedttargetfunc, arg)); } static int xptedttargetfunc(struct cam_et *target, void *arg) { struct ccb_dev_match *cdm; struct cam_eb *bus; struct cam_ed *device; cdm = (struct ccb_dev_match *)arg; bus = target->bus; /* * If there is a device list generation recorded, check it to * make sure the device list hasn't changed. */ mtx_lock(&bus->eb_mtx); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target == target) && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.cookie.device != NULL)) { if (cdm->pos.generations[CAM_DEV_GENERATION] != target->generation) { mtx_unlock(&bus->eb_mtx); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } device = (struct cam_ed *)cdm->pos.cookie.device; device->refcount++; } else device = NULL; mtx_unlock(&bus->eb_mtx); return (xptdevicetraverse(target, device, xptedtdevicefunc, arg)); } static int xptedtdevicefunc(struct cam_ed *device, void *arg) { struct cam_eb *bus; struct cam_periph *periph; struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; bus = device->target->bus; /* * If our position is for something deeper in the tree, that means * that we've already seen this node. So, we keep going down. */ if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.cookie.device == device) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.cookie.periph != NULL)) retval = DM_RET_DESCEND; else retval = xptdevicematch(cdm->patterns, cdm->num_patterns, device); if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this device out. */ if (retval & DM_RET_COPY) { int spaceleft, j; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. 
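		 *
		 * For example, a match buffer sized for a single
		 * struct dev_match_result yields one entry per
		 * XPT_DEV_MATCH call and CAM_DEV_MATCH_MORE each time,
		 * until the whole EDT has been walked.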
*/ if (spaceleft < sizeof(struct dev_match_result)) { bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; cdm->pos.cookie.bus = device->target->bus; cdm->pos.generations[CAM_BUS_GENERATION]= xsoftc.bus_generation; cdm->pos.cookie.target = device->target; cdm->pos.generations[CAM_TARGET_GENERATION] = device->target->bus->generation; cdm->pos.cookie.device = device; cdm->pos.generations[CAM_DEV_GENERATION] = device->target->generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_DEVICE; cdm->matches[j].result.device_result.path_id = device->target->bus->path_id; cdm->matches[j].result.device_result.target_id = device->target->target_id; cdm->matches[j].result.device_result.target_lun = device->lun_id; cdm->matches[j].result.device_result.protocol = device->protocol; bcopy(&device->inq_data, &cdm->matches[j].result.device_result.inq_data, sizeof(struct scsi_inquiry_data)); bcopy(&device->ident_data, &cdm->matches[j].result.device_result.ident_data, sizeof(struct ata_params)); /* Let the user know whether this device is unconfigured */ if (device->flags & CAM_DEV_UNCONFIGURED) cdm->matches[j].result.device_result.flags = DEV_RESULT_UNCONFIGURED; else cdm->matches[j].result.device_result.flags = DEV_RESULT_NOFLAG; } /* * If the user isn't interested in peripherals, don't descend * the tree any further. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) return(1); /* * If there is a peripheral list generation recorded, make sure * it hasn't changed. */ xpt_lock_buses(); mtx_lock(&bus->eb_mtx); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target == device->target) && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.cookie.device == device) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.cookie.periph != NULL)) { if (cdm->pos.generations[CAM_PERIPH_GENERATION] != device->generation) { mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } periph = (struct cam_periph *)cdm->pos.cookie.periph; periph->refcount++; } else periph = NULL; mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg)); } static int xptedtperiphfunc(struct cam_periph *periph, void *arg) { struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this peripheral out. */ if (retval & DM_RET_COPY) { int spaceleft, j; size_t l; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. 
*/ if (spaceleft < sizeof(struct dev_match_result)) { bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | CAM_DEV_POS_PERIPH; cdm->pos.cookie.bus = periph->path->bus; cdm->pos.generations[CAM_BUS_GENERATION]= xsoftc.bus_generation; cdm->pos.cookie.target = periph->path->target; cdm->pos.generations[CAM_TARGET_GENERATION] = periph->path->bus->generation; cdm->pos.cookie.device = periph->path->device; cdm->pos.generations[CAM_DEV_GENERATION] = periph->path->target->generation; cdm->pos.cookie.periph = periph; cdm->pos.generations[CAM_PERIPH_GENERATION] = periph->path->device->generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_PERIPH; cdm->matches[j].result.periph_result.path_id = periph->path->bus->path_id; cdm->matches[j].result.periph_result.target_id = periph->path->target->target_id; cdm->matches[j].result.periph_result.target_lun = periph->path->device->lun_id; cdm->matches[j].result.periph_result.unit_number = periph->unit_number; l = sizeof(cdm->matches[j].result.periph_result.periph_name); strlcpy(cdm->matches[j].result.periph_result.periph_name, periph->periph_name, l); } return(1); } static int xptedtmatch(struct ccb_dev_match *cdm) { struct cam_eb *bus; int ret; cdm->num_matches = 0; /* * Check the bus list generation. If it has changed, the user * needs to reset everything and start over. */ xpt_lock_buses(); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus != NULL)) { if (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation) { xpt_unlock_buses(); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } bus = (struct cam_eb *)cdm->pos.cookie.bus; bus->refcount++; } else bus = NULL; xpt_unlock_buses(); ret = xptbustraverse(bus, xptedtbusfunc, cdm); /* * If we get back 0, that means that we had to stop before fully * traversing the EDT. It also means that one of the subroutines * has set the status field to the proper value. If we get back 1, * we've fully traversed the EDT and copied out any matching entries. */ if (ret == 1) cdm->status = CAM_DEV_MATCH_LAST; return(ret); } static int xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) { struct cam_periph *periph; struct ccb_dev_match *cdm; cdm = (struct ccb_dev_match *)arg; xpt_lock_buses(); if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) && (cdm->pos.cookie.pdrv == pdrv) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.cookie.periph != NULL)) { if (cdm->pos.generations[CAM_PERIPH_GENERATION] != (*pdrv)->generation) { xpt_unlock_buses(); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } periph = (struct cam_periph *)cdm->pos.cookie.periph; periph->refcount++; } else periph = NULL; xpt_unlock_buses(); return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg)); } static int xptplistperiphfunc(struct cam_periph *periph, void *arg) { struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this peripheral out. 
*/ if (retval & DM_RET_COPY) { int spaceleft, j; size_t l; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. */ if (spaceleft < sizeof(struct dev_match_result)) { struct periph_driver **pdrv; pdrv = NULL; bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | CAM_DEV_POS_PERIPH; /* * This may look a bit non-sensical, but it is * actually quite logical. There are very few * peripheral drivers, and bloating every peripheral * structure with a pointer back to its parent * peripheral driver linker set entry would cost * more in the long run than doing this quick lookup. */ for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { if (strcmp((*pdrv)->driver_name, periph->periph_name) == 0) break; } if (*pdrv == NULL) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } cdm->pos.cookie.pdrv = pdrv; /* * The periph generation slot does double duty, as * does the periph pointer slot. They are used for * both edt and pdrv lookups and positioning. */ cdm->pos.cookie.periph = periph; cdm->pos.generations[CAM_PERIPH_GENERATION] = (*pdrv)->generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_PERIPH; cdm->matches[j].result.periph_result.path_id = periph->path->bus->path_id; /* * The transport layer peripheral doesn't have a target or * lun. */ if (periph->path->target) cdm->matches[j].result.periph_result.target_id = periph->path->target->target_id; else cdm->matches[j].result.periph_result.target_id = CAM_TARGET_WILDCARD; if (periph->path->device) cdm->matches[j].result.periph_result.target_lun = periph->path->device->lun_id; else cdm->matches[j].result.periph_result.target_lun = CAM_LUN_WILDCARD; cdm->matches[j].result.periph_result.unit_number = periph->unit_number; l = sizeof(cdm->matches[j].result.periph_result.periph_name); strlcpy(cdm->matches[j].result.periph_result.periph_name, periph->periph_name, l); } return(1); } static int xptperiphlistmatch(struct ccb_dev_match *cdm) { int ret; cdm->num_matches = 0; /* * At this point in the edt traversal function, we check the bus * list generation to make sure that no buses have been added or * removed since the user last sent a XPT_DEV_MATCH ccb through. * For the peripheral driver list traversal function, however, we * don't have to worry about new peripheral driver types coming or * going; they're in a linker set, and therefore can't change * without a recompile. */ if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) && (cdm->pos.cookie.pdrv != NULL)) ret = xptpdrvtraverse( (struct periph_driver **)cdm->pos.cookie.pdrv, xptplistpdrvfunc, cdm); else ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); /* * If we get back 0, that means that we had to stop before fully * traversing the peripheral driver tree. It also means that one of * the subroutines has set the status field to the proper value. If * we get back 1, we've fully traversed the EDT and copied out any * matching entries. 
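	 *
	 * The same convention is used by every xpt*traverse() helper
	 * below: a 0 return from the per-node function aborts the walk,
	 * while 1 keeps it going to the next node.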
*/ if (ret == 1) cdm->status = CAM_DEV_MATCH_LAST; return(ret); } static int xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) { struct cam_eb *bus, *next_bus; int retval; retval = 1; if (start_bus) bus = start_bus; else { xpt_lock_buses(); bus = TAILQ_FIRST(&xsoftc.xpt_busses); if (bus == NULL) { xpt_unlock_buses(); return (retval); } bus->refcount++; xpt_unlock_buses(); } for (; bus != NULL; bus = next_bus) { retval = tr_func(bus, arg); if (retval == 0) { xpt_release_bus(bus); break; } xpt_lock_buses(); next_bus = TAILQ_NEXT(bus, links); if (next_bus) next_bus->refcount++; xpt_unlock_buses(); xpt_release_bus(bus); } return(retval); } static int xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, xpt_targetfunc_t *tr_func, void *arg) { struct cam_et *target, *next_target; int retval; retval = 1; if (start_target) target = start_target; else { mtx_lock(&bus->eb_mtx); target = TAILQ_FIRST(&bus->et_entries); if (target == NULL) { mtx_unlock(&bus->eb_mtx); return (retval); } target->refcount++; mtx_unlock(&bus->eb_mtx); } for (; target != NULL; target = next_target) { retval = tr_func(target, arg); if (retval == 0) { xpt_release_target(target); break; } mtx_lock(&bus->eb_mtx); next_target = TAILQ_NEXT(target, links); if (next_target) next_target->refcount++; mtx_unlock(&bus->eb_mtx); xpt_release_target(target); } return(retval); } static int xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, xpt_devicefunc_t *tr_func, void *arg) { struct cam_eb *bus; struct cam_ed *device, *next_device; int retval; retval = 1; bus = target->bus; if (start_device) device = start_device; else { mtx_lock(&bus->eb_mtx); device = TAILQ_FIRST(&target->ed_entries); if (device == NULL) { mtx_unlock(&bus->eb_mtx); return (retval); } device->refcount++; mtx_unlock(&bus->eb_mtx); } for (; device != NULL; device = next_device) { mtx_lock(&device->device_mtx); retval = tr_func(device, arg); mtx_unlock(&device->device_mtx); if (retval == 0) { xpt_release_device(device); break; } mtx_lock(&bus->eb_mtx); next_device = TAILQ_NEXT(device, links); if (next_device) next_device->refcount++; mtx_unlock(&bus->eb_mtx); xpt_release_device(device); } return(retval); } static int xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg) { struct cam_eb *bus; struct cam_periph *periph, *next_periph; int retval; retval = 1; bus = device->target->bus; if (start_periph) periph = start_periph; else { xpt_lock_buses(); mtx_lock(&bus->eb_mtx); periph = SLIST_FIRST(&device->periphs); while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) periph = SLIST_NEXT(periph, periph_links); if (periph == NULL) { mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); return (retval); } periph->refcount++; mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); } for (; periph != NULL; periph = next_periph) { retval = tr_func(periph, arg); if (retval == 0) { cam_periph_release_locked(periph); break; } xpt_lock_buses(); mtx_lock(&bus->eb_mtx); next_periph = SLIST_NEXT(periph, periph_links); while (next_periph != NULL && (next_periph->flags & CAM_PERIPH_FREE) != 0) next_periph = SLIST_NEXT(next_periph, periph_links); if (next_periph) next_periph->refcount++; mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); cam_periph_release_locked(periph); } return(retval); } static int xptpdrvtraverse(struct periph_driver **start_pdrv, xpt_pdrvfunc_t *tr_func, void *arg) { struct periph_driver **pdrv; int retval; retval = 1; /* * We don't traverse the peripheral driver 
list like we do the * other lists, because it is a linker set, and therefore cannot be * changed during runtime. If the peripheral driver list is ever * re-done to be something other than a linker set (i.e. it can * change while the system is running), the list traversal should * be modified to work like the other traversal functions. */ for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); *pdrv != NULL; pdrv++) { retval = tr_func(pdrv, arg); if (retval == 0) return(retval); } return(retval); } static int xptpdperiphtraverse(struct periph_driver **pdrv, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg) { struct cam_periph *periph, *next_periph; int retval; retval = 1; if (start_periph) periph = start_periph; else { xpt_lock_buses(); periph = TAILQ_FIRST(&(*pdrv)->units); while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) periph = TAILQ_NEXT(periph, unit_links); if (periph == NULL) { xpt_unlock_buses(); return (retval); } periph->refcount++; xpt_unlock_buses(); } for (; periph != NULL; periph = next_periph) { cam_periph_lock(periph); retval = tr_func(periph, arg); cam_periph_unlock(periph); if (retval == 0) { cam_periph_release(periph); break; } xpt_lock_buses(); next_periph = TAILQ_NEXT(periph, unit_links); while (next_periph != NULL && (next_periph->flags & CAM_PERIPH_FREE) != 0) next_periph = TAILQ_NEXT(next_periph, unit_links); if (next_periph) next_periph->refcount++; xpt_unlock_buses(); cam_periph_release(periph); } return(retval); } static int xptdefbusfunc(struct cam_eb *bus, void *arg) { struct xpt_traverse_config *tr_config; tr_config = (struct xpt_traverse_config *)arg; if (tr_config->depth == XPT_DEPTH_BUS) { xpt_busfunc_t *tr_func; tr_func = (xpt_busfunc_t *)tr_config->tr_func; return(tr_func(bus, tr_config->tr_arg)); } else return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); } static int xptdeftargetfunc(struct cam_et *target, void *arg) { struct xpt_traverse_config *tr_config; tr_config = (struct xpt_traverse_config *)arg; if (tr_config->depth == XPT_DEPTH_TARGET) { xpt_targetfunc_t *tr_func; tr_func = (xpt_targetfunc_t *)tr_config->tr_func; return(tr_func(target, tr_config->tr_arg)); } else return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); } static int xptdefdevicefunc(struct cam_ed *device, void *arg) { struct xpt_traverse_config *tr_config; tr_config = (struct xpt_traverse_config *)arg; if (tr_config->depth == XPT_DEPTH_DEVICE) { xpt_devicefunc_t *tr_func; tr_func = (xpt_devicefunc_t *)tr_config->tr_func; return(tr_func(device, tr_config->tr_arg)); } else return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); } static int xptdefperiphfunc(struct cam_periph *periph, void *arg) { struct xpt_traverse_config *tr_config; xpt_periphfunc_t *tr_func; tr_config = (struct xpt_traverse_config *)arg; tr_func = (xpt_periphfunc_t *)tr_config->tr_func; /* * Unlike the other default functions, we don't check for depth * here. The peripheral driver level is the last level in the EDT, * so if we're here, we should execute the function in question. */ return(tr_func(periph, tr_config->tr_arg)); } /* * Execute the given function for every bus in the EDT. */ static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) { struct xpt_traverse_config tr_config; tr_config.depth = XPT_DEPTH_BUS; tr_config.tr_func = tr_func; tr_config.tr_arg = arg; return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); } /* * Execute the given function for every device in the EDT. 
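 *
 * A per-device callback such as xptsetasyncfunc() below can, for example,
 * be run over every existing device with
 * xpt_for_all_devices(xptsetasyncfunc, csa).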
*/ static int xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) { struct xpt_traverse_config tr_config; tr_config.depth = XPT_DEPTH_DEVICE; tr_config.tr_func = tr_func; tr_config.tr_arg = arg; return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); } static int xptsetasyncfunc(struct cam_ed *device, void *arg) { struct cam_path path; struct ccb_getdev cgd; struct ccb_setasync *csa = (struct ccb_setasync *)arg; /* * Don't report unconfigured devices (Wildcard devs, * devices only for target mode, device instances * that have been invalidated but are waiting for * their last reference count to be released). */ if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) return (1); memset(&cgd, 0, sizeof(cgd)); xpt_compile_path(&path, NULL, device->target->bus->path_id, device->target->target_id, device->lun_id); xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); csa->callback(csa->callback_arg, AC_FOUND_DEVICE, &path, &cgd); xpt_release_path(&path); return(1); } static int xptsetasyncbusfunc(struct cam_eb *bus, void *arg) { struct cam_path path; struct ccb_pathinq cpi; struct ccb_setasync *csa = (struct ccb_setasync *)arg; xpt_compile_path(&path, /*periph*/NULL, bus->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); xpt_path_lock(&path); xpt_path_inq(&cpi, &path); csa->callback(csa->callback_arg, AC_PATH_REGISTERED, &path, &cpi); xpt_path_unlock(&path); xpt_release_path(&path); return(1); } void xpt_action(union ccb *start_ccb) { CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code, xpt_action_name(start_ccb->ccb_h.func_code))); start_ccb->ccb_h.status = CAM_REQ_INPROG; (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb); } void xpt_action_default(union ccb *start_ccb) { struct cam_path *path; struct cam_sim *sim; struct mtx *mtx; path = start_ccb->ccb_h.path; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code, xpt_action_name(start_ccb->ccb_h.func_code))); switch (start_ccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct cam_ed *device; /* * For the sake of compatibility with SCSI-1 * devices that may not understand the identify * message, we include lun information in the * second byte of all commands. SCSI-1 specifies * that luns are a 3 bit value and reserves only 3 * bits for lun information in the CDB. Later * revisions of the SCSI spec allow for more than 8 * luns, but have deprecated lun information in the * CDB. So, if the lun won't fit, we must omit. * * Also be aware that during initial probing for devices, * the inquiry information is unknown but initialized to 0. * This means that this code will be exercised while probing * devices with an ANSI revision greater than 2. 
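		 *
		 * For example, for lun 2 on such a device the code below
		 * ORs (2 << 5) into byte 1 of the CDB; luns of 8 or
		 * greater cannot be encoded this way and are left out
		 * entirely.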
*/ device = path->device; if (device->protocol_version <= SCSI_REV_2 && start_ccb->ccb_h.target_lun < 8 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { start_ccb->csio.cdb_io.cdb_bytes[1] |= start_ccb->ccb_h.target_lun << 5; } start_ccb->csio.scsi_status = SCSI_STATUS_OK; } /* FALLTHROUGH */ case XPT_TARGET_IO: case XPT_CONT_TARGET_IO: start_ccb->csio.sense_resid = 0; start_ccb->csio.resid = 0; /* FALLTHROUGH */ case XPT_ATA_IO: if (start_ccb->ccb_h.func_code == XPT_ATA_IO) start_ccb->ataio.resid = 0; /* FALLTHROUGH */ case XPT_NVME_IO: case XPT_NVME_ADMIN: case XPT_MMC_IO: case XPT_MMC_GET_TRAN_SETTINGS: case XPT_MMC_SET_TRAN_SETTINGS: case XPT_RESET_DEV: case XPT_ENG_EXEC: case XPT_SMP_IO: { struct cam_devq *devq; devq = path->bus->sim->devq; mtx_lock(&devq->send_mtx); cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); if (xpt_schedule_devq(devq, path->device) != 0) xpt_run_devq(devq); mtx_unlock(&devq->send_mtx); break; } case XPT_CALC_GEOMETRY: /* Filter out garbage */ if (start_ccb->ccg.block_size == 0 || start_ccb->ccg.volume_size == 0) { start_ccb->ccg.cylinders = 0; start_ccb->ccg.heads = 0; start_ccb->ccg.secs_per_track = 0; start_ccb->ccb_h.status = CAM_REQ_CMP; break; } goto call_sim; case XPT_ABORT: { union ccb* abort_ccb; abort_ccb = start_ccb->cab.abort_ccb; if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) { struct cam_ed *device; struct cam_devq *devq; device = abort_ccb->ccb_h.path->device; devq = device->sim->devq; mtx_lock(&devq->send_mtx); if (abort_ccb->ccb_h.pinfo.index > 0) { cam_ccbq_remove_ccb(&device->ccbq, abort_ccb); abort_ccb->ccb_h.status = CAM_REQ_ABORTED|CAM_DEV_QFRZN; xpt_freeze_devq_device(device, 1); mtx_unlock(&devq->send_mtx); xpt_done(abort_ccb); start_ccb->ccb_h.status = CAM_REQ_CMP; break; } mtx_unlock(&devq->send_mtx); if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { /* * We've caught this ccb en route to * the SIM. Flag it for abort and the * SIM will do so just before starting * real work on the CCB. */ abort_ccb->ccb_h.status = CAM_REQ_ABORTED|CAM_DEV_QFRZN; xpt_freeze_devq(abort_ccb->ccb_h.path, 1); start_ccb->ccb_h.status = CAM_REQ_CMP; break; } } if (XPT_FC_IS_QUEUED(abort_ccb) && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { /* * It's already completed but waiting * for our SWI to get to it. */ start_ccb->ccb_h.status = CAM_UA_ABORT; break; } /* * If we weren't able to take care of the abort request * in the XPT, pass the request down to the SIM for processing. 
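		 * (Reaching this point typically means the CCB is already
		 * active inside the SIM, so only the SIM can still abort
		 * it.)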
*/ } /* FALLTHROUGH */ case XPT_ACCEPT_TARGET_IO: case XPT_EN_LUN: case XPT_IMMED_NOTIFY: case XPT_NOTIFY_ACK: case XPT_RESET_BUS: case XPT_IMMEDIATE_NOTIFY: case XPT_NOTIFY_ACKNOWLEDGE: case XPT_GET_SIM_KNOB_OLD: case XPT_GET_SIM_KNOB: case XPT_SET_SIM_KNOB: case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: case XPT_PATH_INQ: call_sim: sim = path->bus->sim; mtx = sim->mtx; if (mtx && !mtx_owned(mtx)) mtx_lock(mtx); else mtx = NULL; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Calling sim->sim_action(): func=%#x\n", start_ccb->ccb_h.func_code)); (*(sim->sim_action))(sim, start_ccb); CAM_DEBUG(path, CAM_DEBUG_TRACE, ("sim->sim_action returned: status=%#x\n", start_ccb->ccb_h.status)); if (mtx) mtx_unlock(mtx); break; case XPT_PATH_STATS: start_ccb->cpis.last_reset = path->bus->last_reset; start_ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_GDEV_TYPE: { struct cam_ed *dev; dev = path->device; if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; } else { struct ccb_getdev *cgd; cgd = &start_ccb->cgd; cgd->protocol = dev->protocol; cgd->inq_data = dev->inq_data; cgd->ident_data = dev->ident_data; cgd->inq_flags = dev->inq_flags; cgd->ccb_h.status = CAM_REQ_CMP; cgd->serial_num_len = dev->serial_num_len; if ((dev->serial_num_len > 0) && (dev->serial_num != NULL)) bcopy(dev->serial_num, cgd->serial_num, dev->serial_num_len); } break; } case XPT_GDEV_STATS: { struct ccb_getdevstats *cgds = &start_ccb->cgds; struct cam_ed *dev = path->device; struct cam_eb *bus = path->bus; struct cam_et *tar = path->target; struct cam_devq *devq = bus->sim->devq; mtx_lock(&devq->send_mtx); cgds->dev_openings = dev->ccbq.dev_openings; cgds->dev_active = dev->ccbq.dev_active; cgds->allocated = dev->ccbq.allocated; cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq); cgds->held = cgds->allocated - cgds->dev_active - cgds->queued; cgds->last_reset = tar->last_reset; cgds->maxtags = dev->maxtags; cgds->mintags = dev->mintags; if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) cgds->last_reset = bus->last_reset; mtx_unlock(&devq->send_mtx); cgds->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GDEVLIST: { struct cam_periph *nperiph; struct periph_list *periph_head; struct ccb_getdevlist *cgdl; u_int i; struct cam_ed *device; int found; found = 0; /* * Don't want anyone mucking with our data. */ device = path->device; periph_head = &device->periphs; cgdl = &start_ccb->cgdl; /* * Check and see if the list has changed since the user * last requested a list member. If so, tell them that the * list has changed, and therefore they need to start over * from the beginning. */ if ((cgdl->index != 0) && (cgdl->generation != device->generation)) { cgdl->status = CAM_GDEVLIST_LIST_CHANGED; break; } /* * Traverse the list of peripherals and attempt to find * the requested peripheral. 
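		 *
		 * cgdl->index selects which peripheral to report next, and
		 * cgdl->generation (checked above) lets the caller detect
		 * that the list changed between successive calls.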
*/ for (nperiph = SLIST_FIRST(periph_head), i = 0; (nperiph != NULL) && (i <= cgdl->index); nperiph = SLIST_NEXT(nperiph, periph_links), i++) { if (i == cgdl->index) { strlcpy(cgdl->periph_name, nperiph->periph_name, sizeof(cgdl->periph_name)); cgdl->unit_number = nperiph->unit_number; found = 1; } } if (found == 0) { cgdl->status = CAM_GDEVLIST_ERROR; break; } if (nperiph == NULL) cgdl->status = CAM_GDEVLIST_LAST_DEVICE; else cgdl->status = CAM_GDEVLIST_MORE_DEVS; cgdl->index++; cgdl->generation = device->generation; cgdl->ccb_h.status = CAM_REQ_CMP; break; } case XPT_DEV_MATCH: { dev_pos_type position_type; struct ccb_dev_match *cdm; cdm = &start_ccb->cdm; /* * There are two ways of getting at information in the EDT. * The first way is via the primary EDT tree. It starts * with a list of buses, then a list of targets on a bus, * then devices/luns on a target, and then peripherals on a * device/lun. The "other" way is by the peripheral driver * lists. The peripheral driver lists are organized by * peripheral driver. (obviously) So it makes sense to * use the peripheral driver list if the user is looking * for something like "da1", or all "da" devices. If the * user is looking for something on a particular bus/target * or lun, it's generally better to go through the EDT tree. */ if (cdm->pos.position_type != CAM_DEV_POS_NONE) position_type = cdm->pos.position_type; else { u_int i; position_type = CAM_DEV_POS_NONE; for (i = 0; i < cdm->num_patterns; i++) { if ((cdm->patterns[i].type == DEV_MATCH_BUS) ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ position_type = CAM_DEV_POS_EDT; break; } } if (cdm->num_patterns == 0) position_type = CAM_DEV_POS_EDT; else if (position_type == CAM_DEV_POS_NONE) position_type = CAM_DEV_POS_PDRV; } switch(position_type & CAM_DEV_POS_TYPEMASK) { case CAM_DEV_POS_EDT: xptedtmatch(cdm); break; case CAM_DEV_POS_PDRV: xptperiphlistmatch(cdm); break; default: cdm->status = CAM_DEV_MATCH_ERROR; break; } if (cdm->status == CAM_DEV_MATCH_ERROR) start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; else start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_SASYNC_CB: { struct ccb_setasync *csa; struct async_node *cur_entry; struct async_list *async_head; u_int32_t added; csa = &start_ccb->csa; added = csa->event_enable; async_head = &path->device->asyncs; /* * If there is already an entry for us, simply * update it. */ cur_entry = SLIST_FIRST(async_head); while (cur_entry != NULL) { if ((cur_entry->callback_arg == csa->callback_arg) && (cur_entry->callback == csa->callback)) break; cur_entry = SLIST_NEXT(cur_entry, links); } if (cur_entry != NULL) { /* * If the request has no flags set, * remove the entry. */ added &= ~cur_entry->event_enable; if (csa->event_enable == 0) { SLIST_REMOVE(async_head, cur_entry, async_node, links); xpt_release_device(path->device); free(cur_entry, M_CAMXPT); } else { cur_entry->event_enable = csa->event_enable; } csa->event_enable = added; } else { cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, M_NOWAIT); if (cur_entry == NULL) { csa->ccb_h.status = CAM_RESRC_UNAVAIL; break; } cur_entry->event_enable = csa->event_enable; cur_entry->event_lock = (path->bus->sim->mtx && mtx_owned(path->bus->sim->mtx)) ? 
1 : 0; cur_entry->callback_arg = csa->callback_arg; cur_entry->callback = csa->callback; SLIST_INSERT_HEAD(async_head, cur_entry, links); xpt_acquire_device(path->device); } start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_REL_SIMQ: { struct ccb_relsim *crs; struct cam_ed *dev; crs = &start_ccb->crs; dev = path->device; if (dev == NULL) { crs->ccb_h.status = CAM_DEV_NOT_THERE; break; } if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { /* Don't ever go below one opening */ if (crs->openings > 0) { xpt_dev_ccbq_resize(path, crs->openings); if (bootverbose) { xpt_print(path, "number of openings is now %d\n", crs->openings); } } } mtx_lock(&dev->sim->devq->send_mtx); if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { /* * Just extend the old timeout and decrement * the freeze count so that a single timeout * is sufficient for releasing the queue. */ start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; callout_stop(&dev->callout); } else { start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } callout_reset_sbt(&dev->callout, SBT_1MS * crs->release_timeout, 0, xpt_release_devq_timeout, dev, 0); dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; } if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { /* * Decrement the freeze count so that a single * completion is still sufficient to unfreeze * the queue. */ start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; } else { dev->flags |= CAM_DEV_REL_ON_COMPLETE; start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } } if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 || (dev->ccbq.dev_active == 0)) { start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; } else { dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } } mtx_unlock(&dev->sim->devq->send_mtx); if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt; start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_DEBUG: { struct cam_path *oldpath; /* Check that all request bits are supported. */ if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) { start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; } cam_dflags = CAM_DEBUG_NONE; if (cam_dpath != NULL) { oldpath = cam_dpath; cam_dpath = NULL; xpt_free_path(oldpath); } if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) { if (xpt_create_path(&cam_dpath, NULL, start_ccb->ccb_h.path_id, start_ccb->ccb_h.target_id, start_ccb->ccb_h.target_lun) != CAM_REQ_CMP) { start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; } else { cam_dflags = start_ccb->cdbg.flags; start_ccb->ccb_h.status = CAM_REQ_CMP; xpt_print(cam_dpath, "debugging flags now %x\n", cam_dflags); } } else start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_NOOP: if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) xpt_freeze_devq(path, 1); start_ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_REPROBE_LUN: xpt_async(AC_INQ_CHANGED, path, NULL); start_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(start_ccb); break; case XPT_ASYNC: /* * Queue the async operation so it can be run from a sleepable * context. 
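 * The CCB is appended to cam_async.cam_doneq below and later handled by
 * the async completion thread (xpt_async_td()), which is allowed to sleep
 * while it delivers the callbacks.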
*/ start_ccb->ccb_h.status = CAM_REQ_CMP; mtx_lock(&cam_async.cam_doneq_mtx); STAILQ_INSERT_TAIL(&cam_async.cam_doneq, &start_ccb->ccb_h, sim_links.stqe); start_ccb->ccb_h.pinfo.index = CAM_ASYNC_INDEX; mtx_unlock(&cam_async.cam_doneq_mtx); wakeup(&cam_async.cam_doneq); break; default: case XPT_SDEV_TYPE: case XPT_TERM_IO: case XPT_ENG_INQ: /* XXX Implement */ xpt_print(start_ccb->ccb_h.path, "%s: CCB type %#x %s not supported\n", __func__, start_ccb->ccb_h.func_code, xpt_action_name(start_ccb->ccb_h.func_code)); start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) { xpt_done(start_ccb); } break; } CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default: func= %#x %s status %#x\n", start_ccb->ccb_h.func_code, xpt_action_name(start_ccb->ccb_h.func_code), start_ccb->ccb_h.status)); } /* * Call the sim poll routine to allow the sim to complete * any inflight requests, then call camisr_runqueue to * complete any CCB that the polling completed. */ void xpt_sim_poll(struct cam_sim *sim) { struct mtx *mtx; KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__)); mtx = sim->mtx; if (mtx) mtx_lock(mtx); (*(sim->sim_poll))(sim); if (mtx) mtx_unlock(mtx); camisr_runqueue(); } uint32_t xpt_poll_setup(union ccb *start_ccb) { u_int32_t timeout; struct cam_sim *sim; struct cam_devq *devq; struct cam_ed *dev; timeout = start_ccb->ccb_h.timeout * 10; sim = start_ccb->ccb_h.path->bus->sim; devq = sim->devq; dev = start_ccb->ccb_h.path->device; KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__)); /* * Steal an opening so that no other queued requests * can get it before us while we simulate interrupts. */ mtx_lock(&devq->send_mtx); dev->ccbq.dev_openings--; while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) && (--timeout > 0)) { mtx_unlock(&devq->send_mtx); DELAY(100); xpt_sim_poll(sim); mtx_lock(&devq->send_mtx); } dev->ccbq.dev_openings++; mtx_unlock(&devq->send_mtx); return (timeout); } void xpt_pollwait(union ccb *start_ccb, uint32_t timeout) { KASSERT(cam_sim_pollable(start_ccb->ccb_h.path->bus->sim), ("%s: non-pollable sim", __func__)); while (--timeout > 0) { xpt_sim_poll(start_ccb->ccb_h.path->bus->sim); if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) break; DELAY(100); } if (timeout == 0) { /* * XXX Is it worth adding a sim_timeout entry * point so we can attempt recovery? If * this is only used for dumps, I don't think * it is. */ start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; } } /* * Schedule a peripheral driver to receive a ccb when its * target device has space for more transactions. */ void xpt_schedule(struct cam_periph *periph, u_int32_t new_priority) { CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); cam_periph_assert(periph, MA_OWNED); if (new_priority < periph->scheduled_priority) { periph->scheduled_priority = new_priority; xpt_run_allocq(periph, 0); } } /* * Schedule a device to run on a given queue. * If the device was inserted as a new entry on the queue, * return 1 meaning the device queue should be run. If we * were already queued, implying someone else has already * started the queue, return 0 so the caller doesn't attempt * to run the queue. */ static int xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, u_int32_t new_priority) { int retval; u_int32_t old_priority; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); old_priority = pinfo->priority; /* * Are we already queued? 
*/ if (pinfo->index != CAM_UNQUEUED_INDEX) { /* Simply reorder based on new priority */ if (new_priority < old_priority) { camq_change_priority(queue, pinfo->index, new_priority); CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("changed priority to %d\n", new_priority)); retval = 1; } else retval = 0; } else { /* New entry on the queue */ if (new_priority < old_priority) pinfo->priority = new_priority; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("Inserting onto queue\n")); pinfo->generation = ++queue->generation; camq_insert(queue, pinfo); retval = 1; } return (retval); } static void xpt_run_allocq_task(void *context, int pending) { struct cam_periph *periph = context; cam_periph_lock(periph); periph->flags &= ~CAM_PERIPH_RUN_TASK; xpt_run_allocq(periph, 1); cam_periph_unlock(periph); cam_periph_release(periph); } static void xpt_run_allocq(struct cam_periph *periph, int sleep) { struct cam_ed *device; union ccb *ccb; uint32_t prio; cam_periph_assert(periph, MA_OWNED); if (periph->periph_allocating) return; cam_periph_doacquire(periph); periph->periph_allocating = 1; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph)); device = periph->path->device; ccb = NULL; restart: while ((prio = min(periph->scheduled_priority, periph->immediate_priority)) != CAM_PRIORITY_NONE && (periph->periph_allocated - (ccb != NULL ? 1 : 0) < device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) { if (ccb == NULL && (ccb = xpt_get_ccb_nowait(periph)) == NULL) { if (sleep) { ccb = xpt_get_ccb(periph); goto restart; } if (periph->flags & CAM_PERIPH_RUN_TASK) break; cam_periph_doacquire(periph); periph->flags |= CAM_PERIPH_RUN_TASK; taskqueue_enqueue(xsoftc.xpt_taskq, &periph->periph_run_task); break; } xpt_setup_ccb(&ccb->ccb_h, periph->path, prio); if (prio == periph->immediate_priority) { periph->immediate_priority = CAM_PRIORITY_NONE; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("waking cam_periph_getccb()\n")); SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h, periph_links.sle); wakeup(&periph->ccb_list); } else { periph->scheduled_priority = CAM_PRIORITY_NONE; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("calling periph_start()\n")); periph->periph_start(periph, ccb); } ccb = NULL; } if (ccb != NULL) xpt_release_ccb(ccb); periph->periph_allocating = 0; cam_periph_release_locked(periph); } static void xpt_run_devq(struct cam_devq *devq) { struct mtx *mtx; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n")); devq->send_queue.qfrozen_cnt++; while ((devq->send_queue.entries > 0) && (devq->send_openings > 0) && (devq->send_queue.qfrozen_cnt <= 1)) { struct cam_ed *device; union ccb *work_ccb; struct cam_sim *sim; struct xpt_proto *proto; device = (struct cam_ed *)camq_remove(&devq->send_queue, CAMQ_HEAD); CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("running device %p\n", device)); work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); if (work_ccb == NULL) { printf("device on run queue with no ccbs???\n"); continue; } if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { mtx_lock(&xsoftc.xpt_highpower_lock); if (xsoftc.num_highpower <= 0) { /* * We got a high power command, but we * don't have any available slots. Freeze * the device queue until we have a slot * available. */ xpt_freeze_devq_device(device, 1); STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device, highpowerq_entry); mtx_unlock(&xsoftc.xpt_highpower_lock); continue; } else { /* * Consume a high power slot while * this ccb runs. 
*/ xsoftc.num_highpower--; } mtx_unlock(&xsoftc.xpt_highpower_lock); } cam_ccbq_remove_ccb(&device->ccbq, work_ccb); cam_ccbq_send_ccb(&device->ccbq, work_ccb); devq->send_openings--; devq->send_active++; xpt_schedule_devq(devq, device); mtx_unlock(&devq->send_mtx); if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) { /* * The client wants to freeze the queue * after this CCB is sent. */ xpt_freeze_devq(work_ccb->ccb_h.path, 1); } /* In Target mode, the peripheral driver knows best... */ if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { if ((device->inq_flags & SID_CmdQue) != 0 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; else /* * Clear this in case of a retried CCB that * failed due to a rejected tag. */ work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; } KASSERT(device == work_ccb->ccb_h.path->device, ("device (%p) / path->device (%p) mismatch", device, work_ccb->ccb_h.path->device)); proto = xpt_proto_find(device->protocol); if (proto && proto->ops->debug_out) proto->ops->debug_out(work_ccb); /* * Device queues can be shared among multiple SIM instances * that reside on different buses. Use the SIM from the * queued device, rather than the one from the calling bus. */ sim = device->sim; mtx = sim->mtx; if (mtx && !mtx_owned(mtx)) mtx_lock(mtx); else mtx = NULL; work_ccb->ccb_h.qos.periph_data = cam_iosched_now(); (*(sim->sim_action))(sim, work_ccb); if (mtx) mtx_unlock(mtx); mtx_lock(&devq->send_mtx); } devq->send_queue.qfrozen_cnt--; } /* * This function merges stuff from the src ccb into the dst ccb, while keeping * important fields in the dst ccb constant. */ void xpt_merge_ccb(union ccb *dst_ccb, union ccb *src_ccb) { /* * Pull fields that are valid for peripheral drivers to set * into the dst CCB along with the CCB "payload". 
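 * Only retry_count, func_code, timeout and flags are taken from the source
 * header; the bcopy() below then copies everything that follows the header
 * in one shot.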
*/ dst_ccb->ccb_h.retry_count = src_ccb->ccb_h.retry_count; dst_ccb->ccb_h.func_code = src_ccb->ccb_h.func_code; dst_ccb->ccb_h.timeout = src_ccb->ccb_h.timeout; dst_ccb->ccb_h.flags = src_ccb->ccb_h.flags; bcopy(&(&src_ccb->ccb_h)[1], &(&dst_ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); } void xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority, u_int32_t flags) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); ccb_h->pinfo.priority = priority; ccb_h->path = path; ccb_h->path_id = path->bus->path_id; if (path->target) ccb_h->target_id = path->target->target_id; else ccb_h->target_id = CAM_TARGET_WILDCARD; if (path->device) { ccb_h->target_lun = path->device->lun_id; ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; } else { ccb_h->target_lun = CAM_TARGET_WILDCARD; } ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; ccb_h->flags = flags; ccb_h->xflags = 0; } void xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) { xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0); } /* Path manipulation functions */ cam_status xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { struct cam_path *path; cam_status status; path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); if (path == NULL) { status = CAM_RESRC_UNAVAIL; return(status); } status = xpt_compile_path(path, perph, path_id, target_id, lun_id); if (status != CAM_REQ_CMP) { free(path, M_CAMPATH); path = NULL; } *new_path_ptr = path; return (status); } cam_status xpt_create_path_unlocked(struct cam_path **new_path_ptr, struct cam_periph *periph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { return (xpt_create_path(new_path_ptr, periph, path_id, target_id, lun_id)); } cam_status xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { struct cam_eb *bus; struct cam_et *target; struct cam_ed *device; cam_status status; status = CAM_REQ_CMP; /* Completed without error */ target = NULL; /* Wildcarded */ device = NULL; /* Wildcarded */ /* * We will potentially modify the EDT, so block interrupts * that may attempt to create cam paths. */ bus = xpt_find_bus(path_id); if (bus == NULL) { status = CAM_PATH_INVALID; } else { xpt_lock_buses(); mtx_lock(&bus->eb_mtx); target = xpt_find_target(bus, target_id); if (target == NULL) { /* Create one */ struct cam_et *new_target; new_target = xpt_alloc_target(bus, target_id); if (new_target == NULL) { status = CAM_RESRC_UNAVAIL; } else { target = new_target; } } xpt_unlock_buses(); if (target != NULL) { device = xpt_find_device(target, lun_id); if (device == NULL) { /* Create one */ struct cam_ed *new_device; new_device = (*(bus->xport->ops->alloc_device))(bus, target, lun_id); if (new_device == NULL) { status = CAM_RESRC_UNAVAIL; } else { device = new_device; } } } mtx_unlock(&bus->eb_mtx); } /* * Only touch the user's data if we are successful. 
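 * On failure, drop whatever bus/target/device references were acquired
 * above so the entries we may have created do not leak.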
*/ if (status == CAM_REQ_CMP) { new_path->periph = perph; new_path->bus = bus; new_path->target = target; new_path->device = device; CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); } else { if (device != NULL) xpt_release_device(device); if (target != NULL) xpt_release_target(target); if (bus != NULL) xpt_release_bus(bus); } return (status); } cam_status xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path) { struct cam_path *new_path; new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); if (new_path == NULL) return(CAM_RESRC_UNAVAIL); *new_path = *path; if (path->bus != NULL) xpt_acquire_bus(path->bus); if (path->target != NULL) xpt_acquire_target(path->target); if (path->device != NULL) xpt_acquire_device(path->device); *new_path_ptr = new_path; return (CAM_REQ_CMP); } void xpt_release_path(struct cam_path *path) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); if (path->device != NULL) { xpt_release_device(path->device); path->device = NULL; } if (path->target != NULL) { xpt_release_target(path->target); path->target = NULL; } if (path->bus != NULL) { xpt_release_bus(path->bus); path->bus = NULL; } } void xpt_free_path(struct cam_path *path) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); xpt_release_path(path); free(path, M_CAMPATH); } void xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref) { xpt_lock_buses(); if (bus_ref) { if (path->bus) *bus_ref = path->bus->refcount; else *bus_ref = 0; } if (periph_ref) { if (path->periph) *periph_ref = path->periph->refcount; else *periph_ref = 0; } xpt_unlock_buses(); if (target_ref) { if (path->target) *target_ref = path->target->refcount; else *target_ref = 0; } if (device_ref) { if (path->device) *device_ref = path->device->refcount; else *device_ref = 0; } } /* * Return -1 for failure, 0 for exact match, 1 for match with wildcards * in path1, 2 for match with wildcards in path2. 
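 * For example (illustrative): comparing a path on bus 0 with a wildcard
 * target and LUN against a fully specified path on the same bus yields 1,
 * since only the first path relies on wildcards.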
*/ int xpt_path_comp(struct cam_path *path1, struct cam_path *path2) { int retval = 0; if (path1->bus != path2->bus) { if (path1->bus->path_id == CAM_BUS_WILDCARD) retval = 1; else if (path2->bus->path_id == CAM_BUS_WILDCARD) retval = 2; else return (-1); } if (path1->target != path2->target) { if (path1->target->target_id == CAM_TARGET_WILDCARD) { if (retval == 0) retval = 1; } else if (path2->target->target_id == CAM_TARGET_WILDCARD) retval = 2; else return (-1); } if (path1->device != path2->device) { if (path1->device->lun_id == CAM_LUN_WILDCARD) { if (retval == 0) retval = 1; } else if (path2->device->lun_id == CAM_LUN_WILDCARD) retval = 2; else return (-1); } return (retval); } int xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev) { int retval = 0; if (path->bus != dev->target->bus) { if (path->bus->path_id == CAM_BUS_WILDCARD) retval = 1; else if (dev->target->bus->path_id == CAM_BUS_WILDCARD) retval = 2; else return (-1); } if (path->target != dev->target) { if (path->target->target_id == CAM_TARGET_WILDCARD) { if (retval == 0) retval = 1; } else if (dev->target->target_id == CAM_TARGET_WILDCARD) retval = 2; else return (-1); } if (path->device != dev) { if (path->device->lun_id == CAM_LUN_WILDCARD) { if (retval == 0) retval = 1; } else if (dev->lun_id == CAM_LUN_WILDCARD) retval = 2; else return (-1); } return (retval); } void xpt_print_path(struct cam_path *path) { struct sbuf sb; char buffer[XPT_PRINT_LEN]; sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN); xpt_path_sbuf(path, &sb); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); sbuf_delete(&sb); } void xpt_print_device(struct cam_ed *device) { if (device == NULL) printf("(nopath): "); else { printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name, device->sim->unit_number, device->sim->bus_id, device->target->target_id, (uintmax_t)device->lun_id); } } void xpt_print(struct cam_path *path, const char *fmt, ...) 
{ va_list ap; struct sbuf sb; char buffer[XPT_PRINT_LEN]; sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN); xpt_path_sbuf(path, &sb); va_start(ap, fmt); sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); sbuf_delete(&sb); } int xpt_path_string(struct cam_path *path, char *str, size_t str_len) { struct sbuf sb; int len; sbuf_new(&sb, str, str_len, 0); len = xpt_path_sbuf(path, &sb); sbuf_finish(&sb); return (len); } int xpt_path_sbuf(struct cam_path *path, struct sbuf *sb) { if (path == NULL) sbuf_printf(sb, "(nopath): "); else { if (path->periph != NULL) sbuf_printf(sb, "(%s%d:", path->periph->periph_name, path->periph->unit_number); else sbuf_printf(sb, "(noperiph:"); if (path->bus != NULL) sbuf_printf(sb, "%s%d:%d:", path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id); else sbuf_printf(sb, "nobus:"); if (path->target != NULL) sbuf_printf(sb, "%d:", path->target->target_id); else sbuf_printf(sb, "X:"); if (path->device != NULL) sbuf_printf(sb, "%jx): ", (uintmax_t)path->device->lun_id); else sbuf_printf(sb, "X): "); } return(sbuf_len(sb)); } path_id_t xpt_path_path_id(struct cam_path *path) { return(path->bus->path_id); } target_id_t xpt_path_target_id(struct cam_path *path) { if (path->target != NULL) return (path->target->target_id); else return (CAM_TARGET_WILDCARD); } lun_id_t xpt_path_lun_id(struct cam_path *path) { if (path->device != NULL) return (path->device->lun_id); else return (CAM_LUN_WILDCARD); } struct cam_sim * xpt_path_sim(struct cam_path *path) { return (path->bus->sim); } struct cam_periph* xpt_path_periph(struct cam_path *path) { return (path->periph); } /* * Release a CAM control block for the caller. Remit the cost of the structure * to the device referenced by the path. If this device had no 'credits' * and peripheral drivers have registered async callbacks for this notification, * call them now. */ void xpt_release_ccb(union ccb *free_ccb) { struct cam_ed *device; struct cam_periph *periph; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED); device = free_ccb->ccb_h.path->device; periph = free_ccb->ccb_h.path->periph; xpt_free_ccb(free_ccb); periph->periph_allocated--; cam_ccbq_release_opening(&device->ccbq); xpt_run_allocq(periph, 0); } /* Functions accessed by SIM drivers */ static struct xpt_xport_ops xport_default_ops = { .alloc_device = xpt_alloc_device_default, .action = xpt_action_default, .async = xpt_dev_async_default, }; static struct xpt_xport xport_default = { .xport = XPORT_UNKNOWN, .name = "unknown", .ops = &xport_default_ops, }; CAM_XPT_XPORT(xport_default); /* * A sim structure, listing the SIM entry points and instance * identification info, is passed to xpt_bus_register to hook the SIM * into the CAM framework. xpt_bus_register creates a cam_eb entry * for this new bus and places it in the array of buses and assigns * it a path_id. The path_id may be influenced by "hard wiring" * information specified by the user. Once interrupt services are * available, the bus will be probed.
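 *
 * A rough sketch of the usual call sequence from a SIM attach routine
 * (illustrative only; "sc", "dev", "MAX_QUEUE", "mysim_action" and
 * "mysim_poll" are placeholder names and error handling is omitted):
 *
 *	devq = cam_simq_alloc(MAX_QUEUE);
 *	sim = cam_sim_alloc(mysim_action, mysim_poll, "mysim", sc,
 *	    device_get_unit(dev), &sc->lock, 1, MAX_QUEUE, devq);
 *	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS)
 *		goto fail;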
*/ -int32_t -xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus) +int +xpt_bus_register(struct cam_sim *sim, device_t parent, uint32_t bus) { struct cam_eb *new_bus; struct cam_eb *old_bus; struct ccb_pathinq cpi; struct cam_path *path; cam_status status; sim->bus_id = bus; new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), M_CAMXPT, M_NOWAIT|M_ZERO); if (new_bus == NULL) { /* Couldn't satisfy request */ - return (CAM_RESRC_UNAVAIL); + return (ENOMEM); } mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF); TAILQ_INIT(&new_bus->et_entries); cam_sim_hold(sim); new_bus->sim = sim; timevalclear(&new_bus->last_reset); new_bus->flags = 0; new_bus->refcount = 1; /* Held until a bus_deregister event */ new_bus->generation = 0; new_bus->parent_dev = parent; xpt_lock_buses(); sim->path_id = new_bus->path_id = xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); while (old_bus != NULL && old_bus->path_id < new_bus->path_id) old_bus = TAILQ_NEXT(old_bus, links); if (old_bus != NULL) TAILQ_INSERT_BEFORE(old_bus, new_bus, links); else TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); xsoftc.bus_generation++; xpt_unlock_buses(); /* * Set a default transport so that a PATH_INQ can be issued to * the SIM. This will then allow for probing and attaching of * a more appropriate transport. */ new_bus->xport = &xport_default; status = xpt_create_path(&path, /*periph*/NULL, sim->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { xpt_release_bus(new_bus); - return (CAM_RESRC_UNAVAIL); + return (ENOMEM); } xpt_path_inq(&cpi, path); if (cpi.ccb_h.status == CAM_REQ_CMP) { struct xpt_xport **xpt; SET_FOREACH(xpt, cam_xpt_xport_set) { if ((*xpt)->xport == cpi.transport) { new_bus->xport = *xpt; break; } } if (new_bus->xport == NULL) { xpt_print(path, "No transport found for %d\n", cpi.transport); xpt_release_bus(new_bus); free(path, M_CAMXPT); - return (CAM_RESRC_UNAVAIL); + return (EINVAL); } } /* Notify interested parties */ if (sim->path_id != CAM_XPT_PATH_ID) { xpt_async(AC_PATH_REGISTERED, path, &cpi); if ((cpi.hba_misc & PIM_NOSCAN) == 0) { union ccb *scan_ccb; /* Initiate bus rescan. */ scan_ccb = xpt_alloc_ccb_nowait(); if (scan_ccb != NULL) { scan_ccb->ccb_h.path = path; scan_ccb->ccb_h.func_code = XPT_SCAN_BUS; scan_ccb->crcn.flags = 0; xpt_rescan(scan_ccb); } else { xpt_print(path, "Can't allocate CCB to scan bus\n"); xpt_free_path(path); } } else xpt_free_path(path); } else xpt_free_path(path); return (CAM_SUCCESS); } -int32_t +int xpt_bus_deregister(path_id_t pathid) { struct cam_path bus_path; cam_status status; status = xpt_compile_path(&bus_path, NULL, pathid, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) - return (status); + return (ENOMEM); xpt_async(AC_LOST_DEVICE, &bus_path, NULL); xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); /* Release the reference count held while registered. */ xpt_release_bus(bus_path.bus); xpt_release_path(&bus_path); - return (CAM_REQ_CMP); + return (CAM_SUCCESS); } static path_id_t xptnextfreepathid(void) { struct cam_eb *bus; path_id_t pathid; const char *strval; mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); pathid = 0; bus = TAILQ_FIRST(&xsoftc.xpt_busses); retry: /* Find an unoccupied pathid */ while (bus != NULL && bus->path_id <= pathid) { if (bus->path_id == pathid) pathid++; bus = TAILQ_NEXT(bus, links); } /* * Ensure that this pathid is not reserved for * a bus that may be registered in the future. 
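 * A "scbus" resource hint wired to this pathid (for example
 * hint.scbus.0.at="ahc0" in device.hints) reserves it, so skip it and
 * try the next one.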
*/ if (resource_string_value("scbus", pathid, "at", &strval) == 0) { ++pathid; /* Start the search over */ goto retry; } return (pathid); } static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus) { path_id_t pathid; int i, dunit, val; char buf[32]; const char *dname; pathid = CAM_XPT_PATH_ID; snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); if (strcmp(buf, "xpt0") == 0 && sim_bus == 0) return (pathid); i = 0; while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { if (strcmp(dname, "scbus")) { /* Avoid a bit of foot shooting. */ continue; } if (dunit < 0) /* unwired?! */ continue; if (resource_int_value("scbus", dunit, "bus", &val) == 0) { if (sim_bus == val) { pathid = dunit; break; } } else if (sim_bus == 0) { /* Unspecified matches bus 0 */ pathid = dunit; break; } else { printf("Ambiguous scbus configuration for %s%d " "bus %d, cannot wire down. The kernel " "config entry for scbus%d should " "specify a controller bus.\n" "Scbus will be assigned dynamically.\n", sim_name, sim_unit, sim_bus, dunit); break; } } if (pathid == CAM_XPT_PATH_ID) pathid = xptnextfreepathid(); return (pathid); } static const char * xpt_async_string(u_int32_t async_code) { switch (async_code) { case AC_BUS_RESET: return ("AC_BUS_RESET"); case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL"); case AC_SCSI_AEN: return ("AC_SCSI_AEN"); case AC_SENT_BDR: return ("AC_SENT_BDR"); case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED"); case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED"); case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE"); case AC_LOST_DEVICE: return ("AC_LOST_DEVICE"); case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG"); case AC_INQ_CHANGED: return ("AC_INQ_CHANGED"); case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED"); case AC_CONTRACT: return ("AC_CONTRACT"); case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED"); case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION"); } return ("AC_UNKNOWN"); } static int xpt_async_size(u_int32_t async_code) { switch (async_code) { case AC_BUS_RESET: return (0); case AC_UNSOL_RESEL: return (0); case AC_SCSI_AEN: return (0); case AC_SENT_BDR: return (0); case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq)); case AC_PATH_DEREGISTERED: return (0); case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev)); case AC_LOST_DEVICE: return (0); case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings)); case AC_INQ_CHANGED: return (0); case AC_GETDEV_CHANGED: return (0); case AC_CONTRACT: return (sizeof(struct ac_contract)); case AC_ADVINFO_CHANGED: return (-1); case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio)); } return (0); } static int xpt_async_process_dev(struct cam_ed *device, void *arg) { union ccb *ccb = arg; struct cam_path *path = ccb->ccb_h.path; void *async_arg = ccb->casync.async_arg_ptr; u_int32_t async_code = ccb->casync.async_code; int relock; if (path->device != device && path->device->lun_id != CAM_LUN_WILDCARD && device->lun_id != CAM_LUN_WILDCARD) return (1); /* * The async callback could free the device. * If it is a broadcast async, it doesn't hold * device reference, so take our own reference. */ xpt_acquire_device(device); /* * If async for specific device is to be delivered to * the wildcard client, take the specific device lock. * XXX: We may need a way for client to specify it. 
*/ if ((device->lun_id == CAM_LUN_WILDCARD && path->device->lun_id != CAM_LUN_WILDCARD) || (device->target->target_id == CAM_TARGET_WILDCARD && path->target->target_id != CAM_TARGET_WILDCARD) || (device->target->bus->path_id == CAM_BUS_WILDCARD && path->target->bus->path_id != CAM_BUS_WILDCARD)) { mtx_unlock(&device->device_mtx); xpt_path_lock(path); relock = 1; } else relock = 0; (*(device->target->bus->xport->ops->async))(async_code, device->target->bus, device->target, device, async_arg); xpt_async_bcast(&device->asyncs, async_code, path, async_arg); if (relock) { xpt_path_unlock(path); mtx_lock(&device->device_mtx); } xpt_release_device(device); return (1); } static int xpt_async_process_tgt(struct cam_et *target, void *arg) { union ccb *ccb = arg; struct cam_path *path = ccb->ccb_h.path; if (path->target != target && path->target->target_id != CAM_TARGET_WILDCARD && target->target_id != CAM_TARGET_WILDCARD) return (1); if (ccb->casync.async_code == AC_SENT_BDR) { /* Update our notion of when the last reset occurred */ microtime(&target->last_reset); } return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb)); } static void xpt_async_process(struct cam_periph *periph, union ccb *ccb) { struct cam_eb *bus; struct cam_path *path; void *async_arg; u_int32_t async_code; path = ccb->ccb_h.path; async_code = ccb->casync.async_code; async_arg = ccb->casync.async_arg_ptr; CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, ("xpt_async(%s)\n", xpt_async_string(async_code))); bus = path->bus; if (async_code == AC_BUS_RESET) { /* Update our notion of when the last reset occurred */ microtime(&bus->last_reset); } xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb); /* * If this wasn't a fully wildcarded async, tell all * clients that want all async events. */ if (bus != xpt_periph->path->bus) { xpt_path_lock(xpt_periph->path); xpt_async_process_dev(xpt_periph->path->device, ccb); xpt_path_unlock(xpt_periph->path); } if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) xpt_release_devq(path, 1, TRUE); else xpt_release_simq(path->bus->sim, TRUE); if (ccb->casync.async_arg_size > 0) free(async_arg, M_CAMXPT); xpt_free_path(path); xpt_free_ccb(ccb); } static void xpt_async_bcast(struct async_list *async_head, u_int32_t async_code, struct cam_path *path, void *async_arg) { struct async_node *cur_entry; struct mtx *mtx; cur_entry = SLIST_FIRST(async_head); while (cur_entry != NULL) { struct async_node *next_entry; /* * Grab the next list entry before we call the current * entry's callback. This is because the callback function * can delete its async callback entry. */ next_entry = SLIST_NEXT(cur_entry, links); if ((cur_entry->event_enable & async_code) != 0) { mtx = cur_entry->event_lock ? 
path->device->sim->mtx : NULL; if (mtx) mtx_lock(mtx); cur_entry->callback(cur_entry->callback_arg, async_code, path, async_arg); if (mtx) mtx_unlock(mtx); } cur_entry = next_entry; } } void xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) { union ccb *ccb; int size; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { xpt_print(path, "Can't allocate CCB to send %s\n", xpt_async_string(async_code)); return; } if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) { xpt_print(path, "Can't allocate path to send %s\n", xpt_async_string(async_code)); xpt_free_ccb(ccb); return; } ccb->ccb_h.path->periph = NULL; ccb->ccb_h.func_code = XPT_ASYNC; ccb->ccb_h.cbfcnp = xpt_async_process; ccb->ccb_h.flags |= CAM_UNLOCKED; ccb->casync.async_code = async_code; ccb->casync.async_arg_size = 0; size = xpt_async_size(async_code); CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_async: func %#x %s async_code %d %s\n", ccb->ccb_h.func_code, xpt_action_name(ccb->ccb_h.func_code), async_code, xpt_async_string(async_code))); if (size > 0 && async_arg != NULL) { ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT); if (ccb->casync.async_arg_ptr == NULL) { xpt_print(path, "Can't allocate argument to send %s\n", xpt_async_string(async_code)); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } memcpy(ccb->casync.async_arg_ptr, async_arg, size); ccb->casync.async_arg_size = size; } else if (size < 0) { ccb->casync.async_arg_ptr = async_arg; ccb->casync.async_arg_size = size; } if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) xpt_freeze_devq(path, 1); else xpt_freeze_simq(path->bus->sim, 1); xpt_action(ccb); } static void xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg) { /* * We only need to handle events for real devices. */ if (target->target_id == CAM_TARGET_WILDCARD || device->lun_id == CAM_LUN_WILDCARD) return; printf("%s called\n", __func__); } static uint32_t xpt_freeze_devq_device(struct cam_ed *dev, u_int count) { struct cam_devq *devq; uint32_t freeze; devq = dev->sim->devq; mtx_assert(&devq->send_mtx, MA_OWNED); CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_freeze_devq_device(%d) %u->%u\n", count, dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count)); freeze = (dev->ccbq.queue.qfrozen_cnt += count); /* Remove frozen device from sendq.
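 * Otherwise xpt_run_devq() could still pick the device off the send queue
 * and issue I/O while the freeze is supposed to be in effect.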
*/ if (device_is_queued(dev)) camq_remove(&devq->send_queue, dev->devq_entry.index); return (freeze); } u_int32_t xpt_freeze_devq(struct cam_path *path, u_int count) { struct cam_ed *dev = path->device; struct cam_devq *devq; uint32_t freeze; devq = dev->sim->devq; mtx_lock(&devq->send_mtx); CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count)); freeze = xpt_freeze_devq_device(dev, count); mtx_unlock(&devq->send_mtx); return (freeze); } u_int32_t xpt_freeze_simq(struct cam_sim *sim, u_int count) { struct cam_devq *devq; uint32_t freeze; devq = sim->devq; mtx_lock(&devq->send_mtx); freeze = (devq->send_queue.qfrozen_cnt += count); mtx_unlock(&devq->send_mtx); return (freeze); } static void xpt_release_devq_timeout(void *arg) { struct cam_ed *dev; struct cam_devq *devq; dev = (struct cam_ed *)arg; CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n")); devq = dev->sim->devq; mtx_assert(&devq->send_mtx, MA_OWNED); if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE)) xpt_run_devq(devq); } void xpt_release_devq(struct cam_path *path, u_int count, int run_queue) { struct cam_ed *dev; struct cam_devq *devq; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n", count, run_queue)); dev = path->device; devq = dev->sim->devq; mtx_lock(&devq->send_mtx); if (xpt_release_devq_device(dev, count, run_queue)) xpt_run_devq(dev->sim->devq); mtx_unlock(&devq->send_mtx); } static int xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) { mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED); CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue, dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count)); if (count > dev->ccbq.queue.qfrozen_cnt) { #ifdef INVARIANTS printf("xpt_release_devq(): requested %u > present %u\n", count, dev->ccbq.queue.qfrozen_cnt); #endif count = dev->ccbq.queue.qfrozen_cnt; } dev->ccbq.queue.qfrozen_cnt -= count; if (dev->ccbq.queue.qfrozen_cnt == 0) { /* * No longer need to wait for a successful * command completion. */ dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; /* * Remove any timeouts that might be scheduled * to release this queue. */ if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { callout_stop(&dev->callout); dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; } /* * Now that we are unfrozen schedule the * device so any pending transactions are * run. */ xpt_schedule_devq(dev->sim->devq, dev); } else run_queue = 0; return (run_queue); } void xpt_release_simq(struct cam_sim *sim, int run_queue) { struct cam_devq *devq; devq = sim->devq; mtx_lock(&devq->send_mtx); if (devq->send_queue.qfrozen_cnt <= 0) { #ifdef INVARIANTS printf("xpt_release_simq: requested 1 > present %u\n", devq->send_queue.qfrozen_cnt); #endif } else devq->send_queue.qfrozen_cnt--; if (devq->send_queue.qfrozen_cnt == 0) { if (run_queue) { /* * Now that we are unfrozen run the send queue. 
*/ xpt_run_devq(sim->devq); } } mtx_unlock(&devq->send_mtx); } void xpt_done(union ccb *done_ccb) { struct cam_doneq *queue; int run, hash; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (done_ccb->ccb_h.func_code == XPT_SCSI_IO && done_ccb->csio.bio != NULL) biotrack(done_ccb->csio.bio, __func__); #endif CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done: func= %#x %s status %#x\n", done_ccb->ccb_h.func_code, xpt_action_name(done_ccb->ccb_h.func_code), done_ccb->ccb_h.status)); if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) return; /* Store the time the ccb was in the sim */ done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data); done_ccb->ccb_h.status |= CAM_QOS_VALID; hash = (u_int)(done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id + done_ccb->ccb_h.target_lun) % cam_num_doneqs; queue = &cam_doneqs[hash]; mtx_lock(&queue->cam_doneq_mtx); run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq)); STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe); done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; mtx_unlock(&queue->cam_doneq_mtx); if (run) wakeup(&queue->cam_doneq); } void xpt_done_direct(union ccb *done_ccb) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status)); if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) return; /* Store the time the ccb was in the sim */ done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data); xpt_done_process(&done_ccb->ccb_h); } union ccb * xpt_alloc_ccb(void) { union ccb *new_ccb; new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); return (new_ccb); } union ccb * xpt_alloc_ccb_nowait(void) { union ccb *new_ccb; new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); return (new_ccb); } void xpt_free_ccb(union ccb *free_ccb) { struct cam_periph *periph; if (free_ccb->ccb_h.alloc_flags & CAM_CCB_FROM_UMA) { /* * Looks like a CCB allocated from a periph UMA zone. */ periph = free_ccb->ccb_h.path->periph; uma_zfree(periph->ccb_zone, free_ccb); } else { free(free_ccb, M_CAMCCB); } } /* Private XPT functions */ /* * Get a CAM control block for the caller. Charge the structure to the device * referenced by the path. If we don't have sufficient resources to allocate * more ccbs, we return NULL. 
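 * The CCB comes either from the peripheral's private UMA zone (and is
 * flagged CAM_CCB_FROM_UMA so xpt_free_ccb() can return it there) or from
 * a plain malloc(9) allocation.
 */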
*/ static union ccb * xpt_get_ccb_nowait(struct cam_periph *periph) { union ccb *new_ccb; int alloc_flags; if (periph->ccb_zone != NULL) { alloc_flags = CAM_CCB_FROM_UMA; new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_NOWAIT); } else { alloc_flags = 0; new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); } if (new_ccb == NULL) return (NULL); new_ccb->ccb_h.alloc_flags = alloc_flags; periph->periph_allocated++; cam_ccbq_take_opening(&periph->path->device->ccbq); return (new_ccb); } static union ccb * xpt_get_ccb(struct cam_periph *periph) { union ccb *new_ccb; int alloc_flags; cam_periph_unlock(periph); if (periph->ccb_zone != NULL) { alloc_flags = CAM_CCB_FROM_UMA; new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_WAITOK); } else { alloc_flags = 0; new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); } new_ccb->ccb_h.alloc_flags = alloc_flags; cam_periph_lock(periph); periph->periph_allocated++; cam_ccbq_take_opening(&periph->path->device->ccbq); return (new_ccb); } union ccb * cam_periph_getccb(struct cam_periph *periph, u_int32_t priority) { struct ccb_hdr *ccb_h; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n")); cam_periph_assert(periph, MA_OWNED); while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL || ccb_h->pinfo.priority != priority) { if (priority < periph->immediate_priority) { periph->immediate_priority = priority; xpt_run_allocq(periph, 0); } else cam_periph_sleep(periph, &periph->ccb_list, PRIBIO, "cgticb", 0); } SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); return ((union ccb *)ccb_h); } static void xpt_acquire_bus(struct cam_eb *bus) { xpt_lock_buses(); bus->refcount++; xpt_unlock_buses(); } static void xpt_release_bus(struct cam_eb *bus) { xpt_lock_buses(); KASSERT(bus->refcount >= 1, ("bus->refcount >= 1")); if (--bus->refcount > 0) { xpt_unlock_buses(); return; } TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); xsoftc.bus_generation++; xpt_unlock_buses(); KASSERT(TAILQ_EMPTY(&bus->et_entries), ("destroying bus, but target list is not empty")); cam_sim_release(bus->sim); mtx_destroy(&bus->eb_mtx); free(bus, M_CAMXPT); } static struct cam_et * xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) { struct cam_et *cur_target, *target; mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); mtx_assert(&bus->eb_mtx, MA_OWNED); target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT|M_ZERO); if (target == NULL) return (NULL); TAILQ_INIT(&target->ed_entries); target->bus = bus; target->target_id = target_id; target->refcount = 1; target->generation = 0; target->luns = NULL; mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF); timevalclear(&target->last_reset); /* * Hold a reference to our parent bus so it * will not go away before we do. 
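 * The reference is dropped again in xpt_release_target() once the target
 * itself goes away.
 */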
*/ bus->refcount++; /* Insertion sort into our bus's target list */ cur_target = TAILQ_FIRST(&bus->et_entries); while (cur_target != NULL && cur_target->target_id < target_id) cur_target = TAILQ_NEXT(cur_target, links); if (cur_target != NULL) { TAILQ_INSERT_BEFORE(cur_target, target, links); } else { TAILQ_INSERT_TAIL(&bus->et_entries, target, links); } bus->generation++; return (target); } static void xpt_acquire_target(struct cam_et *target) { struct cam_eb *bus = target->bus; mtx_lock(&bus->eb_mtx); target->refcount++; mtx_unlock(&bus->eb_mtx); } static void xpt_release_target(struct cam_et *target) { struct cam_eb *bus = target->bus; mtx_lock(&bus->eb_mtx); if (--target->refcount > 0) { mtx_unlock(&bus->eb_mtx); return; } TAILQ_REMOVE(&bus->et_entries, target, links); bus->generation++; mtx_unlock(&bus->eb_mtx); KASSERT(TAILQ_EMPTY(&target->ed_entries), ("destroying target, but device list is not empty")); xpt_release_bus(bus); mtx_destroy(&target->luns_mtx); if (target->luns) free(target->luns, M_CAMXPT); free(target, M_CAMXPT); } static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct cam_ed *device; device = xpt_alloc_device(bus, target, lun_id); if (device == NULL) return (NULL); device->mintags = 1; device->maxtags = 1; return (device); } static void xpt_destroy_device(void *context, int pending) { struct cam_ed *device = context; mtx_lock(&device->device_mtx); mtx_destroy(&device->device_mtx); free(device, M_CAMDEV); } struct cam_ed * xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct cam_ed *cur_device, *device; struct cam_devq *devq; cam_status status; mtx_assert(&bus->eb_mtx, MA_OWNED); /* Make space for us in the device queue on our bus */ devq = bus->sim->devq; mtx_lock(&devq->send_mtx); status = cam_devq_resize(devq, devq->send_queue.array_size + 1); mtx_unlock(&devq->send_mtx); if (status != CAM_REQ_CMP) return (NULL); device = (struct cam_ed *)malloc(sizeof(*device), M_CAMDEV, M_NOWAIT|M_ZERO); if (device == NULL) return (NULL); cam_init_pinfo(&device->devq_entry); device->target = target; device->lun_id = lun_id; device->sim = bus->sim; if (cam_ccbq_init(&device->ccbq, bus->sim->max_dev_openings) != 0) { free(device, M_CAMDEV); return (NULL); } SLIST_INIT(&device->asyncs); SLIST_INIT(&device->periphs); device->generation = 0; device->flags = CAM_DEV_UNCONFIGURED; device->tag_delay_count = 0; device->tag_saved_openings = 0; device->refcount = 1; mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF); callout_init_mtx(&device->callout, &devq->send_mtx, 0); TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device); /* * Hold a reference to our parent bus so it * will not go away before we do. 
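 * (The reference taken below is on the parent target, which in turn holds
 * a reference on the bus.)
 */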
*/ target->refcount++; cur_device = TAILQ_FIRST(&target->ed_entries); while (cur_device != NULL && cur_device->lun_id < lun_id) cur_device = TAILQ_NEXT(cur_device, links); if (cur_device != NULL) TAILQ_INSERT_BEFORE(cur_device, device, links); else TAILQ_INSERT_TAIL(&target->ed_entries, device, links); target->generation++; return (device); } void xpt_acquire_device(struct cam_ed *device) { struct cam_eb *bus = device->target->bus; mtx_lock(&bus->eb_mtx); device->refcount++; mtx_unlock(&bus->eb_mtx); } void xpt_release_device(struct cam_ed *device) { struct cam_eb *bus = device->target->bus; struct cam_devq *devq; mtx_lock(&bus->eb_mtx); if (--device->refcount > 0) { mtx_unlock(&bus->eb_mtx); return; } TAILQ_REMOVE(&device->target->ed_entries, device,links); device->target->generation++; mtx_unlock(&bus->eb_mtx); /* Release our slot in the devq */ devq = bus->sim->devq; mtx_lock(&devq->send_mtx); cam_devq_resize(devq, devq->send_queue.array_size - 1); KASSERT(SLIST_EMPTY(&device->periphs), ("destroying device, but periphs list is not empty")); KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX, ("destroying device while still queued for ccbs")); /* The send_mtx must be held when accessing the callout */ if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) callout_stop(&device->callout); mtx_unlock(&devq->send_mtx); xpt_release_target(device->target); cam_ccbq_fini(&device->ccbq); /* * Free allocated memory. free(9) does nothing if the * supplied pointer is NULL, so it is safe to call without * checking. */ free(device->supported_vpds, M_CAMXPT); free(device->device_id, M_CAMXPT); free(device->ext_inq, M_CAMXPT); free(device->physpath, M_CAMXPT); free(device->rcap_buf, M_CAMXPT); free(device->serial_num, M_CAMXPT); free(device->nvme_data, M_CAMXPT); free(device->nvme_cdata, M_CAMXPT); taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task); } u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) { int result; struct cam_ed *dev; dev = path->device; mtx_lock(&dev->sim->devq->send_mtx); result = cam_ccbq_resize(&dev->ccbq, newopenings); mtx_unlock(&dev->sim->devq->send_mtx); if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || (dev->inq_flags & SID_CmdQue) != 0) dev->tag_saved_openings = newopenings; return (result); } static struct cam_eb * xpt_find_bus(path_id_t path_id) { struct cam_eb *bus; xpt_lock_buses(); for (bus = TAILQ_FIRST(&xsoftc.xpt_busses); bus != NULL; bus = TAILQ_NEXT(bus, links)) { if (bus->path_id == path_id) { bus->refcount++; break; } } xpt_unlock_buses(); return (bus); } static struct cam_et * xpt_find_target(struct cam_eb *bus, target_id_t target_id) { struct cam_et *target; mtx_assert(&bus->eb_mtx, MA_OWNED); for (target = TAILQ_FIRST(&bus->et_entries); target != NULL; target = TAILQ_NEXT(target, links)) { if (target->target_id == target_id) { target->refcount++; break; } } return (target); } static struct cam_ed * xpt_find_device(struct cam_et *target, lun_id_t lun_id) { struct cam_ed *device; mtx_assert(&target->bus->eb_mtx, MA_OWNED); for (device = TAILQ_FIRST(&target->ed_entries); device != NULL; device = TAILQ_NEXT(device, links)) { if (device->lun_id == lun_id) { device->refcount++; break; } } return (device); } void xpt_start_tags(struct cam_path *path) { struct ccb_relsim crs; struct cam_ed *device; struct cam_sim *sim; int newopenings; device = path->device; sim = path->bus->sim; device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; xpt_freeze_devq(path, /*count*/1); device->inq_flags |= SID_CmdQue; if (device->tag_saved_openings != 0) 
newopenings = device->tag_saved_openings; else newopenings = min(device->maxtags, sim->max_tagged_dev_openings); xpt_dev_ccbq_resize(path, newopenings); xpt_async(AC_GETDEV_CHANGED, path, NULL); memset(&crs, 0, sizeof(crs)); xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; crs.openings = crs.release_timeout = crs.qfrozen_cnt = 0; xpt_action((union ccb *)&crs); } void xpt_stop_tags(struct cam_path *path) { struct ccb_relsim crs; struct cam_ed *device; struct cam_sim *sim; device = path->device; sim = path->bus->sim; device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; device->tag_delay_count = 0; xpt_freeze_devq(path, /*count*/1); device->inq_flags &= ~SID_CmdQue; xpt_dev_ccbq_resize(path, sim->max_dev_openings); xpt_async(AC_GETDEV_CHANGED, path, NULL); memset(&crs, 0, sizeof(crs)); xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; crs.openings = crs.release_timeout = crs.qfrozen_cnt = 0; xpt_action((union ccb *)&crs); } /* * Assume all possible buses are detected by this time, so allow boot * as soon as they all are scanned. */ static void xpt_boot_delay(void *arg) { xpt_release_boot(); } /* * Now that all config hooks have completed, start boot_delay timer, * waiting for possibly still undetected buses (USB) to appear. */ static void xpt_ch_done(void *arg) { callout_init(&xsoftc.boot_callout, 1); callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0, xpt_boot_delay, NULL, 0); } SYSINIT(xpt_hw_delay, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_ANY, xpt_ch_done, NULL); /* * Now that interrupts are enabled, go find our devices */ static void xpt_config(void *arg) { if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq")) printf("xpt_config: failed to create taskqueue thread.\n"); /* Setup debugging path */ if (cam_dflags != CAM_DEBUG_NONE) { if (xpt_create_path(&cam_dpath, NULL, CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN) != CAM_REQ_CMP) { printf("xpt_config: xpt_create_path() failed for debug" " target %d:%d:%d, debugging disabled\n", CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); cam_dflags = CAM_DEBUG_NONE; } } else cam_dpath = NULL; periphdriver_init(1); xpt_hold_boot(); /* Fire up rescan thread. */ if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0, "cam", "scanner")) { printf("xpt_config: failed to create rescan thread.\n"); } } void xpt_hold_boot_locked(void) { if (xsoftc.buses_to_config++ == 0) root_mount_hold_token("CAM", &xsoftc.xpt_rootmount); } void xpt_hold_boot(void) { xpt_lock_buses(); xpt_hold_boot_locked(); xpt_unlock_buses(); } void xpt_release_boot(void) { xpt_lock_buses(); if (--xsoftc.buses_to_config == 0) { if (xsoftc.buses_config_done == 0) { xsoftc.buses_config_done = 1; xsoftc.buses_to_config++; TASK_INIT(&xsoftc.boot_task, 0, xpt_finishconfig_task, NULL); taskqueue_enqueue(taskqueue_thread, &xsoftc.boot_task); } else root_mount_rel(&xsoftc.xpt_rootmount); } xpt_unlock_buses(); } /* * If the given device only has one peripheral attached to it, and if that * peripheral is the passthrough driver, announce it. This insures that the * user sees some sort of announcement for every peripheral in their system. 
*/ static int xptpassannouncefunc(struct cam_ed *device, void *arg) { struct cam_periph *periph; int i; for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; periph = SLIST_NEXT(periph, periph_links), i++); periph = SLIST_FIRST(&device->periphs); if ((i == 1) && (strncmp(periph->periph_name, "pass", 4) == 0)) xpt_announce_periph(periph, NULL); return(1); } static void xpt_finishconfig_task(void *context, int pending) { periphdriver_init(2); /* * Check for devices with no "standard" peripheral driver * attached. For any devices like that, announce the * passthrough driver so the user will see something. */ if (!bootverbose) xpt_for_all_devices(xptpassannouncefunc, NULL); xpt_release_boot(); } cam_status xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, struct cam_path *path) { struct ccb_setasync csa; cam_status status; int xptpath = 0; if (path == NULL) { status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) return (status); xpt_path_lock(path); xptpath = 1; } memset(&csa, 0, sizeof(csa)); xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = event; csa.callback = cbfunc; csa.callback_arg = cbarg; xpt_action((union ccb *)&csa); status = csa.ccb_h.status; CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE, ("xpt_register_async: func %p\n", cbfunc)); if (xptpath) { xpt_path_unlock(path); xpt_free_path(path); } if ((status == CAM_REQ_CMP) && (csa.event_enable & AC_FOUND_DEVICE)) { /* * Get this peripheral up to date with all * the currently existing devices. */ xpt_for_all_devices(xptsetasyncfunc, &csa); } if ((status == CAM_REQ_CMP) && (csa.event_enable & AC_PATH_REGISTERED)) { /* * Get this peripheral up to date with all * the currently existing buses. */ xpt_for_all_busses(xptsetasyncbusfunc, &csa); } return (status); } static void xptaction(struct cam_sim *sim, union ccb *work_ccb) { CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); switch (work_ccb->ccb_h.func_code) { /* Common cases first */ case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi; cpi = &work_ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = 0; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 0; cpi->max_lun = 0; cpi->initiator_id = 0; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "", HBA_IDLEN); strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); cpi->unit_number = sim->unit_number; cpi->bus_id = sim->bus_id; cpi->base_transfer_speed = 0; cpi->protocol = PROTO_UNSPECIFIED; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; cpi->transport = XPORT_UNSPECIFIED; cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: work_ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(work_ccb); } /* * The xpt as a "controller" has no interrupt sources, so polling * is a no-op. 
*/ static void xptpoll(struct cam_sim *sim) { } void xpt_lock_buses(void) { mtx_lock(&xsoftc.xpt_topo_lock); } void xpt_unlock_buses(void) { mtx_unlock(&xsoftc.xpt_topo_lock); } struct mtx * xpt_path_mtx(struct cam_path *path) { return (&path->device->device_mtx); } static void xpt_done_process(struct ccb_hdr *ccb_h) { struct cam_sim *sim = NULL; struct cam_devq *devq = NULL; struct mtx *mtx = NULL; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) struct ccb_scsiio *csio; if (ccb_h->func_code == XPT_SCSI_IO) { csio = &((union ccb *)ccb_h)->csio; if (csio->bio != NULL) biotrack(csio->bio, __func__); } #endif if (ccb_h->flags & CAM_HIGH_POWER) { struct highpowerlist *hphead; struct cam_ed *device; mtx_lock(&xsoftc.xpt_highpower_lock); hphead = &xsoftc.highpowerq; device = STAILQ_FIRST(hphead); /* * Increment the count since this command is done. */ xsoftc.num_highpower++; /* * Any high powered commands queued up? */ if (device != NULL) { STAILQ_REMOVE_HEAD(hphead, highpowerq_entry); mtx_unlock(&xsoftc.xpt_highpower_lock); mtx_lock(&device->sim->devq->send_mtx); xpt_release_devq_device(device, /*count*/1, /*runqueue*/TRUE); mtx_unlock(&device->sim->devq->send_mtx); } else mtx_unlock(&xsoftc.xpt_highpower_lock); } /* * Insulate against a race where the periph is destroyed but CCBs are * still not all processed. This shouldn't happen, but allows us better * bug diagnostic when it does. */ if (ccb_h->path->bus) sim = ccb_h->path->bus->sim; if (ccb_h->status & CAM_RELEASE_SIMQ) { KASSERT(sim, ("sim missing for CAM_RELEASE_SIMQ request")); xpt_release_simq(sim, /*run_queue*/FALSE); ccb_h->status &= ~CAM_RELEASE_SIMQ; } if ((ccb_h->flags & CAM_DEV_QFRZDIS) && (ccb_h->status & CAM_DEV_QFRZN)) { xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); ccb_h->status &= ~CAM_DEV_QFRZN; } if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { struct cam_ed *dev = ccb_h->path->device; if (sim) devq = sim->devq; KASSERT(devq, ("Periph disappeared with CCB %p %s request pending.", ccb_h, xpt_action_name(ccb_h->func_code))); mtx_lock(&devq->send_mtx); devq->send_active--; devq->send_openings++; cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 && (dev->ccbq.dev_active == 0))) { dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; xpt_release_devq_device(dev, /*count*/1, /*run_queue*/FALSE); } if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; xpt_release_devq_device(dev, /*count*/1, /*run_queue*/FALSE); } if (!device_is_queued(dev)) (void)xpt_schedule_devq(devq, dev); xpt_run_devq(devq); mtx_unlock(&devq->send_mtx); if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) { mtx = xpt_path_mtx(ccb_h->path); mtx_lock(mtx); if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 && (--dev->tag_delay_count == 0)) xpt_start_tags(ccb_h->path); } } if ((ccb_h->flags & CAM_UNLOCKED) == 0) { if (mtx == NULL) { mtx = xpt_path_mtx(ccb_h->path); mtx_lock(mtx); } } else { if (mtx != NULL) { mtx_unlock(mtx); mtx = NULL; } } /* Call the peripheral driver's callback */ ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); if (mtx != NULL) mtx_unlock(mtx); } /* * Parameterize instead and use xpt_done_td? 
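 *
 * (Apart from linkage, the two loops below differ only in the
 * cam_doneq_sleep bookkeeping and in the THREAD_NO_SLEEPING() /
 * THREAD_SLEEPING_OK() bracketing around xpt_done_process(), so a merged
 * version would need a parameter covering those two points.)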
*/ static void xpt_async_td(void *arg) { struct cam_doneq *queue = arg; struct ccb_hdr *ccb_h; STAILQ_HEAD(, ccb_hdr) doneq; STAILQ_INIT(&doneq); mtx_lock(&queue->cam_doneq_mtx); while (1) { while (STAILQ_EMPTY(&queue->cam_doneq)) msleep(&queue->cam_doneq, &queue->cam_doneq_mtx, PRIBIO, "-", 0); STAILQ_CONCAT(&doneq, &queue->cam_doneq); mtx_unlock(&queue->cam_doneq_mtx); while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) { STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe); xpt_done_process(ccb_h); } mtx_lock(&queue->cam_doneq_mtx); } } void xpt_done_td(void *arg) { struct cam_doneq *queue = arg; struct ccb_hdr *ccb_h; STAILQ_HEAD(, ccb_hdr) doneq; STAILQ_INIT(&doneq); mtx_lock(&queue->cam_doneq_mtx); while (1) { while (STAILQ_EMPTY(&queue->cam_doneq)) { queue->cam_doneq_sleep = 1; msleep(&queue->cam_doneq, &queue->cam_doneq_mtx, PRIBIO, "-", 0); queue->cam_doneq_sleep = 0; } STAILQ_CONCAT(&doneq, &queue->cam_doneq); mtx_unlock(&queue->cam_doneq_mtx); THREAD_NO_SLEEPING(); while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) { STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe); xpt_done_process(ccb_h); } THREAD_SLEEPING_OK(); mtx_lock(&queue->cam_doneq_mtx); } } static void camisr_runqueue(void) { struct ccb_hdr *ccb_h; struct cam_doneq *queue; int i; /* Process global queues. */ for (i = 0; i < cam_num_doneqs; i++) { queue = &cam_doneqs[i]; mtx_lock(&queue->cam_doneq_mtx); while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) { STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe); mtx_unlock(&queue->cam_doneq_mtx); xpt_done_process(ccb_h); mtx_lock(&queue->cam_doneq_mtx); } mtx_unlock(&queue->cam_doneq_mtx); } } /** * @brief Return the device_t associated with the path * * When a SIM is created, it registers a bus with a NEWBUS device_t. This is * stored in the internal cam_eb bus structure. There is no guarantee any given * path will have a @c device_t associated with it (it's legal to call @c * xpt_bus_register with a @c NULL @c device_t). * * @param path Path to return the device_t for.
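 *
 * @return The device_t passed to xpt_bus_register() for the path's bus, or
 *         @c NULL if the bus was registered without one.
 *
 * A minimal illustrative use (the printf is just an example, not taken from
 * this change):
 *
 *	device_t dev = xpt_path_sim_device(path);
 *	if (dev != NULL)
 *		device_printf(dev, "on CAM path\n");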
*/ device_t xpt_path_sim_device(const struct cam_path *path) { return (path->bus->parent_dev); } struct kv { uint32_t v; const char *name; }; static struct kv map[] = { { XPT_NOOP, "XPT_NOOP" }, { XPT_SCSI_IO, "XPT_SCSI_IO" }, { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" }, { XPT_GDEVLIST, "XPT_GDEVLIST" }, { XPT_PATH_INQ, "XPT_PATH_INQ" }, { XPT_REL_SIMQ, "XPT_REL_SIMQ" }, { XPT_SASYNC_CB, "XPT_SASYNC_CB" }, { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" }, { XPT_SCAN_BUS, "XPT_SCAN_BUS" }, { XPT_DEV_MATCH, "XPT_DEV_MATCH" }, { XPT_DEBUG, "XPT_DEBUG" }, { XPT_PATH_STATS, "XPT_PATH_STATS" }, { XPT_GDEV_STATS, "XPT_GDEV_STATS" }, { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" }, { XPT_ASYNC, "XPT_ASYNC" }, { XPT_ABORT, "XPT_ABORT" }, { XPT_RESET_BUS, "XPT_RESET_BUS" }, { XPT_RESET_DEV, "XPT_RESET_DEV" }, { XPT_TERM_IO, "XPT_TERM_IO" }, { XPT_SCAN_LUN, "XPT_SCAN_LUN" }, { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" }, { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" }, { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" }, { XPT_ATA_IO, "XPT_ATA_IO" }, { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" }, { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" }, { XPT_NVME_IO, "XPT_NVME_IO" }, { XPT_MMC_IO, "XPT_MMC_IO" }, { XPT_SMP_IO, "XPT_SMP_IO" }, { XPT_SCAN_TGT, "XPT_SCAN_TGT" }, { XPT_NVME_ADMIN, "XPT_NVME_ADMIN" }, { XPT_ENG_INQ, "XPT_ENG_INQ" }, { XPT_ENG_EXEC, "XPT_ENG_EXEC" }, { XPT_EN_LUN, "XPT_EN_LUN" }, { XPT_TARGET_IO, "XPT_TARGET_IO" }, { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" }, { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" }, { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" }, { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" }, { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" }, { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" }, { 0, 0 } }; const char * xpt_action_name(uint32_t action) { static char buffer[32]; /* Only for unknown messages -- racy */ struct kv *walker = map; while (walker->name != NULL) { if (walker->v == action) return (walker->name); walker++; } snprintf(buffer, sizeof(buffer), "%#x", action); return (buffer); } diff --git a/sys/cam/cam_xpt_sim.h b/sys/cam/cam_xpt_sim.h index aa9746019981..569ed09c23d2 100644 --- a/sys/cam/cam_xpt_sim.h +++ b/sys/cam/cam_xpt_sim.h @@ -1,54 +1,54 @@ /*- * Data structures and definitions for dealing with the * Common Access Method Transport (xpt) layer. * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_XPT_SIM_H #define _CAM_CAM_XPT_SIM_H 1 #include #include /* Functions accessed by SIM drivers */ #ifdef _KERNEL -int32_t xpt_bus_register(struct cam_sim *sim, device_t parent, - u_int32_t bus); -int32_t xpt_bus_deregister(path_id_t path_id); +int xpt_bus_register(struct cam_sim *sim, device_t parent, + uint32_t bus); +int xpt_bus_deregister(path_id_t path_id); u_int32_t xpt_freeze_simq(struct cam_sim *sim, u_int count); void xpt_release_simq(struct cam_sim *sim, int run_queue); u_int32_t xpt_freeze_devq(struct cam_path *path, u_int count); void xpt_release_devq(struct cam_path *path, u_int count, int run_queue); void xpt_done(union ccb *done_ccb); void xpt_done_direct(union ccb *done_ccb); #endif #endif /* _CAM_CAM_XPT_SIM_H */ diff --git a/sys/dev/iscsi/iscsi.c b/sys/dev/iscsi/iscsi.c index 7ddb5a9ce1ec..9a1b539e539b 100644 --- a/sys/dev/iscsi/iscsi.c +++ b/sys/dev/iscsi/iscsi.c @@ -1,2636 +1,2635 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2012 The FreeBSD Foundation * * This software was developed by Edward Tomasz Napierala under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ICL_KERNEL_PROXY #include #endif #ifdef ICL_KERNEL_PROXY FEATURE(iscsi_kernel_proxy, "iSCSI initiator built with ICL_KERNEL_PROXY"); #endif /* * XXX: This is global so the iscsi_unload() can access it. * Think about how to do this properly. 
*/ static struct iscsi_softc *sc; SYSCTL_NODE(_kern, OID_AUTO, iscsi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "iSCSI initiator"); static int debug = 1; SYSCTL_INT(_kern_iscsi, OID_AUTO, debug, CTLFLAG_RWTUN, &debug, 0, "Enable debug messages"); static int ping_timeout = 5; SYSCTL_INT(_kern_iscsi, OID_AUTO, ping_timeout, CTLFLAG_RWTUN, &ping_timeout, 0, "Timeout for ping (NOP-Out) requests, in seconds"); static int iscsid_timeout = 60; SYSCTL_INT(_kern_iscsi, OID_AUTO, iscsid_timeout, CTLFLAG_RWTUN, &iscsid_timeout, 0, "Time to wait for iscsid(8) to handle reconnection, in seconds"); static int login_timeout = 60; SYSCTL_INT(_kern_iscsi, OID_AUTO, login_timeout, CTLFLAG_RWTUN, &login_timeout, 0, "Time to wait for iscsid(8) to finish Login Phase, in seconds"); static int maxtags = 255; SYSCTL_INT(_kern_iscsi, OID_AUTO, maxtags, CTLFLAG_RWTUN, &maxtags, 0, "Max number of IO requests queued"); static int fail_on_disconnection = 0; SYSCTL_INT(_kern_iscsi, OID_AUTO, fail_on_disconnection, CTLFLAG_RWTUN, &fail_on_disconnection, 0, "Destroy CAM SIM on connection failure"); static int fail_on_shutdown = 1; SYSCTL_INT(_kern_iscsi, OID_AUTO, fail_on_shutdown, CTLFLAG_RWTUN, &fail_on_shutdown, 0, "Fail disconnected sessions on shutdown"); static MALLOC_DEFINE(M_ISCSI, "iSCSI", "iSCSI initiator"); static uma_zone_t iscsi_outstanding_zone; #define CONN_SESSION(X) ((struct iscsi_session *)X->ic_prv0) #define PDU_SESSION(X) (CONN_SESSION(X->ip_conn)) #define ISCSI_DEBUG(X, ...) \ do { \ if (debug > 1) \ printf("%s: " X "\n", __func__, ## __VA_ARGS__);\ } while (0) #define ISCSI_WARN(X, ...) \ do { \ if (debug > 0) { \ printf("WARNING: %s: " X "\n", \ __func__, ## __VA_ARGS__); \ } \ } while (0) #define ISCSI_SESSION_DEBUG(S, X, ...) \ do { \ if (debug > 1) { \ printf("%s: %s (%s): " X "\n", \ __func__, S->is_conf.isc_target_addr, \ S->is_conf.isc_target, ## __VA_ARGS__); \ } \ } while (0) #define ISCSI_SESSION_WARN(S, X, ...) 
\ do { \ if (debug > 0) { \ printf("WARNING: %s (%s): " X "\n", \ S->is_conf.isc_target_addr, \ S->is_conf.isc_target, ## __VA_ARGS__); \ } \ } while (0) #define ISCSI_SESSION_LOCK(X) mtx_lock(&X->is_lock) #define ISCSI_SESSION_UNLOCK(X) mtx_unlock(&X->is_lock) #define ISCSI_SESSION_LOCK_ASSERT(X) mtx_assert(&X->is_lock, MA_OWNED) #define ISCSI_SESSION_LOCK_ASSERT_NOT(X) mtx_assert(&X->is_lock, MA_NOTOWNED) static int iscsi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int mode, struct thread *td); static struct cdevsw iscsi_cdevsw = { .d_version = D_VERSION, .d_ioctl = iscsi_ioctl, .d_name = "iscsi", }; static void iscsi_pdu_queue_locked(struct icl_pdu *request); static void iscsi_pdu_queue(struct icl_pdu *request); static void iscsi_pdu_update_statsn(const struct icl_pdu *response); static void iscsi_pdu_handle_nop_in(struct icl_pdu *response); static void iscsi_pdu_handle_scsi_response(struct icl_pdu *response); static void iscsi_pdu_handle_task_response(struct icl_pdu *response); static void iscsi_pdu_handle_data_in(struct icl_pdu *response); static void iscsi_pdu_handle_logout_response(struct icl_pdu *response); static void iscsi_pdu_handle_r2t(struct icl_pdu *response); static void iscsi_pdu_handle_async_message(struct icl_pdu *response); static void iscsi_pdu_handle_reject(struct icl_pdu *response); static void iscsi_session_reconnect(struct iscsi_session *is); static void iscsi_session_terminate(struct iscsi_session *is); static void iscsi_action(struct cam_sim *sim, union ccb *ccb); static struct iscsi_outstanding *iscsi_outstanding_find(struct iscsi_session *is, uint32_t initiator_task_tag); static struct iscsi_outstanding *iscsi_outstanding_add(struct iscsi_session *is, struct icl_pdu *request, union ccb *ccb, uint32_t *initiator_task_tagp); static void iscsi_outstanding_remove(struct iscsi_session *is, struct iscsi_outstanding *io); static bool iscsi_pdu_prepare(struct icl_pdu *request) { struct iscsi_session *is; struct iscsi_bhs_scsi_command *bhssc; is = PDU_SESSION(request); ISCSI_SESSION_LOCK_ASSERT(is); /* * We're only using fields common for all the request * (initiator -> target) PDUs. */ bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs; /* * Data-Out PDU does not contain CmdSN. */ if (bhssc->bhssc_opcode != ISCSI_BHS_OPCODE_SCSI_DATA_OUT) { if (ISCSI_SNGT(is->is_cmdsn, is->is_maxcmdsn) && (bhssc->bhssc_opcode & ISCSI_BHS_OPCODE_IMMEDIATE) == 0) { /* * Current MaxCmdSN prevents us from sending any more * SCSI Command PDUs to the target; postpone the PDU. * It will get resent by either iscsi_pdu_queue(), * or by maintenance thread. 
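 *
 * For example, with is_cmdsn == 11 and is_maxcmdsn == 10 the window is
 * closed: a non-immediate SCSI Command PDU is postponed here, while a PDU
 * with ISCSI_BHS_OPCODE_IMMEDIATE set is still sent and (per the code
 * below) does not advance is_cmdsn.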
*/ #if 0 ISCSI_SESSION_DEBUG(is, "postponing send, CmdSN %u, " "ExpCmdSN %u, MaxCmdSN %u, opcode 0x%x", is->is_cmdsn, is->is_expcmdsn, is->is_maxcmdsn, bhssc->bhssc_opcode); #endif return (true); } bhssc->bhssc_cmdsn = htonl(is->is_cmdsn); if ((bhssc->bhssc_opcode & ISCSI_BHS_OPCODE_IMMEDIATE) == 0) is->is_cmdsn++; } bhssc->bhssc_expstatsn = htonl(is->is_statsn + 1); return (false); } static void iscsi_session_send_postponed(struct iscsi_session *is) { struct icl_pdu *request; bool postpone; ISCSI_SESSION_LOCK_ASSERT(is); if (STAILQ_EMPTY(&is->is_postponed)) return; while ((request = STAILQ_FIRST(&is->is_postponed)) != NULL) { postpone = iscsi_pdu_prepare(request); if (postpone) return; STAILQ_REMOVE_HEAD(&is->is_postponed, ip_next); icl_pdu_queue(request); } xpt_release_simq(is->is_sim, 1); } static void iscsi_pdu_queue_locked(struct icl_pdu *request) { struct iscsi_session *is; bool postpone; is = PDU_SESSION(request); ISCSI_SESSION_LOCK_ASSERT(is); iscsi_session_send_postponed(is); postpone = iscsi_pdu_prepare(request); if (postpone) { if (STAILQ_EMPTY(&is->is_postponed)) xpt_freeze_simq(is->is_sim, 1); STAILQ_INSERT_TAIL(&is->is_postponed, request, ip_next); return; } icl_pdu_queue(request); } static void iscsi_pdu_queue(struct icl_pdu *request) { struct iscsi_session *is; is = PDU_SESSION(request); ISCSI_SESSION_LOCK(is); iscsi_pdu_queue_locked(request); ISCSI_SESSION_UNLOCK(is); } static void iscsi_session_logout(struct iscsi_session *is) { struct icl_pdu *request; struct iscsi_bhs_logout_request *bhslr; request = icl_pdu_new(is->is_conn, M_NOWAIT); if (request == NULL) return; bhslr = (struct iscsi_bhs_logout_request *)request->ip_bhs; bhslr->bhslr_opcode = ISCSI_BHS_OPCODE_LOGOUT_REQUEST; bhslr->bhslr_reason = BHSLR_REASON_CLOSE_SESSION; iscsi_pdu_queue_locked(request); } static void iscsi_session_terminate_task(struct iscsi_session *is, struct iscsi_outstanding *io, cam_status status) { ISCSI_SESSION_LOCK_ASSERT(is); if (io->io_ccb != NULL) { io->io_ccb->ccb_h.status &= ~(CAM_SIM_QUEUED | CAM_STATUS_MASK); io->io_ccb->ccb_h.status |= status; if ((io->io_ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { io->io_ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(io->io_ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } xpt_done(io->io_ccb); } iscsi_outstanding_remove(is, io); } static void iscsi_session_terminate_tasks(struct iscsi_session *is, cam_status status) { struct iscsi_outstanding *io, *tmp; ISCSI_SESSION_LOCK_ASSERT(is); TAILQ_FOREACH_SAFE(io, &is->is_outstanding, io_next, tmp) { iscsi_session_terminate_task(is, io, status); } } static void iscsi_session_cleanup(struct iscsi_session *is, bool destroy_sim) { struct icl_pdu *pdu; ISCSI_SESSION_LOCK_ASSERT(is); /* * Don't queue any new PDUs. */ if (is->is_sim != NULL && is->is_simq_frozen == false) { ISCSI_SESSION_DEBUG(is, "freezing"); xpt_freeze_simq(is->is_sim, 1); is->is_simq_frozen = true; } /* * Remove postponed PDUs. */ if (!STAILQ_EMPTY(&is->is_postponed)) xpt_release_simq(is->is_sim, 1); while ((pdu = STAILQ_FIRST(&is->is_postponed)) != NULL) { STAILQ_REMOVE_HEAD(&is->is_postponed, ip_next); icl_pdu_free(pdu); } if (destroy_sim == false) { /* * Terminate SCSI tasks, asking CAM to requeue them. 
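 * (CAM_REQUEUE_REQ makes CAM resubmit those CCBs once the queue is
 * released, so the commands are retried after the session reconnects;
 * contrast with the CAM_DEV_NOT_THERE case below, used when the SIM is
 * being torn down.)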
*/ iscsi_session_terminate_tasks(is, CAM_REQUEUE_REQ); return; } iscsi_session_terminate_tasks(is, CAM_DEV_NOT_THERE); if (is->is_sim == NULL) return; ISCSI_SESSION_DEBUG(is, "deregistering SIM"); xpt_async(AC_LOST_DEVICE, is->is_path, NULL); if (is->is_simq_frozen) { is->is_simq_frozen = false; xpt_release_simq(is->is_sim, 1); } xpt_free_path(is->is_path); is->is_path = NULL; xpt_bus_deregister(cam_sim_path(is->is_sim)); cam_sim_free(is->is_sim, TRUE /*free_devq*/); is->is_sim = NULL; is->is_devq = NULL; } static void iscsi_maintenance_thread_reconnect(struct iscsi_session *is) { icl_conn_close(is->is_conn); ISCSI_SESSION_LOCK(is); is->is_connected = false; is->is_reconnecting = false; is->is_login_phase = false; #ifdef ICL_KERNEL_PROXY if (is->is_login_pdu != NULL) { icl_pdu_free(is->is_login_pdu); is->is_login_pdu = NULL; } cv_signal(&is->is_login_cv); #endif if (fail_on_disconnection) { ISCSI_SESSION_DEBUG(is, "connection failed, destroying devices"); iscsi_session_cleanup(is, true); } else { iscsi_session_cleanup(is, false); } KASSERT(TAILQ_EMPTY(&is->is_outstanding), ("destroying session with active tasks")); KASSERT(STAILQ_EMPTY(&is->is_postponed), ("destroying session with postponed PDUs")); if (is->is_conf.isc_enable == 0 && is->is_conf.isc_discovery == 0) { ISCSI_SESSION_UNLOCK(is); return; } /* * Request immediate reconnection from iscsid(8). */ //ISCSI_SESSION_DEBUG(is, "waking up iscsid(8)"); is->is_waiting_for_iscsid = true; strlcpy(is->is_reason, "Waiting for iscsid(8)", sizeof(is->is_reason)); is->is_timeout = 0; ISCSI_SESSION_UNLOCK(is); cv_signal(&is->is_softc->sc_cv); } static void iscsi_maintenance_thread_terminate(struct iscsi_session *is) { struct iscsi_softc *sc; sc = is->is_softc; sx_xlock(&sc->sc_lock); TAILQ_REMOVE(&sc->sc_sessions, is, is_next); sx_xunlock(&sc->sc_lock); icl_conn_close(is->is_conn); callout_drain(&is->is_callout); ISCSI_SESSION_LOCK(is); KASSERT(is->is_terminating, ("is_terminating == false")); #ifdef ICL_KERNEL_PROXY if (is->is_login_pdu != NULL) { icl_pdu_free(is->is_login_pdu); is->is_login_pdu = NULL; } cv_signal(&is->is_login_cv); #endif iscsi_session_cleanup(is, true); KASSERT(TAILQ_EMPTY(&is->is_outstanding), ("destroying session with active tasks")); KASSERT(STAILQ_EMPTY(&is->is_postponed), ("destroying session with postponed PDUs")); ISCSI_SESSION_UNLOCK(is); icl_conn_free(is->is_conn); mtx_destroy(&is->is_lock); cv_destroy(&is->is_maintenance_cv); #ifdef ICL_KERNEL_PROXY cv_destroy(&is->is_login_cv); #endif ISCSI_SESSION_DEBUG(is, "terminated"); free(is, M_ISCSI); /* * The iscsi_unload() routine might be waiting. */ cv_signal(&sc->sc_cv); } static void iscsi_maintenance_thread(void *arg) { struct iscsi_session *is = arg; ISCSI_SESSION_LOCK(is); for (;;) { if (is->is_reconnecting == false && is->is_terminating == false && (STAILQ_EMPTY(&is->is_postponed) || ISCSI_SNGT(is->is_cmdsn, is->is_maxcmdsn))) cv_wait(&is->is_maintenance_cv, &is->is_lock); /* Terminate supersedes reconnect. */ if (is->is_terminating) { ISCSI_SESSION_UNLOCK(is); iscsi_maintenance_thread_terminate(is); kthread_exit(); return; } if (is->is_reconnecting) { ISCSI_SESSION_UNLOCK(is); iscsi_maintenance_thread_reconnect(is); ISCSI_SESSION_LOCK(is); continue; } iscsi_session_send_postponed(is); } ISCSI_SESSION_UNLOCK(is); } static void iscsi_session_reconnect(struct iscsi_session *is) { /* * XXX: We can't use locking here, because * it's being called from various contexts. * Hope it doesn't break anything. 
*/ if (is->is_reconnecting) return; is->is_reconnecting = true; cv_signal(&is->is_maintenance_cv); } static void iscsi_session_terminate(struct iscsi_session *is) { if (is->is_terminating) return; is->is_terminating = true; #if 0 iscsi_session_logout(is); #endif cv_signal(&is->is_maintenance_cv); } static void iscsi_callout(void *context) { struct icl_pdu *request; struct iscsi_bhs_nop_out *bhsno; struct iscsi_session *is; bool reconnect_needed = false; is = context; ISCSI_SESSION_LOCK(is); if (is->is_terminating) { ISCSI_SESSION_UNLOCK(is); return; } callout_schedule(&is->is_callout, 1 * hz); if (is->is_conf.isc_enable == 0) goto out; is->is_timeout++; if (is->is_waiting_for_iscsid) { if (iscsid_timeout > 0 && is->is_timeout > iscsid_timeout) { ISCSI_SESSION_WARN(is, "timed out waiting for iscsid(8) " "for %d seconds; reconnecting", is->is_timeout); reconnect_needed = true; } goto out; } if (is->is_login_phase) { if (login_timeout > 0 && is->is_timeout > login_timeout) { ISCSI_SESSION_WARN(is, "login timed out after %d seconds; " "reconnecting", is->is_timeout); reconnect_needed = true; } goto out; } if (ping_timeout <= 0) { /* * Pings are disabled. Don't send NOP-Out in this case. * Reset the timeout, to avoid triggering reconnection, * should the user decide to reenable them. */ is->is_timeout = 0; goto out; } if (is->is_timeout >= ping_timeout) { ISCSI_SESSION_WARN(is, "no ping reply (NOP-In) after %d seconds; " "reconnecting", ping_timeout); reconnect_needed = true; goto out; } ISCSI_SESSION_UNLOCK(is); /* * If the ping was reset less than one second ago - which means * that we've received some PDU during the last second - assume * the traffic flows correctly and don't bother sending a NOP-Out. * * (It's 2 - one for one second, and one for incrementing is_timeout * earlier in this routine.) */ if (is->is_timeout < 2) return; request = icl_pdu_new(is->is_conn, M_NOWAIT); if (request == NULL) { ISCSI_SESSION_WARN(is, "failed to allocate PDU"); return; } bhsno = (struct iscsi_bhs_nop_out *)request->ip_bhs; bhsno->bhsno_opcode = ISCSI_BHS_OPCODE_NOP_OUT | ISCSI_BHS_OPCODE_IMMEDIATE; bhsno->bhsno_flags = 0x80; bhsno->bhsno_target_transfer_tag = 0xffffffff; iscsi_pdu_queue(request); return; out: if (is->is_terminating) { ISCSI_SESSION_UNLOCK(is); return; } ISCSI_SESSION_UNLOCK(is); if (reconnect_needed) iscsi_session_reconnect(is); } static void iscsi_pdu_update_statsn(const struct icl_pdu *response) { const struct iscsi_bhs_data_in *bhsdi; struct iscsi_session *is; uint32_t expcmdsn, maxcmdsn, statsn; is = PDU_SESSION(response); ISCSI_SESSION_LOCK_ASSERT(is); /* * We're only using fields common for all the response * (target -> initiator) PDUs. */ bhsdi = (const struct iscsi_bhs_data_in *)response->ip_bhs; /* * Ok, I lied. In case of Data-In, "The fields StatSN, Status, * and Residual Count only have meaningful content if the S bit * is set to 1", so we also need to check the bit specific for * Data-In PDU. 
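 *
 * In other words, StatSN is trusted below for every response opcode except
 * Data-In, and for Data-In only when BHSDI_FLAGS_S is set; that is exactly
 * the condition tested next.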
*/ if (bhsdi->bhsdi_opcode != ISCSI_BHS_OPCODE_SCSI_DATA_IN || (bhsdi->bhsdi_flags & BHSDI_FLAGS_S) != 0) { statsn = ntohl(bhsdi->bhsdi_statsn); if (statsn != is->is_statsn && statsn != (is->is_statsn + 1)) { /* XXX: This is a normal situation for MCS */ ISCSI_SESSION_WARN(is, "PDU 0x%x StatSN %u != " "session ExpStatSN %u (or + 1); reconnecting", bhsdi->bhsdi_opcode, statsn, is->is_statsn); iscsi_session_reconnect(is); } if (ISCSI_SNGT(statsn, is->is_statsn)) is->is_statsn = statsn; } expcmdsn = ntohl(bhsdi->bhsdi_expcmdsn); maxcmdsn = ntohl(bhsdi->bhsdi_maxcmdsn); if (ISCSI_SNLT(maxcmdsn + 1, expcmdsn)) { ISCSI_SESSION_DEBUG(is, "PDU MaxCmdSN %u + 1 < PDU ExpCmdSN %u; ignoring", maxcmdsn, expcmdsn); } else { if (ISCSI_SNGT(maxcmdsn, is->is_maxcmdsn)) { is->is_maxcmdsn = maxcmdsn; /* * Command window increased; kick the maintenance thread * to send out postponed commands. */ if (!STAILQ_EMPTY(&is->is_postponed)) cv_signal(&is->is_maintenance_cv); } else if (ISCSI_SNLT(maxcmdsn, is->is_maxcmdsn)) { /* XXX: This is a normal situation for MCS */ ISCSI_SESSION_DEBUG(is, "PDU MaxCmdSN %u < session MaxCmdSN %u; ignoring", maxcmdsn, is->is_maxcmdsn); } if (ISCSI_SNGT(expcmdsn, is->is_expcmdsn)) { is->is_expcmdsn = expcmdsn; } else if (ISCSI_SNLT(expcmdsn, is->is_expcmdsn)) { /* XXX: This is a normal situation for MCS */ ISCSI_SESSION_DEBUG(is, "PDU ExpCmdSN %u < session ExpCmdSN %u; ignoring", expcmdsn, is->is_expcmdsn); } } /* * Every incoming PDU - not just NOP-In - resets the ping timer. * The purpose of the timeout is to reset the connection when it stalls; * we don't want this to happen when NOP-In or NOP-Out ends up delayed * in some queue. */ is->is_timeout = 0; } static void iscsi_receive_callback(struct icl_pdu *response) { struct iscsi_session *is; is = PDU_SESSION(response); ISCSI_SESSION_LOCK(is); iscsi_pdu_update_statsn(response); #ifdef ICL_KERNEL_PROXY if (is->is_login_phase) { if (is->is_login_pdu == NULL) is->is_login_pdu = response; else icl_pdu_free(response); ISCSI_SESSION_UNLOCK(is); cv_signal(&is->is_login_cv); return; } #endif /* * The handling routine is responsible for freeing the PDU * when it's no longer needed. */ switch (response->ip_bhs->bhs_opcode) { case ISCSI_BHS_OPCODE_NOP_IN: iscsi_pdu_handle_nop_in(response); ISCSI_SESSION_UNLOCK(is); break; case ISCSI_BHS_OPCODE_SCSI_RESPONSE: iscsi_pdu_handle_scsi_response(response); /* Session lock dropped inside. */ ISCSI_SESSION_LOCK_ASSERT_NOT(is); break; case ISCSI_BHS_OPCODE_TASK_RESPONSE: iscsi_pdu_handle_task_response(response); ISCSI_SESSION_UNLOCK(is); break; case ISCSI_BHS_OPCODE_SCSI_DATA_IN: iscsi_pdu_handle_data_in(response); /* Session lock dropped inside.
*/ ISCSI_SESSION_LOCK_ASSERT_NOT(is); break; case ISCSI_BHS_OPCODE_LOGOUT_RESPONSE: iscsi_pdu_handle_logout_response(response); ISCSI_SESSION_UNLOCK(is); break; case ISCSI_BHS_OPCODE_R2T: iscsi_pdu_handle_r2t(response); ISCSI_SESSION_UNLOCK(is); break; case ISCSI_BHS_OPCODE_ASYNC_MESSAGE: iscsi_pdu_handle_async_message(response); ISCSI_SESSION_UNLOCK(is); break; case ISCSI_BHS_OPCODE_REJECT: iscsi_pdu_handle_reject(response); ISCSI_SESSION_UNLOCK(is); break; default: ISCSI_SESSION_WARN(is, "received PDU with unsupported " "opcode 0x%x; reconnecting", response->ip_bhs->bhs_opcode); iscsi_session_reconnect(is); ISCSI_SESSION_UNLOCK(is); icl_pdu_free(response); } } static void iscsi_error_callback(struct icl_conn *ic) { struct iscsi_session *is; is = CONN_SESSION(ic); ISCSI_SESSION_WARN(is, "connection error; reconnecting"); iscsi_session_reconnect(is); } static void iscsi_pdu_handle_nop_in(struct icl_pdu *response) { struct iscsi_session *is; struct iscsi_bhs_nop_out *bhsno; struct iscsi_bhs_nop_in *bhsni; struct icl_pdu *request; void *data = NULL; size_t datasize; int error; is = PDU_SESSION(response); bhsni = (struct iscsi_bhs_nop_in *)response->ip_bhs; if (bhsni->bhsni_target_transfer_tag == 0xffffffff) { /* * Nothing to do; iscsi_pdu_update_statsn() already * zeroed the timeout. */ icl_pdu_free(response); return; } datasize = icl_pdu_data_segment_length(response); if (datasize > 0) { data = malloc(datasize, M_ISCSI, M_NOWAIT | M_ZERO); if (data == NULL) { ISCSI_SESSION_WARN(is, "failed to allocate memory; " "reconnecting"); icl_pdu_free(response); iscsi_session_reconnect(is); return; } icl_pdu_get_data(response, 0, data, datasize); } request = icl_pdu_new(response->ip_conn, M_NOWAIT); if (request == NULL) { ISCSI_SESSION_WARN(is, "failed to allocate memory; " "reconnecting"); free(data, M_ISCSI); icl_pdu_free(response); iscsi_session_reconnect(is); return; } bhsno = (struct iscsi_bhs_nop_out *)request->ip_bhs; bhsno->bhsno_opcode = ISCSI_BHS_OPCODE_NOP_OUT | ISCSI_BHS_OPCODE_IMMEDIATE; bhsno->bhsno_flags = 0x80; bhsno->bhsno_initiator_task_tag = 0xffffffff; bhsno->bhsno_target_transfer_tag = bhsni->bhsni_target_transfer_tag; if (datasize > 0) { error = icl_pdu_append_data(request, data, datasize, M_NOWAIT); if (error != 0) { ISCSI_SESSION_WARN(is, "failed to allocate memory; " "reconnecting"); free(data, M_ISCSI); icl_pdu_free(request); icl_pdu_free(response); iscsi_session_reconnect(is); return; } free(data, M_ISCSI); } icl_pdu_free(response); iscsi_pdu_queue_locked(request); } static void iscsi_pdu_handle_scsi_response(struct icl_pdu *response) { struct iscsi_bhs_scsi_response *bhssr; struct iscsi_outstanding *io; struct iscsi_session *is; union ccb *ccb; struct ccb_scsiio *csio; size_t data_segment_len, received; uint16_t sense_len; uint32_t resid; is = PDU_SESSION(response); bhssr = (struct iscsi_bhs_scsi_response *)response->ip_bhs; io = iscsi_outstanding_find(is, bhssr->bhssr_initiator_task_tag); if (io == NULL || io->io_ccb == NULL) { ISCSI_SESSION_WARN(is, "bad itt 0x%x", bhssr->bhssr_initiator_task_tag); icl_pdu_free(response); iscsi_session_reconnect(is); ISCSI_SESSION_UNLOCK(is); return; } ccb = io->io_ccb; /* * With iSER, after getting good response we can be sure * that all the data has been successfully transferred. 
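 *
 * For example (illustrative numbers): a READ with dxfer_len 8192 and a
 * residual-underflow count of 512 sets io_received to 7680 below; with no
 * residual flag set, io_received is simply dxfer_len.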
*/ if (is->is_conn->ic_iser) { resid = ntohl(bhssr->bhssr_residual_count); if (bhssr->bhssr_flags & BHSSR_FLAGS_RESIDUAL_UNDERFLOW) { io->io_received = ccb->csio.dxfer_len - resid; } else if (bhssr->bhssr_flags & BHSSR_FLAGS_RESIDUAL_OVERFLOW) { ISCSI_SESSION_WARN(is, "overflow: target indicates %d", resid); } else { io->io_received = ccb->csio.dxfer_len; } } received = io->io_received; iscsi_outstanding_remove(is, io); ISCSI_SESSION_UNLOCK(is); if (bhssr->bhssr_response != BHSSR_RESPONSE_COMMAND_COMPLETED) { ISCSI_SESSION_WARN(is, "service response 0x%x", bhssr->bhssr_response); if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_REQ_CMP_ERR | CAM_DEV_QFRZN; } else if (bhssr->bhssr_status == 0) { ccb->ccb_h.status = CAM_REQ_CMP; } else { if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_DEV_QFRZN; ccb->csio.scsi_status = bhssr->bhssr_status; } csio = &ccb->csio; data_segment_len = icl_pdu_data_segment_length(response); if (data_segment_len > 0) { if (data_segment_len < sizeof(sense_len)) { ISCSI_SESSION_WARN(is, "truncated data segment (%zd bytes)", data_segment_len); if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_REQ_CMP_ERR | CAM_DEV_QFRZN; goto out; } icl_pdu_get_data(response, 0, &sense_len, sizeof(sense_len)); sense_len = ntohs(sense_len); #if 0 ISCSI_SESSION_DEBUG(is, "sense_len %d, data len %zd", sense_len, data_segment_len); #endif if (sizeof(sense_len) + sense_len > data_segment_len) { ISCSI_SESSION_WARN(is, "truncated data segment " "(%zd bytes, should be %zd)", data_segment_len, sizeof(sense_len) + sense_len); if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_REQ_CMP_ERR | CAM_DEV_QFRZN; goto out; } else if (sizeof(sense_len) + sense_len < data_segment_len) ISCSI_SESSION_WARN(is, "oversize data segment " "(%zd bytes, should be %zd)", data_segment_len, sizeof(sense_len) + sense_len); if (sense_len > csio->sense_len) { ISCSI_SESSION_DEBUG(is, "truncating sense from %d to %d", sense_len, csio->sense_len); sense_len = csio->sense_len; } icl_pdu_get_data(response, sizeof(sense_len), &csio->sense_data, sense_len); csio->sense_resid = csio->sense_len - sense_len; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; } out: if (bhssr->bhssr_flags & BHSSR_FLAGS_RESIDUAL_UNDERFLOW) csio->resid = ntohl(bhssr->bhssr_residual_count); if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { KASSERT(received <= csio->dxfer_len, ("received > csio->dxfer_len")); if (received < csio->dxfer_len) { if (csio->resid != csio->dxfer_len - received) { ISCSI_SESSION_WARN(is, "underflow mismatch: " "target indicates %d, we calculated %zd", csio->resid, csio->dxfer_len - received); } csio->resid = csio->dxfer_len - received; } } xpt_done(ccb); icl_pdu_free(response); } static void iscsi_pdu_handle_task_response(struct icl_pdu *response) { struct iscsi_bhs_task_management_response *bhstmr; struct iscsi_outstanding *io, *aio; struct iscsi_session *is; is = PDU_SESSION(response); bhstmr = (struct iscsi_bhs_task_management_response *)response->ip_bhs; io = iscsi_outstanding_find(is, bhstmr->bhstmr_initiator_task_tag); if (io == NULL || io->io_ccb != NULL) { ISCSI_SESSION_WARN(is, "bad itt 0x%x", 
bhstmr->bhstmr_initiator_task_tag); icl_pdu_free(response); iscsi_session_reconnect(is); return; } if (bhstmr->bhstmr_response != BHSTMR_RESPONSE_FUNCTION_COMPLETE) { ISCSI_SESSION_WARN(is, "task response 0x%x", bhstmr->bhstmr_response); } else { aio = iscsi_outstanding_find(is, io->io_referenced_task_tag); if (aio != NULL && aio->io_ccb != NULL) iscsi_session_terminate_task(is, aio, CAM_REQ_ABORTED); } iscsi_outstanding_remove(is, io); icl_pdu_free(response); } static void iscsi_pdu_handle_data_in(struct icl_pdu *response) { struct iscsi_bhs_data_in *bhsdi; struct iscsi_outstanding *io; struct iscsi_session *is; union ccb *ccb; struct ccb_scsiio *csio; size_t data_segment_len, received, oreceived; is = PDU_SESSION(response); bhsdi = (struct iscsi_bhs_data_in *)response->ip_bhs; io = iscsi_outstanding_find(is, bhsdi->bhsdi_initiator_task_tag); if (io == NULL || io->io_ccb == NULL) { ISCSI_SESSION_WARN(is, "bad itt 0x%x", bhsdi->bhsdi_initiator_task_tag); icl_pdu_free(response); iscsi_session_reconnect(is); ISCSI_SESSION_UNLOCK(is); return; } data_segment_len = icl_pdu_data_segment_length(response); if (data_segment_len == 0) { /* * "The sending of 0 length data segments should be avoided, * but initiators and targets MUST be able to properly receive * 0 length data segments." */ ISCSI_SESSION_UNLOCK(is); icl_pdu_free(response); return; } /* * We need to track this for security reasons - without it, malicious target * could respond to SCSI READ without sending Data-In PDUs, which would result * in read operation on the initiator side returning random kernel data. */ if (ntohl(bhsdi->bhsdi_buffer_offset) != io->io_received) { ISCSI_SESSION_WARN(is, "data out of order; expected offset %zd, got %zd", io->io_received, (size_t)ntohl(bhsdi->bhsdi_buffer_offset)); icl_pdu_free(response); iscsi_session_reconnect(is); ISCSI_SESSION_UNLOCK(is); return; } ccb = io->io_ccb; csio = &ccb->csio; if (io->io_received + data_segment_len > csio->dxfer_len) { ISCSI_SESSION_WARN(is, "oversize data segment (%zd bytes " "at offset %zd, buffer is %d)", data_segment_len, io->io_received, csio->dxfer_len); icl_pdu_free(response); iscsi_session_reconnect(is); ISCSI_SESSION_UNLOCK(is); return; } oreceived = io->io_received; io->io_received += data_segment_len; received = io->io_received; if ((bhsdi->bhsdi_flags & BHSDI_FLAGS_S) != 0) iscsi_outstanding_remove(is, io); ISCSI_SESSION_UNLOCK(is); icl_pdu_get_data(response, 0, csio->data_ptr + oreceived, data_segment_len); /* * XXX: Check DataSN. * XXX: Check F. */ if ((bhsdi->bhsdi_flags & BHSDI_FLAGS_S) == 0) { /* * Nothing more to do. 
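 * (Without the S bit no status is piggybacked on this Data-In PDU; the
 * command will be completed later by iscsi_pdu_handle_scsi_response().)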
*/ icl_pdu_free(response); return; } //ISCSI_SESSION_DEBUG(is, "got S flag; status 0x%x", bhsdi->bhsdi_status); if (bhsdi->bhsdi_status == 0) { ccb->ccb_h.status = CAM_REQ_CMP; } else { if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_DEV_QFRZN; csio->scsi_status = bhsdi->bhsdi_status; } if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { KASSERT(received <= csio->dxfer_len, ("received > csio->dxfer_len")); if (received < csio->dxfer_len) { csio->resid = ntohl(bhsdi->bhsdi_residual_count); if (csio->resid != csio->dxfer_len - received) { ISCSI_SESSION_WARN(is, "underflow mismatch: " "target indicates %d, we calculated %zd", csio->resid, csio->dxfer_len - received); } csio->resid = csio->dxfer_len - received; } } xpt_done(ccb); icl_pdu_free(response); } static void iscsi_pdu_handle_logout_response(struct icl_pdu *response) { ISCSI_SESSION_DEBUG(PDU_SESSION(response), "logout response"); icl_pdu_free(response); } static void iscsi_pdu_handle_r2t(struct icl_pdu *response) { struct icl_pdu *request; struct iscsi_session *is; struct iscsi_bhs_r2t *bhsr2t; struct iscsi_bhs_data_out *bhsdo; struct iscsi_outstanding *io; struct ccb_scsiio *csio; size_t off, len, total_len; int error; uint32_t datasn = 0; is = PDU_SESSION(response); bhsr2t = (struct iscsi_bhs_r2t *)response->ip_bhs; io = iscsi_outstanding_find(is, bhsr2t->bhsr2t_initiator_task_tag); if (io == NULL || io->io_ccb == NULL) { ISCSI_SESSION_WARN(is, "bad itt 0x%x; reconnecting", bhsr2t->bhsr2t_initiator_task_tag); icl_pdu_free(response); iscsi_session_reconnect(is); return; } csio = &io->io_ccb->csio; if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_OUT) { ISCSI_SESSION_WARN(is, "received R2T for read command; reconnecting"); icl_pdu_free(response); iscsi_session_reconnect(is); return; } /* * XXX: Verify R2TSN. 
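 *
 * The loop below slices the requested range into Data-Out PDUs of at most
 * ic_max_send_data_segment_length bytes each. For example (illustrative
 * numbers), an R2T asking for 131072 bytes with an 8192-byte limit yields
 * 16 Data-Out PDUs, DataSN 0 through 15, with BHSDO_FLAGS_F set only on
 * the last one.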
*/ off = ntohl(bhsr2t->bhsr2t_buffer_offset); if (off > csio->dxfer_len) { ISCSI_SESSION_WARN(is, "target requested invalid offset " "%zd, buffer is is %d; reconnecting", off, csio->dxfer_len); icl_pdu_free(response); iscsi_session_reconnect(is); return; } total_len = ntohl(bhsr2t->bhsr2t_desired_data_transfer_length); if (total_len == 0 || total_len > csio->dxfer_len) { ISCSI_SESSION_WARN(is, "target requested invalid length " "%zd, buffer is %d; reconnecting", total_len, csio->dxfer_len); icl_pdu_free(response); iscsi_session_reconnect(is); return; } //ISCSI_SESSION_DEBUG(is, "r2t; off %zd, len %zd", off, total_len); for (;;) { len = total_len; if (len > is->is_conn->ic_max_send_data_segment_length) len = is->is_conn->ic_max_send_data_segment_length; if (off + len > csio->dxfer_len) { ISCSI_SESSION_WARN(is, "target requested invalid " "length/offset %zd, buffer is %d; reconnecting", off + len, csio->dxfer_len); icl_pdu_free(response); iscsi_session_reconnect(is); return; } request = icl_pdu_new(response->ip_conn, M_NOWAIT); if (request == NULL) { icl_pdu_free(response); iscsi_session_reconnect(is); return; } bhsdo = (struct iscsi_bhs_data_out *)request->ip_bhs; bhsdo->bhsdo_opcode = ISCSI_BHS_OPCODE_SCSI_DATA_OUT; bhsdo->bhsdo_lun = bhsr2t->bhsr2t_lun; bhsdo->bhsdo_initiator_task_tag = bhsr2t->bhsr2t_initiator_task_tag; bhsdo->bhsdo_target_transfer_tag = bhsr2t->bhsr2t_target_transfer_tag; bhsdo->bhsdo_datasn = htonl(datasn++); bhsdo->bhsdo_buffer_offset = htonl(off); error = icl_pdu_append_data(request, csio->data_ptr + off, len, M_NOWAIT); if (error != 0) { ISCSI_SESSION_WARN(is, "failed to allocate memory; " "reconnecting"); icl_pdu_free(request); icl_pdu_free(response); iscsi_session_reconnect(is); return; } off += len; total_len -= len; if (total_len == 0) { bhsdo->bhsdo_flags |= BHSDO_FLAGS_F; //ISCSI_SESSION_DEBUG(is, "setting F, off %zd", off); } else { //ISCSI_SESSION_DEBUG(is, "not finished, off %zd", off); } iscsi_pdu_queue_locked(request); if (total_len == 0) break; } icl_pdu_free(response); } static void iscsi_pdu_handle_async_message(struct icl_pdu *response) { struct iscsi_bhs_asynchronous_message *bhsam; struct iscsi_session *is; is = PDU_SESSION(response); bhsam = (struct iscsi_bhs_asynchronous_message *)response->ip_bhs; switch (bhsam->bhsam_async_event) { case BHSAM_EVENT_TARGET_REQUESTS_LOGOUT: ISCSI_SESSION_WARN(is, "target requests logout; removing session"); iscsi_session_logout(is); iscsi_session_terminate(is); break; case BHSAM_EVENT_TARGET_TERMINATES_CONNECTION: ISCSI_SESSION_WARN(is, "target indicates it will drop the connection"); break; case BHSAM_EVENT_TARGET_TERMINATES_SESSION: ISCSI_SESSION_WARN(is, "target indicates it will drop the session"); break; default: /* * XXX: Technically, we're obligated to also handle * parameter renegotiation. 
*/ ISCSI_SESSION_WARN(is, "ignoring AsyncEvent %d", bhsam->bhsam_async_event); break; } icl_pdu_free(response); } static void iscsi_pdu_handle_reject(struct icl_pdu *response) { struct iscsi_bhs_reject *bhsr; struct iscsi_session *is; is = PDU_SESSION(response); bhsr = (struct iscsi_bhs_reject *)response->ip_bhs; ISCSI_SESSION_WARN(is, "received Reject PDU, reason 0x%x; protocol error?", bhsr->bhsr_reason); icl_pdu_free(response); } static int iscsi_ioctl_daemon_wait(struct iscsi_softc *sc, struct iscsi_daemon_request *request) { struct iscsi_session *is; struct icl_drv_limits idl; int error; sx_slock(&sc->sc_lock); for (;;) { TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { ISCSI_SESSION_LOCK(is); if (is->is_conf.isc_enable == 0 && is->is_conf.isc_discovery == 0) { ISCSI_SESSION_UNLOCK(is); continue; } if (is->is_waiting_for_iscsid) break; ISCSI_SESSION_UNLOCK(is); } if (is == NULL) { if (sc->sc_unloading) { sx_sunlock(&sc->sc_lock); return (ENXIO); } /* * No session requires attention from iscsid(8); wait. */ error = cv_wait_sig(&sc->sc_cv, &sc->sc_lock); if (error != 0) { sx_sunlock(&sc->sc_lock); return (error); } continue; } is->is_waiting_for_iscsid = false; is->is_login_phase = true; is->is_reason[0] = '\0'; ISCSI_SESSION_UNLOCK(is); request->idr_session_id = is->is_id; memcpy(&request->idr_isid, &is->is_isid, sizeof(request->idr_isid)); request->idr_tsih = 0; /* New or reinstated session. */ memcpy(&request->idr_conf, &is->is_conf, sizeof(request->idr_conf)); error = icl_limits(is->is_conf.isc_offload, is->is_conf.isc_iser, &idl); if (error != 0) { ISCSI_SESSION_WARN(is, "icl_limits for offload \"%s\" " "failed with error %d", is->is_conf.isc_offload, error); sx_sunlock(&sc->sc_lock); return (error); } request->idr_limits.isl_max_recv_data_segment_length = idl.idl_max_recv_data_segment_length; request->idr_limits.isl_max_send_data_segment_length = idl.idl_max_send_data_segment_length; request->idr_limits.isl_max_burst_length = idl.idl_max_burst_length; request->idr_limits.isl_first_burst_length = idl.idl_first_burst_length; sx_sunlock(&sc->sc_lock); return (0); } } static int iscsi_ioctl_daemon_handoff(struct iscsi_softc *sc, struct iscsi_daemon_handoff *handoff) { struct iscsi_session *is; struct icl_conn *ic; int error; sx_slock(&sc->sc_lock); /* * Find the session to hand off socket to. */ TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { if (is->is_id == handoff->idh_session_id) break; } if (is == NULL) { sx_sunlock(&sc->sc_lock); return (ESRCH); } ISCSI_SESSION_LOCK(is); ic = is->is_conn; if (is->is_conf.isc_discovery || is->is_terminating) { ISCSI_SESSION_UNLOCK(is); sx_sunlock(&sc->sc_lock); return (EINVAL); } if (is->is_connected) { /* * This might have happened because another iscsid(8) * instance handed off the connection in the meantime. * Just return. 
*/ ISCSI_SESSION_WARN(is, "handoff on already connected " "session"); ISCSI_SESSION_UNLOCK(is); sx_sunlock(&sc->sc_lock); return (EBUSY); } strlcpy(is->is_target_alias, handoff->idh_target_alias, sizeof(is->is_target_alias)); is->is_tsih = handoff->idh_tsih; is->is_statsn = handoff->idh_statsn; is->is_protocol_level = handoff->idh_protocol_level; is->is_initial_r2t = handoff->idh_initial_r2t; is->is_immediate_data = handoff->idh_immediate_data; ic->ic_max_recv_data_segment_length = handoff->idh_max_recv_data_segment_length; ic->ic_max_send_data_segment_length = handoff->idh_max_send_data_segment_length; is->is_max_burst_length = handoff->idh_max_burst_length; is->is_first_burst_length = handoff->idh_first_burst_length; if (handoff->idh_header_digest == ISCSI_DIGEST_CRC32C) ic->ic_header_crc32c = true; else ic->ic_header_crc32c = false; if (handoff->idh_data_digest == ISCSI_DIGEST_CRC32C) ic->ic_data_crc32c = true; else ic->ic_data_crc32c = false; ic->ic_maxtags = maxtags; is->is_cmdsn = 0; is->is_expcmdsn = 0; is->is_maxcmdsn = 0; is->is_waiting_for_iscsid = false; is->is_login_phase = false; is->is_timeout = 0; is->is_connected = true; is->is_reason[0] = '\0'; ISCSI_SESSION_UNLOCK(is); /* * If we're going through the proxy, the idh_socket will be 0, * and the ICL module can simply ignore this call. It can also * use it to determine it's no longer in the Login phase. */ error = icl_conn_handoff(ic, handoff->idh_socket); if (error != 0) { sx_sunlock(&sc->sc_lock); iscsi_session_terminate(is); return (error); } sx_sunlock(&sc->sc_lock); if (is->is_sim != NULL) { /* * When reconnecting, there already is SIM allocated for the session. */ KASSERT(is->is_simq_frozen, ("reconnect without frozen simq")); ISCSI_SESSION_LOCK(is); ISCSI_SESSION_DEBUG(is, "releasing"); is->is_simq_frozen = false; xpt_release_simq(is->is_sim, 1); ISCSI_SESSION_UNLOCK(is); } else { ISCSI_SESSION_LOCK(is); is->is_devq = cam_simq_alloc(ic->ic_maxtags); if (is->is_devq == NULL) { ISCSI_SESSION_WARN(is, "failed to allocate simq"); iscsi_session_terminate(is); return (ENOMEM); } is->is_sim = cam_sim_alloc(iscsi_action, NULL, "iscsi", is, is->is_id /* unit */, &is->is_lock, 1, ic->ic_maxtags, is->is_devq); if (is->is_sim == NULL) { ISCSI_SESSION_UNLOCK(is); ISCSI_SESSION_WARN(is, "failed to allocate SIM"); cam_simq_free(is->is_devq); iscsi_session_terminate(is); return (ENOMEM); } - error = xpt_bus_register(is->is_sim, NULL, 0); - if (error != 0) { + if (xpt_bus_register(is->is_sim, NULL, 0) != 0) { ISCSI_SESSION_UNLOCK(is); ISCSI_SESSION_WARN(is, "failed to register bus"); iscsi_session_terminate(is); return (ENOMEM); } error = xpt_create_path(&is->is_path, /*periph*/NULL, cam_sim_path(is->is_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (error != CAM_REQ_CMP) { ISCSI_SESSION_UNLOCK(is); ISCSI_SESSION_WARN(is, "failed to create path"); iscsi_session_terminate(is); return (ENOMEM); } ISCSI_SESSION_UNLOCK(is); } return (0); } static int iscsi_ioctl_daemon_fail(struct iscsi_softc *sc, struct iscsi_daemon_fail *fail) { struct iscsi_session *is; sx_slock(&sc->sc_lock); TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { if (is->is_id == fail->idf_session_id) break; } if (is == NULL) { sx_sunlock(&sc->sc_lock); return (ESRCH); } ISCSI_SESSION_LOCK(is); ISCSI_SESSION_DEBUG(is, "iscsid(8) failed: %s", fail->idf_reason); strlcpy(is->is_reason, fail->idf_reason, sizeof(is->is_reason)); //is->is_waiting_for_iscsid = false; //is->is_login_phase = true; //iscsi_session_reconnect(is); ISCSI_SESSION_UNLOCK(is); sx_sunlock(&sc->sc_lock); 
return (0); } #ifdef ICL_KERNEL_PROXY static int iscsi_ioctl_daemon_connect(struct iscsi_softc *sc, struct iscsi_daemon_connect *idc) { struct iscsi_session *is; struct sockaddr *from_sa, *to_sa; int error; sx_slock(&sc->sc_lock); TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { if (is->is_id == idc->idc_session_id) break; } if (is == NULL) { sx_sunlock(&sc->sc_lock); return (ESRCH); } sx_sunlock(&sc->sc_lock); if (idc->idc_from_addrlen > 0) { error = getsockaddr(&from_sa, (void *)idc->idc_from_addr, idc->idc_from_addrlen); if (error != 0) { ISCSI_SESSION_WARN(is, "getsockaddr failed with error %d", error); return (error); } } else { from_sa = NULL; } error = getsockaddr(&to_sa, (void *)idc->idc_to_addr, idc->idc_to_addrlen); if (error != 0) { ISCSI_SESSION_WARN(is, "getsockaddr failed with error %d", error); free(from_sa, M_SONAME); return (error); } ISCSI_SESSION_LOCK(is); is->is_statsn = 0; is->is_cmdsn = 0; is->is_expcmdsn = 0; is->is_maxcmdsn = 0; is->is_waiting_for_iscsid = false; is->is_login_phase = true; is->is_timeout = 0; ISCSI_SESSION_UNLOCK(is); error = icl_conn_connect(is->is_conn, idc->idc_domain, idc->idc_socktype, idc->idc_protocol, from_sa, to_sa); free(from_sa, M_SONAME); free(to_sa, M_SONAME); /* * Digests are always disabled during login phase. */ is->is_conn->ic_header_crc32c = false; is->is_conn->ic_data_crc32c = false; return (error); } static int iscsi_ioctl_daemon_send(struct iscsi_softc *sc, struct iscsi_daemon_send *ids) { struct iscsi_session *is; struct icl_pdu *ip; size_t datalen; void *data; int error; sx_slock(&sc->sc_lock); TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { if (is->is_id == ids->ids_session_id) break; } if (is == NULL) { sx_sunlock(&sc->sc_lock); return (ESRCH); } sx_sunlock(&sc->sc_lock); if (is->is_login_phase == false) return (EBUSY); if (is->is_terminating || is->is_reconnecting) return (EIO); datalen = ids->ids_data_segment_len; if (datalen > is->is_conn->ic_max_send_data_segment_length) return (EINVAL); if (datalen > 0) { data = malloc(datalen, M_ISCSI, M_WAITOK); error = copyin(ids->ids_data_segment, data, datalen); if (error != 0) { free(data, M_ISCSI); return (error); } } ip = icl_pdu_new(is->is_conn, M_WAITOK); memcpy(ip->ip_bhs, ids->ids_bhs, sizeof(*ip->ip_bhs)); if (datalen > 0) { error = icl_pdu_append_data(ip, data, datalen, M_WAITOK); KASSERT(error == 0, ("icl_pdu_append_data(..., M_WAITOK) failed")); free(data, M_ISCSI); } iscsi_pdu_queue(ip); return (0); } static int iscsi_ioctl_daemon_receive(struct iscsi_softc *sc, struct iscsi_daemon_receive *idr) { struct iscsi_session *is; struct icl_pdu *ip; void *data; int error; sx_slock(&sc->sc_lock); TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { if (is->is_id == idr->idr_session_id) break; } if (is == NULL) { sx_sunlock(&sc->sc_lock); return (ESRCH); } sx_sunlock(&sc->sc_lock); if (is->is_login_phase == false) return (EBUSY); ISCSI_SESSION_LOCK(is); while (is->is_login_pdu == NULL && is->is_terminating == false && is->is_reconnecting == false) { error = cv_wait_sig(&is->is_login_cv, &is->is_lock); if (error != 0) { ISCSI_SESSION_UNLOCK(is); return (error); } } if (is->is_terminating || is->is_reconnecting) { ISCSI_SESSION_UNLOCK(is); return (EIO); } ip = is->is_login_pdu; is->is_login_pdu = NULL; ISCSI_SESSION_UNLOCK(is); if (ip->ip_data_len > idr->idr_data_segment_len) { icl_pdu_free(ip); return (EMSGSIZE); } copyout(ip->ip_bhs, idr->idr_bhs, sizeof(*ip->ip_bhs)); if (ip->ip_data_len > 0) { data = malloc(ip->ip_data_len, M_ISCSI, M_WAITOK); icl_pdu_get_data(ip, 0, data, 
ip->ip_data_len); copyout(data, idr->idr_data_segment, ip->ip_data_len); free(data, M_ISCSI); } icl_pdu_free(ip); return (0); } #endif /* ICL_KERNEL_PROXY */ static void iscsi_sanitize_session_conf(struct iscsi_session_conf *isc) { /* * Just make sure all the fields are null-terminated. * * XXX: This is not particularly secure. We should * create our own conf and then copy in relevant * fields. */ isc->isc_initiator[ISCSI_NAME_LEN - 1] = '\0'; isc->isc_initiator_addr[ISCSI_ADDR_LEN - 1] = '\0'; isc->isc_initiator_alias[ISCSI_ALIAS_LEN - 1] = '\0'; isc->isc_target[ISCSI_NAME_LEN - 1] = '\0'; isc->isc_target_addr[ISCSI_ADDR_LEN - 1] = '\0'; isc->isc_user[ISCSI_NAME_LEN - 1] = '\0'; isc->isc_secret[ISCSI_SECRET_LEN - 1] = '\0'; isc->isc_mutual_user[ISCSI_NAME_LEN - 1] = '\0'; isc->isc_mutual_secret[ISCSI_SECRET_LEN - 1] = '\0'; } static bool iscsi_valid_session_conf(const struct iscsi_session_conf *isc) { if (isc->isc_initiator[0] == '\0') { ISCSI_DEBUG("empty isc_initiator"); return (false); } if (isc->isc_target_addr[0] == '\0') { ISCSI_DEBUG("empty isc_target_addr"); return (false); } if (isc->isc_discovery != 0 && isc->isc_target[0] != 0) { ISCSI_DEBUG("non-empty isc_target for discovery session"); return (false); } if (isc->isc_discovery == 0 && isc->isc_target[0] == 0) { ISCSI_DEBUG("empty isc_target for non-discovery session"); return (false); } return (true); } static int iscsi_ioctl_session_add(struct iscsi_softc *sc, struct iscsi_session_add *isa) { struct iscsi_session *is; const struct iscsi_session *is2; int error; iscsi_sanitize_session_conf(&isa->isa_conf); if (iscsi_valid_session_conf(&isa->isa_conf) == false) return (EINVAL); is = malloc(sizeof(*is), M_ISCSI, M_ZERO | M_WAITOK); memcpy(&is->is_conf, &isa->isa_conf, sizeof(is->is_conf)); sx_xlock(&sc->sc_lock); /* * Prevent duplicates. */ TAILQ_FOREACH(is2, &sc->sc_sessions, is_next) { if (!!is->is_conf.isc_discovery != !!is2->is_conf.isc_discovery) continue; if (strcmp(is->is_conf.isc_target_addr, is2->is_conf.isc_target_addr) != 0) continue; if (is->is_conf.isc_discovery == 0 && strcmp(is->is_conf.isc_target, is2->is_conf.isc_target) != 0) continue; sx_xunlock(&sc->sc_lock); free(is, M_ISCSI); return (EBUSY); } is->is_conn = icl_new_conn(is->is_conf.isc_offload, is->is_conf.isc_iser, "iscsi", &is->is_lock); if (is->is_conn == NULL) { sx_xunlock(&sc->sc_lock); free(is, M_ISCSI); return (EINVAL); } is->is_conn->ic_receive = iscsi_receive_callback; is->is_conn->ic_error = iscsi_error_callback; is->is_conn->ic_prv0 = is; TAILQ_INIT(&is->is_outstanding); STAILQ_INIT(&is->is_postponed); mtx_init(&is->is_lock, "iscsi_lock", NULL, MTX_DEF); cv_init(&is->is_maintenance_cv, "iscsi_mt"); #ifdef ICL_KERNEL_PROXY cv_init(&is->is_login_cv, "iscsi_login"); #endif /* * Set some default values, from RFC 3720, section 12. * * These values are updated by the handoff IOCTL, but are * needed prior to the handoff to support sending the ISER * login PDU. */ is->is_conn->ic_max_recv_data_segment_length = 8192; is->is_conn->ic_max_send_data_segment_length = 8192; is->is_max_burst_length = 262144; is->is_first_burst_length = 65536; is->is_softc = sc; sc->sc_last_session_id++; is->is_id = sc->sc_last_session_id; is->is_isid[0] = 0x80; /* RFC 3720, 10.12.5: 10b, "Random" ISID. 
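 * The 0x80 supplies the leading 10b ("Random" format) bits; the remaining
 * five ISID bytes are filled in by arc4rand(9) just below.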
*/ arc4rand(&is->is_isid[1], 5, 0); is->is_tsih = 0; callout_init(&is->is_callout, 1); error = kthread_add(iscsi_maintenance_thread, is, NULL, NULL, 0, 0, "iscsimt"); if (error != 0) { ISCSI_SESSION_WARN(is, "kthread_add(9) failed with error %d", error); sx_xunlock(&sc->sc_lock); return (error); } callout_reset(&is->is_callout, 1 * hz, iscsi_callout, is); TAILQ_INSERT_TAIL(&sc->sc_sessions, is, is_next); ISCSI_SESSION_LOCK(is); /* * Don't notify iscsid(8) if the session is disabled and it's not * a discovery session, */ if (is->is_conf.isc_enable == 0 && is->is_conf.isc_discovery == 0) { ISCSI_SESSION_UNLOCK(is); sx_xunlock(&sc->sc_lock); return (0); } is->is_waiting_for_iscsid = true; strlcpy(is->is_reason, "Waiting for iscsid(8)", sizeof(is->is_reason)); ISCSI_SESSION_UNLOCK(is); cv_signal(&sc->sc_cv); sx_xunlock(&sc->sc_lock); return (0); } static bool iscsi_session_conf_matches(unsigned int id1, const struct iscsi_session_conf *c1, unsigned int id2, const struct iscsi_session_conf *c2) { if (id2 != 0 && id2 != id1) return (false); if (c2->isc_target[0] != '\0' && strcmp(c1->isc_target, c2->isc_target) != 0) return (false); if (c2->isc_target_addr[0] != '\0' && strcmp(c1->isc_target_addr, c2->isc_target_addr) != 0) return (false); return (true); } static int iscsi_ioctl_session_remove(struct iscsi_softc *sc, struct iscsi_session_remove *isr) { struct iscsi_session *is, *tmp; bool found = false; iscsi_sanitize_session_conf(&isr->isr_conf); sx_xlock(&sc->sc_lock); TAILQ_FOREACH_SAFE(is, &sc->sc_sessions, is_next, tmp) { ISCSI_SESSION_LOCK(is); if (iscsi_session_conf_matches(is->is_id, &is->is_conf, isr->isr_session_id, &isr->isr_conf)) { found = true; iscsi_session_logout(is); iscsi_session_terminate(is); } ISCSI_SESSION_UNLOCK(is); } sx_xunlock(&sc->sc_lock); if (!found) return (ESRCH); return (0); } static int iscsi_ioctl_session_list(struct iscsi_softc *sc, struct iscsi_session_list *isl) { int error; unsigned int i = 0; struct iscsi_session *is; struct iscsi_session_state iss; sx_slock(&sc->sc_lock); TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { if (i >= isl->isl_nentries) { sx_sunlock(&sc->sc_lock); return (EMSGSIZE); } memset(&iss, 0, sizeof(iss)); memcpy(&iss.iss_conf, &is->is_conf, sizeof(iss.iss_conf)); iss.iss_id = is->is_id; strlcpy(iss.iss_target_alias, is->is_target_alias, sizeof(iss.iss_target_alias)); strlcpy(iss.iss_reason, is->is_reason, sizeof(iss.iss_reason)); strlcpy(iss.iss_offload, is->is_conn->ic_offload, sizeof(iss.iss_offload)); if (is->is_conn->ic_header_crc32c) iss.iss_header_digest = ISCSI_DIGEST_CRC32C; else iss.iss_header_digest = ISCSI_DIGEST_NONE; if (is->is_conn->ic_data_crc32c) iss.iss_data_digest = ISCSI_DIGEST_CRC32C; else iss.iss_data_digest = ISCSI_DIGEST_NONE; iss.iss_max_send_data_segment_length = is->is_conn->ic_max_send_data_segment_length; iss.iss_max_recv_data_segment_length = is->is_conn->ic_max_recv_data_segment_length; iss.iss_max_burst_length = is->is_max_burst_length; iss.iss_first_burst_length = is->is_first_burst_length; iss.iss_immediate_data = is->is_immediate_data; iss.iss_connected = is->is_connected; error = copyout(&iss, isl->isl_pstates + i, sizeof(iss)); if (error != 0) { sx_sunlock(&sc->sc_lock); return (error); } i++; } sx_sunlock(&sc->sc_lock); isl->isl_nentries = i; return (0); } static int iscsi_ioctl_session_modify(struct iscsi_softc *sc, struct iscsi_session_modify *ism) { struct iscsi_session *is; const struct iscsi_session *is2; iscsi_sanitize_session_conf(&ism->ism_conf); if (iscsi_valid_session_conf(&ism->ism_conf) == 
false) return (EINVAL); sx_xlock(&sc->sc_lock); TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { ISCSI_SESSION_LOCK(is); if (is->is_id == ism->ism_session_id) { /* Note that the session remains locked. */ break; } ISCSI_SESSION_UNLOCK(is); } if (is == NULL) { sx_xunlock(&sc->sc_lock); return (ESRCH); } /* * Prevent duplicates. */ TAILQ_FOREACH(is2, &sc->sc_sessions, is_next) { if (is == is2) continue; if (!!ism->ism_conf.isc_discovery != !!is2->is_conf.isc_discovery) continue; if (strcmp(ism->ism_conf.isc_target_addr, is2->is_conf.isc_target_addr) != 0) continue; if (ism->ism_conf.isc_discovery == 0 && strcmp(ism->ism_conf.isc_target, is2->is_conf.isc_target) != 0) continue; ISCSI_SESSION_UNLOCK(is); sx_xunlock(&sc->sc_lock); return (EBUSY); } sx_xunlock(&sc->sc_lock); memcpy(&is->is_conf, &ism->ism_conf, sizeof(is->is_conf)); ISCSI_SESSION_UNLOCK(is); iscsi_session_reconnect(is); return (0); } static int iscsi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int mode, struct thread *td) { struct iscsi_softc *sc; sc = dev->si_drv1; switch (cmd) { case ISCSIDWAIT: return (iscsi_ioctl_daemon_wait(sc, (struct iscsi_daemon_request *)arg)); case ISCSIDHANDOFF: return (iscsi_ioctl_daemon_handoff(sc, (struct iscsi_daemon_handoff *)arg)); case ISCSIDFAIL: return (iscsi_ioctl_daemon_fail(sc, (struct iscsi_daemon_fail *)arg)); #ifdef ICL_KERNEL_PROXY case ISCSIDCONNECT: return (iscsi_ioctl_daemon_connect(sc, (struct iscsi_daemon_connect *)arg)); case ISCSIDSEND: return (iscsi_ioctl_daemon_send(sc, (struct iscsi_daemon_send *)arg)); case ISCSIDRECEIVE: return (iscsi_ioctl_daemon_receive(sc, (struct iscsi_daemon_receive *)arg)); #endif /* ICL_KERNEL_PROXY */ case ISCSISADD: return (iscsi_ioctl_session_add(sc, (struct iscsi_session_add *)arg)); case ISCSISREMOVE: return (iscsi_ioctl_session_remove(sc, (struct iscsi_session_remove *)arg)); case ISCSISLIST: return (iscsi_ioctl_session_list(sc, (struct iscsi_session_list *)arg)); case ISCSISMODIFY: return (iscsi_ioctl_session_modify(sc, (struct iscsi_session_modify *)arg)); default: return (EINVAL); } } static struct iscsi_outstanding * iscsi_outstanding_find(struct iscsi_session *is, uint32_t initiator_task_tag) { struct iscsi_outstanding *io; ISCSI_SESSION_LOCK_ASSERT(is); TAILQ_FOREACH(io, &is->is_outstanding, io_next) { if (io->io_initiator_task_tag == initiator_task_tag) return (io); } return (NULL); } static struct iscsi_outstanding * iscsi_outstanding_find_ccb(struct iscsi_session *is, union ccb *ccb) { struct iscsi_outstanding *io; ISCSI_SESSION_LOCK_ASSERT(is); TAILQ_FOREACH(io, &is->is_outstanding, io_next) { if (io->io_ccb == ccb) return (io); } return (NULL); } static struct iscsi_outstanding * iscsi_outstanding_add(struct iscsi_session *is, struct icl_pdu *request, union ccb *ccb, uint32_t *initiator_task_tagp) { struct iscsi_outstanding *io; int error; ISCSI_SESSION_LOCK_ASSERT(is); io = uma_zalloc(iscsi_outstanding_zone, M_NOWAIT | M_ZERO); if (io == NULL) { ISCSI_SESSION_WARN(is, "failed to allocate %zd bytes", sizeof(*io)); return (NULL); } error = icl_conn_task_setup(is->is_conn, request, &ccb->csio, initiator_task_tagp, &io->io_icl_prv); if (error != 0) { ISCSI_SESSION_WARN(is, "icl_conn_task_setup() failed with error %d", error); uma_zfree(iscsi_outstanding_zone, io); return (NULL); } KASSERT(iscsi_outstanding_find(is, *initiator_task_tagp) == NULL, ("initiator_task_tag 0x%x already added", *initiator_task_tagp)); io->io_initiator_task_tag = *initiator_task_tagp; io->io_ccb = ccb; TAILQ_INSERT_TAIL(&is->is_outstanding, io, io_next); 
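/* The request is now tracked on is_outstanding under its initiator task tag until iscsi_outstanding_remove(). */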
return (io); } static void iscsi_outstanding_remove(struct iscsi_session *is, struct iscsi_outstanding *io) { ISCSI_SESSION_LOCK_ASSERT(is); icl_conn_task_done(is->is_conn, io->io_icl_prv); TAILQ_REMOVE(&is->is_outstanding, io, io_next); uma_zfree(iscsi_outstanding_zone, io); } static void iscsi_action_abort(struct iscsi_session *is, union ccb *ccb) { struct icl_pdu *request; struct iscsi_bhs_task_management_request *bhstmr; struct ccb_abort *cab = &ccb->cab; struct iscsi_outstanding *io, *aio; uint32_t initiator_task_tag; ISCSI_SESSION_LOCK_ASSERT(is); #if 0 KASSERT(is->is_login_phase == false, ("%s called during Login Phase", __func__)); #else if (is->is_login_phase) { ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); return; } #endif aio = iscsi_outstanding_find_ccb(is, cab->abort_ccb); if (aio == NULL) { ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } request = icl_pdu_new(is->is_conn, M_NOWAIT); if (request == NULL) { ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; } initiator_task_tag = is->is_initiator_task_tag++; if (initiator_task_tag == 0xffffffff) initiator_task_tag = is->is_initiator_task_tag++; io = iscsi_outstanding_add(is, request, NULL, &initiator_task_tag); if (io == NULL) { icl_pdu_free(request); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; } io->io_referenced_task_tag = aio->io_initiator_task_tag; bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs; bhstmr->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_REQUEST; bhstmr->bhstmr_function = 0x80 | BHSTMR_FUNCTION_ABORT_TASK; bhstmr->bhstmr_lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); bhstmr->bhstmr_initiator_task_tag = initiator_task_tag; bhstmr->bhstmr_referenced_task_tag = aio->io_initiator_task_tag; iscsi_pdu_queue_locked(request); } static void iscsi_action_scsiio(struct iscsi_session *is, union ccb *ccb) { struct icl_pdu *request; struct iscsi_bhs_scsi_command *bhssc; struct ccb_scsiio *csio; struct iscsi_outstanding *io; size_t len; uint32_t initiator_task_tag; int error; ISCSI_SESSION_LOCK_ASSERT(is); #if 0 KASSERT(is->is_login_phase == false, ("%s called during Login Phase", __func__)); #else if (is->is_login_phase) { ISCSI_SESSION_DEBUG(is, "called during login phase"); if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_DEV_QFRZN; xpt_done(ccb); return; } #endif request = icl_pdu_new(is->is_conn, M_NOWAIT); if (request == NULL) { if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_RESRC_UNAVAIL | CAM_DEV_QFRZN; xpt_done(ccb); return; } initiator_task_tag = is->is_initiator_task_tag++; if (initiator_task_tag == 0xffffffff) initiator_task_tag = is->is_initiator_task_tag++; io = iscsi_outstanding_add(is, request, ccb, &initiator_task_tag); if (io == NULL) { icl_pdu_free(request); if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_RESRC_UNAVAIL | CAM_DEV_QFRZN; xpt_done(ccb); return; } csio = &ccb->csio; bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs; bhssc->bhssc_opcode = ISCSI_BHS_OPCODE_SCSI_COMMAND; bhssc->bhssc_flags |= BHSSC_FLAGS_F; switch (csio->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: bhssc->bhssc_flags |= BHSSC_FLAGS_R; break; case CAM_DIR_OUT: bhssc->bhssc_flags |= BHSSC_FLAGS_W; break; } if 
((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { switch (csio->tag_action) { case MSG_HEAD_OF_Q_TAG: bhssc->bhssc_flags |= BHSSC_FLAGS_ATTR_HOQ; break; case MSG_ORDERED_Q_TAG: bhssc->bhssc_flags |= BHSSC_FLAGS_ATTR_ORDERED; break; case MSG_ACA_TASK: bhssc->bhssc_flags |= BHSSC_FLAGS_ATTR_ACA; break; case MSG_SIMPLE_Q_TAG: default: bhssc->bhssc_flags |= BHSSC_FLAGS_ATTR_SIMPLE; break; } } else bhssc->bhssc_flags |= BHSSC_FLAGS_ATTR_UNTAGGED; if (is->is_protocol_level >= 2) { bhssc->bhssc_pri = (csio->priority << BHSSC_PRI_SHIFT) & BHSSC_PRI_MASK; } bhssc->bhssc_lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); bhssc->bhssc_initiator_task_tag = initiator_task_tag; bhssc->bhssc_expected_data_transfer_length = htonl(csio->dxfer_len); KASSERT(csio->cdb_len <= sizeof(bhssc->bhssc_cdb), ("unsupported CDB size %zd", (size_t)csio->cdb_len)); if (csio->ccb_h.flags & CAM_CDB_POINTER) memcpy(&bhssc->bhssc_cdb, csio->cdb_io.cdb_ptr, csio->cdb_len); else memcpy(&bhssc->bhssc_cdb, csio->cdb_io.cdb_bytes, csio->cdb_len); if (is->is_immediate_data && (csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { len = csio->dxfer_len; //ISCSI_SESSION_DEBUG(is, "adding %zd of immediate data", len); if (len > is->is_first_burst_length) { ISCSI_SESSION_DEBUG(is, "len %zd -> %d", len, is->is_first_burst_length); len = is->is_first_burst_length; } if (len > is->is_conn->ic_max_send_data_segment_length) { ISCSI_SESSION_DEBUG(is, "len %zd -> %d", len, is->is_conn->ic_max_send_data_segment_length); len = is->is_conn->ic_max_send_data_segment_length; } error = icl_pdu_append_data(request, csio->data_ptr, len, M_NOWAIT); if (error != 0) { iscsi_outstanding_remove(is, io); icl_pdu_free(request); if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ISCSI_SESSION_DEBUG(is, "freezing devq"); } ccb->ccb_h.status = CAM_RESRC_UNAVAIL | CAM_DEV_QFRZN; xpt_done(ccb); return; } } iscsi_pdu_queue_locked(request); } static void iscsi_action(struct cam_sim *sim, union ccb *ccb) { struct iscsi_session *is; is = cam_sim_softc(sim); ISCSI_SESSION_LOCK_ASSERT(is); if (is->is_terminating || (is->is_connected == false && fail_on_disconnection)) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } /* * Make sure CAM doesn't sneak in a CCB just after freezing the queue. */ if (is->is_simq_frozen == true) { ccb->ccb_h.status &= ~(CAM_SIM_QUEUED | CAM_STATUS_MASK); ccb->ccb_h.status |= CAM_REQUEUE_REQ; /* Don't freeze the devq - the SIM queue is already frozen. */ xpt_done(ccb); return; } switch (ccb->ccb_h.func_code) { case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_EXTLUNS; /* * XXX: It shouldn't ever be NULL; this could be turned * into a KASSERT eventually. */ if (is->is_conn == NULL) ISCSI_WARN("NULL conn"); else if (is->is_conn->ic_unmapped) cpi->hba_misc |= PIM_UNMAPPED; cpi->hba_eng_cnt = 0; cpi->max_target = 0; /* * Note that the variable below is only relevant for targets * that don't claim compliance with anything above SPC2, which * means they don't support REPORT_LUNS. 
*/ cpi->max_lun = 255; cpi->initiator_id = ~0; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "iSCSI", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 150000; /* XXX */ cpi->transport = XPORT_ISCSI; cpi->transport_version = 0; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC3; cpi->maxio = maxphys; cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; struct ccb_trans_settings_scsi *scsi; cts = &ccb->cts; scsi = &cts->proto_specific.scsi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC3; cts->transport = XPORT_ISCSI; cts->transport_version = 0; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; cts->ccb_h.status = CAM_REQ_CMP; break; } case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, /*extended*/1); ccb->ccb_h.status = CAM_REQ_CMP; break; #if 0 /* * XXX: What's the point? */ case XPT_RESET_BUS: case XPT_TERM_IO: ISCSI_SESSION_DEBUG(is, "faking success for reset, abort, or term_io"); ccb->ccb_h.status = CAM_REQ_CMP; break; #endif case XPT_ABORT: iscsi_action_abort(is, ccb); return; case XPT_SCSI_IO: iscsi_action_scsiio(is, ccb); return; default: #if 0 ISCSI_SESSION_DEBUG(is, "got unsupported code 0x%x", ccb->ccb_h.func_code); #endif ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; } xpt_done(ccb); } static void iscsi_terminate_sessions(struct iscsi_softc *sc) { struct iscsi_session *is; sx_slock(&sc->sc_lock); TAILQ_FOREACH(is, &sc->sc_sessions, is_next) iscsi_session_terminate(is); while(!TAILQ_EMPTY(&sc->sc_sessions)) { ISCSI_DEBUG("waiting for sessions to terminate"); cv_wait(&sc->sc_cv, &sc->sc_lock); } ISCSI_DEBUG("all sessions terminated"); sx_sunlock(&sc->sc_lock); } static void iscsi_shutdown_pre(struct iscsi_softc *sc) { struct iscsi_session *is; if (!fail_on_shutdown) return; /* * If we have any sessions waiting for reconnection, request * maintenance thread to fail them immediately instead of waiting * for reconnect timeout. * * This prevents LUNs with mounted filesystems that are supported * by disconnected iSCSI sessions from hanging, however it will * fail all queued BIOs. 
*/ ISCSI_DEBUG("forcing failing all disconnected sessions due to shutdown"); fail_on_disconnection = 1; sx_slock(&sc->sc_lock); TAILQ_FOREACH(is, &sc->sc_sessions, is_next) { ISCSI_SESSION_LOCK(is); if (!is->is_connected) { ISCSI_SESSION_DEBUG(is, "force failing disconnected session early"); iscsi_session_reconnect(is); } ISCSI_SESSION_UNLOCK(is); } sx_sunlock(&sc->sc_lock); } static void iscsi_shutdown_post(struct iscsi_softc *sc) { if (!KERNEL_PANICKED()) { ISCSI_DEBUG("removing all sessions due to shutdown"); iscsi_terminate_sessions(sc); } } static int iscsi_load(void) { int error; sc = malloc(sizeof(*sc), M_ISCSI, M_ZERO | M_WAITOK); sx_init(&sc->sc_lock, "iscsi"); TAILQ_INIT(&sc->sc_sessions); cv_init(&sc->sc_cv, "iscsi_cv"); iscsi_outstanding_zone = uma_zcreate("iscsi_outstanding", sizeof(struct iscsi_outstanding), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); error = make_dev_p(MAKEDEV_CHECKNAME, &sc->sc_cdev, &iscsi_cdevsw, NULL, UID_ROOT, GID_WHEEL, 0600, "iscsi"); if (error != 0) { ISCSI_WARN("failed to create device node, error %d", error); return (error); } sc->sc_cdev->si_drv1 = sc; sc->sc_shutdown_pre_eh = EVENTHANDLER_REGISTER(shutdown_pre_sync, iscsi_shutdown_pre, sc, SHUTDOWN_PRI_FIRST); /* * shutdown_post_sync needs to run after filesystem shutdown and before * CAM shutdown - otherwise when rebooting with an iSCSI session that is * disconnected but has outstanding requests, dashutdown() will hang on * cam_periph_runccb(). */ sc->sc_shutdown_post_eh = EVENTHANDLER_REGISTER(shutdown_post_sync, iscsi_shutdown_post, sc, SHUTDOWN_PRI_DEFAULT - 1); return (0); } static int iscsi_unload(void) { /* Awaken any threads asleep in iscsi_ioctl(). */ sx_xlock(&sc->sc_lock); sc->sc_unloading = true; cv_signal(&sc->sc_cv); sx_xunlock(&sc->sc_lock); if (sc->sc_cdev != NULL) { ISCSI_DEBUG("removing device node"); destroy_dev(sc->sc_cdev); ISCSI_DEBUG("device node removed"); } if (sc->sc_shutdown_pre_eh != NULL) EVENTHANDLER_DEREGISTER(shutdown_pre_sync, sc->sc_shutdown_pre_eh); if (sc->sc_shutdown_post_eh != NULL) EVENTHANDLER_DEREGISTER(shutdown_post_sync, sc->sc_shutdown_post_eh); iscsi_terminate_sessions(sc); uma_zdestroy(iscsi_outstanding_zone); sx_destroy(&sc->sc_lock); cv_destroy(&sc->sc_cv); free(sc, M_ISCSI); return (0); } static int iscsi_quiesce(void) { sx_slock(&sc->sc_lock); if (!TAILQ_EMPTY(&sc->sc_sessions)) { sx_sunlock(&sc->sc_lock); return (EBUSY); } sx_sunlock(&sc->sc_lock); return (0); } static int iscsi_modevent(module_t mod, int what, void *arg) { int error; switch (what) { case MOD_LOAD: error = iscsi_load(); break; case MOD_UNLOAD: error = iscsi_unload(); break; case MOD_QUIESCE: error = iscsi_quiesce(); break; default: error = EINVAL; break; } return (error); } moduledata_t iscsi_data = { "iscsi", iscsi_modevent, 0 }; DECLARE_MODULE(iscsi, iscsi_data, SI_SUB_DRIVERS, SI_ORDER_MIDDLE); MODULE_DEPEND(iscsi, cam, 1, 1, 1); MODULE_DEPEND(iscsi, icl, 1, 1, 1); diff --git a/sys/dev/smartpqi/smartpqi_cam.c b/sys/dev/smartpqi/smartpqi_cam.c index e389eb143e4a..1278434131d9 100644 --- a/sys/dev/smartpqi/smartpqi_cam.c +++ b/sys/dev/smartpqi/smartpqi_cam.c @@ -1,1354 +1,1354 @@ /*- * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $FreeBSD$ */ /* * CAM interface for smartpqi driver */ #include "smartpqi_includes.h" /* * Set cam sim properties of the smartpqi adapter. */ static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi) { pqisrc_softstate_t *softs = (struct pqisrc_softstate *) cam_sim_softc(sim); device_t dev = softs->os_specific.pqi_dev; DBG_FUNC("IN\n"); cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; cpi->hba_eng_cnt = 0; cpi->max_lun = PQI_MAX_MULTILUN; cpi->max_target = 1088; cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE; cpi->initiator_id = 255; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "Microsemi", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */ cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC4; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->ccb_h.status = CAM_REQ_CMP; cpi->hba_vendor = pci_get_vendor(dev); cpi->hba_device = pci_get_device(dev); cpi->hba_subvendor = pci_get_subvendor(dev); cpi->hba_subdevice = pci_get_subdevice(dev); DBG_FUNC("OUT\n"); } /* * Get transport settings of the smartpqi adapter */ static void get_transport_settings(struct pqisrc_softstate *softs, struct ccb_trans_settings *cts) { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; DBG_FUNC("IN\n"); cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC4; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->valid = CTS_SPI_VALID_DISC; spi->flags = CTS_SPI_FLAGS_DISC_ENB; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; sas->valid = CTS_SAS_VALID_SPEED; cts->ccb_h.status = CAM_REQ_CMP; DBG_FUNC("OUT\n"); } /* * Add the target to CAM layer and rescan, when a new device is found */ void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { union ccb *ccb; DBG_FUNC("IN\n"); if(softs->os_specific.sim_registered) { if ((ccb = xpt_alloc_ccb_nowait()) == NULL) { DBG_ERR("rescan failed (can't allocate CCB)\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(softs->os_specific.sim), device->target, device->lun) != CAM_REQ_CMP) { DBG_ERR("rescan failed (can't create path)\n"); xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } 
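/* The CCB is intentionally not freed here; ownership passes to xpt_rescan(), which disposes of it when the rescan completes. */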
DBG_FUNC("OUT\n"); } /* * Remove the device from CAM layer when deleted or hot removed */ void os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { struct cam_path *tmppath; DBG_FUNC("IN\n"); if(softs->os_specific.sim_registered) { if (xpt_create_path(&tmppath, NULL, cam_sim_path(softs->os_specific.sim), device->target, device->lun) != CAM_REQ_CMP) { DBG_ERR("unable to create path for async event"); return; } xpt_async(AC_LOST_DEVICE, tmppath, NULL); xpt_free_path(tmppath); softs->device_list[device->target][device->lun] = NULL; pqisrc_free_device(softs, device); } DBG_FUNC("OUT\n"); } /* * Function to release the frozen simq */ static void pqi_release_camq(rcb_t *rcb) { pqisrc_softstate_t *softs; struct ccb_scsiio *csio; csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; softs = rcb->softs; DBG_FUNC("IN\n"); if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) { softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY; if (csio->ccb_h.status & CAM_RELEASE_SIMQ) xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0); else csio->ccb_h.status |= CAM_RELEASE_SIMQ; } DBG_FUNC("OUT\n"); } static void pqi_synch_request(rcb_t *rcb) { pqisrc_softstate_t *softs = rcb->softs; DBG_IO("IN rcb = %p\n", rcb); if (!(rcb->cm_flags & PQI_CMD_MAPPED)) return; if (rcb->bcount != 0 ) { if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE) bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, rcb->cm_datamap, BUS_DMASYNC_POSTREAD); if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE) bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, rcb->cm_datamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat, rcb->cm_datamap); } rcb->cm_flags &= ~PQI_CMD_MAPPED; if(rcb->sgt && rcb->nseg) os_mem_free(rcb->softs, (void*)rcb->sgt, rcb->nseg*sizeof(sgt_t)); DBG_IO("OUT\n"); } /* * Function to dma-unmap the completed request */ static inline void pqi_unmap_request(rcb_t *rcb) { DBG_IO("IN rcb = %p\n", rcb); pqi_synch_request(rcb); pqisrc_put_tag(&rcb->softs->taglist, rcb->tag); DBG_IO("OUT\n"); } /* * Construct meaningful LD name for volume here. */ static void smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio) { struct scsi_inquiry_data *inq = NULL; uint8_t *cdb = NULL; pqi_scsi_dev_t *device = NULL; DBG_FUNC("IN\n"); if (pqisrc_ctrl_offline(softs)) return; cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ? (uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes; if(cdb[0] == INQUIRY && (cdb[1] & SI_EVPD) == 0 && (csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN && csio->dxfer_len >= SHORT_INQUIRY_LENGTH) { inq = (struct scsi_inquiry_data *)csio->data_ptr; device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun]; /* Let the disks be probed and dealt with via CAM. 
Only for LD let it fall through and inquiry be tweaked */ if (!device || !pqisrc_is_logical_device(device) || (device->devtype != DISK_DEVICE) || pqisrc_is_external_raid_device(device)) { return; } strncpy(inq->vendor, device->vendor, SID_VENDOR_SIZE); strncpy(inq->product, pqisrc_raidlevel_to_string(device->raid_level), SID_PRODUCT_SIZE); strncpy(inq->revision, device->volume_offline?"OFF":"OK", SID_REVISION_SIZE); } DBG_FUNC("OUT\n"); } static void pqi_complete_scsi_io(struct ccb_scsiio *csio, rcb_t *rcb) { uint32_t release_tag; pqisrc_softstate_t *softs = rcb->softs; DBG_IO("IN scsi io = %p\n", csio); pqi_synch_request(rcb); smartpqi_fix_ld_inquiry(rcb->softs, csio); pqi_release_camq(rcb); release_tag = rcb->tag; os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, release_tag); xpt_done((union ccb *)csio); DBG_FUNC("OUT\n"); } /* * Handle completion of a command - pass results back through the CCB */ void os_io_response_success(rcb_t *rcb) { struct ccb_scsiio *csio; DBG_IO("IN rcb = %p\n", rcb); if (rcb == NULL) panic("rcb is null"); csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; if (csio == NULL) panic("csio is null"); rcb->status = REQUEST_SUCCESS; csio->ccb_h.status = CAM_REQ_CMP; pqi_complete_scsi_io(csio, rcb); DBG_IO("OUT\n"); } static void copy_sense_data_to_csio(struct ccb_scsiio *csio, uint8_t *sense_data, uint16_t sense_data_len) { DBG_IO("IN csio = %p\n", csio); memset(&csio->sense_data, 0, csio->sense_len); sense_data_len = (sense_data_len > csio->sense_len) ? csio->sense_len : sense_data_len; if (sense_data) memcpy(&csio->sense_data, sense_data, sense_data_len); if (csio->sense_len > sense_data_len) csio->sense_resid = csio->sense_len - sense_data_len; else csio->sense_resid = 0; DBG_IO("OUT\n"); } /* * Error response handling for raid IO */ void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info) { struct ccb_scsiio *csio; pqisrc_softstate_t *softs; DBG_IO("IN\n"); csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; if (csio == NULL) panic("csio is null"); softs = rcb->softs; csio->ccb_h.status = CAM_REQ_CMP_ERR; if (!err_info || !rcb->dvp) { DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n", err_info, rcb->dvp); goto error_out; } csio->scsi_status = err_info->status; if (csio->ccb_h.func_code == XPT_SCSI_IO) { /* * Handle specific SCSI status values. */ switch(csio->scsi_status) { case PQI_RAID_STATUS_QUEUE_FULL: csio->ccb_h.status = CAM_REQ_CMP; DBG_ERR("Queue Full error\n"); break; /* check condition, sense data included */ case PQI_RAID_STATUS_CHECK_CONDITION: { uint16_t sense_data_len = LE_16(err_info->sense_data_len); uint8_t *sense_data = NULL; if (sense_data_len) sense_data = err_info->data; copy_sense_data_to_csio(csio, sense_data, sense_data_len); csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID | CAM_REQ_CMP_ERR; } break; case PQI_RAID_DATA_IN_OUT_UNDERFLOW: { uint32_t resid = 0; resid = rcb->bcount-err_info->data_out_transferred; csio->resid = resid; csio->ccb_h.status = CAM_REQ_CMP; } break; default: csio->ccb_h.status = CAM_REQ_CMP; break; } } error_out: pqi_complete_scsi_io(csio, rcb); DBG_IO("OUT\n"); } /* * Error response handling for aio. 
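 * Translates the AIO path service response and status into CAM status values before completing the CCB; TMF responses only update the rcb state and return.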
*/ void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info) { struct ccb_scsiio *csio; pqisrc_softstate_t *softs; DBG_IO("IN\n"); if (rcb == NULL) panic("rcb is null"); rcb->status = REQUEST_SUCCESS; csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; if (csio == NULL) panic("csio is null"); softs = rcb->softs; if (!err_info || !rcb->dvp) { csio->ccb_h.status = CAM_REQ_CMP_ERR; DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n", err_info, rcb->dvp); goto error_out; } switch (err_info->service_resp) { case PQI_AIO_SERV_RESPONSE_COMPLETE: csio->ccb_h.status = err_info->status; break; case PQI_AIO_SERV_RESPONSE_FAILURE: switch(err_info->status) { case PQI_AIO_STATUS_IO_ABORTED: csio->ccb_h.status = CAM_REQ_ABORTED; DBG_WARN_BTL(rcb->dvp, "IO aborted\n"); break; case PQI_AIO_STATUS_UNDERRUN: csio->ccb_h.status = CAM_REQ_CMP; csio->resid = LE_32(err_info->resd_count); break; case PQI_AIO_STATUS_OVERRUN: csio->ccb_h.status = CAM_REQ_CMP; break; case PQI_AIO_STATUS_AIO_PATH_DISABLED: DBG_WARN_BTL(rcb->dvp,"AIO Path Disabled\n"); /* Timed out TMF response comes here */ if (rcb->tm_req) { rcb->req_pending = false; rcb->status = REQUEST_SUCCESS; DBG_ERR("AIO Disabled for TMF\n"); return; } rcb->dvp->aio_enabled = false; rcb->dvp->offload_enabled = false; csio->ccb_h.status |= CAM_REQUEUE_REQ; break; case PQI_AIO_STATUS_IO_ERROR: case PQI_AIO_STATUS_IO_NO_DEVICE: case PQI_AIO_STATUS_INVALID_DEVICE: default: DBG_WARN_BTL(rcb->dvp,"IO Error/Invalid/No device\n"); csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR; break; } break; case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n", (err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_COMPLETE) ? "COMPLETE" : "SUCCEEDED"); rcb->status = REQUEST_SUCCESS; rcb->req_pending = false; return; case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n", (err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_REJECTED) ? "REJECTED" : "INCORRECT LUN"); rcb->status = REQUEST_FAILED; rcb->req_pending = false; return; default: DBG_WARN_BTL(rcb->dvp,"Scsi Status Error\n"); csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR; break; } if(err_info->data_pres == DATA_PRESENT_SENSE_DATA ) { csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION; uint8_t *sense_data = NULL; unsigned sense_data_len = LE_16(err_info->data_len); if (sense_data_len) sense_data = err_info->data; DBG_ERR_BTL(rcb->dvp, "SCSI_STATUS_CHECK_COND sense size %u\n", sense_data_len); copy_sense_data_to_csio(csio, sense_data, sense_data_len); csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; } error_out: pqi_complete_scsi_io(csio, rcb); DBG_IO("OUT\n"); } static void pqi_freeze_ccb(union ccb *ccb) { if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); } } /* * Command-mapping helper function - populate this command's s/g table. 
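 * Invoked as the bus_dmamap_load_ccb() callback: copies the busdma segments into the rcb's sgt array, pre-syncs the DMA map and submits the I/O, failing the CCB on any error.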
*/ static void pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { rcb_t *rcb = (rcb_t *)arg; pqisrc_softstate_t *softs = rcb->softs; union ccb *ccb; if (error || nseg > softs->pqi_cap.max_sg_elem) { DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n", error, nseg, softs->pqi_cap.max_sg_elem); goto error_io; } rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t)); if (!rcb->sgt) { DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg); goto error_io; } rcb->nseg = nseg; for (int i = 0; i < nseg; i++) { rcb->sgt[i].addr = segs[i].ds_addr; rcb->sgt[i].len = segs[i].ds_len; rcb->sgt[i].flags = 0; } if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE) bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, rcb->cm_datamap, BUS_DMASYNC_PREREAD); if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE) bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, rcb->cm_datamap, BUS_DMASYNC_PREWRITE); /* Call IO functions depending on pd or ld */ rcb->status = REQUEST_PENDING; error = pqisrc_build_send_io(softs, rcb); if (error) { rcb->req_pending = false; DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error); } else { /* Successfully IO was submitted to the device. */ return; } error_io: ccb = rcb->cm_ccb; ccb->ccb_h.status = CAM_RESRC_UNAVAIL; pqi_freeze_ccb(ccb); pqi_unmap_request(rcb); xpt_done(ccb); return; } /* * Function to dma-map the request buffer */ static int pqi_map_request(rcb_t *rcb) { pqisrc_softstate_t *softs = rcb->softs; int bsd_status = BSD_SUCCESS; union ccb *ccb = rcb->cm_ccb; DBG_FUNC("IN\n"); /* check that mapping is necessary */ if (rcb->cm_flags & PQI_CMD_MAPPED) return BSD_SUCCESS; rcb->cm_flags |= PQI_CMD_MAPPED; if (rcb->bcount) { bsd_status = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat, rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0); if (bsd_status != BSD_SUCCESS && bsd_status != EINPROGRESS) { DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %d\n", bsd_status, rcb->bcount); return bsd_status; } } else { /* * Set up the command to go to the controller. If there are no * data buffers associated with the command then it can bypass * busdma. 
*/ /* Call IO functions depending on pd or ld */ rcb->status = REQUEST_PENDING; if (pqisrc_build_send_io(softs, rcb) != PQI_STATUS_SUCCESS) { bsd_status = EIO; } } DBG_FUNC("OUT error = %d\n", bsd_status); return bsd_status; } /* * Function to clear the request control block */ void os_reset_rcb(rcb_t *rcb) { rcb->error_info = NULL; rcb->req = NULL; rcb->status = -1; rcb->tag = INVALID_ELEM; rcb->dvp = NULL; rcb->cdbp = NULL; rcb->softs = NULL; rcb->cm_flags = 0; rcb->cm_data = NULL; rcb->bcount = 0; rcb->nseg = 0; rcb->sgt = NULL; rcb->cm_ccb = NULL; rcb->encrypt_enable = false; rcb->ioaccel_handle = 0; rcb->resp_qid = 0; rcb->req_pending = false; rcb->tm_req = false; } /* * Callback function for the lun rescan */ static void smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb) { xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); } /* * Function to rescan the lun */ static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target, int lun) { union ccb *ccb = NULL; cam_status status = 0; struct cam_path *path = NULL; DBG_FUNC("IN\n"); ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { DBG_ERR("Unable to alloc ccb for lun rescan\n"); return; } status = xpt_create_path(&path, NULL, cam_sim_path(softs->os_specific.sim), target, lun); if (status != CAM_REQ_CMP) { DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP \n", status); xpt_free_ccb(ccb); return; } bzero(ccb, sizeof(union ccb)); xpt_setup_ccb(&ccb->ccb_h, path, 5); ccb->ccb_h.func_code = XPT_SCAN_LUN; ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb; ccb->crcn.flags = CAM_FLAG_NONE; xpt_action(ccb); DBG_FUNC("OUT\n"); } /* * Function to rescan the lun under each target */ void smartpqi_target_rescan(struct pqisrc_softstate *softs) { int target = 0, lun = 0; DBG_FUNC("IN\n"); for(target = 0; target < PQI_MAX_DEVICES; target++){ for(lun = 0; lun < PQI_MAX_MULTILUN; lun++){ if(softs->device_list[target][lun]){ smartpqi_lun_rescan(softs, target, lun); } } } DBG_FUNC("OUT\n"); } /* * Set the mode of tagged command queueing for the current task. 
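 * Maps the CCB's CAM tag action to the corresponding SOP task attribute.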
*/ uint8_t os_get_task_attr(rcb_t *rcb) { union ccb *ccb = rcb->cm_ccb; uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE; switch(ccb->csio.tag_action) { case MSG_HEAD_OF_Q_TAG: tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE; break; case MSG_ORDERED_Q_TAG: tag_action = SOP_TASK_ATTRIBUTE_ORDERED; break; case MSG_SIMPLE_Q_TAG: default: tag_action = SOP_TASK_ATTRIBUTE_SIMPLE; break; } return tag_action; } /* * Complete all outstanding commands */ void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs) { int tag = 0; pqi_scsi_dev_t *dvp = NULL; DBG_FUNC("IN\n"); for (tag = 1; tag <= softs->max_outstanding_io; tag++) { rcb_t *prcb = &softs->rcb[tag]; dvp = prcb->dvp; if(prcb->req_pending && prcb->cm_ccb ) { prcb->req_pending = false; prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP; pqi_complete_scsi_io(&prcb->cm_ccb->csio, prcb); if (dvp) pqisrc_decrement_device_active_io(softs, dvp); } } DBG_FUNC("OUT\n"); } /* * IO handling functionality entry point */ static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb) { rcb_t *rcb; uint32_t tag, no_transfer = 0; pqisrc_softstate_t *softs = (struct pqisrc_softstate *) cam_sim_softc(sim); int32_t error; pqi_scsi_dev_t *dvp; DBG_FUNC("IN\n"); if (softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id); return ENXIO; } dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; /* Check controller state */ if (IN_PQI_RESET(softs)) { ccb->ccb_h.status = CAM_SCSI_BUS_RESET | CAM_BUSY | CAM_REQ_INPROG; DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id); return ENXIO; } /* Check device state */ if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) { ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP; DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id); return ENXIO; } /* Check device reset */ if (DEVICE_RESET(dvp)) { ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY; DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id); return EBUSY; } if (dvp->expose_device == false) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id); return ENXIO; } tag = pqisrc_get_tag(&softs->taglist); if (tag == INVALID_ELEM) { DBG_ERR("Get Tag failed\n"); xpt_freeze_simq(softs->os_specific.sim, 1); softs->os_specific.pqi_flags |= PQI_FLAG_BUSY; ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ); return EIO; } DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist); rcb = &softs->rcb[tag]; os_reset_rcb(rcb); rcb->tag = tag; rcb->softs = softs; rcb->cmdlen = ccb->csio.cdb_len; ccb->ccb_h.sim_priv.entries[0].ptr = rcb; switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE; break; case CAM_DIR_OUT: rcb->data_dir = SOP_DATA_DIR_TO_DEVICE; break; case CAM_DIR_NONE: no_transfer = 1; break; default: DBG_ERR("Unknown Dir\n"); break; } rcb->cm_ccb = ccb; rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; if (!no_transfer) { rcb->cm_data = (void *)ccb->csio.data_ptr; rcb->bcount = ccb->csio.dxfer_len; } else { rcb->cm_data = NULL; rcb->bcount = 0; } /* * Submit the request to the adapter. * * Note that this may fail if we're unable to map the request (and * if we ever learn a transport layer other than simple, may fail * if the adapter rejects the command). 
*/ if ((error = pqi_map_request(rcb)) != BSD_SUCCESS) { xpt_freeze_simq(softs->os_specific.sim, 1); if (error == EINPROGRESS) { /* Release simq in the completion */ softs->os_specific.pqi_flags |= PQI_FLAG_BUSY; error = BSD_SUCCESS; } else { rcb->req_pending = false; ccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; DBG_WARN("Requeue req error = %d target = %d\n", error, ccb->ccb_h.target_id); pqi_unmap_request(rcb); error = EIO; } } DBG_FUNC("OUT error = %d\n", error); return error; } static inline int pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t *rcb) { if (PQI_STATUS_SUCCESS == pqi_status && REQUEST_SUCCESS == rcb->status) return BSD_SUCCESS; else return EIO; } /* * Abort a task, task management functionality */ static int pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb) { struct ccb_hdr *ccb_h = &ccb->ccb_h; rcb_t *rcb = NULL; rcb_t *prcb = ccb->ccb_h.sim_priv.entries[0].ptr; uint32_t tag; int rval; DBG_FUNC("IN\n"); tag = pqisrc_get_tag(&softs->taglist); rcb = &softs->rcb[tag]; rcb->tag = tag; if (!rcb->dvp) { DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code); rval = ENXIO; goto error_tmf; } rcb->tm_req = true; rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, prcb, SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK); if ((rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb)) == BSD_SUCCESS) ccb->ccb_h.status = CAM_REQ_ABORTED; error_tmf: os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, tag); DBG_FUNC("OUT rval = %d\n", rval); return rval; } /* * Abort a taskset, task management functionality */ static int pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb) { struct ccb_hdr *ccb_h = &ccb->ccb_h; rcb_t *rcb = NULL; uint32_t tag; int rval; DBG_FUNC("IN\n"); tag = pqisrc_get_tag(&softs->taglist); rcb = &softs->rcb[tag]; rcb->tag = tag; if (!rcb->dvp) { DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code); rval = ENXIO; goto error_tmf; } rcb->tm_req = true; rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, NULL, SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET); rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb); error_tmf: os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, tag); DBG_FUNC("OUT rval = %d\n", rval); return rval; } /* * Target reset task management functionality */ static int pqisrc_target_reset( pqisrc_softstate_t *softs, union ccb *ccb) { struct ccb_hdr *ccb_h = &ccb->ccb_h; pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; rcb_t *rcb = NULL; uint32_t tag; int rval; DBG_FUNC("IN\n"); if (devp == NULL) { DBG_ERR("bad target %d, tmf type : 0x%x\n", ccb_h->target_id, ccb_h->func_code); return ENXIO; } tag = pqisrc_get_tag(&softs->taglist); rcb = &softs->rcb[tag]; rcb->tag = tag; devp->reset_in_progress = true; rcb->tm_req = true; rval = pqisrc_send_tmf(softs, devp, rcb, NULL, SOP_TASK_MANAGEMENT_LUN_RESET); rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb); devp->reset_in_progress = false; os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, tag); DBG_FUNC("OUT rval = %d\n", rval); return rval; } /* * cam entry point of the smartpqi module. 
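 * Dispatches CAM function codes to the driver's I/O start, task management and query handlers; unsupported codes are completed with CAM_FUNC_NOTAVAIL.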
*/ static void smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb) { struct pqisrc_softstate *softs = cam_sim_softc(sim); struct ccb_hdr *ccb_h = &ccb->ccb_h; DBG_FUNC("IN\n"); switch (ccb_h->func_code) { case XPT_SCSI_IO: { if(!pqisrc_io_start(sim, ccb)) { return; } break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; ccg = &ccb->ccg; if (ccg->block_size == 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status |= CAM_REQ_INVALID; break; } cam_calc_geometry(ccg, /* extended */ 1); ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_PATH_INQ: { update_sim_properties(sim, &ccb->cpi); ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: get_transport_settings(softs, &ccb->cts); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_ABORT: if(pqisrc_scsi_abort_task(softs, ccb)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); DBG_ERR("Abort task failed on %d\n", ccb->ccb_h.target_id); return; } break; case XPT_TERM_IO: if (pqisrc_scsi_abort_task_set(softs, ccb)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; DBG_ERR("Abort task set failed on %d\n", ccb->ccb_h.target_id); xpt_done(ccb); return; } break; case XPT_RESET_DEV: if(pqisrc_target_reset(softs, ccb)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; DBG_ERR("Target reset failed on %d\n", ccb->ccb_h.target_id); xpt_done(ccb); return; } else { ccb->ccb_h.status = CAM_REQ_CMP; } break; case XPT_RESET_BUS: ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; return; default: DBG_WARN("UNSUPPORTED FUNC CODE\n"); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; } xpt_done(ccb); DBG_FUNC("OUT\n"); } /* * Function to poll the response, when interrupts are unavailable * This also serves supporting crash dump. */ static void smartpqi_poll(struct cam_sim *sim) { struct pqisrc_softstate *softs = cam_sim_softc(sim); int i; for (i = 1; i < softs->intr_count; i++ ) pqisrc_process_response_queue(softs, i); } /* * Function to adjust the queue depth of a device */ void smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth) { struct ccb_relsim crs; DBG_INFO("IN\n"); memset(&crs, 0, sizeof(crs)); xpt_setup_ccb(&crs.ccb_h, path, 5); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.ccb_h.flags = CAM_DEV_QFREEZE; crs.release_flags = RELSIM_ADJUST_OPENINGS; crs.openings = queue_depth; xpt_action((union ccb *)&crs); if(crs.ccb_h.status != CAM_REQ_CMP) { printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status); } DBG_INFO("OUT\n"); } /* * Function to register async callback for setting queue depth */ static void smartpqi_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct pqisrc_softstate *softs; softs = (struct pqisrc_softstate*)callback_arg; DBG_FUNC("IN\n"); switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { break; } uint32_t t_id = cgd->ccb_h.target_id; if (t_id <= (PQI_CTLR_INDEX - 1)) { if (softs != NULL) { pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun]; if (dvp == NULL) { DBG_ERR("Target is null, target id=%d\n", t_id); break; } smartpqi_adjust_queue_depth(path, dvp->queue_depth); } } break; } default: break; } DBG_FUNC("OUT\n"); } /* * Function to register sim with CAM layer for smartpqi driver */ int register_sim(struct pqisrc_softstate *softs, int card_index) { int max_transactions; union ccb *ccb = NULL; - cam_status status = 0; + int error; struct ccb_setasync csa; struct cam_sim *sim; DBG_FUNC("IN\n"); max_transactions = softs->max_io_for_scsi_ml; 
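/* Size the SIM queue to the maximum I/O count the firmware exposes to the SCSI layer. */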
softs->os_specific.devq = cam_simq_alloc(max_transactions); if (softs->os_specific.devq == NULL) { DBG_ERR("cam_simq_alloc failed txns = %d\n", max_transactions); return ENOMEM; } sim = cam_sim_alloc(smartpqi_cam_action, \ smartpqi_poll, "smartpqi", softs, \ card_index, &softs->os_specific.cam_lock, \ 1, max_transactions, softs->os_specific.devq); if (sim == NULL) { DBG_ERR("cam_sim_alloc failed txns = %d\n", max_transactions); cam_simq_free(softs->os_specific.devq); return ENOMEM; } softs->os_specific.sim = sim; mtx_lock(&softs->os_specific.cam_lock); - status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0); - if (status != CAM_SUCCESS) { - DBG_ERR("xpt_bus_register failed status=%d\n", status); + error = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0); + if (error != CAM_SUCCESS) { + DBG_ERR("xpt_bus_register failed errno %d\n", error); cam_sim_free(softs->os_specific.sim, FALSE); cam_simq_free(softs->os_specific.devq); mtx_unlock(&softs->os_specific.cam_lock); return ENXIO; } softs->os_specific.sim_registered = TRUE; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { DBG_ERR("xpt_create_path failed\n"); return ENXIO; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(softs->os_specific.sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { DBG_ERR("xpt_create_path failed\n"); xpt_free_ccb(ccb); xpt_bus_deregister(cam_sim_path(softs->os_specific.sim)); cam_sim_free(softs->os_specific.sim, TRUE); mtx_unlock(&softs->os_specific.cam_lock); return ENXIO; } /* * Callback to set the queue depth per target which is * derived from the FW. */ softs->os_specific.path = ccb->ccb_h.path; memset(&csa, 0, sizeof(csa)); xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE; csa.callback = smartpqi_async; csa.callback_arg = softs; xpt_action((union ccb *)&csa); if (csa.ccb_h.status != CAM_REQ_CMP) { DBG_ERR("Unable to register smartpqi_aysnc handler: %d!\n", csa.ccb_h.status); } mtx_unlock(&softs->os_specific.cam_lock); DBG_INFO("OUT\n"); return BSD_SUCCESS; } /* * Function to deregister smartpqi sim from cam layer */ void deregister_sim(struct pqisrc_softstate *softs) { struct ccb_setasync csa; DBG_FUNC("IN\n"); if (softs->os_specific.mtx_init) { mtx_lock(&softs->os_specific.cam_lock); } memset(&csa, 0, sizeof(csa)); xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = smartpqi_async; csa.callback_arg = softs; xpt_action((union ccb *)&csa); xpt_free_path(softs->os_specific.path); if (softs->os_specific.sim) { xpt_release_simq(softs->os_specific.sim, 0); xpt_bus_deregister(cam_sim_path(softs->os_specific.sim)); softs->os_specific.sim_registered = FALSE; cam_sim_free(softs->os_specific.sim, FALSE); softs->os_specific.sim = NULL; } if (softs->os_specific.mtx_init) { mtx_unlock(&softs->os_specific.cam_lock); } if (softs->os_specific.devq != NULL) { cam_simq_free(softs->os_specific.devq); } if (softs->os_specific.mtx_init) { mtx_destroy(&softs->os_specific.cam_lock); softs->os_specific.mtx_init = FALSE; } mtx_destroy(&softs->os_specific.map_lock); DBG_FUNC("OUT\n"); } void os_rescan_target(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { struct cam_path *tmppath; DBG_FUNC("IN\n"); if(softs->os_specific.sim_registered) { if (xpt_create_path(&tmppath, NULL, cam_sim_path(softs->os_specific.sim), device->target, device->lun) != CAM_REQ_CMP) { DBG_ERR("unable to create path for async event!!! 
Bus: %d Target: %d Lun: %d\n", device->bus, device->target, device->lun); return; } xpt_async(AC_INQ_CHANGED, tmppath, NULL); xpt_free_path(tmppath); } device->scsi_rescan = false; DBG_FUNC("OUT\n"); } diff --git a/sys/dev/usb/storage/umass.c b/sys/dev/usb/storage/umass.c index bc07fe50b6ab..65c72b06e244 100644 --- a/sys/dev/usb/storage/umass.c +++ b/sys/dev/usb/storage/umass.c @@ -1,3021 +1,3021 @@ #include __FBSDID("$FreeBSD$"); /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1999 MAEKAWA Masahide , * Nick Hibma * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * $NetBSD: umass.c,v 1.28 2000/04/02 23:46:53 augustss Exp $ */ /* Also already merged from NetBSD: * $NetBSD: umass.c,v 1.67 2001/11/25 19:05:22 augustss Exp $ * $NetBSD: umass.c,v 1.90 2002/11/04 19:17:33 pooka Exp $ * $NetBSD: umass.c,v 1.108 2003/11/07 17:03:25 wiz Exp $ * $NetBSD: umass.c,v 1.109 2003/12/04 13:57:31 keihan Exp $ */ /* * Universal Serial Bus Mass Storage Class specs: * http://www.usb.org/developers/devclass_docs/usb_msc_overview_1.2.pdf * http://www.usb.org/developers/devclass_docs/usbmassbulk_10.pdf * http://www.usb.org/developers/devclass_docs/usb_msc_cbi_1.1.pdf * http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf */ /* * Ported to NetBSD by Lennart Augustsson . * Parts of the code written by Jason R. Thorpe . */ /* * The driver handles 3 Wire Protocols * - Command/Bulk/Interrupt (CBI) * - Command/Bulk/Interrupt with Command Completion Interrupt (CBI with CCI) * - Mass Storage Bulk-Only (BBB) * (BBB refers Bulk/Bulk/Bulk for Command/Data/Status phases) * * Over these wire protocols it handles the following command protocols * - SCSI * - UFI (floppy command set) * - 8070i (ATAPI) * * UFI and 8070i (ATAPI) are transformed versions of the SCSI command set. The * sc->sc_transform method is used to convert the commands into the appropriate * format (if at all necessary). For example, UFI requires all commands to be * 12 bytes in length amongst other things. 
* * The source code below is marked and can be split into a number of pieces * (in this order): * * - probe/attach/detach * - generic transfer routines * - BBB * - CBI * - CBI_I (in addition to functions from CBI) * - CAM (Common Access Method) * - SCSI * - UFI * - 8070i (ATAPI) * * The protocols are implemented using a state machine, for the transfers as * well as for the resets. The state machine is contained in umass_t_*_callback. * The state machine is started through either umass_command_start() or * umass_reset(). * * The reason for doing this is a) CAM performs a lot better this way and b) it * avoids using tsleep from interrupt context (for example after a failed * transfer). */ /* * The SCSI related part of this driver has been derived from the * dev/ppbus/vpo.c driver, by Nicolas Souchu (nsouch@FreeBSD.org). * * The CAM layer uses so called actions which are messages sent to the host * adapter for completion. The actions come in through umass_cam_action. The * appropriate block of routines is called depending on the transport protocol * in use. When the transfer has finished, these routines call * umass_cam_cb again to complete the CAM command. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #include #include #include #include #include #include #include #include #ifdef USB_DEBUG #define DIF(m, x) \ do { \ if (umass_debug & (m)) { x ; } \ } while (0) #define DPRINTF(sc, m, fmt, ...) \ do { \ if (umass_debug & (m)) { \ printf("%s:%s: " fmt, \ (sc) ? (const char *)(sc)->sc_name : \ (const char *)"umassX", \ __FUNCTION__ ,## __VA_ARGS__); \ } \ } while (0) #define UDMASS_GEN 0x00010000 /* general */ #define UDMASS_SCSI 0x00020000 /* scsi */ #define UDMASS_UFI 0x00040000 /* ufi command set */ #define UDMASS_ATAPI 0x00080000 /* 8070i command set */ #define UDMASS_CMD (UDMASS_SCSI|UDMASS_UFI|UDMASS_ATAPI) #define UDMASS_USB 0x00100000 /* USB general */ #define UDMASS_BBB 0x00200000 /* Bulk-Only transfers */ #define UDMASS_CBI 0x00400000 /* CBI transfers */ #define UDMASS_WIRE (UDMASS_BBB|UDMASS_CBI) #define UDMASS_ALL 0xffff0000 /* all of the above */ static int umass_debug; static int umass_throttle; static SYSCTL_NODE(_hw_usb, OID_AUTO, umass, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "USB umass"); SYSCTL_INT(_hw_usb_umass, OID_AUTO, debug, CTLFLAG_RWTUN, &umass_debug, 0, "umass debug level"); SYSCTL_INT(_hw_usb_umass, OID_AUTO, throttle, CTLFLAG_RWTUN, &umass_throttle, 0, "Forced delay between commands in milliseconds"); #else #define DIF(...) do { } while (0) #define DPRINTF(...) 
do { } while (0) #endif #define UMASS_BULK_SIZE (1 << 17) #define UMASS_CBI_DIAGNOSTIC_CMDLEN 12 /* bytes */ #define UMASS_MAX_CMDLEN MAX(12, CAM_MAX_CDBLEN) /* bytes */ /* USB transfer definitions */ #define UMASS_T_BBB_RESET1 0 /* Bulk-Only */ #define UMASS_T_BBB_RESET2 1 #define UMASS_T_BBB_RESET3 2 #define UMASS_T_BBB_COMMAND 3 #define UMASS_T_BBB_DATA_READ 4 #define UMASS_T_BBB_DATA_RD_CS 5 #define UMASS_T_BBB_DATA_WRITE 6 #define UMASS_T_BBB_DATA_WR_CS 7 #define UMASS_T_BBB_STATUS 8 #define UMASS_T_BBB_MAX 9 #define UMASS_T_CBI_RESET1 0 /* CBI */ #define UMASS_T_CBI_RESET2 1 #define UMASS_T_CBI_RESET3 2 #define UMASS_T_CBI_COMMAND 3 #define UMASS_T_CBI_DATA_READ 4 #define UMASS_T_CBI_DATA_RD_CS 5 #define UMASS_T_CBI_DATA_WRITE 6 #define UMASS_T_CBI_DATA_WR_CS 7 #define UMASS_T_CBI_STATUS 8 #define UMASS_T_CBI_RESET4 9 #define UMASS_T_CBI_MAX 10 #define UMASS_T_MAX MAX(UMASS_T_CBI_MAX, UMASS_T_BBB_MAX) /* Generic definitions */ /* Direction for transfer */ #define DIR_NONE 0 #define DIR_IN 1 #define DIR_OUT 2 /* device name */ #define DEVNAME "umass" #define DEVNAME_SIM "umass-sim" /* Approximate maximum transfer speeds (assumes 33% overhead). */ #define UMASS_FULL_TRANSFER_SPEED 1000 #define UMASS_HIGH_TRANSFER_SPEED 40000 #define UMASS_SUPER_TRANSFER_SPEED 400000 #define UMASS_FLOPPY_TRANSFER_SPEED 20 #define UMASS_TIMEOUT 5000 /* ms */ /* CAM specific definitions */ #define UMASS_SCSIID_MAX 1 /* maximum number of drives expected */ #define UMASS_SCSIID_HOST UMASS_SCSIID_MAX /* Bulk-Only features */ #define UR_BBB_RESET 0xff /* Bulk-Only reset */ #define UR_BBB_GET_MAX_LUN 0xfe /* Get maximum lun */ /* Command Block Wrapper */ typedef struct { uDWord dCBWSignature; #define CBWSIGNATURE 0x43425355 uDWord dCBWTag; uDWord dCBWDataTransferLength; uByte bCBWFlags; #define CBWFLAGS_OUT 0x00 #define CBWFLAGS_IN 0x80 uByte bCBWLUN; uByte bCDBLength; #define CBWCDBLENGTH 16 uByte CBWCDB[CBWCDBLENGTH]; } __packed umass_bbb_cbw_t; #define UMASS_BBB_CBW_SIZE 31 /* Command Status Wrapper */ typedef struct { uDWord dCSWSignature; #define CSWSIGNATURE 0x53425355 #define CSWSIGNATURE_IMAGINATION_DBX1 0x43425355 #define CSWSIGNATURE_OLYMPUS_C1 0x55425355 uDWord dCSWTag; uDWord dCSWDataResidue; uByte bCSWStatus; #define CSWSTATUS_GOOD 0x0 #define CSWSTATUS_FAILED 0x1 #define CSWSTATUS_PHASE 0x2 } __packed umass_bbb_csw_t; #define UMASS_BBB_CSW_SIZE 13 /* CBI features */ #define UR_CBI_ADSC 0x00 typedef union { struct { uint8_t type; #define IDB_TYPE_CCI 0x00 uint8_t value; #define IDB_VALUE_PASS 0x00 #define IDB_VALUE_FAIL 0x01 #define IDB_VALUE_PHASE 0x02 #define IDB_VALUE_PERSISTENT 0x03 #define IDB_VALUE_STATUS_MASK 0x03 } __packed common; struct { uint8_t asc; uint8_t ascq; } __packed ufi; } __packed umass_cbi_sbl_t; struct umass_softc; /* see below */ typedef void (umass_callback_t)(struct umass_softc *sc, union ccb *ccb, uint32_t residue, uint8_t status); #define STATUS_CMD_OK 0 /* everything ok */ #define STATUS_CMD_UNKNOWN 1 /* will have to fetch sense */ #define STATUS_CMD_FAILED 2 /* transfer was ok, command failed */ #define STATUS_WIRE_FAILED 3 /* couldn't even get command across */ typedef uint8_t (umass_transform_t)(struct umass_softc *sc, uint8_t *cmd_ptr, uint8_t cmd_len); /* Wire and command protocol */ #define UMASS_PROTO_BBB 0x0001 /* USB wire protocol */ #define UMASS_PROTO_CBI 0x0002 #define UMASS_PROTO_CBI_I 0x0004 #define UMASS_PROTO_WIRE 0x00ff /* USB wire protocol mask */ #define UMASS_PROTO_SCSI 0x0100 /* command protocol */ #define UMASS_PROTO_ATAPI 0x0200 #define 
UMASS_PROTO_UFI 0x0400 #define UMASS_PROTO_RBC 0x0800 #define UMASS_PROTO_COMMAND 0xff00 /* command protocol mask */ /* Device specific quirks */ #define NO_QUIRKS 0x0000 /* * The drive does not support Test Unit Ready. Convert to Start Unit */ #define NO_TEST_UNIT_READY 0x0001 /* * The drive does not reset the Unit Attention state after REQUEST * SENSE has been sent. The INQUIRY command does not reset the UA * either, and so CAM runs in circles trying to retrieve the initial * INQUIRY data. */ #define RS_NO_CLEAR_UA 0x0002 /* The drive does not support START STOP. */ #define NO_START_STOP 0x0004 /* Don't ask for full inquiry data (255b). */ #define FORCE_SHORT_INQUIRY 0x0008 /* Needs to be initialised the Shuttle way */ #define SHUTTLE_INIT 0x0010 /* Drive needs to be switched to alternate iface 1 */ #define ALT_IFACE_1 0x0020 /* Drive does not do 1Mb/s, but just floppy speeds (20kb/s) */ #define FLOPPY_SPEED 0x0040 /* The device can't count and gets the residue of transfers wrong */ #define IGNORE_RESIDUE 0x0080 /* No GetMaxLun call */ #define NO_GETMAXLUN 0x0100 /* The device uses a weird CSWSIGNATURE. */ #define WRONG_CSWSIG 0x0200 /* Device cannot handle INQUIRY so fake a generic response */ #define NO_INQUIRY 0x0400 /* Device cannot handle INQUIRY EVPD, return CHECK CONDITION */ #define NO_INQUIRY_EVPD 0x0800 /* Pad all RBC requests to 12 bytes. */ #define RBC_PAD_TO_12 0x1000 /* * Device reports number of sectors from READ_CAPACITY, not max * sector number. */ #define READ_CAPACITY_OFFBY1 0x2000 /* * Device cannot handle a SCSI synchronize cache command. Normally * this quirk would be handled in the cam layer, but for IDE bridges * we need to associate the quirk with the bridge and not the * underlying disk device. This is handled by faking a success * result. */ #define NO_SYNCHRONIZE_CACHE 0x4000 /* Device does not support 'PREVENT/ALLOW MEDIUM REMOVAL'. */ #define NO_PREVENT_ALLOW 0x8000 struct umass_softc { struct scsi_sense cam_scsi_sense; struct scsi_test_unit_ready cam_scsi_test_unit_ready; struct mtx sc_mtx; struct { uint8_t *data_ptr; union ccb *ccb; umass_callback_t *callback; uint32_t data_len; /* bytes */ uint32_t data_rem; /* bytes */ uint32_t data_timeout; /* ms */ uint32_t actlen; /* bytes */ uint8_t cmd_data[UMASS_MAX_CMDLEN]; uint8_t cmd_len; /* bytes */ uint8_t dir; uint8_t lun; } sc_transfer; /* Bulk specific variables for transfers in progress */ umass_bbb_cbw_t cbw; /* command block wrapper */ umass_bbb_csw_t csw; /* command status wrapper */ /* CBI specific variables for transfers in progress */ umass_cbi_sbl_t sbl; /* status block */ device_t sc_dev; struct usb_device *sc_udev; struct cam_sim *sc_sim; /* SCSI Interface Module */ struct usb_xfer *sc_xfer[UMASS_T_MAX]; /* * The command transform function is used to convert the SCSI * commands into their derivatives, like UFI, ATAPI, and friends. 
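 * See the umass_*_transform() callbacks declared below.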
*/ umass_transform_t *sc_transform; uint32_t sc_unit; uint32_t sc_quirks; /* they got it almost right */ uint32_t sc_proto; /* wire and cmd protocol */ uint8_t sc_name[16]; uint8_t sc_iface_no; /* interface number */ uint8_t sc_maxlun; /* maximum LUN number, inclusive */ uint8_t sc_last_xfer_index; uint8_t sc_status_try; }; struct umass_probe_proto { uint32_t quirks; uint32_t proto; int error; }; /* prototypes */ static device_probe_t umass_probe; static device_attach_t umass_attach; static device_detach_t umass_detach; static usb_callback_t umass_tr_error; static usb_callback_t umass_t_bbb_reset1_callback; static usb_callback_t umass_t_bbb_reset2_callback; static usb_callback_t umass_t_bbb_reset3_callback; static usb_callback_t umass_t_bbb_command_callback; static usb_callback_t umass_t_bbb_data_read_callback; static usb_callback_t umass_t_bbb_data_rd_cs_callback; static usb_callback_t umass_t_bbb_data_write_callback; static usb_callback_t umass_t_bbb_data_wr_cs_callback; static usb_callback_t umass_t_bbb_status_callback; static usb_callback_t umass_t_cbi_reset1_callback; static usb_callback_t umass_t_cbi_reset2_callback; static usb_callback_t umass_t_cbi_reset3_callback; static usb_callback_t umass_t_cbi_reset4_callback; static usb_callback_t umass_t_cbi_command_callback; static usb_callback_t umass_t_cbi_data_read_callback; static usb_callback_t umass_t_cbi_data_rd_cs_callback; static usb_callback_t umass_t_cbi_data_write_callback; static usb_callback_t umass_t_cbi_data_wr_cs_callback; static usb_callback_t umass_t_cbi_status_callback; static void umass_cancel_ccb(struct umass_softc *); static void umass_init_shuttle(struct umass_softc *); static void umass_reset(struct umass_softc *); static void umass_t_bbb_data_clear_stall_callback(struct usb_xfer *, uint8_t, uint8_t, usb_error_t); static void umass_command_start(struct umass_softc *, uint8_t, void *, uint32_t, uint32_t, umass_callback_t *, union ccb *); static uint8_t umass_bbb_get_max_lun(struct umass_softc *); static void umass_cbi_start_status(struct umass_softc *); static void umass_t_cbi_data_clear_stall_callback(struct usb_xfer *, uint8_t, uint8_t, usb_error_t); static int umass_cam_attach_sim(struct umass_softc *); static void umass_cam_attach(struct umass_softc *); static void umass_cam_detach_sim(struct umass_softc *); static void umass_cam_action(struct cam_sim *, union ccb *); static void umass_cam_poll(struct cam_sim *); static void umass_cam_cb(struct umass_softc *, union ccb *, uint32_t, uint8_t); static void umass_cam_sense_cb(struct umass_softc *, union ccb *, uint32_t, uint8_t); static void umass_cam_quirk_cb(struct umass_softc *, union ccb *, uint32_t, uint8_t); static uint8_t umass_scsi_transform(struct umass_softc *, uint8_t *, uint8_t); static uint8_t umass_rbc_transform(struct umass_softc *, uint8_t *, uint8_t); static uint8_t umass_ufi_transform(struct umass_softc *, uint8_t *, uint8_t); static uint8_t umass_atapi_transform(struct umass_softc *, uint8_t *, uint8_t); static uint8_t umass_no_transform(struct umass_softc *, uint8_t *, uint8_t); static uint8_t umass_std_transform(struct umass_softc *, union ccb *, uint8_t *, uint8_t); #ifdef USB_DEBUG static void umass_bbb_dump_cbw(struct umass_softc *, umass_bbb_cbw_t *); static void umass_bbb_dump_csw(struct umass_softc *, umass_bbb_csw_t *); static void umass_cbi_dump_cmd(struct umass_softc *, void *, uint8_t); static void umass_dump_buffer(struct umass_softc *, uint8_t *, uint32_t, uint32_t); #endif static struct usb_config umass_bbb_config[UMASS_T_BBB_MAX] = 
{ [UMASS_T_BBB_RESET1] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = sizeof(struct usb_device_request), .callback = &umass_t_bbb_reset1_callback, .timeout = 5000, /* 5 seconds */ .interval = 500, /* 500 milliseconds */ }, [UMASS_T_BBB_RESET2] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = sizeof(struct usb_device_request), .callback = &umass_t_bbb_reset2_callback, .timeout = 5000, /* 5 seconds */ .interval = 50, /* 50 milliseconds */ }, [UMASS_T_BBB_RESET3] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = sizeof(struct usb_device_request), .callback = &umass_t_bbb_reset3_callback, .timeout = 5000, /* 5 seconds */ .interval = 50, /* 50 milliseconds */ }, [UMASS_T_BBB_COMMAND] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = sizeof(umass_bbb_cbw_t), .callback = &umass_t_bbb_command_callback, .timeout = 5000, /* 5 seconds */ }, [UMASS_T_BBB_DATA_READ] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = UMASS_BULK_SIZE, .flags = {.proxy_buffer = 1,.short_xfer_ok = 1,.ext_buffer=1,}, .callback = &umass_t_bbb_data_read_callback, .timeout = 0, /* overwritten later */ }, [UMASS_T_BBB_DATA_RD_CS] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = sizeof(struct usb_device_request), .callback = &umass_t_bbb_data_rd_cs_callback, .timeout = 5000, /* 5 seconds */ }, [UMASS_T_BBB_DATA_WRITE] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = UMASS_BULK_SIZE, .flags = {.proxy_buffer = 1,.short_xfer_ok = 1,.ext_buffer=1,}, .callback = &umass_t_bbb_data_write_callback, .timeout = 0, /* overwritten later */ }, [UMASS_T_BBB_DATA_WR_CS] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = sizeof(struct usb_device_request), .callback = &umass_t_bbb_data_wr_cs_callback, .timeout = 5000, /* 5 seconds */ }, [UMASS_T_BBB_STATUS] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = sizeof(umass_bbb_csw_t), .flags = {.short_xfer_ok = 1,}, .callback = &umass_t_bbb_status_callback, .timeout = 5000, /* ms */ }, }; static struct usb_config umass_cbi_config[UMASS_T_CBI_MAX] = { [UMASS_T_CBI_RESET1] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = (sizeof(struct usb_device_request) + UMASS_CBI_DIAGNOSTIC_CMDLEN), .callback = &umass_t_cbi_reset1_callback, .timeout = 5000, /* 5 seconds */ .interval = 500, /* 500 milliseconds */ }, [UMASS_T_CBI_RESET2] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = sizeof(struct usb_device_request), .callback = &umass_t_cbi_reset2_callback, .timeout = 5000, /* 5 seconds */ .interval = 50, /* 50 milliseconds */ }, [UMASS_T_CBI_RESET3] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = sizeof(struct usb_device_request), .callback = &umass_t_cbi_reset3_callback, .timeout = 5000, /* 5 seconds */ .interval = 50, /* 50 milliseconds */ }, [UMASS_T_CBI_COMMAND] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = (sizeof(struct usb_device_request) + UMASS_MAX_CMDLEN), .callback = &umass_t_cbi_command_callback, .timeout = 5000, /* 5 seconds */ }, [UMASS_T_CBI_DATA_READ] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = 
UMASS_BULK_SIZE, .flags = {.proxy_buffer = 1,.short_xfer_ok = 1,.ext_buffer=1,}, .callback = &umass_t_cbi_data_read_callback, .timeout = 0, /* overwritten later */ }, [UMASS_T_CBI_DATA_RD_CS] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = sizeof(struct usb_device_request), .callback = &umass_t_cbi_data_rd_cs_callback, .timeout = 5000, /* 5 seconds */ }, [UMASS_T_CBI_DATA_WRITE] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = UMASS_BULK_SIZE, .flags = {.proxy_buffer = 1,.short_xfer_ok = 1,.ext_buffer=1,}, .callback = &umass_t_cbi_data_write_callback, .timeout = 0, /* overwritten later */ }, [UMASS_T_CBI_DATA_WR_CS] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = sizeof(struct usb_device_request), .callback = &umass_t_cbi_data_wr_cs_callback, .timeout = 5000, /* 5 seconds */ }, [UMASS_T_CBI_STATUS] = { .type = UE_INTERRUPT, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .flags = {.short_xfer_ok = 1,.no_pipe_ok = 1,}, .bufsize = sizeof(umass_cbi_sbl_t), .callback = &umass_t_cbi_status_callback, .timeout = 5000, /* ms */ }, [UMASS_T_CBI_RESET4] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = sizeof(struct usb_device_request), .callback = &umass_t_cbi_reset4_callback, .timeout = 5000, /* ms */ }, }; /* If device cannot return valid inquiry data, fake it */ static const uint8_t fake_inq_data[SHORT_INQUIRY_LENGTH] = { 0, /* removable */ 0x80, SCSI_REV_2, SCSI_REV_2, /* additional_length */ 31, 0, 0, 0 }; #define UFI_COMMAND_LENGTH 12 /* UFI commands are always 12 bytes */ #define ATAPI_COMMAND_LENGTH 12 /* ATAPI commands are always 12 bytes */ static devclass_t umass_devclass; static device_method_t umass_methods[] = { /* Device interface */ DEVMETHOD(device_probe, umass_probe), DEVMETHOD(device_attach, umass_attach), DEVMETHOD(device_detach, umass_detach), DEVMETHOD_END }; static driver_t umass_driver = { .name = "umass", .methods = umass_methods, .size = sizeof(struct umass_softc), }; static const STRUCT_USB_HOST_ID __used umass_devs[] = { /* generic mass storage class */ {USB_IFACE_CLASS(UICLASS_MASS),}, }; DRIVER_MODULE(umass, uhub, umass_driver, umass_devclass, NULL, 0); MODULE_DEPEND(umass, usb, 1, 1, 1); MODULE_DEPEND(umass, cam, 1, 1, 1); MODULE_VERSION(umass, 1); USB_PNP_HOST_INFO(umass_devs); /* * USB device probe/attach/detach */ static uint16_t umass_get_proto(struct usb_interface *iface) { struct usb_interface_descriptor *id; uint16_t retval; retval = 0; /* Check for a standards compliant device */ id = usbd_get_interface_descriptor(iface); if ((id == NULL) || (id->bInterfaceClass != UICLASS_MASS)) { goto done; } switch (id->bInterfaceSubClass) { case UISUBCLASS_SCSI: retval |= UMASS_PROTO_SCSI; break; case UISUBCLASS_UFI: retval |= UMASS_PROTO_UFI; break; case UISUBCLASS_RBC: retval |= UMASS_PROTO_RBC; break; case UISUBCLASS_SFF8020I: case UISUBCLASS_SFF8070I: retval |= UMASS_PROTO_ATAPI; break; default: goto done; } switch (id->bInterfaceProtocol) { case UIPROTO_MASS_CBI: retval |= UMASS_PROTO_CBI; break; case UIPROTO_MASS_CBI_I: retval |= UMASS_PROTO_CBI_I; break; case UIPROTO_MASS_BBB_OLD: case UIPROTO_MASS_BBB: retval |= UMASS_PROTO_BBB; break; default: goto done; } done: return (retval); } /* * Match the device we are seeing with the devices supported. 
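 *
 * The resulting protocol is the one read from the interface descriptor,
 * possibly overridden by the UQ_MSC_FORCE_WIRE_* and UQ_MSC_FORCE_PROTO_*
 * quirks; probing is rejected with ENXIO when UQ_MSC_IGNORE is set or when
 * no usable wire or command protocol remains.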
*/ static struct umass_probe_proto umass_probe_proto(device_t dev, struct usb_attach_arg *uaa) { struct umass_probe_proto ret; uint32_t quirks = NO_QUIRKS; uint32_t proto = umass_get_proto(uaa->iface); memset(&ret, 0, sizeof(ret)); ret.error = BUS_PROBE_GENERIC; /* Check if we should deny probing. */ if (usb_test_quirk(uaa, UQ_MSC_IGNORE)) { ret.error = ENXIO; goto done; } /* Search for protocol enforcement */ if (usb_test_quirk(uaa, UQ_MSC_FORCE_WIRE_BBB)) { proto &= ~UMASS_PROTO_WIRE; proto |= UMASS_PROTO_BBB; } else if (usb_test_quirk(uaa, UQ_MSC_FORCE_WIRE_CBI)) { proto &= ~UMASS_PROTO_WIRE; proto |= UMASS_PROTO_CBI; } else if (usb_test_quirk(uaa, UQ_MSC_FORCE_WIRE_CBI_I)) { proto &= ~UMASS_PROTO_WIRE; proto |= UMASS_PROTO_CBI_I; } if (usb_test_quirk(uaa, UQ_MSC_FORCE_PROTO_SCSI)) { proto &= ~UMASS_PROTO_COMMAND; proto |= UMASS_PROTO_SCSI; } else if (usb_test_quirk(uaa, UQ_MSC_FORCE_PROTO_ATAPI)) { proto &= ~UMASS_PROTO_COMMAND; proto |= UMASS_PROTO_ATAPI; } else if (usb_test_quirk(uaa, UQ_MSC_FORCE_PROTO_UFI)) { proto &= ~UMASS_PROTO_COMMAND; proto |= UMASS_PROTO_UFI; } else if (usb_test_quirk(uaa, UQ_MSC_FORCE_PROTO_RBC)) { proto &= ~UMASS_PROTO_COMMAND; proto |= UMASS_PROTO_RBC; } /* Check if the protocol is invalid */ if ((proto & UMASS_PROTO_COMMAND) == 0) { ret.error = ENXIO; goto done; } if ((proto & UMASS_PROTO_WIRE) == 0) { ret.error = ENXIO; goto done; } /* Search for quirks */ if (usb_test_quirk(uaa, UQ_MSC_NO_TEST_UNIT_READY)) quirks |= NO_TEST_UNIT_READY; if (usb_test_quirk(uaa, UQ_MSC_NO_RS_CLEAR_UA)) quirks |= RS_NO_CLEAR_UA; if (usb_test_quirk(uaa, UQ_MSC_NO_START_STOP)) quirks |= NO_START_STOP; if (usb_test_quirk(uaa, UQ_MSC_NO_GETMAXLUN)) quirks |= NO_GETMAXLUN; if (usb_test_quirk(uaa, UQ_MSC_NO_INQUIRY)) quirks |= NO_INQUIRY; if (usb_test_quirk(uaa, UQ_MSC_NO_INQUIRY_EVPD)) quirks |= NO_INQUIRY_EVPD; if (usb_test_quirk(uaa, UQ_MSC_NO_PREVENT_ALLOW)) quirks |= NO_PREVENT_ALLOW; if (usb_test_quirk(uaa, UQ_MSC_NO_SYNC_CACHE)) quirks |= NO_SYNCHRONIZE_CACHE; if (usb_test_quirk(uaa, UQ_MSC_SHUTTLE_INIT)) quirks |= SHUTTLE_INIT; if (usb_test_quirk(uaa, UQ_MSC_ALT_IFACE_1)) quirks |= ALT_IFACE_1; if (usb_test_quirk(uaa, UQ_MSC_FLOPPY_SPEED)) quirks |= FLOPPY_SPEED; if (usb_test_quirk(uaa, UQ_MSC_IGNORE_RESIDUE)) quirks |= IGNORE_RESIDUE; if (usb_test_quirk(uaa, UQ_MSC_WRONG_CSWSIG)) quirks |= WRONG_CSWSIG; if (usb_test_quirk(uaa, UQ_MSC_RBC_PAD_TO_12)) quirks |= RBC_PAD_TO_12; if (usb_test_quirk(uaa, UQ_MSC_READ_CAP_OFFBY1)) quirks |= READ_CAPACITY_OFFBY1; if (usb_test_quirk(uaa, UQ_MSC_FORCE_SHORT_INQ)) quirks |= FORCE_SHORT_INQUIRY; done: ret.quirks = quirks; ret.proto = proto; return (ret); } static int umass_probe(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); struct umass_probe_proto temp; if (uaa->usb_mode != USB_MODE_HOST) { return (ENXIO); } temp = umass_probe_proto(dev, uaa); return (temp.error); } static int umass_attach(device_t dev) { struct umass_softc *sc = device_get_softc(dev); struct usb_attach_arg *uaa = device_get_ivars(dev); struct umass_probe_proto temp = umass_probe_proto(dev, uaa); struct usb_interface_descriptor *id; int err; /* * NOTE: the softc struct is cleared in device_set_driver. * We can safely call umass_detach without specifically * initializing the struct. 
*/ sc->sc_dev = dev; sc->sc_udev = uaa->device; sc->sc_proto = temp.proto; sc->sc_quirks = temp.quirks; sc->sc_unit = device_get_unit(dev); snprintf(sc->sc_name, sizeof(sc->sc_name), "%s", device_get_nameunit(dev)); device_set_usb_desc(dev); mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF | MTX_RECURSE); /* get interface index */ id = usbd_get_interface_descriptor(uaa->iface); if (id == NULL) { device_printf(dev, "failed to get " "interface number\n"); goto detach; } sc->sc_iface_no = id->bInterfaceNumber; #ifdef USB_DEBUG device_printf(dev, " "); switch (sc->sc_proto & UMASS_PROTO_COMMAND) { case UMASS_PROTO_SCSI: printf("SCSI"); break; case UMASS_PROTO_ATAPI: printf("8070i (ATAPI)"); break; case UMASS_PROTO_UFI: printf("UFI"); break; case UMASS_PROTO_RBC: printf("RBC"); break; default: printf("(unknown 0x%02x)", sc->sc_proto & UMASS_PROTO_COMMAND); break; } printf(" over "); switch (sc->sc_proto & UMASS_PROTO_WIRE) { case UMASS_PROTO_CBI: /* uses Command/Bulk pipes */ printf("CBI"); break; case UMASS_PROTO_BBB: printf("Bulk-Only"); break; case UMASS_PROTO_CBI_I: /* uses Command/Bulk/Interrupt pipes */ printf("CBI with CCI"); break; default: printf("(unknown 0x%02x)", sc->sc_proto & UMASS_PROTO_WIRE); } printf("; quirks = 0x%04x\n", sc->sc_quirks); #endif if (sc->sc_quirks & ALT_IFACE_1) { err = usbd_set_alt_interface_index (uaa->device, uaa->info.bIfaceIndex, 1); if (err) { DPRINTF(sc, UDMASS_USB, "could not switch to " "Alt Interface 1\n"); goto detach; } } /* allocate all required USB transfers */ if (sc->sc_proto & UMASS_PROTO_BBB) { err = usbd_transfer_setup(uaa->device, &uaa->info.bIfaceIndex, sc->sc_xfer, umass_bbb_config, UMASS_T_BBB_MAX, sc, &sc->sc_mtx); /* skip reset first time */ sc->sc_last_xfer_index = UMASS_T_BBB_COMMAND; } else if (sc->sc_proto & (UMASS_PROTO_CBI | UMASS_PROTO_CBI_I)) { err = usbd_transfer_setup(uaa->device, &uaa->info.bIfaceIndex, sc->sc_xfer, umass_cbi_config, UMASS_T_CBI_MAX, sc, &sc->sc_mtx); /* skip reset first time */ sc->sc_last_xfer_index = UMASS_T_CBI_COMMAND; } else { err = USB_ERR_INVAL; } if (err) { device_printf(dev, "could not setup required " "transfers, %s\n", usbd_errstr(err)); goto detach; } #ifdef USB_DEBUG if (umass_throttle > 0) { uint8_t x; int iv; iv = umass_throttle; if (iv < 1) iv = 1; else if (iv > 8000) iv = 8000; for (x = 0; x != UMASS_T_MAX; x++) { if (sc->sc_xfer[x] != NULL) usbd_xfer_set_interval(sc->sc_xfer[x], iv); } } #endif sc->sc_transform = (sc->sc_proto & UMASS_PROTO_SCSI) ? &umass_scsi_transform : (sc->sc_proto & UMASS_PROTO_UFI) ? &umass_ufi_transform : (sc->sc_proto & UMASS_PROTO_ATAPI) ? &umass_atapi_transform : (sc->sc_proto & UMASS_PROTO_RBC) ? &umass_rbc_transform : &umass_no_transform; /* from here onwards the device can be used.
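 *
 * The Shuttle init and Get Max LUN requests below are the first real
 * transfers; Get Max LUN is only issued for Bulk-Only devices without the
 * NO_GETMAXLUN quirk, all other devices are assumed to have a single LUN
 * (sc_maxlun = 0).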
*/ if (sc->sc_quirks & SHUTTLE_INIT) { umass_init_shuttle(sc); } /* get the maximum LUN supported by the device */ if (((sc->sc_proto & UMASS_PROTO_WIRE) == UMASS_PROTO_BBB) && !(sc->sc_quirks & NO_GETMAXLUN)) sc->sc_maxlun = umass_bbb_get_max_lun(sc); else sc->sc_maxlun = 0; /* Prepare the SCSI command block */ sc->cam_scsi_sense.opcode = REQUEST_SENSE; sc->cam_scsi_test_unit_ready.opcode = TEST_UNIT_READY; /* register the SIM */ err = umass_cam_attach_sim(sc); if (err) { goto detach; } /* scan the SIM */ umass_cam_attach(sc); DPRINTF(sc, UDMASS_GEN, "Attach finished\n"); return (0); /* success */ detach: umass_detach(dev); return (ENXIO); /* failure */ } static int umass_detach(device_t dev) { struct umass_softc *sc = device_get_softc(dev); DPRINTF(sc, UDMASS_USB, "\n"); /* teardown our statemachine */ usbd_transfer_unsetup(sc->sc_xfer, UMASS_T_MAX); mtx_lock(&sc->sc_mtx); /* cancel any leftover CCB's */ umass_cancel_ccb(sc); umass_cam_detach_sim(sc); mtx_unlock(&sc->sc_mtx); mtx_destroy(&sc->sc_mtx); return (0); /* success */ } static void umass_init_shuttle(struct umass_softc *sc) { struct usb_device_request req; uint8_t status[2] = {0, 0}; /* * The Linux driver does this, but no one can tell us what the * command does. */ req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = 1; /* XXX unknown command */ USETW(req.wValue, 0); req.wIndex[0] = sc->sc_iface_no; req.wIndex[1] = 0; USETW(req.wLength, sizeof(status)); usbd_do_request(sc->sc_udev, NULL, &req, &status); DPRINTF(sc, UDMASS_GEN, "Shuttle init returned 0x%02x%02x\n", status[0], status[1]); } /* * Generic functions to handle transfers */ static void umass_transfer_start(struct umass_softc *sc, uint8_t xfer_index) { DPRINTF(sc, UDMASS_GEN, "transfer index = " "%d\n", xfer_index); if (sc->sc_xfer[xfer_index]) { sc->sc_last_xfer_index = xfer_index; usbd_transfer_start(sc->sc_xfer[xfer_index]); } else { umass_cancel_ccb(sc); } } static void umass_reset(struct umass_softc *sc) { DPRINTF(sc, UDMASS_GEN, "resetting device\n"); /* * stop the last transfer, if not already stopped: */ usbd_transfer_stop(sc->sc_xfer[sc->sc_last_xfer_index]); umass_transfer_start(sc, 0); } static void umass_cancel_ccb(struct umass_softc *sc) { union ccb *ccb; USB_MTX_ASSERT(&sc->sc_mtx, MA_OWNED); ccb = sc->sc_transfer.ccb; sc->sc_transfer.ccb = NULL; sc->sc_last_xfer_index = 0; if (ccb) { (sc->sc_transfer.callback) (sc, ccb, (sc->sc_transfer.data_len - sc->sc_transfer.actlen), STATUS_WIRE_FAILED); } } static void umass_tr_error(struct usb_xfer *xfer, usb_error_t error) { struct umass_softc *sc = usbd_xfer_softc(xfer); if (error != USB_ERR_CANCELLED) { DPRINTF(sc, UDMASS_GEN, "transfer error, %s -> " "reset\n", usbd_errstr(error)); } umass_cancel_ccb(sc); } /* * BBB protocol specific functions */ static void umass_t_bbb_reset1_callback(struct usb_xfer *xfer, usb_error_t error) { struct umass_softc *sc = usbd_xfer_softc(xfer); struct usb_device_request req; struct usb_page_cache *pc; switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: umass_transfer_start(sc, UMASS_T_BBB_RESET2); return; case USB_ST_SETUP: /* * Reset recovery (5.3.4 in Universal Serial Bus Mass Storage Class) * * For Reset Recovery the host shall issue in the following order: * a) a Bulk-Only Mass Storage Reset * b) a Clear Feature HALT to the Bulk-In endpoint * c) a Clear Feature HALT to the Bulk-Out endpoint * * This is done in 3 steps, using 3 transfers: * UMASS_T_BBB_RESET1 * UMASS_T_BBB_RESET2 * UMASS_T_BBB_RESET3 */ DPRINTF(sc, UDMASS_BBB, "BBB reset!\n"); req.bmRequestType = 
UT_WRITE_CLASS_INTERFACE; req.bRequest = UR_BBB_RESET; /* bulk only reset */ USETW(req.wValue, 0); req.wIndex[0] = sc->sc_iface_no; req.wIndex[1] = 0; USETW(req.wLength, 0); pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_in(pc, 0, &req, sizeof(req)); usbd_xfer_set_frame_len(xfer, 0, sizeof(req)); usbd_xfer_set_frames(xfer, 1); usbd_transfer_submit(xfer); return; default: /* Error */ umass_tr_error(xfer, error); return; } } static void umass_t_bbb_reset2_callback(struct usb_xfer *xfer, usb_error_t error) { umass_t_bbb_data_clear_stall_callback(xfer, UMASS_T_BBB_RESET3, UMASS_T_BBB_DATA_READ, error); } static void umass_t_bbb_reset3_callback(struct usb_xfer *xfer, usb_error_t error) { umass_t_bbb_data_clear_stall_callback(xfer, UMASS_T_BBB_COMMAND, UMASS_T_BBB_DATA_WRITE, error); } static void umass_t_bbb_data_clear_stall_callback(struct usb_xfer *xfer, uint8_t next_xfer, uint8_t stall_xfer, usb_error_t error) { struct umass_softc *sc = usbd_xfer_softc(xfer); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: tr_transferred: umass_transfer_start(sc, next_xfer); return; case USB_ST_SETUP: if (usbd_clear_stall_callback(xfer, sc->sc_xfer[stall_xfer])) { goto tr_transferred; } return; default: /* Error */ umass_tr_error(xfer, error); return; } } static void umass_t_bbb_command_callback(struct usb_xfer *xfer, usb_error_t error) { struct umass_softc *sc = usbd_xfer_softc(xfer); union ccb *ccb = sc->sc_transfer.ccb; struct usb_page_cache *pc; uint32_t tag; switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: umass_transfer_start (sc, ((sc->sc_transfer.dir == DIR_IN) ? UMASS_T_BBB_DATA_READ : (sc->sc_transfer.dir == DIR_OUT) ? UMASS_T_BBB_DATA_WRITE : UMASS_T_BBB_STATUS)); return; case USB_ST_SETUP: sc->sc_status_try = 0; if (ccb) { /* * the initial value is not important, * as long as the values are unique: */ tag = UGETDW(sc->cbw.dCBWTag) + 1; USETDW(sc->cbw.dCBWSignature, CBWSIGNATURE); USETDW(sc->cbw.dCBWTag, tag); /* * dCBWDataTransferLength: * This field indicates the number of bytes of data that the host * intends to transfer on the IN or OUT Bulk endpoint(as indicated by * the Direction bit) during the execution of this command. If this * field is set to 0, the device will expect that no data will be * transferred IN or OUT during this command, regardless of the value * of the Direction bit defined in dCBWFlags. */ USETDW(sc->cbw.dCBWDataTransferLength, sc->sc_transfer.data_len); /* * dCBWFlags: * The bits of the Flags field are defined as follows: * Bits 0-6 reserved * Bit 7 Direction - this bit shall be ignored if the * dCBWDataTransferLength field is zero. * 0 = data Out from host to device * 1 = data In from device to host */ sc->cbw.bCBWFlags = ((sc->sc_transfer.dir == DIR_IN) ? 
CBWFLAGS_IN : CBWFLAGS_OUT); sc->cbw.bCBWLUN = sc->sc_transfer.lun; if (sc->sc_transfer.cmd_len > sizeof(sc->cbw.CBWCDB)) { sc->sc_transfer.cmd_len = sizeof(sc->cbw.CBWCDB); DPRINTF(sc, UDMASS_BBB, "Truncating long command!\n"); } sc->cbw.bCDBLength = sc->sc_transfer.cmd_len; /* copy SCSI command data */ memcpy(sc->cbw.CBWCDB, sc->sc_transfer.cmd_data, sc->sc_transfer.cmd_len); /* clear remaining command area */ memset(sc->cbw.CBWCDB + sc->sc_transfer.cmd_len, 0, sizeof(sc->cbw.CBWCDB) - sc->sc_transfer.cmd_len); DIF(UDMASS_BBB, umass_bbb_dump_cbw(sc, &sc->cbw)); pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_in(pc, 0, &sc->cbw, sizeof(sc->cbw)); usbd_xfer_set_frame_len(xfer, 0, sizeof(sc->cbw)); usbd_transfer_submit(xfer); } return; default: /* Error */ umass_tr_error(xfer, error); return; } } static void umass_t_bbb_data_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct umass_softc *sc = usbd_xfer_softc(xfer); uint32_t max_bulk = usbd_xfer_max_len(xfer); int actlen, sumlen; usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: sc->sc_transfer.data_rem -= actlen; sc->sc_transfer.data_ptr += actlen; sc->sc_transfer.actlen += actlen; if (actlen < sumlen) { /* short transfer */ sc->sc_transfer.data_rem = 0; } case USB_ST_SETUP: DPRINTF(sc, UDMASS_BBB, "max_bulk=%d, data_rem=%d\n", max_bulk, sc->sc_transfer.data_rem); if (sc->sc_transfer.data_rem == 0) { umass_transfer_start(sc, UMASS_T_BBB_STATUS); return; } if (max_bulk > sc->sc_transfer.data_rem) { max_bulk = sc->sc_transfer.data_rem; } usbd_xfer_set_timeout(xfer, sc->sc_transfer.data_timeout); usbd_xfer_set_frame_data(xfer, 0, sc->sc_transfer.data_ptr, max_bulk); usbd_transfer_submit(xfer); return; default: /* Error */ if (error == USB_ERR_CANCELLED) { umass_tr_error(xfer, error); } else { umass_transfer_start(sc, UMASS_T_BBB_DATA_RD_CS); } return; } } static void umass_t_bbb_data_rd_cs_callback(struct usb_xfer *xfer, usb_error_t error) { umass_t_bbb_data_clear_stall_callback(xfer, UMASS_T_BBB_STATUS, UMASS_T_BBB_DATA_READ, error); } static void umass_t_bbb_data_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct umass_softc *sc = usbd_xfer_softc(xfer); uint32_t max_bulk = usbd_xfer_max_len(xfer); int actlen, sumlen; usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: sc->sc_transfer.data_rem -= actlen; sc->sc_transfer.data_ptr += actlen; sc->sc_transfer.actlen += actlen; if (actlen < sumlen) { /* short transfer */ sc->sc_transfer.data_rem = 0; } case USB_ST_SETUP: DPRINTF(sc, UDMASS_BBB, "max_bulk=%d, data_rem=%d\n", max_bulk, sc->sc_transfer.data_rem); if (sc->sc_transfer.data_rem == 0) { umass_transfer_start(sc, UMASS_T_BBB_STATUS); return; } if (max_bulk > sc->sc_transfer.data_rem) { max_bulk = sc->sc_transfer.data_rem; } usbd_xfer_set_timeout(xfer, sc->sc_transfer.data_timeout); usbd_xfer_set_frame_data(xfer, 0, sc->sc_transfer.data_ptr, max_bulk); usbd_transfer_submit(xfer); return; default: /* Error */ if (error == USB_ERR_CANCELLED) { umass_tr_error(xfer, error); } else { umass_transfer_start(sc, UMASS_T_BBB_DATA_WR_CS); } return; } } static void umass_t_bbb_data_wr_cs_callback(struct usb_xfer *xfer, usb_error_t error) { umass_t_bbb_data_clear_stall_callback(xfer, UMASS_T_BBB_STATUS, UMASS_T_BBB_DATA_WRITE, error); } static void umass_t_bbb_status_callback(struct usb_xfer *xfer, usb_error_t error) { struct umass_softc *sc = usbd_xfer_softc(xfer); union ccb *ccb = sc->sc_transfer.ccb; struct 
usb_page_cache *pc; uint32_t residue; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: /* * Do a full reset if there is something wrong with the CSW: */ sc->sc_status_try = 1; /* Zero missing parts of the CSW: */ if (actlen < (int)sizeof(sc->csw)) memset(&sc->csw, 0, sizeof(sc->csw)); pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, 0, &sc->csw, actlen); DIF(UDMASS_BBB, umass_bbb_dump_csw(sc, &sc->csw)); residue = UGETDW(sc->csw.dCSWDataResidue); if ((!residue) || (sc->sc_quirks & IGNORE_RESIDUE)) { residue = (sc->sc_transfer.data_len - sc->sc_transfer.actlen); } if (residue > sc->sc_transfer.data_len) { DPRINTF(sc, UDMASS_BBB, "truncating residue from %d " "to %d bytes\n", residue, sc->sc_transfer.data_len); residue = sc->sc_transfer.data_len; } /* translate weird command-status signatures: */ if (sc->sc_quirks & WRONG_CSWSIG) { uint32_t temp = UGETDW(sc->csw.dCSWSignature); if ((temp == CSWSIGNATURE_OLYMPUS_C1) || (temp == CSWSIGNATURE_IMAGINATION_DBX1)) { USETDW(sc->csw.dCSWSignature, CSWSIGNATURE); } } /* check CSW and handle eventual error */ if (UGETDW(sc->csw.dCSWSignature) != CSWSIGNATURE) { DPRINTF(sc, UDMASS_BBB, "bad CSW signature 0x%08x != 0x%08x\n", UGETDW(sc->csw.dCSWSignature), CSWSIGNATURE); /* * Invalid CSW: Wrong signature or wrong tag might * indicate that we lost synchronization. Reset the * device. */ goto tr_error; } else if (UGETDW(sc->csw.dCSWTag) != UGETDW(sc->cbw.dCBWTag)) { DPRINTF(sc, UDMASS_BBB, "Invalid CSW: tag 0x%08x should be " "0x%08x\n", UGETDW(sc->csw.dCSWTag), UGETDW(sc->cbw.dCBWTag)); goto tr_error; } else if (sc->csw.bCSWStatus > CSWSTATUS_PHASE) { DPRINTF(sc, UDMASS_BBB, "Invalid CSW: status %d > %d\n", sc->csw.bCSWStatus, CSWSTATUS_PHASE); goto tr_error; } else if (sc->csw.bCSWStatus == CSWSTATUS_PHASE) { DPRINTF(sc, UDMASS_BBB, "Phase error, residue = " "%d\n", residue); goto tr_error; } else if (sc->sc_transfer.actlen > sc->sc_transfer.data_len) { DPRINTF(sc, UDMASS_BBB, "Buffer overrun %d > %d\n", sc->sc_transfer.actlen, sc->sc_transfer.data_len); goto tr_error; } else if (sc->csw.bCSWStatus == CSWSTATUS_FAILED) { DPRINTF(sc, UDMASS_BBB, "Command failed, residue = " "%d\n", residue); sc->sc_transfer.ccb = NULL; sc->sc_last_xfer_index = UMASS_T_BBB_COMMAND; (sc->sc_transfer.callback) (sc, ccb, residue, STATUS_CMD_FAILED); } else { sc->sc_transfer.ccb = NULL; sc->sc_last_xfer_index = UMASS_T_BBB_COMMAND; (sc->sc_transfer.callback) (sc, ccb, residue, STATUS_CMD_OK); } return; case USB_ST_SETUP: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); return; default: tr_error: DPRINTF(sc, UDMASS_BBB, "Failed to read CSW: %s, try %d\n", usbd_errstr(error), sc->sc_status_try); if ((error == USB_ERR_CANCELLED) || (sc->sc_status_try)) { umass_tr_error(xfer, error); } else { sc->sc_status_try = 1; umass_transfer_start(sc, UMASS_T_BBB_DATA_RD_CS); } return; } } static void umass_command_start(struct umass_softc *sc, uint8_t dir, void *data_ptr, uint32_t data_len, uint32_t data_timeout, umass_callback_t *callback, union ccb *ccb) { sc->sc_transfer.lun = ccb->ccb_h.target_lun; /* * NOTE: assumes that "sc->sc_transfer.cmd_data" and * "sc->sc_transfer.cmd_len" has been properly * initialized. */ sc->sc_transfer.dir = data_len ? 
dir : DIR_NONE; sc->sc_transfer.data_ptr = data_ptr; sc->sc_transfer.data_len = data_len; sc->sc_transfer.data_rem = data_len; sc->sc_transfer.data_timeout = (data_timeout + UMASS_TIMEOUT); sc->sc_transfer.actlen = 0; sc->sc_transfer.callback = callback; sc->sc_transfer.ccb = ccb; if (sc->sc_xfer[sc->sc_last_xfer_index]) { usbd_transfer_start(sc->sc_xfer[sc->sc_last_xfer_index]); } else { umass_cancel_ccb(sc); } } static uint8_t umass_bbb_get_max_lun(struct umass_softc *sc) { struct usb_device_request req; usb_error_t err; uint8_t buf = 0; /* The Get Max Lun command is a class-specific request. */ req.bmRequestType = UT_READ_CLASS_INTERFACE; req.bRequest = UR_BBB_GET_MAX_LUN; USETW(req.wValue, 0); req.wIndex[0] = sc->sc_iface_no; req.wIndex[1] = 0; USETW(req.wLength, 1); err = usbd_do_request(sc->sc_udev, NULL, &req, &buf); if (err) { buf = 0; /* Device doesn't support Get Max Lun request. */ printf("%s: Get Max Lun not supported (%s)\n", sc->sc_name, usbd_errstr(err)); } return (buf); } /* * Command/Bulk/Interrupt (CBI) specific functions */ static void umass_cbi_start_status(struct umass_softc *sc) { if (sc->sc_xfer[UMASS_T_CBI_STATUS]) { umass_transfer_start(sc, UMASS_T_CBI_STATUS); } else { union ccb *ccb = sc->sc_transfer.ccb; sc->sc_transfer.ccb = NULL; sc->sc_last_xfer_index = UMASS_T_CBI_COMMAND; (sc->sc_transfer.callback) (sc, ccb, (sc->sc_transfer.data_len - sc->sc_transfer.actlen), STATUS_CMD_UNKNOWN); } } static void umass_t_cbi_reset1_callback(struct usb_xfer *xfer, usb_error_t error) { struct umass_softc *sc = usbd_xfer_softc(xfer); struct usb_device_request req; struct usb_page_cache *pc; uint8_t buf[UMASS_CBI_DIAGNOSTIC_CMDLEN]; uint8_t i; switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: umass_transfer_start(sc, UMASS_T_CBI_RESET2); break; case USB_ST_SETUP: /* * Command Block Reset Protocol * * First send a reset request to the device. Then clear * any possibly stalled bulk endpoints. * * This is done in 3 steps, using 3 transfers: * UMASS_T_CBI_RESET1 * UMASS_T_CBI_RESET2 * UMASS_T_CBI_RESET3 * UMASS_T_CBI_RESET4 (only if there is an interrupt endpoint) */ DPRINTF(sc, UDMASS_CBI, "CBI reset!\n"); req.bmRequestType = UT_WRITE_CLASS_INTERFACE; req.bRequest = UR_CBI_ADSC; USETW(req.wValue, 0); req.wIndex[0] = sc->sc_iface_no; req.wIndex[1] = 0; USETW(req.wLength, UMASS_CBI_DIAGNOSTIC_CMDLEN); /* * The 0x1d code is the SEND DIAGNOSTIC command. To * distinguish between the two, the last 10 bytes of the CBL * is filled with 0xff (section 2.2 of the CBI * specification) */ buf[0] = 0x1d; /* Command Block Reset */ buf[1] = 0x04; for (i = 2; i < UMASS_CBI_DIAGNOSTIC_CMDLEN; i++) { buf[i] = 0xff; } pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_in(pc, 0, &req, sizeof(req)); pc = usbd_xfer_get_frame(xfer, 1); usbd_copy_in(pc, 0, buf, sizeof(buf)); usbd_xfer_set_frame_len(xfer, 0, sizeof(req)); usbd_xfer_set_frame_len(xfer, 1, sizeof(buf)); usbd_xfer_set_frames(xfer, 2); usbd_transfer_submit(xfer); break; default: /* Error */ if (error == USB_ERR_CANCELLED) umass_tr_error(xfer, error); else umass_transfer_start(sc, UMASS_T_CBI_RESET2); break; } } static void umass_t_cbi_reset2_callback(struct usb_xfer *xfer, usb_error_t error) { umass_t_cbi_data_clear_stall_callback(xfer, UMASS_T_CBI_RESET3, UMASS_T_CBI_DATA_READ, error); } static void umass_t_cbi_reset3_callback(struct usb_xfer *xfer, usb_error_t error) { struct umass_softc *sc = usbd_xfer_softc(xfer); umass_t_cbi_data_clear_stall_callback (xfer, (sc->sc_xfer[UMASS_T_CBI_RESET4] && sc->sc_xfer[UMASS_T_CBI_STATUS]) ? 
UMASS_T_CBI_RESET4 : UMASS_T_CBI_COMMAND, UMASS_T_CBI_DATA_WRITE, error); } static void umass_t_cbi_reset4_callback(struct usb_xfer *xfer, usb_error_t error) { umass_t_cbi_data_clear_stall_callback(xfer, UMASS_T_CBI_COMMAND, UMASS_T_CBI_STATUS, error); } static void umass_t_cbi_data_clear_stall_callback(struct usb_xfer *xfer, uint8_t next_xfer, uint8_t stall_xfer, usb_error_t error) { struct umass_softc *sc = usbd_xfer_softc(xfer); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: tr_transferred: if (next_xfer == UMASS_T_CBI_STATUS) { umass_cbi_start_status(sc); } else { umass_transfer_start(sc, next_xfer); } break; case USB_ST_SETUP: if (usbd_clear_stall_callback(xfer, sc->sc_xfer[stall_xfer])) { goto tr_transferred; /* should not happen */ } break; default: /* Error */ umass_tr_error(xfer, error); break; } } static void umass_t_cbi_command_callback(struct usb_xfer *xfer, usb_error_t error) { struct umass_softc *sc = usbd_xfer_softc(xfer); union ccb *ccb = sc->sc_transfer.ccb; struct usb_device_request req; struct usb_page_cache *pc; switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: if (sc->sc_transfer.dir == DIR_NONE) { umass_cbi_start_status(sc); } else { umass_transfer_start (sc, (sc->sc_transfer.dir == DIR_IN) ? UMASS_T_CBI_DATA_READ : UMASS_T_CBI_DATA_WRITE); } break; case USB_ST_SETUP: if (ccb) { /* * do a CBI transfer with cmd_len bytes from * cmd_data, possibly a data phase of data_len * bytes from/to the device and finally a status * read phase. */ req.bmRequestType = UT_WRITE_CLASS_INTERFACE; req.bRequest = UR_CBI_ADSC; USETW(req.wValue, 0); req.wIndex[0] = sc->sc_iface_no; req.wIndex[1] = 0; req.wLength[0] = sc->sc_transfer.cmd_len; req.wLength[1] = 0; pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_in(pc, 0, &req, sizeof(req)); pc = usbd_xfer_get_frame(xfer, 1); usbd_copy_in(pc, 0, sc->sc_transfer.cmd_data, sc->sc_transfer.cmd_len); usbd_xfer_set_frame_len(xfer, 0, sizeof(req)); usbd_xfer_set_frame_len(xfer, 1, sc->sc_transfer.cmd_len); usbd_xfer_set_frames(xfer, sc->sc_transfer.cmd_len ? 2 : 1); DIF(UDMASS_CBI, umass_cbi_dump_cmd(sc, sc->sc_transfer.cmd_data, sc->sc_transfer.cmd_len)); usbd_transfer_submit(xfer); } break; default: /* Error */ /* * STALL on the control pipe can be result of the command error. * Attempt to clear this STALL same as for bulk pipe also * results in command completion interrupt, but ASC/ASCQ there * look like not always valid, so don't bother about it. 
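 * Instead, when possible, complete the CCB with STATUS_CMD_UNKNOWN so that
 * umass_cam_cb() retrieves the real status with a REQUEST SENSE command.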
*/ if ((error == USB_ERR_STALLED) || (sc->sc_transfer.callback == &umass_cam_cb)) { sc->sc_transfer.ccb = NULL; (sc->sc_transfer.callback) (sc, ccb, sc->sc_transfer.data_len, STATUS_CMD_UNKNOWN); } else { umass_tr_error(xfer, error); /* skip reset */ sc->sc_last_xfer_index = UMASS_T_CBI_COMMAND; } break; } } static void umass_t_cbi_data_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct umass_softc *sc = usbd_xfer_softc(xfer); uint32_t max_bulk = usbd_xfer_max_len(xfer); int actlen, sumlen; usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: sc->sc_transfer.data_rem -= actlen; sc->sc_transfer.data_ptr += actlen; sc->sc_transfer.actlen += actlen; if (actlen < sumlen) { /* short transfer */ sc->sc_transfer.data_rem = 0; } case USB_ST_SETUP: DPRINTF(sc, UDMASS_CBI, "max_bulk=%d, data_rem=%d\n", max_bulk, sc->sc_transfer.data_rem); if (sc->sc_transfer.data_rem == 0) { umass_cbi_start_status(sc); break; } if (max_bulk > sc->sc_transfer.data_rem) { max_bulk = sc->sc_transfer.data_rem; } usbd_xfer_set_timeout(xfer, sc->sc_transfer.data_timeout); usbd_xfer_set_frame_data(xfer, 0, sc->sc_transfer.data_ptr, max_bulk); usbd_transfer_submit(xfer); break; default: /* Error */ if ((error == USB_ERR_CANCELLED) || (sc->sc_transfer.callback != &umass_cam_cb)) { umass_tr_error(xfer, error); } else { umass_transfer_start(sc, UMASS_T_CBI_DATA_RD_CS); } break; } } static void umass_t_cbi_data_rd_cs_callback(struct usb_xfer *xfer, usb_error_t error) { umass_t_cbi_data_clear_stall_callback(xfer, UMASS_T_CBI_STATUS, UMASS_T_CBI_DATA_READ, error); } static void umass_t_cbi_data_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct umass_softc *sc = usbd_xfer_softc(xfer); uint32_t max_bulk = usbd_xfer_max_len(xfer); int actlen, sumlen; usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: sc->sc_transfer.data_rem -= actlen; sc->sc_transfer.data_ptr += actlen; sc->sc_transfer.actlen += actlen; if (actlen < sumlen) { /* short transfer */ sc->sc_transfer.data_rem = 0; } case USB_ST_SETUP: DPRINTF(sc, UDMASS_CBI, "max_bulk=%d, data_rem=%d\n", max_bulk, sc->sc_transfer.data_rem); if (sc->sc_transfer.data_rem == 0) { umass_cbi_start_status(sc); break; } if (max_bulk > sc->sc_transfer.data_rem) { max_bulk = sc->sc_transfer.data_rem; } usbd_xfer_set_timeout(xfer, sc->sc_transfer.data_timeout); usbd_xfer_set_frame_data(xfer, 0, sc->sc_transfer.data_ptr, max_bulk); usbd_transfer_submit(xfer); break; default: /* Error */ if ((error == USB_ERR_CANCELLED) || (sc->sc_transfer.callback != &umass_cam_cb)) { umass_tr_error(xfer, error); } else { umass_transfer_start(sc, UMASS_T_CBI_DATA_WR_CS); } break; } } static void umass_t_cbi_data_wr_cs_callback(struct usb_xfer *xfer, usb_error_t error) { umass_t_cbi_data_clear_stall_callback(xfer, UMASS_T_CBI_STATUS, UMASS_T_CBI_DATA_WRITE, error); } static void umass_t_cbi_status_callback(struct usb_xfer *xfer, usb_error_t error) { struct umass_softc *sc = usbd_xfer_softc(xfer); union ccb *ccb = sc->sc_transfer.ccb; struct usb_page_cache *pc; uint32_t residue; uint8_t status; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: if (actlen < (int)sizeof(sc->sbl)) { goto tr_setup; } pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, 0, &sc->sbl, sizeof(sc->sbl)); residue = (sc->sc_transfer.data_len - sc->sc_transfer.actlen); /* dissect the information in the buffer */ if (sc->sc_proto & 
UMASS_PROTO_UFI) { /* * Section 3.4.3.1.3 specifies that the UFI command * protocol returns an ASC and ASCQ in the interrupt * data block. */ DPRINTF(sc, UDMASS_CBI, "UFI CCI, ASC = 0x%02x, " "ASCQ = 0x%02x\n", sc->sbl.ufi.asc, sc->sbl.ufi.ascq); status = (((sc->sbl.ufi.asc == 0) && (sc->sbl.ufi.ascq == 0)) ? STATUS_CMD_OK : STATUS_CMD_FAILED); sc->sc_transfer.ccb = NULL; sc->sc_last_xfer_index = UMASS_T_CBI_COMMAND; (sc->sc_transfer.callback) (sc, ccb, residue, status); break; } else { /* Command Interrupt Data Block */ DPRINTF(sc, UDMASS_CBI, "type=0x%02x, value=0x%02x\n", sc->sbl.common.type, sc->sbl.common.value); if (sc->sbl.common.type == IDB_TYPE_CCI) { status = (sc->sbl.common.value & IDB_VALUE_STATUS_MASK); status = ((status == IDB_VALUE_PASS) ? STATUS_CMD_OK : (status == IDB_VALUE_FAIL) ? STATUS_CMD_FAILED : (status == IDB_VALUE_PERSISTENT) ? STATUS_CMD_FAILED : STATUS_WIRE_FAILED); sc->sc_transfer.ccb = NULL; sc->sc_last_xfer_index = UMASS_T_CBI_COMMAND; (sc->sc_transfer.callback) (sc, ccb, residue, status); break; } } /* fallthrough */ case USB_ST_SETUP: tr_setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); break; default: /* Error */ DPRINTF(sc, UDMASS_CBI, "Failed to read CSW: %s\n", usbd_errstr(error)); umass_tr_error(xfer, error); break; } } /* * CAM specific functions (used by SCSI, UFI, 8070i (ATAPI)) */ static int umass_cam_attach_sim(struct umass_softc *sc) { struct cam_devq *devq; /* Per device Queue */ cam_status status; /* * A HBA is attached to the CAM layer. * * The CAM layer will then after a while start probing for devices on * the bus. The number of SIMs is limited to one. */ devq = cam_simq_alloc(1 /* maximum openings */ ); if (devq == NULL) { return (ENOMEM); } sc->sc_sim = cam_sim_alloc (&umass_cam_action, &umass_cam_poll, DEVNAME_SIM, sc /* priv */ , sc->sc_unit /* unit number */ , &sc->sc_mtx /* mutex */ , 1 /* maximum device openings */ , 0 /* maximum tagged device openings */ , devq); if (sc->sc_sim == NULL) { cam_simq_free(devq); return (ENOMEM); } mtx_lock(&sc->sc_mtx); status = xpt_bus_register(sc->sc_sim, sc->sc_dev, sc->sc_unit); if (status != CAM_SUCCESS) { cam_sim_free(sc->sc_sim, /* free_devq */ TRUE); mtx_unlock(&sc->sc_mtx); printf("%s: xpt_bus_register failed with status %#x\n", __func__, status); return (ENOMEM); } mtx_unlock(&sc->sc_mtx); return (0); } static void umass_cam_attach(struct umass_softc *sc) { #ifndef USB_DEBUG if (bootverbose) #endif printf("%s:%d:%d: Attached to scbus%d\n", sc->sc_name, cam_sim_path(sc->sc_sim), sc->sc_unit, cam_sim_path(sc->sc_sim)); } /* umass_cam_detach * detach from the CAM layer */ static void umass_cam_detach_sim(struct umass_softc *sc) { - cam_status status; + int error; if (sc->sc_sim != NULL) { - status = xpt_bus_deregister(cam_sim_path(sc->sc_sim)); - if (status == CAM_REQ_CMP) { + error = xpt_bus_deregister(cam_sim_path(sc->sc_sim)); + if (error == 0) { /* accessing the softc is not possible after this */ sc->sc_sim->softc = NULL; DPRINTF(sc, UDMASS_SCSI, "%s: %s:%d:%d caling " "cam_sim_free sim %p refc %u mtx %p\n", __func__, sc->sc_name, cam_sim_path(sc->sc_sim), sc->sc_unit, sc->sc_sim, sc->sc_sim->refcount, sc->sc_sim->mtx); cam_sim_free(sc->sc_sim, /* free_devq */ TRUE); } else { - panic("%s: %s: CAM layer is busy: %#x\n", - __func__, sc->sc_name, status); + panic("%s: %s: CAM layer is busy: errno %d\n", + __func__, sc->sc_name, error); } sc->sc_sim = NULL; } } /* umass_cam_action * CAM requests for action come through here */ static void 
umass_cam_action(struct cam_sim *sim, union ccb *ccb) { struct umass_softc *sc = cam_sim_softc(sim); if (sc == NULL) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } /* Perform the requested action */ switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: { uint8_t *cmd; uint8_t dir; if (ccb->csio.ccb_h.flags & CAM_CDB_POINTER) { cmd = (uint8_t *)(ccb->csio.cdb_io.cdb_ptr); } else { cmd = (uint8_t *)(ccb->csio.cdb_io.cdb_bytes); } DPRINTF(sc, UDMASS_SCSI, "%d:%d:%jx:XPT_SCSI_IO: " "cmd: 0x%02x, flags: 0x%02x, " "%db cmd/%db data/%db sense\n", cam_sim_path(sc->sc_sim), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, cmd[0], ccb->ccb_h.flags & CAM_DIR_MASK, ccb->csio.cdb_len, ccb->csio.dxfer_len, ccb->csio.sense_len); if (sc->sc_transfer.ccb) { DPRINTF(sc, UDMASS_SCSI, "%d:%d:%jx:XPT_SCSI_IO: " "I/O in progress, deferring\n", cam_sim_path(sc->sc_sim), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun); ccb->ccb_h.status = CAM_SCSI_BUSY; xpt_done(ccb); goto done; } switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: dir = DIR_IN; break; case CAM_DIR_OUT: dir = DIR_OUT; DIF(UDMASS_SCSI, umass_dump_buffer(sc, ccb->csio.data_ptr, ccb->csio.dxfer_len, 48)); break; default: dir = DIR_NONE; } ccb->ccb_h.status = CAM_REQ_INPROG | CAM_SIM_QUEUED; /* * sc->sc_transform will convert the command to the * command format needed by the specific command set * and return the converted command in * "sc->sc_transfer.cmd_data" */ if (umass_std_transform(sc, ccb, cmd, ccb->csio.cdb_len)) { if (sc->sc_transfer.cmd_data[0] == INQUIRY) { const char *pserial; pserial = usb_get_serial(sc->sc_udev); /* * Umass devices don't generally report their serial numbers * in the usual SCSI way. Emulate it here. */ if ((sc->sc_transfer.cmd_data[1] & SI_EVPD) && (sc->sc_transfer.cmd_data[2] == SVPD_UNIT_SERIAL_NUMBER) && (pserial[0] != '\0')) { struct scsi_vpd_unit_serial_number *vpd_serial; vpd_serial = (struct scsi_vpd_unit_serial_number *)ccb->csio.data_ptr; vpd_serial->length = strlen(pserial); if (vpd_serial->length > sizeof(vpd_serial->serial_num)) vpd_serial->length = sizeof(vpd_serial->serial_num); memcpy(vpd_serial->serial_num, pserial, vpd_serial->length); ccb->csio.scsi_status = SCSI_STATUS_OK; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); goto done; } /* * Handle EVPD inquiry for broken devices first * NO_INQUIRY also implies NO_INQUIRY_EVPD */ if ((sc->sc_quirks & (NO_INQUIRY_EVPD | NO_INQUIRY)) && (sc->sc_transfer.cmd_data[1] & SI_EVPD)) { scsi_set_sense_data(&ccb->csio.sense_data, /*sense_format*/ SSD_TYPE_NONE, /*current_error*/ 1, /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, /*asc*/ 0x24, /*ascq*/ 0x00, /*extra args*/ SSD_ELEM_NONE); ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); goto done; } /* * Return fake inquiry data for * broken devices */ if (sc->sc_quirks & NO_INQUIRY) { memcpy(ccb->csio.data_ptr, &fake_inq_data, sizeof(fake_inq_data)); ccb->csio.scsi_status = SCSI_STATUS_OK; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); goto done; } if (sc->sc_quirks & FORCE_SHORT_INQUIRY) { ccb->csio.dxfer_len = SHORT_INQUIRY_LENGTH; } } else if (sc->sc_transfer.cmd_data[0] == PREVENT_ALLOW) { if (sc->sc_quirks & NO_PREVENT_ALLOW) { ccb->csio.scsi_status = SCSI_STATUS_OK; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); goto done; } } else if (sc->sc_transfer.cmd_data[0] == SYNCHRONIZE_CACHE) { if (sc->sc_quirks & NO_SYNCHRONIZE_CACHE) { ccb->csio.scsi_status = 
SCSI_STATUS_OK; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); goto done; } } umass_command_start(sc, dir, ccb->csio.data_ptr, ccb->csio.dxfer_len, ccb->ccb_h.timeout, &umass_cam_cb, ccb); } break; } case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; DPRINTF(sc, UDMASS_SCSI, "%d:%d:%jx:XPT_PATH_INQ:.\n", sc ? cam_sim_path(sc->sc_sim) : -1, ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun); /* host specific information */ cpi->version_num = 1; cpi->hba_inquiry = 0; cpi->target_sprt = 0; cpi->hba_misc = PIM_NO_6_BYTE; cpi->hba_eng_cnt = 0; cpi->max_target = UMASS_SCSIID_MAX; /* one target */ cpi->initiator_id = UMASS_SCSIID_HOST; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "USB SCSI", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = sc->sc_unit; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->transport = XPORT_USB; cpi->transport_version = 0; if (sc == NULL) { cpi->base_transfer_speed = 0; cpi->max_lun = 0; } else { if (sc->sc_quirks & FLOPPY_SPEED) { cpi->base_transfer_speed = UMASS_FLOPPY_TRANSFER_SPEED; } else { switch (usbd_get_speed(sc->sc_udev)) { case USB_SPEED_SUPER: cpi->base_transfer_speed = UMASS_SUPER_TRANSFER_SPEED; cpi->maxio = maxphys; break; case USB_SPEED_HIGH: cpi->base_transfer_speed = UMASS_HIGH_TRANSFER_SPEED; break; default: cpi->base_transfer_speed = UMASS_FULL_TRANSFER_SPEED; break; } } cpi->max_lun = sc->sc_maxlun; } cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_RESET_DEV: { DPRINTF(sc, UDMASS_SCSI, "%d:%d:%jx:XPT_RESET_DEV:.\n", cam_sim_path(sc->sc_sim), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun); umass_reset(sc); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; DPRINTF(sc, UDMASS_SCSI, "%d:%d:%jx:XPT_GET_TRAN_SETTINGS:.\n", cam_sim_path(sc->sc_sim), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun); cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_USB; cts->transport_version = 0; cts->xport_specific.valid = 0; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_SET_TRAN_SETTINGS: { DPRINTF(sc, UDMASS_SCSI, "%d:%d:%jx:XPT_SET_TRAN_SETTINGS:.\n", cam_sim_path(sc->sc_sim), ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { cam_calc_geometry(&ccb->ccg, /* extended */ 1); xpt_done(ccb); break; } case XPT_NOOP: { DPRINTF(sc, UDMASS_SCSI, "%d:%d:%jx:XPT_NOOP:.\n", sc ? cam_sim_path(sc->sc_sim) : -1, ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: DPRINTF(sc, UDMASS_SCSI, "%d:%d:%jx:func_code 0x%04x: " "Not implemented\n", sc ? 
cam_sim_path(sc->sc_sim) : -1, ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, ccb->ccb_h.func_code); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; } done: return; } static void umass_cam_poll(struct cam_sim *sim) { struct umass_softc *sc = cam_sim_softc(sim); if (sc == NULL) return; DPRINTF(sc, UDMASS_SCSI, "CAM poll\n"); usbd_transfer_poll(sc->sc_xfer, UMASS_T_MAX); } /* umass_cam_cb * finalise a completed CAM command */ static void umass_cam_cb(struct umass_softc *sc, union ccb *ccb, uint32_t residue, uint8_t status) { ccb->csio.resid = residue; switch (status) { case STATUS_CMD_OK: ccb->ccb_h.status = CAM_REQ_CMP; if ((sc->sc_quirks & READ_CAPACITY_OFFBY1) && (ccb->ccb_h.func_code == XPT_SCSI_IO) && (ccb->csio.cdb_io.cdb_bytes[0] == READ_CAPACITY)) { struct scsi_read_capacity_data *rcap; uint32_t maxsector; rcap = (void *)(ccb->csio.data_ptr); maxsector = scsi_4btoul(rcap->addr) - 1; scsi_ulto4b(maxsector, rcap->addr); } /* * We have to add SVPD_UNIT_SERIAL_NUMBER to the list * of pages supported by the device - otherwise, CAM * will never ask us for the serial number if the * device cannot handle that by itself. */ if (ccb->ccb_h.func_code == XPT_SCSI_IO && sc->sc_transfer.cmd_data[0] == INQUIRY && (sc->sc_transfer.cmd_data[1] & SI_EVPD) && sc->sc_transfer.cmd_data[2] == SVPD_SUPPORTED_PAGE_LIST && (usb_get_serial(sc->sc_udev)[0] != '\0')) { struct ccb_scsiio *csio; struct scsi_vpd_supported_page_list *page_list; csio = &ccb->csio; page_list = (struct scsi_vpd_supported_page_list *)csio->data_ptr; if (page_list->length + 1 < SVPD_SUPPORTED_PAGES_SIZE) { page_list->list[page_list->length] = SVPD_UNIT_SERIAL_NUMBER; page_list->length++; } } xpt_done(ccb); break; case STATUS_CMD_UNKNOWN: case STATUS_CMD_FAILED: /* fetch sense data */ /* the rest of the command was filled in at attach */ sc->cam_scsi_sense.length = ccb->csio.sense_len; DPRINTF(sc, UDMASS_SCSI, "Fetching %d bytes of " "sense data\n", ccb->csio.sense_len); if (umass_std_transform(sc, ccb, &sc->cam_scsi_sense.opcode, sizeof(sc->cam_scsi_sense))) { if ((sc->sc_quirks & FORCE_SHORT_INQUIRY) && (sc->sc_transfer.cmd_data[0] == INQUIRY)) { ccb->csio.sense_len = SHORT_INQUIRY_LENGTH; } umass_command_start(sc, DIR_IN, &ccb->csio.sense_data.error_code, ccb->csio.sense_len, ccb->ccb_h.timeout, &umass_cam_sense_cb, ccb); } break; default: /* * The wire protocol failed and will hopefully have * recovered. We return an error to CAM and let CAM * retry the command if necessary. */ xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status = CAM_REQ_CMP_ERR | CAM_DEV_QFRZN; xpt_done(ccb); break; } } /* * Finalise a completed autosense operation */ static void umass_cam_sense_cb(struct umass_softc *sc, union ccb *ccb, uint32_t residue, uint8_t status) { uint8_t *cmd; switch (status) { case STATUS_CMD_OK: case STATUS_CMD_UNKNOWN: case STATUS_CMD_FAILED: { int key, sense_len; ccb->csio.sense_resid = residue; sense_len = ccb->csio.sense_len - ccb->csio.sense_resid; key = scsi_get_sense_key(&ccb->csio.sense_data, sense_len, /*show_errors*/ 1); if (ccb->csio.ccb_h.flags & CAM_CDB_POINTER) { cmd = (uint8_t *)(ccb->csio.cdb_io.cdb_ptr); } else { cmd = (uint8_t *)(ccb->csio.cdb_io.cdb_bytes); } /* * Getting sense data always succeeds (apart from wire * failures): */ if ((sc->sc_quirks & RS_NO_CLEAR_UA) && (cmd[0] == INQUIRY) && (key == SSD_KEY_UNIT_ATTENTION)) { /* * Ignore unit attention errors in the case where * the Unit Attention state is not cleared on * REQUEST SENSE. They will appear again at the next * command. 
*/ ccb->ccb_h.status = CAM_REQ_CMP; } else if (key == SSD_KEY_NO_SENSE) { /* * No problem after all (in the case of CBI without * CCI) */ ccb->ccb_h.status = CAM_REQ_CMP; } else if ((sc->sc_quirks & RS_NO_CLEAR_UA) && (cmd[0] == READ_CAPACITY) && (key == SSD_KEY_UNIT_ATTENTION)) { /* * Some devices do not clear the unit attention error * on request sense. We insert a test unit ready * command to make sure we clear the unit attention * condition, then allow the retry to proceed as * usual. */ xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID | CAM_DEV_QFRZN; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; #if 0 DELAY(300000); #endif DPRINTF(sc, UDMASS_SCSI, "Doing a sneaky" "TEST_UNIT_READY\n"); /* the rest of the command was filled in at attach */ if ((sc->sc_transform)(sc, &sc->cam_scsi_test_unit_ready.opcode, sizeof(sc->cam_scsi_test_unit_ready)) == 1) { umass_command_start(sc, DIR_NONE, NULL, 0, ccb->ccb_h.timeout, &umass_cam_quirk_cb, ccb); break; } } else { xpt_freeze_devq(ccb->ccb_h.path, 1); if (key >= 0) { ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID | CAM_DEV_QFRZN; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; } else ccb->ccb_h.status = CAM_AUTOSENSE_FAIL | CAM_DEV_QFRZN; } xpt_done(ccb); break; } default: DPRINTF(sc, UDMASS_SCSI, "Autosense failed, " "status %d\n", status); xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status = CAM_AUTOSENSE_FAIL | CAM_DEV_QFRZN; xpt_done(ccb); } } /* * This completion code just handles the fact that we sent a test-unit-ready * after having previously failed a READ CAPACITY with CHECK_COND. The CCB * status for CAM is already set earlier. */ static void umass_cam_quirk_cb(struct umass_softc *sc, union ccb *ccb, uint32_t residue, uint8_t status) { DPRINTF(sc, UDMASS_SCSI, "Test unit ready " "returned status %d\n", status); xpt_done(ccb); } /* * SCSI specific functions */ static uint8_t umass_scsi_transform(struct umass_softc *sc, uint8_t *cmd_ptr, uint8_t cmd_len) { if ((cmd_len == 0) || (cmd_len > sizeof(sc->sc_transfer.cmd_data))) { DPRINTF(sc, UDMASS_SCSI, "Invalid command " "length: %d bytes\n", cmd_len); return (0); /* failure */ } sc->sc_transfer.cmd_len = cmd_len; switch (cmd_ptr[0]) { case TEST_UNIT_READY: if (sc->sc_quirks & NO_TEST_UNIT_READY) { DPRINTF(sc, UDMASS_SCSI, "Converted TEST_UNIT_READY " "to START_UNIT\n"); memset(sc->sc_transfer.cmd_data, 0, cmd_len); sc->sc_transfer.cmd_data[0] = START_STOP_UNIT; sc->sc_transfer.cmd_data[4] = SSS_START; return (1); } break; case INQUIRY: /* * some drives wedge when asked for full inquiry * information. */ if (sc->sc_quirks & FORCE_SHORT_INQUIRY) { memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len); sc->sc_transfer.cmd_data[4] = SHORT_INQUIRY_LENGTH; return (1); } break; } memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len); return (1); } static uint8_t umass_rbc_transform(struct umass_softc *sc, uint8_t *cmd_ptr, uint8_t cmd_len) { if ((cmd_len == 0) || (cmd_len > sizeof(sc->sc_transfer.cmd_data))) { DPRINTF(sc, UDMASS_SCSI, "Invalid command " "length: %d bytes\n", cmd_len); return (0); /* failure */ } switch (cmd_ptr[0]) { /* these commands are defined in RBC: */ case READ_10: case READ_CAPACITY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: case WRITE_10: case VERIFY_10: case INQUIRY: case MODE_SELECT_10: case MODE_SENSE_10: case TEST_UNIT_READY: case WRITE_BUFFER: /* * The following commands are not listed in my copy of the * RBC specs. 
CAM however seems to want those, and at least * the Sony DSC device appears to support those as well */ case REQUEST_SENSE: case PREVENT_ALLOW: memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len); if ((sc->sc_quirks & RBC_PAD_TO_12) && (cmd_len < 12)) { memset(sc->sc_transfer.cmd_data + cmd_len, 0, 12 - cmd_len); cmd_len = 12; } sc->sc_transfer.cmd_len = cmd_len; return (1); /* success */ /* All other commands are not legal in RBC */ default: DPRINTF(sc, UDMASS_SCSI, "Unsupported RBC " "command 0x%02x\n", cmd_ptr[0]); return (0); /* failure */ } } static uint8_t umass_ufi_transform(struct umass_softc *sc, uint8_t *cmd_ptr, uint8_t cmd_len) { if ((cmd_len == 0) || (cmd_len > sizeof(sc->sc_transfer.cmd_data))) { DPRINTF(sc, UDMASS_SCSI, "Invalid command " "length: %d bytes\n", cmd_len); return (0); /* failure */ } /* An UFI command is always 12 bytes in length */ sc->sc_transfer.cmd_len = UFI_COMMAND_LENGTH; /* Zero the command data */ memset(sc->sc_transfer.cmd_data, 0, UFI_COMMAND_LENGTH); switch (cmd_ptr[0]) { /* * Commands of which the format has been verified. They * should work. Copy the command into the (zeroed out) * destination buffer. */ case TEST_UNIT_READY: if (sc->sc_quirks & NO_TEST_UNIT_READY) { /* * Some devices do not support this command. Start * Stop Unit should give the same results */ DPRINTF(sc, UDMASS_UFI, "Converted TEST_UNIT_READY " "to START_UNIT\n"); sc->sc_transfer.cmd_data[0] = START_STOP_UNIT; sc->sc_transfer.cmd_data[4] = SSS_START; return (1); } break; case REZERO_UNIT: case REQUEST_SENSE: case FORMAT_UNIT: case INQUIRY: case START_STOP_UNIT: case SEND_DIAGNOSTIC: case PREVENT_ALLOW: case READ_CAPACITY: case READ_10: case WRITE_10: case POSITION_TO_ELEMENT: /* SEEK_10 */ case WRITE_AND_VERIFY: case VERIFY: case MODE_SELECT_10: case MODE_SENSE_10: case READ_12: case WRITE_12: case READ_FORMAT_CAPACITIES: break; /* * SYNCHRONIZE_CACHE isn't supported by UFI, nor should it be * required for UFI devices, so it is appropriate to fake * success. */ case SYNCHRONIZE_CACHE: return (2); default: DPRINTF(sc, UDMASS_SCSI, "Unsupported UFI " "command 0x%02x\n", cmd_ptr[0]); return (0); /* failure */ } memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len); return (1); /* success */ } /* * 8070i (ATAPI) specific functions */ static uint8_t umass_atapi_transform(struct umass_softc *sc, uint8_t *cmd_ptr, uint8_t cmd_len) { if ((cmd_len == 0) || (cmd_len > sizeof(sc->sc_transfer.cmd_data))) { DPRINTF(sc, UDMASS_SCSI, "Invalid command " "length: %d bytes\n", cmd_len); return (0); /* failure */ } /* An ATAPI command is always 12 bytes in length. */ sc->sc_transfer.cmd_len = ATAPI_COMMAND_LENGTH; /* Zero the command data */ memset(sc->sc_transfer.cmd_data, 0, ATAPI_COMMAND_LENGTH); switch (cmd_ptr[0]) { /* * Commands of which the format has been verified. They * should work. Copy the command into the destination * buffer. */ case INQUIRY: /* * some drives wedge when asked for full inquiry * information. 
*/ if (sc->sc_quirks & FORCE_SHORT_INQUIRY) { memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len); sc->sc_transfer.cmd_data[4] = SHORT_INQUIRY_LENGTH; return (1); } break; case TEST_UNIT_READY: if (sc->sc_quirks & NO_TEST_UNIT_READY) { DPRINTF(sc, UDMASS_SCSI, "Converted TEST_UNIT_READY " "to START_UNIT\n"); sc->sc_transfer.cmd_data[0] = START_STOP_UNIT; sc->sc_transfer.cmd_data[4] = SSS_START; return (1); } break; case REZERO_UNIT: case REQUEST_SENSE: case START_STOP_UNIT: case SEND_DIAGNOSTIC: case PREVENT_ALLOW: case READ_CAPACITY: case READ_10: case WRITE_10: case POSITION_TO_ELEMENT: /* SEEK_10 */ case SYNCHRONIZE_CACHE: case MODE_SELECT_10: case MODE_SENSE_10: case READ_BUFFER: case 0x42: /* READ_SUBCHANNEL */ case 0x43: /* READ_TOC */ case 0x44: /* READ_HEADER */ case 0x47: /* PLAY_MSF (Play Minute/Second/Frame) */ case 0x48: /* PLAY_TRACK */ case 0x49: /* PLAY_TRACK_REL */ case 0x4b: /* PAUSE */ case 0x51: /* READ_DISK_INFO */ case 0x52: /* READ_TRACK_INFO */ case 0x54: /* SEND_OPC */ case 0x59: /* READ_MASTER_CUE */ case 0x5b: /* CLOSE_TR_SESSION */ case 0x5c: /* READ_BUFFER_CAP */ case 0x5d: /* SEND_CUE_SHEET */ case 0xa1: /* BLANK */ case 0xa5: /* PLAY_12 */ case 0xa6: /* EXCHANGE_MEDIUM */ case 0xad: /* READ_DVD_STRUCTURE */ case 0xbb: /* SET_CD_SPEED */ case 0xe5: /* READ_TRACK_INFO_PHILIPS */ break; case READ_12: case WRITE_12: default: DPRINTF(sc, UDMASS_SCSI, "Unsupported ATAPI " "command 0x%02x - trying anyway\n", cmd_ptr[0]); break; } memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len); return (1); /* success */ } static uint8_t umass_no_transform(struct umass_softc *sc, uint8_t *cmd, uint8_t cmdlen) { return (0); /* failure */ } static uint8_t umass_std_transform(struct umass_softc *sc, union ccb *ccb, uint8_t *cmd, uint8_t cmdlen) { uint8_t retval; retval = (sc->sc_transform) (sc, cmd, cmdlen); if (retval == 2) { ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return (0); } else if (retval == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status = CAM_REQ_INVALID | CAM_DEV_QFRZN; xpt_done(ccb); return (0); } /* Command should be executed */ return (1); } #ifdef USB_DEBUG static void umass_bbb_dump_cbw(struct umass_softc *sc, umass_bbb_cbw_t *cbw) { uint8_t *c = cbw->CBWCDB; uint32_t dlen = UGETDW(cbw->dCBWDataTransferLength); uint32_t tag = UGETDW(cbw->dCBWTag); uint8_t clen = cbw->bCDBLength; uint8_t flags = cbw->bCBWFlags; uint8_t lun = cbw->bCBWLUN; DPRINTF(sc, UDMASS_BBB, "CBW %d: cmd = %db " "(0x%02x%02x%02x%02x%02x%02x%s), " "data = %db, lun = %d, dir = %s\n", tag, clen, c[0], c[1], c[2], c[3], c[4], c[5], (clen > 6 ? "..." : ""), dlen, lun, (flags == CBWFLAGS_IN ? "in" : (flags == CBWFLAGS_OUT ? "out" : ""))); } static void umass_bbb_dump_csw(struct umass_softc *sc, umass_bbb_csw_t *csw) { uint32_t sig = UGETDW(csw->dCSWSignature); uint32_t tag = UGETDW(csw->dCSWTag); uint32_t res = UGETDW(csw->dCSWDataResidue); uint8_t status = csw->bCSWStatus; DPRINTF(sc, UDMASS_BBB, "CSW %d: sig = 0x%08x (%s), tag = 0x%08x, " "res = %d, status = 0x%02x (%s)\n", tag, sig, (sig == CSWSIGNATURE ? "valid" : "invalid"), tag, res, status, (status == CSWSTATUS_GOOD ? "good" : (status == CSWSTATUS_FAILED ? "failed" : (status == CSWSTATUS_PHASE ? "phase" : "")))); } static void umass_cbi_dump_cmd(struct umass_softc *sc, void *cmd, uint8_t cmdlen) { uint8_t *c = cmd; uint8_t dir = sc->sc_transfer.dir; DPRINTF(sc, UDMASS_BBB, "cmd = %db " "(0x%02x%02x%02x%02x%02x%02x%s), " "data = %db, dir = %s\n", cmdlen, c[0], c[1], c[2], c[3], c[4], c[5], (cmdlen > 6 ? "..." 
: ""), sc->sc_transfer.data_len, (dir == DIR_IN ? "in" : (dir == DIR_OUT ? "out" : (dir == DIR_NONE ? "no data phase" : "")))); } static void umass_dump_buffer(struct umass_softc *sc, uint8_t *buffer, uint32_t buflen, uint32_t printlen) { uint32_t i, j; char s1[40]; char s2[40]; char s3[5]; s1[0] = '\0'; s3[0] = '\0'; sprintf(s2, " buffer=%p, buflen=%d", buffer, buflen); for (i = 0; (i < buflen) && (i < printlen); i++) { j = i % 16; if (j == 0 && i != 0) { DPRINTF(sc, UDMASS_GEN, "0x %s%s\n", s1, s2); s2[0] = '\0'; } sprintf(&s1[j * 2], "%02x", buffer[i] & 0xff); } if (buflen > printlen) sprintf(s3, " ..."); DPRINTF(sc, UDMASS_GEN, "0x %s%s%s\n", s1, s2, s3); } #endif diff --git a/sys/dev/vmware/pvscsi/pvscsi.c b/sys/dev/vmware/pvscsi/pvscsi.c index ad32d2ab4959..f64b20863f60 100644 --- a/sys/dev/vmware/pvscsi/pvscsi.c +++ b/sys/dev/vmware/pvscsi/pvscsi.c @@ -1,1801 +1,1801 @@ /*- * Copyright (c) 2018 VMware, Inc. * * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0) */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pvscsi.h" #define PVSCSI_DEFAULT_NUM_PAGES_REQ_RING 8 #define PVSCSI_SENSE_LENGTH 256 MALLOC_DECLARE(M_PVSCSI); MALLOC_DEFINE(M_PVSCSI, "pvscsi", "PVSCSI memory"); #ifdef PVSCSI_DEBUG_LOGGING #define DEBUG_PRINTF(level, dev, fmt, ...) \ do { \ if (pvscsi_log_level >= (level)) { \ device_printf((dev), (fmt), ##__VA_ARGS__); \ } \ } while(0) #else #define DEBUG_PRINTF(level, dev, fmt, ...) #endif /* PVSCSI_DEBUG_LOGGING */ #define ccb_pvscsi_hcb spriv_ptr0 #define ccb_pvscsi_sc spriv_ptr1 struct pvscsi_softc; struct pvscsi_hcb; struct pvscsi_dma; static inline uint32_t pvscsi_reg_read(struct pvscsi_softc *sc, uint32_t offset); static inline void pvscsi_reg_write(struct pvscsi_softc *sc, uint32_t offset, uint32_t val); static inline uint32_t pvscsi_read_intr_status(struct pvscsi_softc *sc); static inline void pvscsi_write_intr_status(struct pvscsi_softc *sc, uint32_t val); static inline void pvscsi_intr_enable(struct pvscsi_softc *sc); static inline void pvscsi_intr_disable(struct pvscsi_softc *sc); static void pvscsi_kick_io(struct pvscsi_softc *sc, uint8_t cdb0); static void pvscsi_write_cmd(struct pvscsi_softc *sc, uint32_t cmd, void *data, uint32_t len); static uint32_t pvscsi_get_max_targets(struct pvscsi_softc *sc); static int pvscsi_setup_req_call(struct pvscsi_softc *sc, uint32_t enable); static void pvscsi_setup_rings(struct pvscsi_softc *sc); static void pvscsi_setup_msg_ring(struct pvscsi_softc *sc); static int pvscsi_hw_supports_msg(struct pvscsi_softc *sc); static void pvscsi_timeout(void *arg); static void pvscsi_freeze(struct pvscsi_softc *sc); static void pvscsi_adapter_reset(struct pvscsi_softc *sc); static void pvscsi_bus_reset(struct pvscsi_softc *sc); static void pvscsi_device_reset(struct pvscsi_softc *sc, uint32_t target); static void pvscsi_abort(struct pvscsi_softc *sc, uint32_t target, union ccb *ccb); static void pvscsi_process_completion(struct pvscsi_softc *sc, struct pvscsi_ring_cmp_desc *e); static void pvscsi_process_cmp_ring(struct pvscsi_softc *sc); static void pvscsi_process_msg(struct pvscsi_softc *sc, struct pvscsi_ring_msg_desc *e); static void pvscsi_process_msg_ring(struct pvscsi_softc *sc); static void pvscsi_intr_locked(struct pvscsi_softc *sc); static void pvscsi_intr(void *xsc); static void pvscsi_poll(struct cam_sim *sim); static void pvscsi_execute_ccb(void *arg, 
bus_dma_segment_t *segs, int nseg, int error); static void pvscsi_action(struct cam_sim *sim, union ccb *ccb); static inline uint64_t pvscsi_hcb_to_context(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb); static inline struct pvscsi_hcb* pvscsi_context_to_hcb(struct pvscsi_softc *sc, uint64_t context); static struct pvscsi_hcb * pvscsi_hcb_get(struct pvscsi_softc *sc); static void pvscsi_hcb_put(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb); static void pvscsi_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void pvscsi_dma_free(struct pvscsi_softc *sc, struct pvscsi_dma *dma); static int pvscsi_dma_alloc(struct pvscsi_softc *sc, struct pvscsi_dma *dma, bus_size_t size, bus_size_t alignment); static int pvscsi_dma_alloc_ppns(struct pvscsi_softc *sc, struct pvscsi_dma *dma, uint64_t *ppn_list, uint32_t num_pages); static void pvscsi_dma_free_per_hcb(struct pvscsi_softc *sc, uint32_t hcbs_allocated); static int pvscsi_dma_alloc_per_hcb(struct pvscsi_softc *sc); static void pvscsi_free_rings(struct pvscsi_softc *sc); static int pvscsi_allocate_rings(struct pvscsi_softc *sc); static void pvscsi_free_interrupts(struct pvscsi_softc *sc); static int pvscsi_setup_interrupts(struct pvscsi_softc *sc); static void pvscsi_free_all(struct pvscsi_softc *sc); static int pvscsi_attach(device_t dev); static int pvscsi_detach(device_t dev); static int pvscsi_probe(device_t dev); static int pvscsi_shutdown(device_t dev); static int pvscsi_get_tunable(struct pvscsi_softc *sc, char *name, int value); #ifdef PVSCSI_DEBUG_LOGGING static int pvscsi_log_level = 0; static SYSCTL_NODE(_hw, OID_AUTO, pvscsi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "PVSCSI driver parameters"); SYSCTL_INT(_hw_pvscsi, OID_AUTO, log_level, CTLFLAG_RWTUN, &pvscsi_log_level, 0, "PVSCSI debug log level"); #endif static int pvscsi_request_ring_pages = 0; TUNABLE_INT("hw.pvscsi.request_ring_pages", &pvscsi_request_ring_pages); static int pvscsi_use_msg = 1; TUNABLE_INT("hw.pvscsi.use_msg", &pvscsi_use_msg); static int pvscsi_use_msi = 1; TUNABLE_INT("hw.pvscsi.use_msi", &pvscsi_use_msi); static int pvscsi_use_msix = 1; TUNABLE_INT("hw.pvscsi.use_msix", &pvscsi_use_msix); static int pvscsi_use_req_call_threshold = 1; TUNABLE_INT("hw.pvscsi.use_req_call_threshold", &pvscsi_use_req_call_threshold); static int pvscsi_max_queue_depth = 0; TUNABLE_INT("hw.pvscsi.max_queue_depth", &pvscsi_max_queue_depth); struct pvscsi_sg_list { struct pvscsi_sg_element sge[PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT]; }; #define PVSCSI_ABORT_TIMEOUT 2 #define PVSCSI_RESET_TIMEOUT 10 #define PVSCSI_HCB_NONE 0 #define PVSCSI_HCB_ABORT 1 #define PVSCSI_HCB_DEVICE_RESET 2 #define PVSCSI_HCB_BUS_RESET 3 struct pvscsi_hcb { union ccb *ccb; struct pvscsi_ring_req_desc *e; int recovery; SLIST_ENTRY(pvscsi_hcb) links; struct callout callout; bus_dmamap_t dma_map; void *sense_buffer; bus_addr_t sense_buffer_paddr; struct pvscsi_sg_list *sg_list; bus_addr_t sg_list_paddr; }; struct pvscsi_dma { bus_dma_tag_t tag; bus_dmamap_t map; void *vaddr; bus_addr_t paddr; bus_size_t size; }; struct pvscsi_softc { device_t dev; struct mtx lock; struct cam_sim *sim; struct cam_path *bus_path; int frozen; struct pvscsi_rings_state *rings_state; struct pvscsi_ring_req_desc *req_ring; struct pvscsi_ring_cmp_desc *cmp_ring; struct pvscsi_ring_msg_desc *msg_ring; uint32_t hcb_cnt; struct pvscsi_hcb *hcbs; SLIST_HEAD(, pvscsi_hcb) free_list; bus_dma_tag_t parent_dmat; bus_dma_tag_t buffer_dmat; bool use_msg; uint32_t max_targets; int mm_rid; struct resource *mm_res; int irq_id; struct 
resource *irq_res; void *irq_handler; int use_req_call_threshold; int use_msi_or_msix; uint64_t rings_state_ppn; uint32_t req_ring_num_pages; uint64_t req_ring_ppn[PVSCSI_MAX_NUM_PAGES_REQ_RING]; uint32_t cmp_ring_num_pages; uint64_t cmp_ring_ppn[PVSCSI_MAX_NUM_PAGES_CMP_RING]; uint32_t msg_ring_num_pages; uint64_t msg_ring_ppn[PVSCSI_MAX_NUM_PAGES_MSG_RING]; struct pvscsi_dma rings_state_dma; struct pvscsi_dma req_ring_dma; struct pvscsi_dma cmp_ring_dma; struct pvscsi_dma msg_ring_dma; struct pvscsi_dma sg_list_dma; struct pvscsi_dma sense_buffer_dma; }; static int pvscsi_get_tunable(struct pvscsi_softc *sc, char *name, int value) { char cfg[64]; snprintf(cfg, sizeof(cfg), "hw.pvscsi.%d.%s", device_get_unit(sc->dev), name); TUNABLE_INT_FETCH(cfg, &value); return (value); } static void pvscsi_freeze(struct pvscsi_softc *sc) { if (!sc->frozen) { xpt_freeze_simq(sc->sim, 1); sc->frozen = 1; } } static inline uint32_t pvscsi_reg_read(struct pvscsi_softc *sc, uint32_t offset) { return (bus_read_4(sc->mm_res, offset)); } static inline void pvscsi_reg_write(struct pvscsi_softc *sc, uint32_t offset, uint32_t val) { bus_write_4(sc->mm_res, offset, val); } static inline uint32_t pvscsi_read_intr_status(struct pvscsi_softc *sc) { return (pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_INTR_STATUS)); } static inline void pvscsi_write_intr_status(struct pvscsi_softc *sc, uint32_t val) { pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_STATUS, val); } static inline void pvscsi_intr_enable(struct pvscsi_softc *sc) { uint32_t mask; mask = PVSCSI_INTR_CMPL_MASK; if (sc->use_msg) { mask |= PVSCSI_INTR_MSG_MASK; } pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_MASK, mask); } static inline void pvscsi_intr_disable(struct pvscsi_softc *sc) { pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_MASK, 0); } static void pvscsi_kick_io(struct pvscsi_softc *sc, uint8_t cdb0) { struct pvscsi_rings_state *s; if (cdb0 == READ_6 || cdb0 == READ_10 || cdb0 == READ_12 || cdb0 == READ_16 || cdb0 == WRITE_6 || cdb0 == WRITE_10 || cdb0 == WRITE_12 || cdb0 == WRITE_16) { s = sc->rings_state; if (!sc->use_req_call_threshold || (s->req_prod_idx - s->req_cons_idx) >= s->req_call_threshold) { pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_KICK_RW_IO, 0); } } else { pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0); } } static void pvscsi_write_cmd(struct pvscsi_softc *sc, uint32_t cmd, void *data, uint32_t len) { uint32_t *data_ptr; int i; KASSERT(len % sizeof(uint32_t) == 0, ("command size not a multiple of 4")); data_ptr = data; len /= sizeof(uint32_t); pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND, cmd); for (i = 0; i < len; ++i) { pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND_DATA, data_ptr[i]); } } static inline uint64_t pvscsi_hcb_to_context(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb) { /* Offset by 1 because context must not be 0 */ return (hcb - sc->hcbs + 1); } static inline struct pvscsi_hcb* pvscsi_context_to_hcb(struct pvscsi_softc *sc, uint64_t context) { return (sc->hcbs + (context - 1)); } static struct pvscsi_hcb * pvscsi_hcb_get(struct pvscsi_softc *sc) { struct pvscsi_hcb *hcb; mtx_assert(&sc->lock, MA_OWNED); hcb = SLIST_FIRST(&sc->free_list); if (hcb) { SLIST_REMOVE_HEAD(&sc->free_list, links); } return (hcb); } static void pvscsi_hcb_put(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb) { mtx_assert(&sc->lock, MA_OWNED); hcb->ccb = NULL; hcb->e = NULL; hcb->recovery = PVSCSI_HCB_NONE; SLIST_INSERT_HEAD(&sc->free_list, hcb, links); } static uint32_t pvscsi_get_max_targets(struct pvscsi_softc *sc) { uint32_t max_targets; 
pvscsi_write_cmd(sc, PVSCSI_CMD_GET_MAX_TARGETS, NULL, 0); max_targets = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS); if (max_targets == ~0) { max_targets = 16; } return (max_targets); } static int pvscsi_setup_req_call(struct pvscsi_softc *sc, uint32_t enable) { uint32_t status; struct pvscsi_cmd_desc_setup_req_call cmd; if (!pvscsi_get_tunable(sc, "pvscsi_use_req_call_threshold", pvscsi_use_req_call_threshold)) { return (0); } pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND, PVSCSI_CMD_SETUP_REQCALLTHRESHOLD); status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS); if (status != -1) { bzero(&cmd, sizeof(cmd)); cmd.enable = enable; pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_REQCALLTHRESHOLD, &cmd, sizeof(cmd)); status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS); return (status != 0); } else { return (0); } } static void pvscsi_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *dest; KASSERT(nseg == 1, ("more than one segment")); dest = arg; if (!error) { *dest = segs->ds_addr; } } static void pvscsi_dma_free(struct pvscsi_softc *sc, struct pvscsi_dma *dma) { if (dma->tag != NULL) { if (dma->paddr != 0) { bus_dmamap_unload(dma->tag, dma->map); } if (dma->vaddr != NULL) { bus_dmamem_free(dma->tag, dma->vaddr, dma->map); } bus_dma_tag_destroy(dma->tag); } bzero(dma, sizeof(*dma)); } static int pvscsi_dma_alloc(struct pvscsi_softc *sc, struct pvscsi_dma *dma, bus_size_t size, bus_size_t alignment) { int error; bzero(dma, sizeof(*dma)); error = bus_dma_tag_create(sc->parent_dmat, alignment, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, BUS_DMA_ALLOCNOW, NULL, NULL, &dma->tag); if (error) { device_printf(sc->dev, "error creating dma tag, error %d\n", error); goto fail; } error = bus_dmamem_alloc(dma->tag, &dma->vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dma->map); if (error) { device_printf(sc->dev, "error allocating dma mem, error %d\n", error); goto fail; } error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, pvscsi_dma_cb, &dma->paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->dev, "error mapping dma mam, error %d\n", error); goto fail; } dma->size = size; fail: if (error) { pvscsi_dma_free(sc, dma); } return (error); } static int pvscsi_dma_alloc_ppns(struct pvscsi_softc *sc, struct pvscsi_dma *dma, uint64_t *ppn_list, uint32_t num_pages) { int error; uint32_t i; uint64_t ppn; error = pvscsi_dma_alloc(sc, dma, num_pages * PAGE_SIZE, PAGE_SIZE); if (error) { device_printf(sc->dev, "Error allocating pages, error %d\n", error); return (error); } ppn = dma->paddr >> PAGE_SHIFT; for (i = 0; i < num_pages; i++) { ppn_list[i] = ppn + i; } return (0); } static void pvscsi_dma_free_per_hcb(struct pvscsi_softc *sc, uint32_t hcbs_allocated) { int i; int lock_owned; struct pvscsi_hcb *hcb; lock_owned = mtx_owned(&sc->lock); if (lock_owned) { mtx_unlock(&sc->lock); } for (i = 0; i < hcbs_allocated; ++i) { hcb = sc->hcbs + i; callout_drain(&hcb->callout); }; if (lock_owned) { mtx_lock(&sc->lock); } for (i = 0; i < hcbs_allocated; ++i) { hcb = sc->hcbs + i; bus_dmamap_destroy(sc->buffer_dmat, hcb->dma_map); }; pvscsi_dma_free(sc, &sc->sense_buffer_dma); pvscsi_dma_free(sc, &sc->sg_list_dma); } static int pvscsi_dma_alloc_per_hcb(struct pvscsi_softc *sc) { int i; int error; struct pvscsi_hcb *hcb; i = 0; error = pvscsi_dma_alloc(sc, &sc->sg_list_dma, sizeof(struct pvscsi_sg_list) * sc->hcb_cnt, 1); if (error) { device_printf(sc->dev, "Error allocation sg list DMA memory, error %d\n", error); goto fail; } error = 
pvscsi_dma_alloc(sc, &sc->sense_buffer_dma, PVSCSI_SENSE_LENGTH * sc->hcb_cnt, 1); if (error) { device_printf(sc->dev, "Error allocation sg list DMA memory, error %d\n", error); goto fail; } for (i = 0; i < sc->hcb_cnt; ++i) { hcb = sc->hcbs + i; error = bus_dmamap_create(sc->buffer_dmat, 0, &hcb->dma_map); if (error) { device_printf(sc->dev, "Error creating dma map for hcb %d, error %d\n", i, error); goto fail; } hcb->sense_buffer = (void *)((caddr_t)sc->sense_buffer_dma.vaddr + PVSCSI_SENSE_LENGTH * i); hcb->sense_buffer_paddr = sc->sense_buffer_dma.paddr + PVSCSI_SENSE_LENGTH * i; hcb->sg_list = (struct pvscsi_sg_list *)((caddr_t)sc->sg_list_dma.vaddr + sizeof(struct pvscsi_sg_list) * i); hcb->sg_list_paddr = sc->sg_list_dma.paddr + sizeof(struct pvscsi_sg_list) * i; callout_init_mtx(&hcb->callout, &sc->lock, 0); } SLIST_INIT(&sc->free_list); for (i = (sc->hcb_cnt - 1); i >= 0; --i) { hcb = sc->hcbs + i; SLIST_INSERT_HEAD(&sc->free_list, hcb, links); } fail: if (error) { pvscsi_dma_free_per_hcb(sc, i); } return (error); } static void pvscsi_free_rings(struct pvscsi_softc *sc) { pvscsi_dma_free(sc, &sc->rings_state_dma); pvscsi_dma_free(sc, &sc->req_ring_dma); pvscsi_dma_free(sc, &sc->cmp_ring_dma); if (sc->use_msg) { pvscsi_dma_free(sc, &sc->msg_ring_dma); } } static int pvscsi_allocate_rings(struct pvscsi_softc *sc) { int error; error = pvscsi_dma_alloc_ppns(sc, &sc->rings_state_dma, &sc->rings_state_ppn, 1); if (error) { device_printf(sc->dev, "Error allocating rings state, error = %d\n", error); goto fail; } sc->rings_state = sc->rings_state_dma.vaddr; error = pvscsi_dma_alloc_ppns(sc, &sc->req_ring_dma, sc->req_ring_ppn, sc->req_ring_num_pages); if (error) { device_printf(sc->dev, "Error allocating req ring pages, error = %d\n", error); goto fail; } sc->req_ring = sc->req_ring_dma.vaddr; error = pvscsi_dma_alloc_ppns(sc, &sc->cmp_ring_dma, sc->cmp_ring_ppn, sc->cmp_ring_num_pages); if (error) { device_printf(sc->dev, "Error allocating cmp ring pages, error = %d\n", error); goto fail; } sc->cmp_ring = sc->cmp_ring_dma.vaddr; sc->msg_ring = NULL; if (sc->use_msg) { error = pvscsi_dma_alloc_ppns(sc, &sc->msg_ring_dma, sc->msg_ring_ppn, sc->msg_ring_num_pages); if (error) { device_printf(sc->dev, "Error allocating cmp ring pages, error = %d\n", error); goto fail; } sc->msg_ring = sc->msg_ring_dma.vaddr; } DEBUG_PRINTF(1, sc->dev, "rings_state: %p\n", sc->rings_state); DEBUG_PRINTF(1, sc->dev, "req_ring: %p - %u pages\n", sc->req_ring, sc->req_ring_num_pages); DEBUG_PRINTF(1, sc->dev, "cmp_ring: %p - %u pages\n", sc->cmp_ring, sc->cmp_ring_num_pages); DEBUG_PRINTF(1, sc->dev, "msg_ring: %p - %u pages\n", sc->msg_ring, sc->msg_ring_num_pages); fail: if (error) { pvscsi_free_rings(sc); } return (error); } static void pvscsi_setup_rings(struct pvscsi_softc *sc) { struct pvscsi_cmd_desc_setup_rings cmd; uint32_t i; bzero(&cmd, sizeof(cmd)); cmd.rings_state_ppn = sc->rings_state_ppn; cmd.req_ring_num_pages = sc->req_ring_num_pages; for (i = 0; i < sc->req_ring_num_pages; ++i) { cmd.req_ring_ppns[i] = sc->req_ring_ppn[i]; } cmd.cmp_ring_num_pages = sc->cmp_ring_num_pages; for (i = 0; i < sc->cmp_ring_num_pages; ++i) { cmd.cmp_ring_ppns[i] = sc->cmp_ring_ppn[i]; } pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_RINGS, &cmd, sizeof(cmd)); } static int pvscsi_hw_supports_msg(struct pvscsi_softc *sc) { uint32_t status; pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND, PVSCSI_CMD_SETUP_MSG_RING); status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS); return (status != -1); } static void 
pvscsi_setup_msg_ring(struct pvscsi_softc *sc) { struct pvscsi_cmd_desc_setup_msg_ring cmd; uint32_t i; KASSERT(sc->use_msg, ("msg is not being used")); bzero(&cmd, sizeof(cmd)); cmd.num_pages = sc->msg_ring_num_pages; for (i = 0; i < sc->msg_ring_num_pages; ++i) { cmd.ring_ppns[i] = sc->msg_ring_ppn[i]; } pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_MSG_RING, &cmd, sizeof(cmd)); } static void pvscsi_adapter_reset(struct pvscsi_softc *sc) { uint32_t val; device_printf(sc->dev, "Adapter Reset\n"); pvscsi_write_cmd(sc, PVSCSI_CMD_ADAPTER_RESET, NULL, 0); val = pvscsi_read_intr_status(sc); DEBUG_PRINTF(2, sc->dev, "adapter reset done: %u\n", val); } static void pvscsi_bus_reset(struct pvscsi_softc *sc) { device_printf(sc->dev, "Bus Reset\n"); pvscsi_write_cmd(sc, PVSCSI_CMD_RESET_BUS, NULL, 0); pvscsi_process_cmp_ring(sc); DEBUG_PRINTF(2, sc->dev, "bus reset done\n"); } static void pvscsi_device_reset(struct pvscsi_softc *sc, uint32_t target) { struct pvscsi_cmd_desc_reset_device cmd; memset(&cmd, 0, sizeof(cmd)); cmd.target = target; device_printf(sc->dev, "Device reset for target %u\n", target); pvscsi_write_cmd(sc, PVSCSI_CMD_RESET_DEVICE, &cmd, sizeof cmd); pvscsi_process_cmp_ring(sc); DEBUG_PRINTF(2, sc->dev, "device reset done\n"); } static void pvscsi_abort(struct pvscsi_softc *sc, uint32_t target, union ccb *ccb) { struct pvscsi_cmd_desc_abort_cmd cmd; struct pvscsi_hcb *hcb; uint64_t context; pvscsi_process_cmp_ring(sc); hcb = ccb->ccb_h.ccb_pvscsi_hcb; if (hcb != NULL) { context = pvscsi_hcb_to_context(sc, hcb); memset(&cmd, 0, sizeof cmd); cmd.target = target; cmd.context = context; device_printf(sc->dev, "Abort for target %u context %llx\n", target, (unsigned long long)context); pvscsi_write_cmd(sc, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd)); pvscsi_process_cmp_ring(sc); DEBUG_PRINTF(2, sc->dev, "abort done\n"); } else { DEBUG_PRINTF(1, sc->dev, "Target %u ccb %p not found for abort\n", target, ccb); } } static int pvscsi_probe(device_t dev) { if (pci_get_vendor(dev) == PCI_VENDOR_ID_VMWARE && pci_get_device(dev) == PCI_DEVICE_ID_VMWARE_PVSCSI) { device_set_desc(dev, "VMware Paravirtual SCSI Controller"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int pvscsi_shutdown(device_t dev) { return (0); } static void pvscsi_timeout(void *arg) { struct pvscsi_hcb *hcb; struct pvscsi_softc *sc; union ccb *ccb; hcb = arg; ccb = hcb->ccb; if (ccb == NULL) { /* Already completed */ return; } sc = ccb->ccb_h.ccb_pvscsi_sc; mtx_assert(&sc->lock, MA_OWNED); device_printf(sc->dev, "Command timed out hcb=%p ccb=%p.\n", hcb, ccb); switch (hcb->recovery) { case PVSCSI_HCB_NONE: hcb->recovery = PVSCSI_HCB_ABORT; pvscsi_abort(sc, ccb->ccb_h.target_id, ccb); callout_reset_sbt(&hcb->callout, PVSCSI_ABORT_TIMEOUT * SBT_1S, 0, pvscsi_timeout, hcb, 0); break; case PVSCSI_HCB_ABORT: hcb->recovery = PVSCSI_HCB_DEVICE_RESET; pvscsi_freeze(sc); pvscsi_device_reset(sc, ccb->ccb_h.target_id); callout_reset_sbt(&hcb->callout, PVSCSI_RESET_TIMEOUT * SBT_1S, 0, pvscsi_timeout, hcb, 0); break; case PVSCSI_HCB_DEVICE_RESET: hcb->recovery = PVSCSI_HCB_BUS_RESET; pvscsi_freeze(sc); pvscsi_bus_reset(sc); callout_reset_sbt(&hcb->callout, PVSCSI_RESET_TIMEOUT * SBT_1S, 0, pvscsi_timeout, hcb, 0); break; case PVSCSI_HCB_BUS_RESET: pvscsi_freeze(sc); pvscsi_adapter_reset(sc); break; }; } static void pvscsi_process_completion(struct pvscsi_softc *sc, struct pvscsi_ring_cmp_desc *e) { struct pvscsi_hcb *hcb; union ccb *ccb; uint32_t status; uint32_t btstat; uint32_t sdstat; bus_dmasync_op_t op; hcb = 
pvscsi_context_to_hcb(sc, e->context); callout_stop(&hcb->callout); ccb = hcb->ccb; btstat = e->host_status; sdstat = e->scsi_status; ccb->csio.scsi_status = sdstat; ccb->csio.resid = ccb->csio.dxfer_len - e->data_len; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_POSTREAD; } else { op = BUS_DMASYNC_POSTWRITE; } bus_dmamap_sync(sc->buffer_dmat, hcb->dma_map, op); bus_dmamap_unload(sc->buffer_dmat, hcb->dma_map); } if (btstat == BTSTAT_SUCCESS && sdstat == SCSI_STATUS_OK) { DEBUG_PRINTF(3, sc->dev, "completing command context %llx success\n", (unsigned long long)e->context); ccb->csio.resid = 0; status = CAM_REQ_CMP; } else { switch (btstat) { case BTSTAT_SUCCESS: case BTSTAT_LINKED_COMMAND_COMPLETED: case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG: switch (sdstat) { case SCSI_STATUS_OK: ccb->csio.resid = 0; status = CAM_REQ_CMP; break; case SCSI_STATUS_CHECK_COND: status = CAM_SCSI_STATUS_ERROR; if (ccb->csio.sense_len != 0) { status |= CAM_AUTOSNS_VALID; memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); memcpy(&ccb->csio.sense_data, hcb->sense_buffer, MIN(ccb->csio.sense_len, e->sense_len)); } break; case SCSI_STATUS_BUSY: case SCSI_STATUS_QUEUE_FULL: status = CAM_REQUEUE_REQ; break; case SCSI_STATUS_CMD_TERMINATED: case SCSI_STATUS_TASK_ABORTED: status = CAM_REQ_ABORTED; break; default: DEBUG_PRINTF(1, sc->dev, "ccb: %p sdstat=0x%x\n", ccb, sdstat); status = CAM_SCSI_STATUS_ERROR; break; } break; case BTSTAT_SELTIMEO: status = CAM_SEL_TIMEOUT; break; case BTSTAT_DATARUN: case BTSTAT_DATA_UNDERRUN: status = CAM_DATA_RUN_ERR; break; case BTSTAT_ABORTQUEUE: case BTSTAT_HATIMEOUT: status = CAM_REQUEUE_REQ; break; case BTSTAT_NORESPONSE: case BTSTAT_SENTRST: case BTSTAT_RECVRST: case BTSTAT_BUSRESET: status = CAM_SCSI_BUS_RESET; break; case BTSTAT_SCSIPARITY: status = CAM_UNCOR_PARITY; break; case BTSTAT_BUSFREE: status = CAM_UNEXP_BUSFREE; break; case BTSTAT_INVPHASE: status = CAM_SEQUENCE_FAIL; break; case BTSTAT_SENSFAILED: status = CAM_AUTOSENSE_FAIL; break; case BTSTAT_LUNMISMATCH: case BTSTAT_TAGREJECT: case BTSTAT_DISCONNECT: case BTSTAT_BADMSG: case BTSTAT_INVPARAM: status = CAM_REQ_CMP_ERR; break; case BTSTAT_HASOFTWARE: case BTSTAT_HAHARDWARE: status = CAM_NO_HBA; break; default: device_printf(sc->dev, "unknown hba status: 0x%x\n", btstat); status = CAM_NO_HBA; break; } DEBUG_PRINTF(3, sc->dev, "completing command context %llx btstat %x sdstat %x - status %x\n", (unsigned long long)e->context, btstat, sdstat, status); } ccb->ccb_h.ccb_pvscsi_hcb = NULL; ccb->ccb_h.ccb_pvscsi_sc = NULL; pvscsi_hcb_put(sc, hcb); ccb->ccb_h.status = status | (ccb->ccb_h.status & ~(CAM_STATUS_MASK | CAM_SIM_QUEUED)); if (sc->frozen) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; sc->frozen = 0; } if (status != CAM_REQ_CMP) { ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1); } xpt_done(ccb); } static void pvscsi_process_cmp_ring(struct pvscsi_softc *sc) { struct pvscsi_ring_cmp_desc *ring; struct pvscsi_rings_state *s; struct pvscsi_ring_cmp_desc *e; uint32_t mask; mtx_assert(&sc->lock, MA_OWNED); s = sc->rings_state; ring = sc->cmp_ring; mask = MASK(s->cmp_num_entries_log2); while (s->cmp_cons_idx != s->cmp_prod_idx) { e = ring + (s->cmp_cons_idx & mask); pvscsi_process_completion(sc, e); mb(); s->cmp_cons_idx++; } } static void pvscsi_process_msg(struct pvscsi_softc *sc, struct pvscsi_ring_msg_desc *e) { struct pvscsi_ring_msg_dev_status_changed *desc; union ccb *ccb; switch 
(e->type) { case PVSCSI_MSG_DEV_ADDED: case PVSCSI_MSG_DEV_REMOVED: { desc = (struct pvscsi_ring_msg_dev_status_changed *)e; device_printf(sc->dev, "MSG: device %s at scsi%u:%u:%u\n", desc->type == PVSCSI_MSG_DEV_ADDED ? "addition" : "removal", desc->bus, desc->target, desc->lun[1]); ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { device_printf(sc->dev, "Error allocating CCB for dev change.\n"); break; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sc->sim), desc->target, desc->lun[1]) != CAM_REQ_CMP) { device_printf(sc->dev, "Error creating path for dev change.\n"); xpt_free_ccb(ccb); break; } xpt_rescan(ccb); } break; default: device_printf(sc->dev, "Unknown msg type 0x%x\n", e->type); }; } static void pvscsi_process_msg_ring(struct pvscsi_softc *sc) { struct pvscsi_ring_msg_desc *ring; struct pvscsi_rings_state *s; struct pvscsi_ring_msg_desc *e; uint32_t mask; mtx_assert(&sc->lock, MA_OWNED); s = sc->rings_state; ring = sc->msg_ring; mask = MASK(s->msg_num_entries_log2); while (s->msg_cons_idx != s->msg_prod_idx) { e = ring + (s->msg_cons_idx & mask); pvscsi_process_msg(sc, e); mb(); s->msg_cons_idx++; } } static void pvscsi_intr_locked(struct pvscsi_softc *sc) { uint32_t val; mtx_assert(&sc->lock, MA_OWNED); val = pvscsi_read_intr_status(sc); if ((val & PVSCSI_INTR_ALL_SUPPORTED) != 0) { pvscsi_write_intr_status(sc, val & PVSCSI_INTR_ALL_SUPPORTED); pvscsi_process_cmp_ring(sc); if (sc->use_msg) { pvscsi_process_msg_ring(sc); } } } static void pvscsi_intr(void *xsc) { struct pvscsi_softc *sc; sc = xsc; mtx_assert(&sc->lock, MA_NOTOWNED); mtx_lock(&sc->lock); pvscsi_intr_locked(xsc); mtx_unlock(&sc->lock); } static void pvscsi_poll(struct cam_sim *sim) { struct pvscsi_softc *sc; sc = cam_sim_softc(sim); mtx_assert(&sc->lock, MA_OWNED); pvscsi_intr_locked(sc); } static void pvscsi_execute_ccb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct pvscsi_hcb *hcb; struct pvscsi_ring_req_desc *e; union ccb *ccb; struct pvscsi_softc *sc; struct pvscsi_rings_state *s; uint8_t cdb0; bus_dmasync_op_t op; hcb = arg; ccb = hcb->ccb; e = hcb->e; sc = ccb->ccb_h.ccb_pvscsi_sc; s = sc->rings_state; mtx_assert(&sc->lock, MA_OWNED); if (error) { device_printf(sc->dev, "pvscsi_execute_ccb error %d\n", error); if (error == EFBIG) { ccb->ccb_h.status = CAM_REQ_TOO_BIG; } else { ccb->ccb_h.status = CAM_REQ_CMP_ERR; } pvscsi_hcb_put(sc, hcb); xpt_done(ccb); return; } e->flags = 0; op = 0; switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_NONE: e->flags |= PVSCSI_FLAG_CMD_DIR_NONE; break; case CAM_DIR_IN: e->flags |= PVSCSI_FLAG_CMD_DIR_TOHOST; op = BUS_DMASYNC_PREREAD; break; case CAM_DIR_OUT: e->flags |= PVSCSI_FLAG_CMD_DIR_TODEVICE; op = BUS_DMASYNC_PREWRITE; break; case CAM_DIR_BOTH: /* TODO: does this need handling? 
*/ break; } if (nseg != 0) { if (nseg > 1) { int i; struct pvscsi_sg_element *sge; KASSERT(nseg <= PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT, ("too many sg segments")); sge = hcb->sg_list->sge; e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST; for (i = 0; i < nseg; ++i) { sge[i].addr = segs[i].ds_addr; sge[i].length = segs[i].ds_len; sge[i].flags = 0; } e->data_addr = hcb->sg_list_paddr; } else { e->data_addr = segs->ds_addr; } bus_dmamap_sync(sc->buffer_dmat, hcb->dma_map, op); } else { e->data_addr = 0; } cdb0 = e->cdb[0]; ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { callout_reset_sbt(&hcb->callout, ccb->ccb_h.timeout * SBT_1MS, 0, pvscsi_timeout, hcb, 0); } mb(); s->req_prod_idx++; pvscsi_kick_io(sc, cdb0); } static void pvscsi_action(struct cam_sim *sim, union ccb *ccb) { struct pvscsi_softc *sc; struct ccb_hdr *ccb_h; sc = cam_sim_softc(sim); ccb_h = &ccb->ccb_h; mtx_assert(&sc->lock, MA_OWNED); switch (ccb_h->func_code) { case XPT_SCSI_IO: { struct ccb_scsiio *csio; uint32_t req_num_entries_log2; struct pvscsi_ring_req_desc *ring; struct pvscsi_ring_req_desc *e; struct pvscsi_rings_state *s; struct pvscsi_hcb *hcb; csio = &ccb->csio; ring = sc->req_ring; s = sc->rings_state; hcb = NULL; /* * Check if it was completed already (such as aborted * by upper layers) */ if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { xpt_done(ccb); return; } req_num_entries_log2 = s->req_num_entries_log2; if (s->req_prod_idx - s->cmp_cons_idx >= (1 << req_num_entries_log2)) { device_printf(sc->dev, "Not enough room on completion ring.\n"); pvscsi_freeze(sc); ccb_h->status = CAM_REQUEUE_REQ; goto finish_ccb; } hcb = pvscsi_hcb_get(sc); if (hcb == NULL) { device_printf(sc->dev, "No free hcbs.\n"); pvscsi_freeze(sc); ccb_h->status = CAM_REQUEUE_REQ; goto finish_ccb; } hcb->ccb = ccb; ccb_h->ccb_pvscsi_hcb = hcb; ccb_h->ccb_pvscsi_sc = sc; if (csio->cdb_len > sizeof(e->cdb)) { DEBUG_PRINTF(2, sc->dev, "cdb length %u too large\n", csio->cdb_len); ccb_h->status = CAM_REQ_INVALID; goto finish_ccb; } if (ccb_h->flags & CAM_CDB_PHYS) { DEBUG_PRINTF(2, sc->dev, "CAM_CDB_PHYS not implemented\n"); ccb_h->status = CAM_REQ_INVALID; goto finish_ccb; } e = ring + (s->req_prod_idx & MASK(req_num_entries_log2)); e->bus = cam_sim_bus(sim); e->target = ccb_h->target_id; memset(e->lun, 0, sizeof(e->lun)); e->lun[1] = ccb_h->target_lun; e->data_addr = 0; e->data_len = csio->dxfer_len; e->vcpu_hint = curcpu; e->cdb_len = csio->cdb_len; memcpy(e->cdb, scsiio_cdb_ptr(csio), csio->cdb_len); e->sense_addr = 0; e->sense_len = csio->sense_len; if (e->sense_len > 0) { e->sense_addr = hcb->sense_buffer_paddr; } e->tag = MSG_SIMPLE_Q_TAG; if (ccb_h->flags & CAM_TAG_ACTION_VALID) { e->tag = csio->tag_action; } e->context = pvscsi_hcb_to_context(sc, hcb); hcb->e = e; DEBUG_PRINTF(3, sc->dev, " queuing command %02x context %llx\n", e->cdb[0], (unsigned long long)e->context); bus_dmamap_load_ccb(sc->buffer_dmat, hcb->dma_map, ccb, pvscsi_execute_ccb, hcb, 0); break; finish_ccb: if (hcb != NULL) { pvscsi_hcb_put(sc, hcb); } xpt_done(ccb); } break; case XPT_ABORT: { struct pvscsi_hcb *abort_hcb; union ccb *abort_ccb; abort_ccb = ccb->cab.abort_ccb; abort_hcb = abort_ccb->ccb_h.ccb_pvscsi_hcb; if (abort_hcb->ccb != NULL && abort_hcb->ccb == abort_ccb) { if (abort_ccb->ccb_h.func_code == XPT_SCSI_IO) { pvscsi_abort(sc, ccb_h->target_id, abort_ccb); ccb_h->status = CAM_REQ_CMP; } else { ccb_h->status = CAM_UA_ABORT; } } else { device_printf(sc->dev, "Could not find hcb for ccb %p (tgt %u)\n", ccb, 
ccb_h->target_id); ccb_h->status = CAM_REQ_CMP; } xpt_done(ccb); } break; case XPT_RESET_DEV: { pvscsi_device_reset(sc, ccb_h->target_id); ccb_h->status = CAM_REQ_CMP; xpt_done(ccb); } break; case XPT_RESET_BUS: { pvscsi_bus_reset(sc); ccb_h->status = CAM_REQ_CMP; xpt_done(ccb); } break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi; cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; cpi->hba_eng_cnt = 0; /* cpi->vuhba_flags = 0; */ cpi->max_target = sc->max_targets; cpi->max_lun = 0; cpi->async_flags = 0; cpi->hpath_id = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = 7; cpi->base_transfer_speed = 750000; strlcpy(cpi->sim_vid, "VMware", SIM_IDLEN); strlcpy(cpi->hba_vid, "VMware", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); /* Limit I/O to 256k since we can't do 512k unaligned I/O */ cpi->maxio = (PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT / 2) * PAGE_SIZE; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC2; cpi->transport = XPORT_SAS; cpi->transport_version = 0; ccb_h->status = CAM_REQ_CMP; xpt_done(ccb); } break; case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; cts = &ccb->cts; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_SAS; cts->transport_version = 0; cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB; cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ; ccb_h->status = CAM_REQ_CMP; xpt_done(ccb); } break; case XPT_CALC_GEOMETRY: { cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); } break; default: ccb_h->status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void pvscsi_free_interrupts(struct pvscsi_softc *sc) { if (sc->irq_handler != NULL) { bus_teardown_intr(sc->dev, sc->irq_res, sc->irq_handler); } if (sc->irq_res != NULL) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_id, sc->irq_res); } if (sc->use_msi_or_msix) { pci_release_msi(sc->dev); } } static int pvscsi_setup_interrupts(struct pvscsi_softc *sc) { int error; int flags; int use_msix; int use_msi; int count; sc->use_msi_or_msix = 0; use_msix = pvscsi_get_tunable(sc, "use_msix", pvscsi_use_msix); use_msi = pvscsi_get_tunable(sc, "use_msi", pvscsi_use_msi); if (use_msix && pci_msix_count(sc->dev) > 0) { count = 1; if (pci_alloc_msix(sc->dev, &count) == 0 && count == 1) { sc->use_msi_or_msix = 1; device_printf(sc->dev, "Interrupt: MSI-X\n"); } else { pci_release_msi(sc->dev); } } if (sc->use_msi_or_msix == 0 && use_msi && pci_msi_count(sc->dev) > 0) { count = 1; if (pci_alloc_msi(sc->dev, &count) == 0 && count == 1) { sc->use_msi_or_msix = 1; device_printf(sc->dev, "Interrupt: MSI\n"); } else { pci_release_msi(sc->dev); } } flags = RF_ACTIVE; if (sc->use_msi_or_msix) { sc->irq_id = 1; } else { device_printf(sc->dev, "Interrupt: INT\n"); sc->irq_id = 0; flags |= RF_SHAREABLE; } sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_id, flags); if (sc->irq_res == NULL) { device_printf(sc->dev, "IRQ allocation failed\n"); if (sc->use_msi_or_msix) { pci_release_msi(sc->dev); } return (ENXIO); } error = bus_setup_intr(sc->dev, sc->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, NULL, pvscsi_intr, sc, &sc->irq_handler); if (error) { device_printf(sc->dev, "IRQ handler setup failed\n"); pvscsi_free_interrupts(sc); return (error); } return (0); } static void pvscsi_free_all(struct pvscsi_softc *sc) { if (sc->sim) { - int32_t status; + int error; if (sc->bus_path) { xpt_free_path(sc->bus_path); } - status = 
xpt_bus_deregister(cam_sim_path(sc->sim)); - if (status != CAM_REQ_CMP) { + error = xpt_bus_deregister(cam_sim_path(sc->sim)); + if (error != 0) { device_printf(sc->dev, - "Error deregistering bus, status=%d\n", status); + "Error deregistering bus, error %d\n", error); } cam_sim_free(sc->sim, TRUE); } pvscsi_dma_free_per_hcb(sc, sc->hcb_cnt); if (sc->hcbs) { free(sc->hcbs, M_PVSCSI); } pvscsi_free_rings(sc); pvscsi_free_interrupts(sc); if (sc->buffer_dmat != NULL) { bus_dma_tag_destroy(sc->buffer_dmat); } if (sc->parent_dmat != NULL) { bus_dma_tag_destroy(sc->parent_dmat); } if (sc->mm_res != NULL) { bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->mm_rid, sc->mm_res); } } static int pvscsi_attach(device_t dev) { struct pvscsi_softc *sc; int rid; int barid; int error; int max_queue_depth; int adapter_queue_size; struct cam_devq *devq; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->lock, "pvscsi", NULL, MTX_DEF); pci_enable_busmaster(dev); sc->mm_rid = -1; for (barid = 0; barid <= PCIR_MAX_BAR_0; ++barid) { rid = PCIR_BAR(barid); sc->mm_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mm_res != NULL) { sc->mm_rid = rid; break; } } if (sc->mm_res == NULL) { device_printf(dev, "could not map device memory\n"); return (ENXIO); } error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->parent_dmat); if (error) { device_printf(dev, "parent dma tag create failure, error %d\n", error); pvscsi_free_all(sc); return (ENXIO); } error = bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT * PAGE_SIZE, PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT, PAGE_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->buffer_dmat); if (error) { device_printf(dev, "parent dma tag create failure, error %d\n", error); pvscsi_free_all(sc); return (ENXIO); } error = pvscsi_setup_interrupts(sc); if (error) { device_printf(dev, "Interrupt setup failed\n"); pvscsi_free_all(sc); return (error); } sc->max_targets = pvscsi_get_max_targets(sc); sc->use_msg = pvscsi_get_tunable(sc, "use_msg", pvscsi_use_msg) && pvscsi_hw_supports_msg(sc); sc->msg_ring_num_pages = sc->use_msg ? 
1 : 0; sc->req_ring_num_pages = pvscsi_get_tunable(sc, "request_ring_pages", pvscsi_request_ring_pages); if (sc->req_ring_num_pages <= 0) { if (sc->max_targets <= 16) { sc->req_ring_num_pages = PVSCSI_DEFAULT_NUM_PAGES_REQ_RING; } else { sc->req_ring_num_pages = PVSCSI_MAX_NUM_PAGES_REQ_RING; } } else if (sc->req_ring_num_pages > PVSCSI_MAX_NUM_PAGES_REQ_RING) { sc->req_ring_num_pages = PVSCSI_MAX_NUM_PAGES_REQ_RING; } sc->cmp_ring_num_pages = sc->req_ring_num_pages; max_queue_depth = pvscsi_get_tunable(sc, "max_queue_depth", pvscsi_max_queue_depth); adapter_queue_size = (sc->req_ring_num_pages * PAGE_SIZE) / sizeof(struct pvscsi_ring_req_desc); if (max_queue_depth > 0) { adapter_queue_size = MIN(adapter_queue_size, max_queue_depth); } adapter_queue_size = MIN(adapter_queue_size, PVSCSI_MAX_REQ_QUEUE_DEPTH); device_printf(sc->dev, "Use Msg: %d\n", sc->use_msg); device_printf(sc->dev, "REQ num pages: %d\n", sc->req_ring_num_pages); device_printf(sc->dev, "CMP num pages: %d\n", sc->cmp_ring_num_pages); device_printf(sc->dev, "MSG num pages: %d\n", sc->msg_ring_num_pages); device_printf(sc->dev, "Queue size: %d\n", adapter_queue_size); if (pvscsi_allocate_rings(sc)) { device_printf(dev, "ring allocation failed\n"); pvscsi_free_all(sc); return (ENXIO); } sc->hcb_cnt = adapter_queue_size; sc->hcbs = malloc(sc->hcb_cnt * sizeof(*sc->hcbs), M_PVSCSI, M_NOWAIT | M_ZERO); if (sc->hcbs == NULL) { device_printf(dev, "error allocating hcb array\n"); pvscsi_free_all(sc); return (ENXIO); } if (pvscsi_dma_alloc_per_hcb(sc)) { device_printf(dev, "error allocating per hcb dma memory\n"); pvscsi_free_all(sc); return (ENXIO); } pvscsi_adapter_reset(sc); devq = cam_simq_alloc(adapter_queue_size); if (devq == NULL) { device_printf(dev, "cam devq alloc failed\n"); pvscsi_free_all(sc); return (ENXIO); } sc->sim = cam_sim_alloc(pvscsi_action, pvscsi_poll, "pvscsi", sc, device_get_unit(dev), &sc->lock, 1, adapter_queue_size, devq); if (sc->sim == NULL) { device_printf(dev, "cam sim alloc failed\n"); cam_simq_free(devq); pvscsi_free_all(sc); return (ENXIO); } mtx_lock(&sc->lock); if (xpt_bus_register(sc->sim, dev, 0) != CAM_SUCCESS) { device_printf(dev, "xpt bus register failed\n"); pvscsi_free_all(sc); mtx_unlock(&sc->lock); return (ENXIO); } if (xpt_create_path(&sc->bus_path, NULL, cam_sim_path(sc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "xpt create path failed\n"); pvscsi_free_all(sc); mtx_unlock(&sc->lock); return (ENXIO); } pvscsi_setup_rings(sc); if (sc->use_msg) { pvscsi_setup_msg_ring(sc); } sc->use_req_call_threshold = pvscsi_setup_req_call(sc, 1); pvscsi_intr_enable(sc); mtx_unlock(&sc->lock); return (0); } static int pvscsi_detach(device_t dev) { struct pvscsi_softc *sc; sc = device_get_softc(dev); pvscsi_intr_disable(sc); pvscsi_adapter_reset(sc); if (sc->irq_handler != NULL) { bus_teardown_intr(dev, sc->irq_res, sc->irq_handler); } mtx_lock(&sc->lock); pvscsi_free_all(sc); mtx_unlock(&sc->lock); mtx_destroy(&sc->lock); return (0); } static device_method_t pvscsi_methods[] = { DEVMETHOD(device_probe, pvscsi_probe), DEVMETHOD(device_shutdown, pvscsi_shutdown), DEVMETHOD(device_attach, pvscsi_attach), DEVMETHOD(device_detach, pvscsi_detach), DEVMETHOD_END }; static driver_t pvscsi_driver = { "pvscsi", pvscsi_methods, sizeof(struct pvscsi_softc) }; static devclass_t pvscsi_devclass; DRIVER_MODULE(pvscsi, pci, pvscsi_driver, pvscsi_devclass, 0, 0); MODULE_DEPEND(pvscsi, pci, 1, 1, 1); MODULE_DEPEND(pvscsi, cam, 1, 1, 1);
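The umass transform callbacks shown earlier (umass_scsi_transform, umass_rbc_transform, umass_ufi_transform, umass_atapi_transform) all share a small tri-state return convention that umass_std_transform dispatches on: 0 means the CDB is rejected (CAM_REQ_INVALID), 1 means the possibly rewritten command should be sent, and 2 means the command is completed as successful without ever reaching the device (as done for SYNCHRONIZE_CACHE on UFI). The stand-alone sketch below only illustrates that convention; the demo_* names and constants are hypothetical and not part of the driver.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Tri-state result used by the transform callbacks:
 * 0 = reject the CCB, 1 = run the (possibly rewritten) command,
 * 2 = pretend the command succeeded without sending it.
 */
enum { XFORM_FAIL = 0, XFORM_RUN = 1, XFORM_FAKE_OK = 2 };

#define DEMO_TEST_UNIT_READY    0x00
#define DEMO_SYNCHRONIZE_CACHE  0x35
#define DEMO_UFI_CMD_LEN        12

/* Hypothetical UFI-style transform: always emits a 12-byte, zero-padded CDB. */
static int
demo_ufi_transform(const uint8_t *cdb, uint8_t len, uint8_t out[DEMO_UFI_CMD_LEN])
{
	if (len == 0 || len > DEMO_UFI_CMD_LEN)
		return (XFORM_FAIL);
	memset(out, 0, DEMO_UFI_CMD_LEN);
	if (cdb[0] == DEMO_SYNCHRONIZE_CACHE)
		return (XFORM_FAKE_OK);	/* not a UFI command; fake success */
	memcpy(out, cdb, len);
	return (XFORM_RUN);
}

int
main(void)
{
	uint8_t out[DEMO_UFI_CMD_LEN];
	uint8_t tur[6] = { DEMO_TEST_UNIT_READY };
	uint8_t sync[10] = { DEMO_SYNCHRONIZE_CACHE };

	printf("TEST UNIT READY   -> %d\n", demo_ufi_transform(tur, sizeof(tur), out));
	printf("SYNCHRONIZE CACHE -> %d\n", demo_ufi_transform(sync, sizeof(sync), out));
	return (0);
}
```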
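The pvscsi request/completion rings above lean on two conventions that are easy to miss when reading the driver body: completion contexts are hcb array indices offset by one (so a context of zero never appears on the ring), and the producer/consumer indices are free-running 32-bit counters that are only masked down to a slot number when the ring is touched, so prod - cons is the number of outstanding entries even across wrap-around. The minimal sketch below illustrates only that arithmetic; the demo_* names are hypothetical and the 16-entry ring size is an arbitrary assumption.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_RING_LOG2	4u			/* 16-entry ring */
#define DEMO_MASK(log2)	((1u << (log2)) - 1u)	/* analogous to MASK(num_entries_log2) */

struct demo_hcb { int busy; };

static struct demo_hcb demo_hcbs[8];

/* Context is the hcb index plus one, so 0 can mean "no context". */
static uint64_t
demo_hcb_to_context(struct demo_hcb *hcb)
{
	return ((uint64_t)(hcb - demo_hcbs) + 1);
}

static struct demo_hcb *
demo_context_to_hcb(uint64_t context)
{
	return (&demo_hcbs[context - 1]);
}

int
main(void)
{
	uint32_t ring[1u << DEMO_RING_LOG2];
	uint32_t prod = 0, cons = 0;

	/* Round-trip the offset-by-one context encoding. */
	assert(demo_context_to_hcb(demo_hcb_to_context(&demo_hcbs[5])) == &demo_hcbs[5]);

	/* Free-running indices: post 20 entries, wrapping past the 16-entry ring. */
	for (uint32_t i = 0; i < 20; i++) {
		if (prod - cons >= (1u << DEMO_RING_LOG2)) {
			/* Ring full: the driver requeues the CCB in this case. */
			break;
		}
		ring[prod & DEMO_MASK(DEMO_RING_LOG2)] = i;
		prod++;
		if (prod - cons > 2)
			cons++;		/* pretend the device consumed an entry */
	}

	printf("outstanding entries: %u (prod=%u cons=%u)\n", prod - cons, prod, cons);
	return (0);
}
```

The same subtraction is what XPT_SCSI_IO handling uses to detect a full ring before allocating an hcb, and it stays correct when the 32-bit counters eventually wrap because unsigned subtraction is modular.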