Index: projects/physbio/sys/cam/cam_ccb.h =================================================================== --- projects/physbio/sys/cam/cam_ccb.h (revision 243875) +++ projects/physbio/sys/cam/cam_ccb.h (revision 243876) @@ -1,1297 +1,1303 @@ /*- * Data structures and definitions for CAM Control Blocks (CCBs). * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_CCB_H #define _CAM_CAM_CCB_H 1 #include #include #include #include #ifndef _KERNEL #include #endif #include #include #include /* General allocation length definitions for CCB structures */ #define IOCDBLEN CAM_MAX_CDBLEN /* Space for CDB bytes/pointer */ #define VUHBALEN 14 /* Vendor Unique HBA length */ #define SIM_IDLEN 16 /* ASCII string len for SIM ID */ #define HBA_IDLEN 16 /* ASCII string len for HBA ID */ #define DEV_IDLEN 16 /* ASCII string len for device names */ #define CCB_PERIPH_PRIV_SIZE 2 /* size of peripheral private area */ #define CCB_SIM_PRIV_SIZE 2 /* size of sim private area */ /* Struct definitions for CAM control blocks */ /* Common CCB header */ /* CAM CCB flags */ typedef enum { CAM_CDB_POINTER = 0x00000001,/* The CDB field is a pointer */ CAM_QUEUE_ENABLE = 0x00000002,/* SIM queue actions are enabled */ CAM_CDB_LINKED = 0x00000004,/* CCB contains a linked CDB */ CAM_NEGOTIATE = 0x00000008,/* * Perform transport negotiation * with this command. 
 */
-	CAM_SCATTER_VALID	= 0x00000010,/* Scatter/gather list is valid  */
-	CAM_DIS_AUTOSENSE	= 0x00000020,/* Disable autosense feature     */
+	CAM_DIS_AUTOSENSE	= 0x00000010,/* Disable autosense feature     */
 	CAM_DIR_BOTH		= 0x00000000,/* Data direction (00:IN/OUT)    */
-	CAM_DIR_IN		= 0x00000040,/* Data direction (01:DATA IN)   */
-	CAM_DIR_OUT		= 0x00000080,/* Data direction (10:DATA OUT)  */
-	CAM_DIR_NONE		= 0x000000C0,/* Data direction (11:no data)   */
-	CAM_DIR_MASK		= 0x000000C0,/* Data direction Mask	       */
-	CAM_SOFT_RST_OP		= 0x00000100,/* Use Soft reset alternative    */
-	CAM_ENG_SYNC		= 0x00000200,/* Flush resid bytes on complete */
-	CAM_DEV_QFRZDIS		= 0x00000400,/* Disable DEV Q freezing	       */
-	CAM_DEV_QFREEZE		= 0x00000800,/* Freeze DEV Q on execution     */
-	CAM_HIGH_POWER		= 0x00001000,/* Command takes a lot of power  */
-	CAM_SENSE_PTR		= 0x00002000,/* Sense data is a pointer	       */
-	CAM_SENSE_PHYS		= 0x00004000,/* Sense pointer is physical addr*/
-	CAM_TAG_ACTION_VALID	= 0x00008000,/* Use the tag action in this ccb*/
-	CAM_PASS_ERR_RECOVER	= 0x00010000,/* Pass driver does err. recovery*/
-	CAM_DIS_DISCONNECT	= 0x00020000,/* Disable disconnect	       */
-	CAM_SG_LIST_PHYS	= 0x00040000,/* SG list has physical addrs.   */
-	CAM_MSG_BUF_PHYS	= 0x00080000,/* Message buffer ptr is physical*/
-	CAM_SNS_BUF_PHYS	= 0x00100000,/* Autosense data ptr is physical*/
-	CAM_DATA_PHYS		= 0x00200000,/* SG/Buffer data ptrs are phys. */
+	CAM_DIR_IN		= 0x00000020,/* Data direction (01:DATA IN)   */
+	CAM_DIR_OUT		= 0x00000040,/* Data direction (10:DATA OUT)  */
+	CAM_DIR_NONE		= 0x00000060,/* Data direction (11:no data)   */
+	CAM_DIR_MASK		= 0x00000060,/* Data direction Mask	       */
+	CAM_DATA_ISPHYS		= 0x00000080,/* Data type with physical addrs */
+	CAM_DATA_VADDR		= 0x00000000,/* Data type (000:Virtual)       */
+	CAM_DATA_PADDR		= 0x00000080,/* Data type (001:Physical)      */
+	CAM_DATA_SG		= 0x00000100,/* Data type (010:sglist)        */
+	CAM_DATA_SG_PADDR	= 0x00000180,/* Data type (011:sglist phys)   */
+	CAM_DATA_BIO		= 0x00000200,/* Data type (100:bio)           */
+	CAM_DATA_MASK		= 0x00000380,/* Data type mask.               */
+	CAM_SOFT_RST_OP		= 0x00000400,/* Use Soft reset alternative    */
+	CAM_ENG_SYNC		= 0x00000800,/* Flush resid bytes on complete */
+	CAM_DEV_QFRZDIS		= 0x00001000,/* Disable DEV Q freezing	       */
+	CAM_DEV_QFREEZE		= 0x00002000,/* Freeze DEV Q on execution     */
+	CAM_HIGH_POWER		= 0x00004000,/* Command takes a lot of power  */
+	CAM_SENSE_PTR		= 0x00008000,/* Sense data is a pointer	       */
+	CAM_SENSE_PHYS		= 0x00010000,/* Sense pointer is physical addr*/
+	CAM_TAG_ACTION_VALID	= 0x00020000,/* Use the tag action in this ccb*/
+	CAM_PASS_ERR_RECOVER	= 0x00040000,/* Pass driver does err.
recovery*/ + CAM_DIS_DISCONNECT = 0x00080000,/* Disable disconnect */ + CAM_MSG_BUF_PHYS = 0x00100000,/* Message buffer ptr is physical*/ + CAM_SNS_BUF_PHYS = 0x00200000,/* Autosense data ptr is physical*/ + + CAM_CDB_PHYS = 0x00400000,/* CDB poiner is physical */ CAM_ENG_SGLIST = 0x00800000,/* SG list is for the HBA engine */ /* Phase cognizant mode flags */ CAM_DIS_AUTOSRP = 0x01000000,/* Disable autosave/restore ptrs */ CAM_DIS_AUTODISC = 0x02000000,/* Disable auto disconnect */ CAM_TGT_CCB_AVAIL = 0x04000000,/* Target CCB available */ CAM_TGT_PHASE_MODE = 0x08000000,/* The SIM runs in phase mode */ CAM_MSGB_VALID = 0x10000000,/* Message buffer valid */ CAM_STATUS_VALID = 0x20000000,/* Status buffer valid */ CAM_DATAB_VALID = 0x40000000,/* Data buffer valid */ /* Host target Mode flags */ CAM_SEND_SENSE = 0x08000000,/* Send sense data with status */ CAM_TERM_IO = 0x10000000,/* Terminate I/O Message sup. */ CAM_DISCONNECT = 0x20000000,/* Disconnects are mandatory */ CAM_SEND_STATUS = 0x40000000 /* Send status after data phase */ } ccb_flags; /* XPT Opcodes for xpt_action */ typedef enum { /* Function code flags are bits greater than 0xff */ XPT_FC_QUEUED = 0x100, /* Non-immediate function code */ XPT_FC_USER_CCB = 0x200, XPT_FC_XPT_ONLY = 0x400, /* Only for the transport layer device */ XPT_FC_DEV_QUEUED = 0x800 | XPT_FC_QUEUED, /* Passes through the device queues */ /* Common function commands: 0x00->0x0F */ XPT_NOOP = 0x00, /* Execute Nothing */ XPT_SCSI_IO = 0x01 | XPT_FC_DEV_QUEUED, /* Execute the requested I/O operation */ XPT_GDEV_TYPE = 0x02, /* Get type information for specified device */ XPT_GDEVLIST = 0x03, /* Get a list of peripheral devices */ XPT_PATH_INQ = 0x04, /* Path routing inquiry */ XPT_REL_SIMQ = 0x05, /* Release a frozen device queue */ XPT_SASYNC_CB = 0x06, /* Set Asynchronous Callback Parameters */ XPT_SDEV_TYPE = 0x07, /* Set device type information */ XPT_SCAN_BUS = 0x08 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* (Re)Scan the SCSI Bus */ XPT_DEV_MATCH = 0x09 | XPT_FC_XPT_ONLY, /* Get EDT entries matching the given pattern */ XPT_DEBUG = 0x0a, /* Turn on debugging for a bus, target or lun */ XPT_PATH_STATS = 0x0b, /* Path statistics (error counts, etc.) */ XPT_GDEV_STATS = 0x0c, /* Device statistics (error counts, etc.) */ XPT_FREEZE_QUEUE = 0x0d, /* Freeze device queue */ XPT_DEV_ADVINFO = 0x0e, /* Get/Set Device advanced information */ /* SCSI Control Functions: 0x10->0x1F */ XPT_ABORT = 0x10, /* Abort the specified CCB */ XPT_RESET_BUS = 0x11 | XPT_FC_XPT_ONLY, /* Reset the specified SCSI bus */ XPT_RESET_DEV = 0x12 | XPT_FC_DEV_QUEUED, /* Bus Device Reset the specified SCSI device */ XPT_TERM_IO = 0x13, /* Terminate the I/O process */ XPT_SCAN_LUN = 0x14 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Logical Unit */ XPT_GET_TRAN_SETTINGS = 0x15, /* * Get default/user transfer settings * for the target */ XPT_SET_TRAN_SETTINGS = 0x16, /* * Set transfer rate/width * negotiation settings */ XPT_CALC_GEOMETRY = 0x17, /* * Calculate the geometry parameters for * a device give the sector size and * volume size. */ XPT_ATA_IO = 0x18 | XPT_FC_DEV_QUEUED, /* Execute the requested ATA I/O operation */ XPT_GET_SIM_KNOB = 0x18, /* * Get SIM specific knob values. */ XPT_SET_SIM_KNOB = 0x19, /* * Set SIM specific knob values. 
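The hunk above is the heart of this change: the old independent hints (CAM_SCATTER_VALID, CAM_SG_LIST_PHYS, CAM_DATA_PHYS) are folded into a single three-bit data-type field, with CAM_DATA_BIO added for struct bio payloads. A minimal sketch of how consuming code can classify a CCB's payload under the new encoding follows; only the CAM_DATA_* names come from the header, the function itself is hypothetical.

/* Illustrative only: classify a CCB's payload under the new encoding. */
static const char *
example_ccb_data_type(const struct ccb_hdr *ccb_h)
{
	switch (ccb_h->flags & CAM_DATA_MASK) {
	case CAM_DATA_VADDR:
		return ("single buffer, kernel virtual address");
	case CAM_DATA_PADDR:
		return ("single buffer, physical address");
	case CAM_DATA_SG:
		return ("scatter/gather list of virtual addresses");
	case CAM_DATA_SG_PADDR:
		return ("scatter/gather list of physical addresses");
	case CAM_DATA_BIO:
		return ("struct bio");
	default:
		return ("unknown data type");
	}
}

Because CAM_DATA_PADDR and CAM_DATA_SG_PADDR both contain the CAM_DATA_ISPHYS bit, (flags & CAM_DATA_ISPHYS) remains a quick "uses physical addresses" test.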
*/ XPT_SMP_IO = 0x1b | XPT_FC_DEV_QUEUED, /* Serial Management Protocol */ XPT_SCAN_TGT = 0x1E | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Target */ /* HBA engine commands 0x20->0x2F */ XPT_ENG_INQ = 0x20 | XPT_FC_XPT_ONLY, /* HBA engine feature inquiry */ XPT_ENG_EXEC = 0x21 | XPT_FC_DEV_QUEUED, /* HBA execute engine request */ /* Target mode commands: 0x30->0x3F */ XPT_EN_LUN = 0x30, /* Enable LUN as a target */ XPT_TARGET_IO = 0x31 | XPT_FC_DEV_QUEUED, /* Execute target I/O request */ XPT_ACCEPT_TARGET_IO = 0x32 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Accept Host Target Mode CDB */ XPT_CONT_TARGET_IO = 0x33 | XPT_FC_DEV_QUEUED, /* Continue Host Target I/O Connection */ XPT_IMMED_NOTIFY = 0x34 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event (obsolete) */ XPT_NOTIFY_ACK = 0x35, /* Acknowledgement of event (obsolete) */ XPT_IMMEDIATE_NOTIFY = 0x36 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event */ XPT_NOTIFY_ACKNOWLEDGE = 0x37 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Acknowledgement of event */ /* Vendor Unique codes: 0x80->0x8F */ XPT_VUNIQUE = 0x80 } xpt_opcode; #define XPT_FC_GROUP_MASK 0xF0 #define XPT_FC_GROUP(op) ((op) & XPT_FC_GROUP_MASK) #define XPT_FC_GROUP_COMMON 0x00 #define XPT_FC_GROUP_SCSI_CONTROL 0x10 #define XPT_FC_GROUP_HBA_ENGINE 0x20 #define XPT_FC_GROUP_TMODE 0x30 #define XPT_FC_GROUP_VENDOR_UNIQUE 0x80 #define XPT_FC_IS_DEV_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_DEV_QUEUED) == XPT_FC_DEV_QUEUED) #define XPT_FC_IS_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_QUEUED) != 0) typedef enum { PROTO_UNKNOWN, PROTO_UNSPECIFIED, PROTO_SCSI, /* Small Computer System Interface */ PROTO_ATA, /* AT Attachment */ PROTO_ATAPI, /* AT Attachment Packetized Interface */ PROTO_SATAPM, /* SATA Port Multiplier */ PROTO_SEMB, /* SATA Enclosure Management Bridge */ } cam_proto; typedef enum { XPORT_UNKNOWN, XPORT_UNSPECIFIED, XPORT_SPI, /* SCSI Parallel Interface */ XPORT_FC, /* Fiber Channel */ XPORT_SSA, /* Serial Storage Architecture */ XPORT_USB, /* Universal Serial Bus */ XPORT_PPB, /* Parallel Port Bus */ XPORT_ATA, /* AT Attachment */ XPORT_SAS, /* Serial Attached SCSI */ XPORT_SATA, /* Serial AT Attachment */ XPORT_ISCSI, /* iSCSI */ } cam_xport; #define XPORT_IS_ATA(t) ((t) == XPORT_ATA || (t) == XPORT_SATA) #define XPORT_IS_SCSI(t) ((t) != XPORT_UNKNOWN && \ (t) != XPORT_UNSPECIFIED && \ !XPORT_IS_ATA(t)) #define XPORT_DEVSTAT_TYPE(t) (XPORT_IS_ATA(t) ? DEVSTAT_TYPE_IF_IDE : \ XPORT_IS_SCSI(t) ? 
DEVSTAT_TYPE_IF_SCSI : \ DEVSTAT_TYPE_IF_OTHER) #define PROTO_VERSION_UNKNOWN (UINT_MAX - 1) #define PROTO_VERSION_UNSPECIFIED UINT_MAX #define XPORT_VERSION_UNKNOWN (UINT_MAX - 1) #define XPORT_VERSION_UNSPECIFIED UINT_MAX typedef union { LIST_ENTRY(ccb_hdr) le; SLIST_ENTRY(ccb_hdr) sle; TAILQ_ENTRY(ccb_hdr) tqe; STAILQ_ENTRY(ccb_hdr) stqe; } camq_entry; typedef union { void *ptr; u_long field; u_int8_t bytes[sizeof(uintptr_t)]; } ccb_priv_entry; typedef union { ccb_priv_entry entries[CCB_PERIPH_PRIV_SIZE]; u_int8_t bytes[CCB_PERIPH_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_ppriv_area; typedef union { ccb_priv_entry entries[CCB_SIM_PRIV_SIZE]; u_int8_t bytes[CCB_SIM_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_spriv_area; struct ccb_hdr { cam_pinfo pinfo; /* Info for priority scheduling */ camq_entry xpt_links; /* For chaining in the XPT layer */ camq_entry sim_links; /* For chaining in the SIM layer */ camq_entry periph_links; /* For chaining in the type driver */ u_int32_t retry_count; void (*cbfcnp)(struct cam_periph *, union ccb *); /* Callback on completion function */ xpt_opcode func_code; /* XPT function code */ u_int32_t status; /* Status returned by CAM subsystem */ struct cam_path *path; /* Compiled path for this ccb */ path_id_t path_id; /* Path ID for the request */ target_id_t target_id; /* Target device ID */ lun_id_t target_lun; /* Target LUN number */ u_int32_t flags; /* ccb_flags */ ccb_ppriv_area periph_priv; ccb_spriv_area sim_priv; u_int32_t timeout; /* Timeout value */ /* * Deprecated, only for use by non-MPSAFE SIMs. All others must * allocate and initialize their own callout storage. */ struct callout_handle timeout_ch; }; /* Get Device Information CCB */ struct ccb_getdev { struct ccb_hdr ccb_h; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; u_int8_t serial_num[252]; u_int8_t inq_flags; u_int8_t serial_num_len; }; /* Device Statistics CCB */ struct ccb_getdevstats { struct ccb_hdr ccb_h; int dev_openings; /* Space left for more work on device*/ int dev_active; /* Transactions running on the device */ int devq_openings; /* Space left for more queued work */ int devq_queued; /* Transactions queued to be sent */ int held; /* * CCBs held by peripheral drivers * for this device */ int maxtags; /* * Boundary conditions for number of * tagged operations */ int mintags; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { CAM_GDEVLIST_LAST_DEVICE, CAM_GDEVLIST_LIST_CHANGED, CAM_GDEVLIST_MORE_DEVS, CAM_GDEVLIST_ERROR } ccb_getdevlist_status_e; struct ccb_getdevlist { struct ccb_hdr ccb_h; char periph_name[DEV_IDLEN]; u_int32_t unit_number; unsigned int generation; u_int32_t index; ccb_getdevlist_status_e status; }; typedef enum { PERIPH_MATCH_NONE = 0x000, PERIPH_MATCH_PATH = 0x001, PERIPH_MATCH_TARGET = 0x002, PERIPH_MATCH_LUN = 0x004, PERIPH_MATCH_NAME = 0x008, PERIPH_MATCH_UNIT = 0x010, PERIPH_MATCH_ANY = 0x01f } periph_pattern_flags; struct periph_match_pattern { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; periph_pattern_flags flags; }; typedef enum { DEV_MATCH_NONE = 0x000, DEV_MATCH_PATH = 0x001, DEV_MATCH_TARGET = 0x002, DEV_MATCH_LUN = 0x004, DEV_MATCH_INQUIRY = 0x008, DEV_MATCH_DEVID = 0x010, DEV_MATCH_ANY = 0x00f } dev_pattern_flags; struct device_id_match_pattern { uint8_t id_len; uint8_t id[256]; }; struct device_match_pattern { path_id_t path_id; target_id_t target_id; lun_id_t target_lun; dev_pattern_flags flags; union { 
struct scsi_static_inquiry_pattern inq_pat; struct device_id_match_pattern devid_pat; } data; }; typedef enum { BUS_MATCH_NONE = 0x000, BUS_MATCH_PATH = 0x001, BUS_MATCH_NAME = 0x002, BUS_MATCH_UNIT = 0x004, BUS_MATCH_BUS_ID = 0x008, BUS_MATCH_ANY = 0x00f } bus_pattern_flags; struct bus_match_pattern { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; bus_pattern_flags flags; }; union match_pattern { struct periph_match_pattern periph_pattern; struct device_match_pattern device_pattern; struct bus_match_pattern bus_pattern; }; typedef enum { DEV_MATCH_PERIPH, DEV_MATCH_DEVICE, DEV_MATCH_BUS } dev_match_type; struct dev_match_pattern { dev_match_type type; union match_pattern pattern; }; struct periph_match_result { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; }; typedef enum { DEV_RESULT_NOFLAG = 0x00, DEV_RESULT_UNCONFIGURED = 0x01 } dev_result_flags; struct device_match_result { path_id_t path_id; target_id_t target_id; lun_id_t target_lun; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; dev_result_flags flags; }; struct bus_match_result { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; }; union match_result { struct periph_match_result periph_result; struct device_match_result device_result; struct bus_match_result bus_result; }; struct dev_match_result { dev_match_type type; union match_result result; }; typedef enum { CAM_DEV_MATCH_LAST, CAM_DEV_MATCH_MORE, CAM_DEV_MATCH_LIST_CHANGED, CAM_DEV_MATCH_SIZE_ERROR, CAM_DEV_MATCH_ERROR } ccb_dev_match_status; typedef enum { CAM_DEV_POS_NONE = 0x000, CAM_DEV_POS_BUS = 0x001, CAM_DEV_POS_TARGET = 0x002, CAM_DEV_POS_DEVICE = 0x004, CAM_DEV_POS_PERIPH = 0x008, CAM_DEV_POS_PDPTR = 0x010, CAM_DEV_POS_TYPEMASK = 0xf00, CAM_DEV_POS_EDT = 0x100, CAM_DEV_POS_PDRV = 0x200 } dev_pos_type; struct ccb_dm_cookie { void *bus; void *target; void *device; void *periph; void *pdrv; }; struct ccb_dev_position { u_int generations[4]; #define CAM_BUS_GENERATION 0x00 #define CAM_TARGET_GENERATION 0x01 #define CAM_DEV_GENERATION 0x02 #define CAM_PERIPH_GENERATION 0x03 dev_pos_type position_type; struct ccb_dm_cookie cookie; }; struct ccb_dev_match { struct ccb_hdr ccb_h; ccb_dev_match_status status; u_int32_t num_patterns; u_int32_t pattern_buf_len; struct dev_match_pattern *patterns; u_int32_t num_matches; u_int32_t match_buf_len; struct dev_match_result *matches; struct ccb_dev_position pos; }; /* * Definitions for the path inquiry CCB fields. */ #define CAM_VERSION 0x16 /* Hex value for current version */ typedef enum { PI_MDP_ABLE = 0x80, /* Supports MDP message */ PI_WIDE_32 = 0x40, /* Supports 32 bit wide SCSI */ PI_WIDE_16 = 0x20, /* Supports 16 bit wide SCSI */ PI_SDTR_ABLE = 0x10, /* Supports SDTR message */ PI_LINKED_CDB = 0x08, /* Supports linked CDBs */ PI_SATAPM = 0x04, /* Supports SATA PM */ PI_TAG_ABLE = 0x02, /* Supports tag queue messages */ PI_SOFT_RST = 0x01 /* Supports soft reset alternative */ } pi_inqflag; typedef enum { PIT_PROCESSOR = 0x80, /* Target mode processor mode */ PIT_PHASE = 0x40, /* Target mode phase cog. 
mode */ PIT_DISCONNECT = 0x20, /* Disconnects supported in target mode */ PIT_TERM_IO = 0x10, /* Terminate I/O message supported in TM */ PIT_GRP_6 = 0x08, /* Group 6 commands supported */ PIT_GRP_7 = 0x04 /* Group 7 commands supported */ } pi_tmflag; typedef enum { PIM_SCANHILO = 0x80, /* Bus scans from high ID to low ID */ PIM_NOREMOVE = 0x40, /* Removeable devices not included in scan */ PIM_NOINITIATOR = 0x20, /* Initiator role not supported. */ PIM_NOBUSRESET = 0x10, /* User has disabled initial BUS RESET */ PIM_NO_6_BYTE = 0x08, /* Do not send 6-byte commands */ PIM_SEQSCAN = 0x04 /* Do bus scans sequentially, not in parallel */ } pi_miscflag; /* Path Inquiry CCB */ struct ccb_pathinq_settings_spi { u_int8_t ppr_options; }; struct ccb_pathinq_settings_fc { u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int32_t port; /* 24 bit port id, if known */ u_int32_t bitrate; /* Mbps */ }; struct ccb_pathinq_settings_sas { u_int32_t bitrate; /* Mbps */ }; #define PATHINQ_SETTINGS_SIZE 128 struct ccb_pathinq { struct ccb_hdr ccb_h; u_int8_t version_num; /* Version number for the SIM/HBA */ u_int8_t hba_inquiry; /* Mimic of INQ byte 7 for the HBA */ u_int8_t target_sprt; /* Flags for target mode support */ u_int8_t hba_misc; /* Misc HBA features */ u_int16_t hba_eng_cnt; /* HBA engine count */ /* Vendor Unique capabilities */ u_int8_t vuhba_flags[VUHBALEN]; u_int32_t max_target; /* Maximum supported Target */ u_int32_t max_lun; /* Maximum supported Lun */ u_int32_t async_flags; /* Installed Async handlers */ path_id_t hpath_id; /* Highest Path ID in the subsystem */ target_id_t initiator_id; /* ID of the HBA on the SCSI bus */ char sim_vid[SIM_IDLEN]; /* Vendor ID of the SIM */ char hba_vid[HBA_IDLEN]; /* Vendor ID of the HBA */ char dev_name[DEV_IDLEN];/* Device name for SIM */ u_int32_t unit_number; /* Unit number for SIM */ u_int32_t bus_id; /* Bus ID for SIM */ u_int32_t base_transfer_speed;/* Base bus speed in KB/sec */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { struct ccb_pathinq_settings_spi spi; struct ccb_pathinq_settings_fc fc; struct ccb_pathinq_settings_sas sas; char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE]; } xport_specific; u_int maxio; /* Max supported I/O size, in bytes. */ u_int16_t hba_vendor; /* HBA vendor ID */ u_int16_t hba_device; /* HBA device ID */ u_int16_t hba_subvendor; /* HBA subvendor ID */ u_int16_t hba_subdevice; /* HBA subdevice ID */ }; /* Path Statistics CCB */ struct ccb_pathstats { struct ccb_hdr ccb_h; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { SMP_FLAG_NONE = 0x00, SMP_FLAG_REQ_SG = 0x01, SMP_FLAG_RSP_SG = 0x02 } ccb_smp_pass_flags; /* * Serial Management Protocol CCB * XXX Currently the semantics for this CCB are that it is executed either * by the addressed device, or that device's parent (i.e. an expander for * any device on an expander) if the addressed device doesn't support SMP. * Later, once we have the ability to probe SMP-only devices and put them * in CAM's topology, the CCB will only be executed by the addressed device * if possible. 
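Since the comment above describes how SMP CCBs are routed, a short sketch of filling one may help. It uses cam_fill_smpio(), declared further down in this header, whose kernel asserts require CAM_DIR_BOTH and non-empty request and response buffers. The function name, retry count and timeout are illustrative, not taken from the source.

/* Illustrative only: build an SMP request CCB with the helper below. */
static void
example_fill_smp(union ccb *ccb,
    void (*done)(struct cam_periph *, union ccb *),
    uint8_t *req, int req_len, uint8_t *rsp, int rsp_len)
{
	cam_fill_smpio(&ccb->smpio,
		       /*retries*/ 4,		/* illustrative */
		       /*cbfcnp*/ done,
		       /*flags*/ CAM_DIR_BOTH,	/* required by the asserts */
		       req, req_len,
		       rsp, rsp_len,
		       /*timeout*/ 10000);	/* 10 s, illustrative */
}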
*/ struct ccb_smpio { struct ccb_hdr ccb_h; uint8_t *smp_request; int smp_request_len; uint16_t smp_request_sglist_cnt; uint8_t *smp_response; int smp_response_len; uint16_t smp_response_sglist_cnt; ccb_smp_pass_flags flags; }; typedef union { u_int8_t *sense_ptr; /* * Pointer to storage * for sense information */ /* Storage Area for sense information */ struct scsi_sense_data sense_buf; } sense_t; typedef union { u_int8_t *cdb_ptr; /* Pointer to the CDB bytes to send */ /* Area for the CDB send */ u_int8_t cdb_bytes[IOCDBLEN]; } cdb_t; /* * SCSI I/O Request CCB used for the XPT_SCSI_IO and XPT_CONT_TARGET_IO * function codes. */ struct ccb_scsiio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ u_int8_t *req_map; /* Ptr to mapping info */ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ /* Autosense storage */ struct scsi_sense_data sense_data; u_int8_t sense_len; /* Number of bytes to autosense */ u_int8_t cdb_len; /* Number of bytes for the CDB */ u_int16_t sglist_cnt; /* Number of SG list entries */ u_int8_t scsi_status; /* Returned SCSI status */ u_int8_t sense_resid; /* Autosense resid length: 2's comp */ u_int32_t resid; /* Transfer residual length: 2's comp */ cdb_t cdb_io; /* Union for CDB bytes/pointer */ u_int8_t *msg_ptr; /* Pointer to the message buffer */ u_int16_t msg_len; /* Number of bytes for the Message */ u_int8_t tag_action; /* What to do for tag queueing */ /* * The tag action should be either the define below (to send a * non-tagged transaction) or one of the defined scsi tag messages * from scsi_message.h. */ #define CAM_TAG_ACTION_NONE 0x00 u_int tag_id; /* tag id from initator (target mode) */ u_int init_id; /* initiator id of who selected */ }; /* * ATA I/O Request CCB used for the XPT_ATA_IO function code. */ struct ccb_ataio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct ata_cmd cmd; /* ATA command register set */ struct ata_res res; /* ATA result register set */ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ u_int32_t resid; /* Transfer residual length: 2's comp */ u_int8_t tag_action; /* What to do for tag queueing */ /* * The tag action should be either the define below (to send a * non-tagged transaction) or one of the defined scsi tag messages * from scsi_message.h. */ #define CAM_TAG_ACTION_NONE 0x00 u_int tag_id; /* tag id from initator (target mode) */ u_int init_id; /* initiator id of who selected */ }; struct ccb_accept_tio { struct ccb_hdr ccb_h; cdb_t cdb_io; /* Union for CDB bytes/pointer */ u_int8_t cdb_len; /* Number of bytes for the CDB */ u_int8_t tag_action; /* What to do for tag queueing */ u_int8_t sense_len; /* Number of bytes of Sense Data */ u_int tag_id; /* tag id from initator (target mode) */ u_int init_id; /* initiator id of who selected */ struct scsi_sense_data sense_data; }; /* Release SIM Queue */ struct ccb_relsim { struct ccb_hdr ccb_h; u_int32_t release_flags; #define RELSIM_ADJUST_OPENINGS 0x01 #define RELSIM_RELEASE_AFTER_TIMEOUT 0x02 #define RELSIM_RELEASE_AFTER_CMDCMPLT 0x04 #define RELSIM_RELEASE_AFTER_QEMPTY 0x08 #define RELSIM_RELEASE_RUNLEVEL 0x10 u_int32_t openings; u_int32_t release_timeout; /* Abstract argument. */ u_int32_t qfrozen_cnt; }; /* * Definitions for the asynchronous callback CCB fields. 
*/ typedef enum { AC_UNIT_ATTENTION = 0x4000,/* Device reported UNIT ATTENTION */ AC_ADVINFO_CHANGED = 0x2000,/* Advance info might have changes */ AC_CONTRACT = 0x1000,/* A contractual callback */ AC_GETDEV_CHANGED = 0x800,/* Getdev info might have changed */ AC_INQ_CHANGED = 0x400,/* Inquiry info might have changed */ AC_TRANSFER_NEG = 0x200,/* New transfer settings in effect */ AC_LOST_DEVICE = 0x100,/* A device went away */ AC_FOUND_DEVICE = 0x080,/* A new device was found */ AC_PATH_DEREGISTERED = 0x040,/* A path has de-registered */ AC_PATH_REGISTERED = 0x020,/* A new path has been registered */ AC_SENT_BDR = 0x010,/* A BDR message was sent to target */ AC_SCSI_AEN = 0x008,/* A SCSI AEN has been received */ AC_UNSOL_RESEL = 0x002,/* Unsolicited reselection occurred */ AC_BUS_RESET = 0x001 /* A SCSI bus reset occurred */ } ac_code; typedef void ac_callback_t (void *softc, u_int32_t code, struct cam_path *path, void *args); /* * Generic Asynchronous callbacks. * * Generic arguments passed bac which are then interpreted between a per-system * contract number. */ #define AC_CONTRACT_DATA_MAX (128 - sizeof (u_int64_t)) struct ac_contract { u_int64_t contract_number; u_int8_t contract_data[AC_CONTRACT_DATA_MAX]; }; #define AC_CONTRACT_DEV_CHG 1 struct ac_device_changed { u_int64_t wwpn; u_int32_t port; target_id_t target; u_int8_t arrived; }; /* Set Asynchronous Callback CCB */ struct ccb_setasync { struct ccb_hdr ccb_h; u_int32_t event_enable; /* Async Event enables */ ac_callback_t *callback; void *callback_arg; }; /* Set Device Type CCB */ struct ccb_setdev { struct ccb_hdr ccb_h; u_int8_t dev_type; /* Value for dev type field in EDT */ }; /* SCSI Control Functions */ /* Abort XPT request CCB */ struct ccb_abort { struct ccb_hdr ccb_h; union ccb *abort_ccb; /* Pointer to CCB to abort */ }; /* Reset SCSI Bus CCB */ struct ccb_resetbus { struct ccb_hdr ccb_h; }; /* Reset SCSI Device CCB */ struct ccb_resetdev { struct ccb_hdr ccb_h; }; /* Terminate I/O Process Request CCB */ struct ccb_termio { struct ccb_hdr ccb_h; union ccb *termio_ccb; /* Pointer to CCB to terminate */ }; typedef enum { CTS_TYPE_CURRENT_SETTINGS, CTS_TYPE_USER_SETTINGS } cts_type; struct ccb_trans_settings_scsi { u_int valid; /* Which fields to honor */ #define CTS_SCSI_VALID_TQ 0x01 u_int flags; #define CTS_SCSI_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_ata { u_int valid; /* Which fields to honor */ #define CTS_ATA_VALID_TQ 0x01 u_int flags; #define CTS_ATA_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_spi { u_int valid; /* Which fields to honor */ #define CTS_SPI_VALID_SYNC_RATE 0x01 #define CTS_SPI_VALID_SYNC_OFFSET 0x02 #define CTS_SPI_VALID_BUS_WIDTH 0x04 #define CTS_SPI_VALID_DISC 0x08 #define CTS_SPI_VALID_PPR_OPTIONS 0x10 u_int flags; #define CTS_SPI_FLAGS_DISC_ENB 0x01 u_int sync_period; u_int sync_offset; u_int bus_width; u_int ppr_options; }; struct ccb_trans_settings_fc { u_int valid; /* Which fields to honor */ #define CTS_FC_VALID_WWNN 0x8000 #define CTS_FC_VALID_WWPN 0x4000 #define CTS_FC_VALID_PORT 0x2000 #define CTS_FC_VALID_SPEED 0x1000 u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int32_t port; /* 24 bit port id, if known */ u_int32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_sas { u_int valid; /* Which fields to honor */ #define CTS_SAS_VALID_SPEED 0x1000 u_int32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_pata { u_int valid; /* Which fields to honor */ #define CTS_ATA_VALID_MODE 0x01 #define CTS_ATA_VALID_BYTECOUNT 0x02 #define 
CTS_ATA_VALID_ATAPI 0x20 int mode; /* Mode */ u_int bytecount; /* Length of PIO transaction */ u_int atapi; /* Length of ATAPI CDB */ }; struct ccb_trans_settings_sata { u_int valid; /* Which fields to honor */ #define CTS_SATA_VALID_MODE 0x01 #define CTS_SATA_VALID_BYTECOUNT 0x02 #define CTS_SATA_VALID_REVISION 0x04 #define CTS_SATA_VALID_PM 0x08 #define CTS_SATA_VALID_TAGS 0x10 #define CTS_SATA_VALID_ATAPI 0x20 #define CTS_SATA_VALID_CAPS 0x40 int mode; /* Legacy PATA mode */ u_int bytecount; /* Length of PIO transaction */ int revision; /* SATA revision */ u_int pm_present; /* PM is present (XPT->SIM) */ u_int tags; /* Number of allowed tags */ u_int atapi; /* Length of ATAPI CDB */ u_int caps; /* Device and host SATA caps. */ #define CTS_SATA_CAPS_H 0x0000ffff #define CTS_SATA_CAPS_H_PMREQ 0x00000001 #define CTS_SATA_CAPS_H_APST 0x00000002 #define CTS_SATA_CAPS_H_DMAAA 0x00000010 /* Auto-activation */ #define CTS_SATA_CAPS_H_AN 0x00000020 /* Async. notification */ #define CTS_SATA_CAPS_D 0xffff0000 #define CTS_SATA_CAPS_D_PMREQ 0x00010000 #define CTS_SATA_CAPS_D_APST 0x00020000 }; /* Get/Set transfer rate/width/disconnection/tag queueing settings */ struct ccb_trans_settings { struct ccb_hdr ccb_h; cts_type type; /* Current or User settings */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_ata ata; struct ccb_trans_settings_scsi scsi; } proto_specific; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_spi spi; struct ccb_trans_settings_fc fc; struct ccb_trans_settings_sas sas; struct ccb_trans_settings_pata ata; struct ccb_trans_settings_sata sata; } xport_specific; }; /* * Calculate the geometry parameters for a device * give the block size and volume size in blocks. */ struct ccb_calc_geometry { struct ccb_hdr ccb_h; u_int32_t block_size; u_int64_t volume_size; u_int32_t cylinders; u_int8_t heads; u_int8_t secs_per_track; }; /* * Set or get SIM (and transport) specific knobs */ #define KNOB_VALID_ADDRESS 0x1 #define KNOB_VALID_ROLE 0x2 #define KNOB_ROLE_NONE 0x0 #define KNOB_ROLE_INITIATOR 0x1 #define KNOB_ROLE_TARGET 0x2 #define KNOB_ROLE_BOTH 0x3 struct ccb_sim_knob_settings_spi { u_int valid; u_int initiator_id; u_int role; }; struct ccb_sim_knob_settings_fc { u_int valid; u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int role; }; struct ccb_sim_knob_settings_sas { u_int valid; u_int64_t wwnn; /* world wide node name */ u_int role; }; #define KNOB_SETTINGS_SIZE 128 struct ccb_sim_knob { struct ccb_hdr ccb_h; union { u_int valid; /* Which fields to honor */ struct ccb_sim_knob_settings_spi spi; struct ccb_sim_knob_settings_fc fc; struct ccb_sim_knob_settings_sas sas; char pad[KNOB_SETTINGS_SIZE]; } xport_specific; }; /* * Rescan the given bus, or bus/target/lun */ struct ccb_rescan { struct ccb_hdr ccb_h; cam_flags flags; }; /* * Turn on debugging for the given bus, bus/target, or bus/target/lun. */ struct ccb_debug { struct ccb_hdr ccb_h; cam_debug_flags flags; }; /* Target mode structures. 
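As a usage illustration for the transfer-settings CCB defined above, the following hedged sketch reads the current (negotiated) settings for a device. The helper name is hypothetical, CAM_PRIORITY_NORMAL and CAM_STATUS_MASK are assumed to come from cam.h, and error handling is reduced to a single errno value.

/*
 * Illustrative only: read the current transfer settings for the device
 * behind 'path' via XPT_GET_TRAN_SETTINGS.
 */
static int
example_get_transfer_settings(struct cam_path *path,
    struct ccb_trans_settings *cts)
{
	bzero(cts, sizeof(*cts));
	xpt_setup_ccb(&cts->ccb_h, path, CAM_PRIORITY_NORMAL);
	cts->ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts->type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb *)cts);
	if ((cts->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		return (EIO);
	return (0);
}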
*/ struct ccb_en_lun { struct ccb_hdr ccb_h; u_int16_t grp6_len; /* Group 6 VU CDB length */ u_int16_t grp7_len; /* Group 7 VU CDB length */ u_int8_t enable; }; /* old, barely used immediate notify, binary compatibility */ struct ccb_immed_notify { struct ccb_hdr ccb_h; struct scsi_sense_data sense_data; u_int8_t sense_len; /* Number of bytes in sense buffer */ u_int8_t initiator_id; /* Id of initiator that selected */ u_int8_t message_args[7]; /* Message Arguments */ }; struct ccb_notify_ack { struct ccb_hdr ccb_h; u_int16_t seq_id; /* Sequence identifier */ u_int8_t event; /* Event flags */ }; struct ccb_immediate_notify { struct ccb_hdr ccb_h; u_int tag_id; /* Tag for immediate notify */ u_int seq_id; /* Tag for target of notify */ u_int initiator_id; /* Initiator Identifier */ u_int arg; /* Function specific */ }; struct ccb_notify_acknowledge { struct ccb_hdr ccb_h; u_int tag_id; /* Tag for immediate notify */ u_int seq_id; /* Tar for target of notify */ u_int initiator_id; /* Initiator Identifier */ u_int arg; /* Function specific */ }; /* HBA engine structures. */ typedef enum { EIT_BUFFER, /* Engine type: buffer memory */ EIT_LOSSLESS, /* Engine type: lossless compression */ EIT_LOSSY, /* Engine type: lossy compression */ EIT_ENCRYPT /* Engine type: encryption */ } ei_type; typedef enum { EAD_VUNIQUE, /* Engine algorithm ID: vendor unique */ EAD_LZ1V1, /* Engine algorithm ID: LZ1 var.1 */ EAD_LZ2V1, /* Engine algorithm ID: LZ2 var.1 */ EAD_LZ2V2 /* Engine algorithm ID: LZ2 var.2 */ } ei_algo; struct ccb_eng_inq { struct ccb_hdr ccb_h; u_int16_t eng_num; /* The engine number for this inquiry */ ei_type eng_type; /* Returned engine type */ ei_algo eng_algo; /* Returned engine algorithm type */ u_int32_t eng_memeory; /* Returned engine memory size */ }; struct ccb_eng_exec { /* This structure must match SCSIIO size */ struct ccb_hdr ccb_h; u_int8_t *pdrv_ptr; /* Ptr used by the peripheral driver */ u_int8_t *req_map; /* Ptr for mapping info on the req. */ u_int8_t *data_ptr; /* Pointer to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ u_int8_t *engdata_ptr; /* Pointer to the engine buffer data */ u_int16_t sglist_cnt; /* Num of scatter gather list entries */ u_int32_t dmax_len; /* Destination data maximum length */ u_int32_t dest_len; /* Destination data length */ int32_t src_resid; /* Source residual length: 2's comp */ u_int32_t timeout; /* Timeout value */ u_int16_t eng_num; /* Engine number for this request */ u_int16_t vu_flags; /* Vendor Unique flags */ }; /* * Definitions for the timeout field in the SCSI I/O CCB. */ #define CAM_TIME_DEFAULT 0x00000000 /* Use SIM default value */ #define CAM_TIME_INFINITY 0xFFFFFFFF /* Infinite timeout */ #define CAM_SUCCESS 0 /* For signaling general success */ #define CAM_FAILURE 1 /* For signaling general failure */ #define CAM_FALSE 0 #define CAM_TRUE 1 #define XPT_CCB_INVALID -1 /* for signaling a bad CCB to free */ /* * CCB for working with advanced device information. This operates in a fashion * similar to XPT_GDEV_TYPE. Specify the target in ccb_h, the buffer * type requested, and provide a buffer size/buffer to write to. If the * buffer is too small, provsiz will be larger than bufsiz. 
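Given the description above of how XPT_DEV_ADVINFO buffers are sized, here is a hedged kernel-side sketch of fetching a device's serial number with the CCB defined just below. The helper name is hypothetical; the priority and status-mask constants are assumed from cam.h.

/*
 * Illustrative only: fetch the serial number of the device behind 'path'
 * via XPT_DEV_ADVINFO.
 */
static int
example_get_serial(struct cam_path *path, char *buf, size_t buflen)
{
	struct ccb_dev_advinfo cdai;

	bzero(&cdai, sizeof(cdai));
	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
	cdai.flags = 0;			/* retrieve; CDAI_FLAG_STORE not set */
	cdai.buftype = CDAI_TYPE_SERIAL_NUM;
	cdai.bufsiz = buflen;
	cdai.buf = (uint8_t *)buf;

	xpt_action((union ccb *)&cdai);
	if ((cdai.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		return (EIO);
	if (cdai.provsiz > cdai.bufsiz)
		return (ENOMEM);	/* caller's buffer was too small */
	return (0);
}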
*/ struct ccb_dev_advinfo { struct ccb_hdr ccb_h; uint32_t flags; #define CDAI_FLAG_STORE 0x1 /* If set, action becomes store */ uint32_t buftype; /* IN: Type of data being requested */ /* NB: buftype is interpreted on a per-transport basis */ #define CDAI_TYPE_SCSI_DEVID 1 #define CDAI_TYPE_SERIAL_NUM 2 #define CDAI_TYPE_PHYS_PATH 3 #define CDAI_TYPE_RCAPLONG 4 off_t bufsiz; /* IN: Size of external buffer */ #define CAM_SCSI_DEVID_MAXLEN 65536 /* length in buffer is an uint16_t */ off_t provsiz; /* OUT: Size required/used */ uint8_t *buf; /* IN/OUT: Buffer for requested data */ }; /* * Union of all CCB types for kernel space allocation. This union should * never be used for manipulating CCBs - its only use is for the allocation * and deallocation of raw CCB space and is the return type of xpt_ccb_alloc * and the argument to xpt_ccb_free. */ union ccb { struct ccb_hdr ccb_h; /* For convenience */ struct ccb_scsiio csio; struct ccb_getdev cgd; struct ccb_getdevlist cgdl; struct ccb_pathinq cpi; struct ccb_relsim crs; struct ccb_setasync csa; struct ccb_setdev csd; struct ccb_pathstats cpis; struct ccb_getdevstats cgds; struct ccb_dev_match cdm; struct ccb_trans_settings cts; struct ccb_calc_geometry ccg; struct ccb_sim_knob knob; struct ccb_abort cab; struct ccb_resetbus crb; struct ccb_resetdev crd; struct ccb_termio tio; struct ccb_accept_tio atio; struct ccb_scsiio ctio; struct ccb_en_lun cel; struct ccb_immed_notify cin; struct ccb_notify_ack cna; struct ccb_immediate_notify cin1; struct ccb_notify_acknowledge cna2; struct ccb_eng_inq cei; struct ccb_eng_exec cee; struct ccb_smpio smpio; struct ccb_rescan crcn; struct ccb_debug cdbg; struct ccb_ataio ataio; struct ccb_dev_advinfo cdai; }; __BEGIN_DECLS static __inline void cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int8_t cdb_len, u_int32_t timeout); static __inline void cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int tag_id, u_int init_id, u_int scsi_status, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout); static __inline void cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout); static __inline void cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint8_t *smp_request, int smp_request_len, uint8_t *smp_response, int smp_response_len, uint32_t timeout); static __inline void cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int8_t cdb_len, u_int32_t timeout) { csio->ccb_h.func_code = XPT_SCSI_IO; csio->ccb_h.flags = flags; csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->sense_len = sense_len; csio->cdb_len = cdb_len; csio->tag_action = tag_action; } static __inline void cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int tag_id, u_int init_id, u_int scsi_status, u_int8_t *data_ptr, 
u_int32_t dxfer_len, u_int32_t timeout) { csio->ccb_h.func_code = XPT_CONT_TARGET_IO; csio->ccb_h.flags = flags; csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->scsi_status = scsi_status; csio->tag_action = tag_action; csio->tag_id = tag_id; csio->init_id = init_id; } static __inline void cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { ataio->ccb_h.func_code = XPT_ATA_IO; ataio->ccb_h.flags = flags; ataio->ccb_h.retry_count = retries; ataio->ccb_h.cbfcnp = cbfcnp; ataio->ccb_h.timeout = timeout; ataio->data_ptr = data_ptr; ataio->dxfer_len = dxfer_len; ataio->tag_action = tag_action; } static __inline void cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint8_t *smp_request, int smp_request_len, uint8_t *smp_response, int smp_response_len, uint32_t timeout) { #ifdef _KERNEL KASSERT((flags & CAM_DIR_MASK) == CAM_DIR_BOTH, ("direction != CAM_DIR_BOTH")); KASSERT((smp_request != NULL) && (smp_response != NULL), ("need valid request and response buffers")); KASSERT((smp_request_len != 0) && (smp_response_len != 0), ("need non-zero request and response lengths")); #endif /*_KERNEL*/ smpio->ccb_h.func_code = XPT_SMP_IO; smpio->ccb_h.flags = flags; smpio->ccb_h.retry_count = retries; smpio->ccb_h.cbfcnp = cbfcnp; smpio->ccb_h.timeout = timeout; smpio->smp_request = smp_request; smpio->smp_request_len = smp_request_len; smpio->smp_response = smp_response; smpio->smp_response_len = smp_response_len; } void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended); __END_DECLS #endif /* _CAM_CAM_CCB_H */ Index: projects/physbio/sys/cam/cam_xpt.c =================================================================== --- projects/physbio/sys/cam/cam_xpt.c (revision 243875) +++ projects/physbio/sys/cam/cam_xpt.c (revision 243876) @@ -1,5120 +1,5121 @@ /*- * Implementation of the Common Access Method Transport (XPT) layer. * * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* geometry translation */ #include /* for xpt_print below */ #include "opt_cam.h" /* * This is the maximum number of high powered commands (e.g. start unit) * that can be outstanding at a particular time. */ #ifndef CAM_MAX_HIGHPOWER #define CAM_MAX_HIGHPOWER 4 #endif /* Datastructures internal to the xpt layer */ MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers"); MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices"); MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs"); MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths"); /* Object for defering XPT actions to a taskqueue */ struct xpt_task { struct task task; void *data1; uintptr_t data2; }; typedef enum { XPT_FLAG_OPEN = 0x01 } xpt_flags; struct xpt_softc { xpt_flags flags; u_int32_t xpt_generation; /* number of high powered commands that can go through right now */ STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq; int num_highpower; /* queue for handling async rescan requests. */ TAILQ_HEAD(, ccb_hdr) ccb_scanq; int buses_to_config; int buses_config_done; /* Registered busses */ TAILQ_HEAD(,cam_eb) xpt_busses; u_int bus_generation; struct intr_config_hook *xpt_config_hook; int boot_delay; struct callout boot_callout; struct mtx xpt_topo_lock; struct mtx xpt_lock; }; typedef enum { DM_RET_COPY = 0x01, DM_RET_FLAG_MASK = 0x0f, DM_RET_NONE = 0x00, DM_RET_STOP = 0x10, DM_RET_DESCEND = 0x20, DM_RET_ERROR = 0x30, DM_RET_ACTION_MASK = 0xf0 } dev_match_ret; typedef enum { XPT_DEPTH_BUS, XPT_DEPTH_TARGET, XPT_DEPTH_DEVICE, XPT_DEPTH_PERIPH } xpt_traverse_depth; struct xpt_traverse_config { xpt_traverse_depth depth; void *tr_func; void *tr_arg; }; typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg); typedef int xpt_targetfunc_t (struct cam_et *target, void *arg); typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg); typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg); typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg); /* Transport layer configuration information */ static struct xpt_softc xsoftc; TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay); SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN, &xsoftc.boot_delay, 0, "Bus registration wait time"); /* Queues for our software interrupt handler */ typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t; typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t; static cam_simq_t cam_simq; static struct mtx cam_simq_lock; /* Pointers to software interrupt handlers */ static void *cambio_ih; struct cam_periph *xpt_periph; static periph_init_t xpt_periph_init; static struct periph_driver xpt_driver = { xpt_periph_init, "xpt", TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(xpt, xpt_driver); 
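For reference, a hedged sketch of how a peripheral driver might use cam_fill_csio() from cam_ccb.h above under the new flag scheme: a plain kernel-virtual buffer needs no explicit data-type bit because CAM_DATA_VADDR is zero, so only the direction is passed. MSG_SIMPLE_Q_TAG and SSD_FULL_SIZE are assumed from scsi_message.h/scsi_all.h; the retry count, CDB length and timeout are illustrative.

/*
 * Illustrative only: fill a SCSI READ-style CCB.  A kernel-virtual data
 * buffer needs no data-type flag because CAM_DATA_VADDR is zero.
 */
static void
example_fill_read(struct ccb_scsiio *csio,
    void (*done)(struct cam_periph *, union ccb *),
    uint8_t *buf, uint32_t len)
{
	cam_fill_csio(csio,
		      /*retries*/ 4,
		      /*cbfcnp*/ done,
		      /*flags*/ CAM_DIR_IN,
		      /*tag_action*/ MSG_SIMPLE_Q_TAG,
		      /*data_ptr*/ buf,
		      /*dxfer_len*/ len,
		      /*sense_len*/ SSD_FULL_SIZE,
		      /*cdb_len*/ 10,
		      /*timeout*/ 60 * 1000);
	/* The CDB bytes themselves go into csio->cdb_io.cdb_bytes. */
}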
static d_open_t xptopen; static d_close_t xptclose; static d_ioctl_t xptioctl; static struct cdevsw xpt_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = xptopen, .d_close = xptclose, .d_ioctl = xptioctl, .d_name = "xpt", }; /* Storage for debugging datastructures */ struct cam_path *cam_dpath; u_int32_t cam_dflags = CAM_DEBUG_FLAGS; TUNABLE_INT("kern.cam.dflags", &cam_dflags); SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RW, &cam_dflags, 0, "Enabled debug flags"); u_int32_t cam_debug_delay = CAM_DEBUG_DELAY; TUNABLE_INT("kern.cam.debug_delay", &cam_debug_delay); SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RW, &cam_debug_delay, 0, "Delay in us after each debug message"); /* Our boot-time initialization hook */ static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *); static moduledata_t cam_moduledata = { "cam", cam_module_event_handler, NULL }; static int xpt_init(void *); DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND); MODULE_VERSION(cam, 1); static void xpt_async_bcast(struct async_list *async_head, u_int32_t async_code, struct cam_path *path, void *async_arg); static path_id_t xptnextfreepathid(void); static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus); static union ccb *xpt_get_ccb(struct cam_ed *device); static void xpt_run_dev_allocq(struct cam_eb *bus); static void xpt_run_dev_sendq(struct cam_eb *bus); static timeout_t xpt_release_devq_timeout; static void xpt_release_simq_timeout(void *arg) __unused; static void xpt_release_bus(struct cam_eb *bus); static void xpt_release_devq_device(struct cam_ed *dev, cam_rl rl, u_int count, int run_queue); static struct cam_et* xpt_alloc_target(struct cam_eb *bus, target_id_t target_id); static void xpt_release_target(struct cam_et *target); static struct cam_eb* xpt_find_bus(path_id_t path_id); static struct cam_et* xpt_find_target(struct cam_eb *bus, target_id_t target_id); static struct cam_ed* xpt_find_device(struct cam_et *target, lun_id_t lun_id); static void xpt_config(void *arg); static xpt_devicefunc_t xptpassannouncefunc; static void xptaction(struct cam_sim *sim, union ccb *work_ccb); static void xptpoll(struct cam_sim *sim); static void camisr(void *); static void camisr_runqueue(void *); static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_eb *bus); static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_ed *device); static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_periph *periph); static xpt_busfunc_t xptedtbusfunc; static xpt_targetfunc_t xptedttargetfunc; static xpt_devicefunc_t xptedtdevicefunc; static xpt_periphfunc_t xptedtperiphfunc; static xpt_pdrvfunc_t xptplistpdrvfunc; static xpt_periphfunc_t xptplistperiphfunc; static int xptedtmatch(struct ccb_dev_match *cdm); static int xptperiphlistmatch(struct ccb_dev_match *cdm); static int xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg); static int xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, xpt_targetfunc_t *tr_func, void *arg); static int xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, xpt_devicefunc_t *tr_func, void *arg); static int xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg); static int xptpdrvtraverse(struct periph_driver **start_pdrv, xpt_pdrvfunc_t *tr_func, void *arg); static int 
xptpdperiphtraverse(struct periph_driver **pdrv, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg); static xpt_busfunc_t xptdefbusfunc; static xpt_targetfunc_t xptdeftargetfunc; static xpt_devicefunc_t xptdefdevicefunc; static xpt_periphfunc_t xptdefperiphfunc; static void xpt_finishconfig_task(void *context, int pending); static void xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg); static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); static xpt_devicefunc_t xptsetasyncfunc; static xpt_busfunc_t xptsetasyncbusfunc; static cam_status xptregister(struct cam_periph *periph, void *arg); static __inline int periph_is_queued(struct cam_periph *periph); static __inline int device_is_alloc_queued(struct cam_ed *device); static __inline int device_is_send_queued(struct cam_ed *device); static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev) { int retval; if ((dev->drvq.entries > 0) && (dev->ccbq.devq_openings > 0) && (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL( CAMQ_GET_PRIO(&dev->drvq))) == 0)) { /* * The priority of a device waiting for CCB resources * is that of the highest priority peripheral driver * enqueued. */ retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue, &dev->alloc_ccb_entry.pinfo, CAMQ_GET_PRIO(&dev->drvq)); } else { retval = 0; } return (retval); } static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev) { int retval; if ((dev->ccbq.queue.entries > 0) && (dev->ccbq.dev_openings > 0) && (cam_ccbq_frozen_top(&dev->ccbq) == 0)) { /* * The priority of a device waiting for controller * resources is that of the highest priority CCB * enqueued. */ retval = xpt_schedule_dev(&bus->sim->devq->send_queue, &dev->send_ccb_entry.pinfo, CAMQ_GET_PRIO(&dev->ccbq.queue)); } else { retval = 0; } return (retval); } static __inline int periph_is_queued(struct cam_periph *periph) { return (periph->pinfo.index != CAM_UNQUEUED_INDEX); } static __inline int device_is_alloc_queued(struct cam_ed *device) { return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX); } static __inline int device_is_send_queued(struct cam_ed *device) { return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX); } static void xpt_periph_init() { make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0"); } static void xptdone(struct cam_periph *periph, union ccb *done_ccb) { /* Caller will release the CCB */ wakeup(&done_ccb->ccb_h.cbfcnp); } static int xptopen(struct cdev *dev, int flags, int fmt, struct thread *td) { /* * Only allow read-write access. */ if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) return(EPERM); /* * We don't allow nonblocking access. */ if ((flags & O_NONBLOCK) != 0) { printf("%s: can't do nonblocking access\n", devtoname(dev)); return(ENODEV); } /* Mark ourselves open */ mtx_lock(&xsoftc.xpt_lock); xsoftc.flags |= XPT_FLAG_OPEN; mtx_unlock(&xsoftc.xpt_lock); return(0); } static int xptclose(struct cdev *dev, int flag, int fmt, struct thread *td) { /* Mark ourselves closed */ mtx_lock(&xsoftc.xpt_lock); xsoftc.flags &= ~XPT_FLAG_OPEN; mtx_unlock(&xsoftc.xpt_lock); return(0); } /* * Don't automatically grab the xpt softc lock here even though this is going * through the xpt device. The xpt device is really just a back door for * accessing other devices and SIMs, so the right thing to do is to grab * the appropriate SIM lock once the bus/SIM is located. 
*/ static int xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { int error; error = 0; switch(cmd) { /* * For the transport layer CAMIOCOMMAND ioctl, we really only want * to accept CCB types that don't quite make sense to send through a * passthrough driver. XPT_PATH_INQ is an exception to this, as stated * in the CAM spec. */ case CAMIOCOMMAND: { union ccb *ccb; union ccb *inccb; struct cam_eb *bus; inccb = (union ccb *)addr; bus = xpt_find_bus(inccb->ccb_h.path_id); if (bus == NULL) return (EINVAL); switch (inccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_RESET_BUS: if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD || inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { xpt_release_bus(bus); return (EINVAL); } break; case XPT_SCAN_TGT: if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD || inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { xpt_release_bus(bus); return (EINVAL); } break; default: break; } switch(inccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_RESET_BUS: case XPT_PATH_INQ: case XPT_ENG_INQ: case XPT_SCAN_LUN: case XPT_SCAN_TGT: ccb = xpt_alloc_ccb(); CAM_SIM_LOCK(bus->sim); /* * Create a path using the bus, target, and lun the * user passed in. */ if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, inccb->ccb_h.path_id, inccb->ccb_h.target_id, inccb->ccb_h.target_lun) != CAM_REQ_CMP){ error = EINVAL; CAM_SIM_UNLOCK(bus->sim); xpt_free_ccb(ccb); break; } /* Ensure all of our fields are correct */ xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, inccb->ccb_h.pinfo.priority); xpt_merge_ccb(ccb, inccb); ccb->ccb_h.cbfcnp = xptdone; cam_periph_runccb(ccb, NULL, 0, 0, NULL); bcopy(ccb, inccb, sizeof(union ccb)); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); CAM_SIM_UNLOCK(bus->sim); break; case XPT_DEBUG: { union ccb ccb; /* * This is an immediate CCB, so it's okay to * allocate it on the stack. */ CAM_SIM_LOCK(bus->sim); /* * Create a path using the bus, target, and lun the * user passed in. */ if (xpt_create_path(&ccb.ccb_h.path, xpt_periph, inccb->ccb_h.path_id, inccb->ccb_h.target_id, inccb->ccb_h.target_lun) != CAM_REQ_CMP){ error = EINVAL; CAM_SIM_UNLOCK(bus->sim); break; } /* Ensure all of our fields are correct */ xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, inccb->ccb_h.pinfo.priority); xpt_merge_ccb(&ccb, inccb); ccb.ccb_h.cbfcnp = xptdone; xpt_action(&ccb); bcopy(&ccb, inccb, sizeof(union ccb)); xpt_free_path(ccb.ccb_h.path); CAM_SIM_UNLOCK(bus->sim); break; } case XPT_DEV_MATCH: { struct cam_periph_map_info mapinfo; struct cam_path *old_path; /* * We can't deal with physical addresses for this * type of transaction. */ - if (inccb->ccb_h.flags & CAM_DATA_PHYS) { + if ((inccb->ccb_h.flags & CAM_DATA_MASK) != + CAM_DATA_VADDR) { error = EINVAL; break; } /* * Save this in case the caller had it set to * something in particular. */ old_path = inccb->ccb_h.path; /* * We really don't need a path for the matching * code. The path is needed because of the * debugging statements in xpt_action(). They * assume that the CCB has a valid path. */ inccb->ccb_h.path = xpt_periph->path; bzero(&mapinfo, sizeof(mapinfo)); /* * Map the pattern and match buffers into kernel * virtual address space. */ error = cam_periph_mapmem(inccb, &mapinfo); if (error) { inccb->ccb_h.path = old_path; break; } /* * This is an immediate CCB, we can send it on directly. */ CAM_SIM_LOCK(xpt_path_sim(xpt_periph->path)); xpt_action(inccb); CAM_SIM_UNLOCK(xpt_path_sim(xpt_periph->path)); /* * Map the buffers back into user space. 
*/ cam_periph_unmapmem(inccb, &mapinfo); inccb->ccb_h.path = old_path; error = 0; break; } default: error = ENOTSUP; break; } xpt_release_bus(bus); break; } /* * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input, * with the periphal driver name and unit name filled in. The other * fields don't really matter as input. The passthrough driver name * ("pass"), and unit number are passed back in the ccb. The current * device generation number, and the index into the device peripheral * driver list, and the status are also passed back. Note that * since we do everything in one pass, unlike the XPT_GDEVLIST ccb, * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is * (or rather should be) impossible for the device peripheral driver * list to change since we look at the whole thing in one pass, and * we do it with lock protection. * */ case CAMGETPASSTHRU: { union ccb *ccb; struct cam_periph *periph; struct periph_driver **p_drv; char *name; u_int unit; u_int cur_generation; int base_periph_found; int splbreaknum; ccb = (union ccb *)addr; unit = ccb->cgdl.unit_number; name = ccb->cgdl.periph_name; /* * Every 100 devices, we want to drop our lock protection to * give the software interrupt handler a chance to run. * Most systems won't run into this check, but this should * avoid starvation in the software interrupt handler in * large systems. */ splbreaknum = 100; ccb = (union ccb *)addr; base_periph_found = 0; /* * Sanity check -- make sure we don't get a null peripheral * driver name. */ if (*ccb->cgdl.periph_name == '\0') { error = EINVAL; break; } /* Keep the list from changing while we traverse it */ mtx_lock(&xsoftc.xpt_topo_lock); ptstartover: cur_generation = xsoftc.xpt_generation; /* first find our driver in the list of drivers */ for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) if (strcmp((*p_drv)->driver_name, name) == 0) break; if (*p_drv == NULL) { mtx_unlock(&xsoftc.xpt_topo_lock); ccb->ccb_h.status = CAM_REQ_CMP_ERR; ccb->cgdl.status = CAM_GDEVLIST_ERROR; *ccb->cgdl.periph_name = '\0'; ccb->cgdl.unit_number = 0; error = ENOENT; break; } /* * Run through every peripheral instance of this driver * and check to see whether it matches the unit passed * in by the user. If it does, get out of the loops and * find the passthrough driver associated with that * peripheral driver. */ for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL; periph = TAILQ_NEXT(periph, unit_links)) { if (periph->unit_number == unit) { break; } else if (--splbreaknum == 0) { mtx_unlock(&xsoftc.xpt_topo_lock); mtx_lock(&xsoftc.xpt_topo_lock); splbreaknum = 100; if (cur_generation != xsoftc.xpt_generation) goto ptstartover; } } /* * If we found the peripheral driver that the user passed * in, go through all of the peripheral drivers for that * particular device and look for a passthrough driver. */ if (periph != NULL) { struct cam_ed *device; int i; base_periph_found = 1; device = periph->path->device; for (i = 0, periph = SLIST_FIRST(&device->periphs); periph != NULL; periph = SLIST_NEXT(periph, periph_links), i++) { /* * Check to see whether we have a * passthrough device or not. */ if (strcmp(periph->periph_name, "pass") == 0) { /* * Fill in the getdevlist fields. 
*/ strcpy(ccb->cgdl.periph_name, periph->periph_name); ccb->cgdl.unit_number = periph->unit_number; if (SLIST_NEXT(periph, periph_links)) ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS; else ccb->cgdl.status = CAM_GDEVLIST_LAST_DEVICE; ccb->cgdl.generation = device->generation; ccb->cgdl.index = i; /* * Fill in some CCB header fields * that the user may want. */ ccb->ccb_h.path_id = periph->path->bus->path_id; ccb->ccb_h.target_id = periph->path->target->target_id; ccb->ccb_h.target_lun = periph->path->device->lun_id; ccb->ccb_h.status = CAM_REQ_CMP; break; } } } /* * If the periph is null here, one of two things has * happened. The first possibility is that we couldn't * find the unit number of the particular peripheral driver * that the user is asking about. e.g. the user asks for * the passthrough driver for "da11". We find the list of * "da" peripherals all right, but there is no unit 11. * The other possibility is that we went through the list * of peripheral drivers attached to the device structure, * but didn't find one with the name "pass". Either way, * we return ENOENT, since we couldn't find something. */ if (periph == NULL) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; ccb->cgdl.status = CAM_GDEVLIST_ERROR; *ccb->cgdl.periph_name = '\0'; ccb->cgdl.unit_number = 0; error = ENOENT; /* * It is unfortunate that this is even necessary, * but there are many, many clueless users out there. * If this is true, the user is looking for the * passthrough driver, but doesn't have one in his * kernel. */ if (base_periph_found == 1) { printf("xptioctl: pass driver is not in the " "kernel\n"); printf("xptioctl: put \"device pass\" in " "your kernel config file\n"); } } mtx_unlock(&xsoftc.xpt_topo_lock); break; } default: error = ENOTTY; break; } return(error); } static int cam_module_event_handler(module_t mod, int what, void *arg) { int error; switch (what) { case MOD_LOAD: if ((error = xpt_init(NULL)) != 0) return (error); break; case MOD_UNLOAD: return EBUSY; default: return EOPNOTSUPP; } return 0; } static void xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb) { if (done_ccb->ccb_h.ppriv_ptr1 == NULL) { xpt_free_path(done_ccb->ccb_h.path); xpt_free_ccb(done_ccb); } else { done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1; (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb); } xpt_release_boot(); } /* thread to handle bus rescans */ static void xpt_scanner_thread(void *dummy) { union ccb *ccb; struct cam_sim *sim; xpt_lock_buses(); for (;;) { if (TAILQ_EMPTY(&xsoftc.ccb_scanq)) msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO, "ccb_scanq", 0); if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) { TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); xpt_unlock_buses(); sim = ccb->ccb_h.path->bus->sim; CAM_SIM_LOCK(sim); xpt_action(ccb); CAM_SIM_UNLOCK(sim); xpt_lock_buses(); } } } void xpt_rescan(union ccb *ccb) { struct ccb_hdr *hdr; /* Prepare request */ if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD && ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_BUS; else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_TGT; else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_LUN; else { xpt_print(ccb->ccb_h.path, "illegal scan path\n"); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } ccb->ccb_h.ppriv_ptr1 = 
ccb->ccb_h.cbfcnp; ccb->ccb_h.cbfcnp = xpt_rescan_done; xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT); /* Don't make duplicate entries for the same paths. */ xpt_lock_buses(); if (ccb->ccb_h.ppriv_ptr1 == NULL) { TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) { if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) { wakeup(&xsoftc.ccb_scanq); xpt_unlock_buses(); xpt_print(ccb->ccb_h.path, "rescan already queued\n"); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } } } TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); xsoftc.buses_to_config++; wakeup(&xsoftc.ccb_scanq); xpt_unlock_buses(); } /* Functions accessed by the peripheral drivers */ static int xpt_init(void *dummy) { struct cam_sim *xpt_sim; struct cam_path *path; struct cam_devq *devq; cam_status status; TAILQ_INIT(&xsoftc.xpt_busses); TAILQ_INIT(&cam_simq); TAILQ_INIT(&xsoftc.ccb_scanq); STAILQ_INIT(&xsoftc.highpowerq); xsoftc.num_highpower = CAM_MAX_HIGHPOWER; mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF); mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF); mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF); /* * The xpt layer is, itself, the equivelent of a SIM. * Allow 16 ccbs in the ccb pool for it. This should * give decent parallelism when we probe busses and * perform other XPT functions. */ devq = cam_simq_alloc(16); xpt_sim = cam_sim_alloc(xptaction, xptpoll, "xpt", /*softc*/NULL, /*unit*/0, /*mtx*/&xsoftc.xpt_lock, /*max_dev_transactions*/0, /*max_tagged_dev_transactions*/0, devq); if (xpt_sim == NULL) return (ENOMEM); mtx_lock(&xsoftc.xpt_lock); if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) { mtx_unlock(&xsoftc.xpt_lock); printf("xpt_init: xpt_bus_register failed with status %#x," " failing attach\n", status); return (EINVAL); } /* * Looking at the XPT from the SIM layer, the XPT is * the equivelent of a peripheral driver. Allocate * a peripheral driver entry for us. */ if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)) != CAM_REQ_CMP) { mtx_unlock(&xsoftc.xpt_lock); printf("xpt_init: xpt_create_path failed with status %#x," " failing attach\n", status); return (EINVAL); } cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO, path, NULL, 0, xpt_sim); xpt_free_path(path); mtx_unlock(&xsoftc.xpt_lock); /* Install our software interrupt handlers */ swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih); /* * Register a callback for when interrupts are enabled. 
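 */

/*
 * Sketch (not part of the original source) of the config_intrhook pattern
 * that xpt_init() uses just below: the hook's ich_func runs once interrupts
 * are enabled, and it must disestablish itself so that boot can proceed.
 * The example_* names are hypothetical; the intr_config_hook API is the
 * standard kernel one.
 */
static struct intr_config_hook example_hook;

static void
example_config(void *arg)
{

        /* ... deferred work that needs working interrupts ... */
        config_intrhook_disestablish(&example_hook);
}

static int
example_establish(void)
{

        example_hook.ich_func = example_config;
        example_hook.ich_arg = NULL;
        if (config_intrhook_establish(&example_hook) != 0)
                return (ENOMEM);
        return (0);
}

/*
 * End of sketch; xpt_init() continues below, allocating its hook
 * dynamically and pointing ich_func at xpt_config.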
*/ xsoftc.xpt_config_hook = (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook), M_CAMXPT, M_NOWAIT | M_ZERO); if (xsoftc.xpt_config_hook == NULL) { printf("xpt_init: Cannot malloc config hook " "- failing attach\n"); return (ENOMEM); } xsoftc.xpt_config_hook->ich_func = xpt_config; if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) { free (xsoftc.xpt_config_hook, M_CAMXPT); printf("xpt_init: config_intrhook_establish failed " "- failing attach\n"); } return (0); } static cam_status xptregister(struct cam_periph *periph, void *arg) { struct cam_sim *xpt_sim; if (periph == NULL) { printf("xptregister: periph was NULL!!\n"); return(CAM_REQ_CMP_ERR); } xpt_sim = (struct cam_sim *)arg; xpt_sim->softc = periph; xpt_periph = periph; periph->softc = NULL; return(CAM_REQ_CMP); } int32_t xpt_add_periph(struct cam_periph *periph) { struct cam_ed *device; int32_t status; struct periph_list *periph_head; mtx_assert(periph->sim->mtx, MA_OWNED); device = periph->path->device; periph_head = &device->periphs; status = CAM_REQ_CMP; if (device != NULL) { /* * Make room for this peripheral * so it will fit in the queue * when it's scheduled to run */ status = camq_resize(&device->drvq, device->drvq.array_size + 1); device->generation++; SLIST_INSERT_HEAD(periph_head, periph, periph_links); } mtx_lock(&xsoftc.xpt_topo_lock); xsoftc.xpt_generation++; mtx_unlock(&xsoftc.xpt_topo_lock); return (status); } void xpt_remove_periph(struct cam_periph *periph, int topology_lock_held) { struct cam_ed *device; mtx_assert(periph->sim->mtx, MA_OWNED); device = periph->path->device; if (device != NULL) { struct periph_list *periph_head; periph_head = &device->periphs; /* Release the slot for this peripheral */ camq_resize(&device->drvq, device->drvq.array_size - 1); device->generation++; SLIST_REMOVE(periph_head, periph, cam_periph, periph_links); } if (topology_lock_held == 0) mtx_lock(&xsoftc.xpt_topo_lock); xsoftc.xpt_generation++; if (topology_lock_held == 0) mtx_unlock(&xsoftc.xpt_topo_lock); } void xpt_announce_periph(struct cam_periph *periph, char *announce_string) { struct cam_path *path = periph->path; mtx_assert(periph->sim->mtx, MA_OWNED); printf("%s%d at %s%d bus %d scbus%d target %d lun %d\n", periph->periph_name, periph->unit_number, path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id, path->bus->path_id, path->target->target_id, path->device->lun_id); printf("%s%d: ", periph->periph_name, periph->unit_number); if (path->device->protocol == PROTO_SCSI) scsi_print_inquiry(&path->device->inq_data); else if (path->device->protocol == PROTO_ATA || path->device->protocol == PROTO_SATAPM) ata_print_ident(&path->device->ident_data); else if (path->device->protocol == PROTO_SEMB) semb_print_ident( (struct sep_identify_data *)&path->device->ident_data); else printf("Unknown protocol device\n"); if (bootverbose && path->device->serial_num_len > 0) { /* Don't wrap the screen - print only the first 60 chars */ printf("%s%d: Serial Number %.60s\n", periph->periph_name, periph->unit_number, path->device->serial_num); } /* Announce transport details. */ (*(path->bus->xport->announce))(periph); /* Announce command queueing. */ if (path->device->inq_flags & SID_CmdQue || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { printf("%s%d: Command Queueing enabled\n", periph->periph_name, periph->unit_number); } /* Announce caller's details if they've passed in. 
*/ if (announce_string != NULL) printf("%s%d: %s\n", periph->periph_name, periph->unit_number, announce_string); } int xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path) { int ret = -1; struct ccb_dev_advinfo cdai; mtx_assert(path->bus->sim->mtx, MA_OWNED); memset(&cdai, 0, sizeof(cdai)); xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.bufsiz = len; if (!strcmp(attr, "GEOM::ident")) cdai.buftype = CDAI_TYPE_SERIAL_NUM; else if (!strcmp(attr, "GEOM::physpath")) cdai.buftype = CDAI_TYPE_PHYS_PATH; else goto out; cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO); if (cdai.buf == NULL) { ret = ENOMEM; goto out; } xpt_action((union ccb *)&cdai); /* can only be synchronous */ if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if (cdai.provsiz == 0) goto out; ret = 0; if (strlcpy(buf, cdai.buf, len) >= len) ret = EFAULT; out: if (cdai.buf != NULL) free(cdai.buf, M_CAMXPT); return ret; } static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_eb *bus) { dev_match_ret retval; int i; retval = DM_RET_NONE; /* * If we aren't given something to match against, that's an error. */ if (bus == NULL) return(DM_RET_ERROR); /* * If there are no match entries, then this bus matches no * matter what. */ if ((patterns == NULL) || (num_patterns == 0)) return(DM_RET_DESCEND | DM_RET_COPY); for (i = 0; i < num_patterns; i++) { struct bus_match_pattern *cur_pattern; /* * If the pattern in question isn't for a bus node, we * aren't interested. However, we do indicate to the * calling routine that we should continue descending the * tree, since the user wants to match against lower-level * EDT elements. */ if (patterns[i].type != DEV_MATCH_BUS) { if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_DESCEND; continue; } cur_pattern = &patterns[i].pattern.bus_pattern; /* * If they want to match any bus node, we give them any * device node. */ if (cur_pattern->flags == BUS_MATCH_ANY) { /* set the copy flag */ retval |= DM_RET_COPY; /* * If we've already decided on an action, go ahead * and return. */ if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE) return(retval); } /* * Not sure why someone would do this... */ if (cur_pattern->flags == BUS_MATCH_NONE) continue; if (((cur_pattern->flags & BUS_MATCH_PATH) != 0) && (cur_pattern->path_id != bus->path_id)) continue; if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0) && (cur_pattern->bus_id != bus->sim->bus_id)) continue; if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0) && (cur_pattern->unit_number != bus->sim->unit_number)) continue; if (((cur_pattern->flags & BUS_MATCH_NAME) != 0) && (strncmp(cur_pattern->dev_name, bus->sim->sim_name, DEV_IDLEN) != 0)) continue; /* * If we get to this point, the user definitely wants * information on this bus. So tell the caller to copy the * data out. */ retval |= DM_RET_COPY; /* * If the return action has been set to descend, then we * know that we've already seen a non-bus matching * expression, therefore we need to further descend the tree. * This won't change by continuing around the loop, so we * go ahead and return. If we haven't seen a non-bus * matching expression, we keep going around the loop until * we exhaust the matching expressions. We'll set the stop * flag once we fall out of the loop. 
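 */

/*
 * Usage sketch (not part of the original source) for xpt_getattr() defined
 * earlier in this block: the caller passes a buffer, one of the attribute
 * names the function recognizes and a path, and must hold the SIM lock,
 * which the function asserts.  The helper name and buffer size are
 * hypothetical.
 */
static void
example_print_ident(struct cam_path *path)
{
        char ident[DEV_IDLEN * 4];

        /* Assumes path->bus->sim->mtx is already held by the caller. */
        if (xpt_getattr(ident, sizeof(ident), "GEOM::ident", path) == 0)
                printf("device ident: %s\n", ident);
}

/*
 * End of sketch; the bus-matching code resumes below.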
*/ if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) return(retval); } /* * If the return action hasn't been set to descend yet, that means * we haven't seen anything other than bus matching patterns. So * tell the caller to stop descending the tree -- the user doesn't * want to match against lower level tree elements. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_STOP; return(retval); } static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_ed *device) { dev_match_ret retval; int i; retval = DM_RET_NONE; /* * If we aren't given something to match against, that's an error. */ if (device == NULL) return(DM_RET_ERROR); /* * If there are no match entries, then this device matches no * matter what. */ if ((patterns == NULL) || (num_patterns == 0)) return(DM_RET_DESCEND | DM_RET_COPY); for (i = 0; i < num_patterns; i++) { struct device_match_pattern *cur_pattern; struct scsi_vpd_device_id *device_id_page; /* * If the pattern in question isn't for a device node, we * aren't interested. */ if (patterns[i].type != DEV_MATCH_DEVICE) { if ((patterns[i].type == DEV_MATCH_PERIPH) && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)) retval |= DM_RET_DESCEND; continue; } cur_pattern = &patterns[i].pattern.device_pattern; /* Error out if mutually exclusive options are specified. */ if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) return(DM_RET_ERROR); /* * If they want to match any device node, we give them any * device node. */ if (cur_pattern->flags == DEV_MATCH_ANY) goto copy_dev_node; /* * Not sure why someone would do this... */ if (cur_pattern->flags == DEV_MATCH_NONE) continue; if (((cur_pattern->flags & DEV_MATCH_PATH) != 0) && (cur_pattern->path_id != device->target->bus->path_id)) continue; if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0) && (cur_pattern->target_id != device->target->target_id)) continue; if (((cur_pattern->flags & DEV_MATCH_LUN) != 0) && (cur_pattern->target_lun != device->lun_id)) continue; if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0) && (cam_quirkmatch((caddr_t)&device->inq_data, (caddr_t)&cur_pattern->data.inq_pat, 1, sizeof(cur_pattern->data.inq_pat), scsi_static_inquiry_match) == NULL)) continue; device_id_page = (struct scsi_vpd_device_id *)device->device_id; if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0) && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN || scsi_devid_match((uint8_t *)device_id_page->desc_list, device->device_id_len - SVPD_DEVICE_ID_HDR_LEN, cur_pattern->data.devid_pat.id, cur_pattern->data.devid_pat.id_len) != 0)) continue; copy_dev_node: /* * If we get to this point, the user definitely wants * information on this device. So tell the caller to copy * the data out. */ retval |= DM_RET_COPY; /* * If the return action has been set to descend, then we * know that we've already seen a peripheral matching * expression, therefore we need to further descend the tree. * This won't change by continuing around the loop, so we * go ahead and return. If we haven't seen a peripheral * matching expression, we keep going around the loop until * we exhaust the matching expressions. We'll set the stop * flag once we fall out of the loop. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) return(retval); } /* * If the return action hasn't been set to descend yet, that means * we haven't seen any peripheral matching patterns. 
So tell the * caller to stop descending the tree -- the user doesn't want to * match against lower level tree elements. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_STOP; return(retval); } /* * Match a single peripheral against any number of match patterns. */ static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_periph *periph) { dev_match_ret retval; int i; /* * If we aren't given something to match against, that's an error. */ if (periph == NULL) return(DM_RET_ERROR); /* * If there are no match entries, then this peripheral matches no * matter what. */ if ((patterns == NULL) || (num_patterns == 0)) return(DM_RET_STOP | DM_RET_COPY); /* * There aren't any nodes below a peripheral node, so there's no * reason to descend the tree any further. */ retval = DM_RET_STOP; for (i = 0; i < num_patterns; i++) { struct periph_match_pattern *cur_pattern; /* * If the pattern in question isn't for a peripheral, we * aren't interested. */ if (patterns[i].type != DEV_MATCH_PERIPH) continue; cur_pattern = &patterns[i].pattern.periph_pattern; /* * If they want to match on anything, then we will do so. */ if (cur_pattern->flags == PERIPH_MATCH_ANY) { /* set the copy flag */ retval |= DM_RET_COPY; /* * We've already set the return action to stop, * since there are no nodes below peripherals in * the tree. */ return(retval); } /* * Not sure why someone would do this... */ if (cur_pattern->flags == PERIPH_MATCH_NONE) continue; if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0) && (cur_pattern->path_id != periph->path->bus->path_id)) continue; /* * For the target and lun id's, we have to make sure the * target and lun pointers aren't NULL. The xpt peripheral * has a wildcard target and device. */ if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0) && ((periph->path->target == NULL) ||(cur_pattern->target_id != periph->path->target->target_id))) continue; if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0) && ((periph->path->device == NULL) || (cur_pattern->target_lun != periph->path->device->lun_id))) continue; if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0) && (cur_pattern->unit_number != periph->unit_number)) continue; if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0) && (strncmp(cur_pattern->periph_name, periph->periph_name, DEV_IDLEN) != 0)) continue; /* * If we get to this point, the user definitely wants * information on this peripheral. So tell the caller to * copy the data out. */ retval |= DM_RET_COPY; /* * The return action has already been set to stop, since * peripherals don't have any nodes below them in the EDT. */ return(retval); } /* * If we get to this point, the peripheral that was passed in * doesn't match any of the patterns. */ return(retval); } static int xptedtbusfunc(struct cam_eb *bus, void *arg) { struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; /* * If our position is for something deeper in the tree, that means * that we've already seen this node. So, we keep going down. */ if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target != NULL)) retval = DM_RET_DESCEND; else retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus); /* * If we got an error, bail out of the search. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this bus out. 
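 */

/*
 * Sketch (not part of the original source) of what a pattern accepted by
 * xptperiphmatch() above looks like from the caller's side.  This one
 * matches every peripheral named "da"; the fill_da_pattern() name is
 * hypothetical, the structure and flag names are the ones tested above.
 */
static void
fill_da_pattern(struct dev_match_pattern *pat)
{

        memset(pat, 0, sizeof(*pat));
        pat->type = DEV_MATCH_PERIPH;
        pat->pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
        strlcpy(pat->pattern.periph_pattern.periph_name, "da",
            sizeof(pat->pattern.periph_pattern.periph_name));
}

/*
 * Adding PERIPH_MATCH_UNIT and a unit_number would narrow this to a single
 * instance such as "da1".  End of sketch; the EDT traversal code resumes
 * below, copying this bus out when requested.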
*/ if (retval & DM_RET_COPY) { int spaceleft, j; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. */ if (spaceleft < sizeof(struct dev_match_result)) { bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_EDT | CAM_DEV_POS_BUS; cdm->pos.cookie.bus = bus; cdm->pos.generations[CAM_BUS_GENERATION]= xsoftc.bus_generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_BUS; cdm->matches[j].result.bus_result.path_id = bus->path_id; cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id; cdm->matches[j].result.bus_result.unit_number = bus->sim->unit_number; strncpy(cdm->matches[j].result.bus_result.dev_name, bus->sim->sim_name, DEV_IDLEN); } /* * If the user is only interested in busses, there's no * reason to descend to the next level in the tree. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) return(1); /* * If there is a target generation recorded, check it to * make sure the target list hasn't changed. */ if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (bus == cdm->pos.cookie.bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0) && (cdm->pos.generations[CAM_TARGET_GENERATION] != bus->generation)) { cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target != NULL)) return(xpttargettraverse(bus, (struct cam_et *)cdm->pos.cookie.target, xptedttargetfunc, arg)); else return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg)); } static int xptedttargetfunc(struct cam_et *target, void *arg) { struct ccb_dev_match *cdm; cdm = (struct ccb_dev_match *)arg; /* * If there is a device list generation recorded, check it to * make sure the device list hasn't changed. */ if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == target->bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target == target) && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.generations[CAM_DEV_GENERATION] != 0) && (cdm->pos.generations[CAM_DEV_GENERATION] != target->generation)) { cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == target->bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target == target) && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.cookie.device != NULL)) return(xptdevicetraverse(target, (struct cam_ed *)cdm->pos.cookie.device, xptedtdevicefunc, arg)); else return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg)); } static int xptedtdevicefunc(struct cam_ed *device, void *arg) { struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; /* * If our position is for something deeper in the tree, that means * that we've already seen this node. So, we keep going down. 
*/ if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.cookie.device == device) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.cookie.periph != NULL)) retval = DM_RET_DESCEND; else retval = xptdevicematch(cdm->patterns, cdm->num_patterns, device); if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this device out. */ if (retval & DM_RET_COPY) { int spaceleft, j; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. */ if (spaceleft < sizeof(struct dev_match_result)) { bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; cdm->pos.cookie.bus = device->target->bus; cdm->pos.generations[CAM_BUS_GENERATION]= xsoftc.bus_generation; cdm->pos.cookie.target = device->target; cdm->pos.generations[CAM_TARGET_GENERATION] = device->target->bus->generation; cdm->pos.cookie.device = device; cdm->pos.generations[CAM_DEV_GENERATION] = device->target->generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_DEVICE; cdm->matches[j].result.device_result.path_id = device->target->bus->path_id; cdm->matches[j].result.device_result.target_id = device->target->target_id; cdm->matches[j].result.device_result.target_lun = device->lun_id; cdm->matches[j].result.device_result.protocol = device->protocol; bcopy(&device->inq_data, &cdm->matches[j].result.device_result.inq_data, sizeof(struct scsi_inquiry_data)); bcopy(&device->ident_data, &cdm->matches[j].result.device_result.ident_data, sizeof(struct ata_params)); /* Let the user know whether this device is unconfigured */ if (device->flags & CAM_DEV_UNCONFIGURED) cdm->matches[j].result.device_result.flags = DEV_RESULT_UNCONFIGURED; else cdm->matches[j].result.device_result.flags = DEV_RESULT_NOFLAG; } /* * If the user isn't interested in peripherals, don't descend * the tree any further. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) return(1); /* * If there is a peripheral list generation recorded, make sure * it hasn't changed. 
*/ if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (device->target->bus == cdm->pos.cookie.bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (device->target == cdm->pos.cookie.target) && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (device == cdm->pos.cookie.device) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) && (cdm->pos.generations[CAM_PERIPH_GENERATION] != device->generation)){ cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == device->target->bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target == device->target) && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.cookie.device == device) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.cookie.periph != NULL)) return(xptperiphtraverse(device, (struct cam_periph *)cdm->pos.cookie.periph, xptedtperiphfunc, arg)); else return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg)); } static int xptedtperiphfunc(struct cam_periph *periph, void *arg) { struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this peripheral out. */ if (retval & DM_RET_COPY) { int spaceleft, j; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. */ if (spaceleft < sizeof(struct dev_match_result)) { bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | CAM_DEV_POS_PERIPH; cdm->pos.cookie.bus = periph->path->bus; cdm->pos.generations[CAM_BUS_GENERATION]= xsoftc.bus_generation; cdm->pos.cookie.target = periph->path->target; cdm->pos.generations[CAM_TARGET_GENERATION] = periph->path->bus->generation; cdm->pos.cookie.device = periph->path->device; cdm->pos.generations[CAM_DEV_GENERATION] = periph->path->target->generation; cdm->pos.cookie.periph = periph; cdm->pos.generations[CAM_PERIPH_GENERATION] = periph->path->device->generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_PERIPH; cdm->matches[j].result.periph_result.path_id = periph->path->bus->path_id; cdm->matches[j].result.periph_result.target_id = periph->path->target->target_id; cdm->matches[j].result.periph_result.target_lun = periph->path->device->lun_id; cdm->matches[j].result.periph_result.unit_number = periph->unit_number; strncpy(cdm->matches[j].result.periph_result.periph_name, periph->periph_name, DEV_IDLEN); } return(1); } static int xptedtmatch(struct ccb_dev_match *cdm) { int ret; cdm->num_matches = 0; /* * Check the bus list generation. If it has changed, the user * needs to reset everything and start over. 
*/ if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.generations[CAM_BUS_GENERATION] != 0) && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) { cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus != NULL)) ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus, xptedtbusfunc, cdm); else ret = xptbustraverse(NULL, xptedtbusfunc, cdm); /* * If we get back 0, that means that we had to stop before fully * traversing the EDT. It also means that one of the subroutines * has set the status field to the proper value. If we get back 1, * we've fully traversed the EDT and copied out any matching entries. */ if (ret == 1) cdm->status = CAM_DEV_MATCH_LAST; return(ret); } static int xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) { struct ccb_dev_match *cdm; cdm = (struct ccb_dev_match *)arg; if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) && (cdm->pos.cookie.pdrv == pdrv) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) && (cdm->pos.generations[CAM_PERIPH_GENERATION] != (*pdrv)->generation)) { cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) && (cdm->pos.cookie.pdrv == pdrv) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.cookie.periph != NULL)) return(xptpdperiphtraverse(pdrv, (struct cam_periph *)cdm->pos.cookie.periph, xptplistperiphfunc, arg)); else return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg)); } static int xptplistperiphfunc(struct cam_periph *periph, void *arg) { struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this peripheral out. */ if (retval & DM_RET_COPY) { int spaceleft, j; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. */ if (spaceleft < sizeof(struct dev_match_result)) { struct periph_driver **pdrv; pdrv = NULL; bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | CAM_DEV_POS_PERIPH; /* * This may look a bit non-sensical, but it is * actually quite logical. There are very few * peripheral drivers, and bloating every peripheral * structure with a pointer back to its parent * peripheral driver linker set entry would cost * more in the long run than doing this quick lookup. */ for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { if (strcmp((*pdrv)->driver_name, periph->periph_name) == 0) break; } if (*pdrv == NULL) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } cdm->pos.cookie.pdrv = pdrv; /* * The periph generation slot does double duty, as * does the periph pointer slot. They are used for * both edt and pdrv lookups and positioning. */ cdm->pos.cookie.periph = periph; cdm->pos.generations[CAM_PERIPH_GENERATION] = (*pdrv)->generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_PERIPH; cdm->matches[j].result.periph_result.path_id = periph->path->bus->path_id; /* * The transport layer peripheral doesn't have a target or * lun. 
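 */

/*
 * Userland sketch (not part of the original source) of driving
 * XPT_DEV_MATCH through the CAMIOCOMMAND ioctl handled earlier in this
 * file: a plain malloc()ed result buffer (the handler only accepts virtual
 * addresses), no patterns so that everything matches, and a loop that
 * re-issues the ccb while the status is CAM_DEV_MATCH_MORE.  The kernel
 * keeps its resume position inside the ccb, so it is re-submitted
 * untouched.  The "/dev/xpt0" path and the function name are assumptions
 * for the example.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>

static int
example_walk_edt(void)
{
        union ccb ccb;
        u_int i;
        int fd;

        if ((fd = open("/dev/xpt0", O_RDWR)) == -1)
                return (-1);
        memset(&ccb, 0, sizeof(ccb));
        ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
        ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
        ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
        ccb.ccb_h.func_code = XPT_DEV_MATCH;
        ccb.cdm.match_buf_len = 100 * sizeof(struct dev_match_result);
        ccb.cdm.matches = malloc(ccb.cdm.match_buf_len);
        if (ccb.cdm.matches == NULL) {
                close(fd);
                return (-1);
        }
        ccb.cdm.num_patterns = 0;       /* no patterns: match everything */
        ccb.cdm.patterns = NULL;
        do {
                if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
                        break;
                if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
                        break;
                for (i = 0; i < ccb.cdm.num_matches; i++) {
                        /* Inspect ccb.cdm.matches[i].type and .result. */
                }
        } while (ccb.cdm.status == CAM_DEV_MATCH_MORE);
        free(ccb.cdm.matches);
        close(fd);
        return (0);
}

/*
 * End of sketch; the original list-matching code resumes below.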
*/ if (periph->path->target) cdm->matches[j].result.periph_result.target_id = periph->path->target->target_id; else cdm->matches[j].result.periph_result.target_id = -1; if (periph->path->device) cdm->matches[j].result.periph_result.target_lun = periph->path->device->lun_id; else cdm->matches[j].result.periph_result.target_lun = -1; cdm->matches[j].result.periph_result.unit_number = periph->unit_number; strncpy(cdm->matches[j].result.periph_result.periph_name, periph->periph_name, DEV_IDLEN); } return(1); } static int xptperiphlistmatch(struct ccb_dev_match *cdm) { int ret; cdm->num_matches = 0; /* * At this point in the edt traversal function, we check the bus * list generation to make sure that no busses have been added or * removed since the user last sent a XPT_DEV_MATCH ccb through. * For the peripheral driver list traversal function, however, we * don't have to worry about new peripheral driver types coming or * going; they're in a linker set, and therefore can't change * without a recompile. */ if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) && (cdm->pos.cookie.pdrv != NULL)) ret = xptpdrvtraverse( (struct periph_driver **)cdm->pos.cookie.pdrv, xptplistpdrvfunc, cdm); else ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); /* * If we get back 0, that means that we had to stop before fully * traversing the peripheral driver tree. It also means that one of * the subroutines has set the status field to the proper value. If * we get back 1, we've fully traversed the EDT and copied out any * matching entries. */ if (ret == 1) cdm->status = CAM_DEV_MATCH_LAST; return(ret); } static int xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) { struct cam_eb *bus, *next_bus; int retval; retval = 1; mtx_lock(&xsoftc.xpt_topo_lock); for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses)); bus != NULL; bus = next_bus) { bus->refcount++; /* * XXX The locking here is obviously very complex. We * should work to simplify it. */ mtx_unlock(&xsoftc.xpt_topo_lock); CAM_SIM_LOCK(bus->sim); retval = tr_func(bus, arg); CAM_SIM_UNLOCK(bus->sim); mtx_lock(&xsoftc.xpt_topo_lock); next_bus = TAILQ_NEXT(bus, links); mtx_unlock(&xsoftc.xpt_topo_lock); xpt_release_bus(bus); if (retval == 0) return(retval); mtx_lock(&xsoftc.xpt_topo_lock); } mtx_unlock(&xsoftc.xpt_topo_lock); return(retval); } int xpt_sim_opened(struct cam_sim *sim) { struct cam_eb *bus; struct cam_et *target; struct cam_ed *device; struct cam_periph *periph; KASSERT(sim->refcount >= 1, ("sim->refcount >= 1")); mtx_assert(sim->mtx, MA_OWNED); mtx_lock(&xsoftc.xpt_topo_lock); TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) { if (bus->sim != sim) continue; TAILQ_FOREACH(target, &bus->et_entries, links) { TAILQ_FOREACH(device, &target->ed_entries, links) { SLIST_FOREACH(periph, &device->periphs, periph_links) { if (periph->refcount > 0) { mtx_unlock(&xsoftc.xpt_topo_lock); return (1); } } } } } mtx_unlock(&xsoftc.xpt_topo_lock); return (0); } static int xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, xpt_targetfunc_t *tr_func, void *arg) { struct cam_et *target, *next_target; int retval; retval = 1; for (target = (start_target ? 
start_target : TAILQ_FIRST(&bus->et_entries)); target != NULL; target = next_target) { target->refcount++; retval = tr_func(target, arg); next_target = TAILQ_NEXT(target, links); xpt_release_target(target); if (retval == 0) return(retval); } return(retval); } static int xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, xpt_devicefunc_t *tr_func, void *arg) { struct cam_ed *device, *next_device; int retval; retval = 1; for (device = (start_device ? start_device : TAILQ_FIRST(&target->ed_entries)); device != NULL; device = next_device) { /* * Hold a reference so the current device does not go away * on us. */ device->refcount++; retval = tr_func(device, arg); /* * Grab our next pointer before we release the current * device. */ next_device = TAILQ_NEXT(device, links); xpt_release_device(device); if (retval == 0) return(retval); } return(retval); } static int xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg) { struct cam_periph *periph, *next_periph; int retval; retval = 1; xpt_lock_buses(); for (periph = (start_periph ? start_periph : SLIST_FIRST(&device->periphs)); periph != NULL; periph = next_periph) { /* * In this case, we want to show peripherals that have been * invalidated, but not peripherals that are scheduled to * be freed. So instead of calling cam_periph_acquire(), * which will fail if the periph has been invalidated, we * just check for the free flag here. If it is free, we * skip to the next periph. */ if (periph->flags & CAM_PERIPH_FREE) { next_periph = SLIST_NEXT(periph, periph_links); continue; } /* * Acquire a reference to this periph while we call the * traversal function, so it can't go away. */ periph->refcount++; xpt_unlock_buses(); retval = tr_func(periph, arg); /* * We need the lock for list traversal. */ xpt_lock_buses(); /* * Grab the next peripheral before we release this one, so * our next pointer is still valid. */ next_periph = SLIST_NEXT(periph, periph_links); cam_periph_release_locked_buses(periph); if (retval == 0) goto bailout_done; } bailout_done: xpt_unlock_buses(); return(retval); } static int xptpdrvtraverse(struct periph_driver **start_pdrv, xpt_pdrvfunc_t *tr_func, void *arg) { struct periph_driver **pdrv; int retval; retval = 1; /* * We don't traverse the peripheral driver list like we do the * other lists, because it is a linker set, and therefore cannot be * changed during runtime. If the peripheral driver list is ever * re-done to be something other than a linker set (i.e. it can * change while the system is running), the list traversal should * be modified to work like the other traversal functions. */ for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); *pdrv != NULL; pdrv++) { retval = tr_func(pdrv, arg); if (retval == 0) return(retval); } return(retval); } static int xptpdperiphtraverse(struct periph_driver **pdrv, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg) { struct cam_periph *periph, *next_periph; int retval; retval = 1; xpt_lock_buses(); for (periph = (start_periph ? start_periph : TAILQ_FIRST(&(*pdrv)->units)); periph != NULL; periph = next_periph) { /* * In this case, we want to show peripherals that have been * invalidated, but not peripherals that are scheduled to * be freed. So instead of calling cam_periph_acquire(), * which will fail if the periph has been invalidated, we * just check for the free flag here. If it is free, we * skip to the next periph. 
*/ if (periph->flags & CAM_PERIPH_FREE) { next_periph = TAILQ_NEXT(periph, unit_links); continue; } /* * Acquire a reference to this periph while we call the * traversal function, so it can't go away. */ periph->refcount++; /* * XXX KDM we have the toplogy lock here, but in * xptperiphtraverse(), we drop it before calling the * traversal function. Which is correct? */ retval = tr_func(periph, arg); /* * Grab the next peripheral before we release this one, so * our next pointer is still valid. */ next_periph = TAILQ_NEXT(periph, unit_links); cam_periph_release_locked_buses(periph); if (retval == 0) goto bailout_done; } bailout_done: xpt_unlock_buses(); return(retval); } static int xptdefbusfunc(struct cam_eb *bus, void *arg) { struct xpt_traverse_config *tr_config; tr_config = (struct xpt_traverse_config *)arg; if (tr_config->depth == XPT_DEPTH_BUS) { xpt_busfunc_t *tr_func; tr_func = (xpt_busfunc_t *)tr_config->tr_func; return(tr_func(bus, tr_config->tr_arg)); } else return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); } static int xptdeftargetfunc(struct cam_et *target, void *arg) { struct xpt_traverse_config *tr_config; tr_config = (struct xpt_traverse_config *)arg; if (tr_config->depth == XPT_DEPTH_TARGET) { xpt_targetfunc_t *tr_func; tr_func = (xpt_targetfunc_t *)tr_config->tr_func; return(tr_func(target, tr_config->tr_arg)); } else return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); } static int xptdefdevicefunc(struct cam_ed *device, void *arg) { struct xpt_traverse_config *tr_config; tr_config = (struct xpt_traverse_config *)arg; if (tr_config->depth == XPT_DEPTH_DEVICE) { xpt_devicefunc_t *tr_func; tr_func = (xpt_devicefunc_t *)tr_config->tr_func; return(tr_func(device, tr_config->tr_arg)); } else return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); } static int xptdefperiphfunc(struct cam_periph *periph, void *arg) { struct xpt_traverse_config *tr_config; xpt_periphfunc_t *tr_func; tr_config = (struct xpt_traverse_config *)arg; tr_func = (xpt_periphfunc_t *)tr_config->tr_func; /* * Unlike the other default functions, we don't check for depth * here. The peripheral driver level is the last level in the EDT, * so if we're here, we should execute the function in question. */ return(tr_func(periph, tr_config->tr_arg)); } /* * Execute the given function for every bus in the EDT. */ static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) { struct xpt_traverse_config tr_config; tr_config.depth = XPT_DEPTH_BUS; tr_config.tr_func = tr_func; tr_config.tr_arg = arg; return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); } /* * Execute the given function for every device in the EDT. */ static int xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) { struct xpt_traverse_config tr_config; tr_config.depth = XPT_DEPTH_DEVICE; tr_config.tr_func = tr_func; tr_config.tr_arg = arg; return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); } static int xptsetasyncfunc(struct cam_ed *device, void *arg) { struct cam_path path; struct ccb_getdev cgd; struct ccb_setasync *csa = (struct ccb_setasync *)arg; /* * Don't report unconfigured devices (Wildcard devs, * devices only for target mode, device instances * that have been invalidated but are waiting for * their last reference count to be released). 
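 */

/*
 * Usage sketch (not part of the original source) for the
 * xpt_for_all_devices() helper defined above: the callback receives each
 * EDT device plus the opaque argument and returns 1 to keep traversing or
 * 0 to stop early.  The example_count_* names are hypothetical.
 */
static int
example_count_device(struct cam_ed *device, void *arg)
{

        (*(int *)arg)++;
        return (1);             /* nonzero: continue the traversal */
}

static int
example_count_devices(void)
{
        int count = 0;

        xpt_for_all_devices(example_count_device, &count);
        return (count);
}

/*
 * End of sketch; the async-notification helper continues below.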
*/ if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) return (1); xpt_compile_path(&path, NULL, device->target->bus->path_id, device->target->target_id, device->lun_id); xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); csa->callback(csa->callback_arg, AC_FOUND_DEVICE, &path, &cgd); xpt_release_path(&path); return(1); } static int xptsetasyncbusfunc(struct cam_eb *bus, void *arg) { struct cam_path path; struct ccb_pathinq cpi; struct ccb_setasync *csa = (struct ccb_setasync *)arg; xpt_compile_path(&path, /*periph*/NULL, bus->sim->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); csa->callback(csa->callback_arg, AC_PATH_REGISTERED, &path, &cpi); xpt_release_path(&path); return(1); } void xpt_action(union ccb *start_ccb) { CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n")); start_ccb->ccb_h.status = CAM_REQ_INPROG; (*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb); } void xpt_action_default(union ccb *start_ccb) { char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; struct cam_path *path; path = start_ccb->ccb_h.path; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n")); switch (start_ccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct cam_ed *device; /* * For the sake of compatibility with SCSI-1 * devices that may not understand the identify * message, we include lun information in the * second byte of all commands. SCSI-1 specifies * that luns are a 3 bit value and reserves only 3 * bits for lun information in the CDB. Later * revisions of the SCSI spec allow for more than 8 * luns, but have deprecated lun information in the * CDB. So, if the lun won't fit, we must omit. * * Also be aware that during initial probing for devices, * the inquiry information is unknown but initialized to 0. * This means that this code will be exercised while probing * devices with an ANSI revision greater than 2. */ device = path->device; if (device->protocol_version <= SCSI_REV_2 && start_ccb->ccb_h.target_lun < 8 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { start_ccb->csio.cdb_io.cdb_bytes[1] |= start_ccb->ccb_h.target_lun << 5; } start_ccb->csio.scsi_status = SCSI_STATUS_OK; CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n", scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0], &path->device->inq_data), scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes, cdb_str, sizeof(cdb_str)))); } /* FALLTHROUGH */ case XPT_TARGET_IO: case XPT_CONT_TARGET_IO: start_ccb->csio.sense_resid = 0; start_ccb->csio.resid = 0; /* FALLTHROUGH */ case XPT_ATA_IO: if (start_ccb->ccb_h.func_code == XPT_ATA_IO) { start_ccb->ataio.resid = 0; CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. 
ACB: %s\n", ata_op_string(&start_ccb->ataio.cmd), ata_cmd_string(&start_ccb->ataio.cmd, cdb_str, sizeof(cdb_str)))); } /* FALLTHROUGH */ case XPT_RESET_DEV: case XPT_ENG_EXEC: case XPT_SMP_IO: { int frozen; frozen = cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); path->device->sim->devq->alloc_openings += frozen; if (frozen > 0) xpt_run_dev_allocq(path->bus); if (xpt_schedule_dev_sendq(path->bus, path->device)) xpt_run_dev_sendq(path->bus); break; } case XPT_CALC_GEOMETRY: { struct cam_sim *sim; /* Filter out garbage */ if (start_ccb->ccg.block_size == 0 || start_ccb->ccg.volume_size == 0) { start_ccb->ccg.cylinders = 0; start_ccb->ccg.heads = 0; start_ccb->ccg.secs_per_track = 0; start_ccb->ccb_h.status = CAM_REQ_CMP; break; } #if defined(PC98) || defined(__sparc64__) /* * In a PC-98 system, geometry translation depens on * the "real" device geometry obtained from mode page 4. * SCSI geometry translation is performed in the * initialization routine of the SCSI BIOS and the result * stored in host memory. If the translation is available * in host memory, use it. If not, rely on the default * translation the device driver performs. * For sparc64, we may need adjust the geometry of large * disks in order to fit the limitations of the 16-bit * fields of the VTOC8 disk label. */ if (scsi_da_bios_params(&start_ccb->ccg) != 0) { start_ccb->ccb_h.status = CAM_REQ_CMP; break; } #endif sim = path->bus->sim; (*(sim->sim_action))(sim, start_ccb); break; } case XPT_ABORT: { union ccb* abort_ccb; abort_ccb = start_ccb->cab.abort_ccb; if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) { if (abort_ccb->ccb_h.pinfo.index >= 0) { struct cam_ccbq *ccbq; struct cam_ed *device; device = abort_ccb->ccb_h.path->device; ccbq = &device->ccbq; device->sim->devq->alloc_openings -= cam_ccbq_remove_ccb(ccbq, abort_ccb); abort_ccb->ccb_h.status = CAM_REQ_ABORTED|CAM_DEV_QFRZN; xpt_freeze_devq(abort_ccb->ccb_h.path, 1); xpt_done(abort_ccb); start_ccb->ccb_h.status = CAM_REQ_CMP; break; } if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { /* * We've caught this ccb en route to * the SIM. Flag it for abort and the * SIM will do so just before starting * real work on the CCB. */ abort_ccb->ccb_h.status = CAM_REQ_ABORTED|CAM_DEV_QFRZN; xpt_freeze_devq(abort_ccb->ccb_h.path, 1); start_ccb->ccb_h.status = CAM_REQ_CMP; break; } } if (XPT_FC_IS_QUEUED(abort_ccb) && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { /* * It's already completed but waiting * for our SWI to get to it. */ start_ccb->ccb_h.status = CAM_UA_ABORT; break; } /* * If we weren't able to take care of the abort request * in the XPT, pass the request down to the SIM for processing. 
*/ } /* FALLTHROUGH */ case XPT_ACCEPT_TARGET_IO: case XPT_EN_LUN: case XPT_IMMED_NOTIFY: case XPT_NOTIFY_ACK: case XPT_RESET_BUS: case XPT_IMMEDIATE_NOTIFY: case XPT_NOTIFY_ACKNOWLEDGE: case XPT_GET_SIM_KNOB: case XPT_SET_SIM_KNOB: { struct cam_sim *sim; sim = path->bus->sim; (*(sim->sim_action))(sim, start_ccb); break; } case XPT_PATH_INQ: { struct cam_sim *sim; sim = path->bus->sim; (*(sim->sim_action))(sim, start_ccb); break; } case XPT_PATH_STATS: start_ccb->cpis.last_reset = path->bus->last_reset; start_ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_GDEV_TYPE: { struct cam_ed *dev; dev = path->device; if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; } else { struct ccb_getdev *cgd; cgd = &start_ccb->cgd; cgd->protocol = dev->protocol; cgd->inq_data = dev->inq_data; cgd->ident_data = dev->ident_data; cgd->inq_flags = dev->inq_flags; cgd->ccb_h.status = CAM_REQ_CMP; cgd->serial_num_len = dev->serial_num_len; if ((dev->serial_num_len > 0) && (dev->serial_num != NULL)) bcopy(dev->serial_num, cgd->serial_num, dev->serial_num_len); } break; } case XPT_GDEV_STATS: { struct cam_ed *dev; dev = path->device; if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; } else { struct ccb_getdevstats *cgds; struct cam_eb *bus; struct cam_et *tar; cgds = &start_ccb->cgds; bus = path->bus; tar = path->target; cgds->dev_openings = dev->ccbq.dev_openings; cgds->dev_active = dev->ccbq.dev_active; cgds->devq_openings = dev->ccbq.devq_openings; cgds->devq_queued = dev->ccbq.queue.entries; cgds->held = dev->ccbq.held; cgds->last_reset = tar->last_reset; cgds->maxtags = dev->maxtags; cgds->mintags = dev->mintags; if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) cgds->last_reset = bus->last_reset; cgds->ccb_h.status = CAM_REQ_CMP; } break; } case XPT_GDEVLIST: { struct cam_periph *nperiph; struct periph_list *periph_head; struct ccb_getdevlist *cgdl; u_int i; struct cam_ed *device; int found; found = 0; /* * Don't want anyone mucking with our data. */ device = path->device; periph_head = &device->periphs; cgdl = &start_ccb->cgdl; /* * Check and see if the list has changed since the user * last requested a list member. If so, tell them that the * list has changed, and therefore they need to start over * from the beginning. */ if ((cgdl->index != 0) && (cgdl->generation != device->generation)) { cgdl->status = CAM_GDEVLIST_LIST_CHANGED; break; } /* * Traverse the list of peripherals and attempt to find * the requested peripheral. */ for (nperiph = SLIST_FIRST(periph_head), i = 0; (nperiph != NULL) && (i <= cgdl->index); nperiph = SLIST_NEXT(nperiph, periph_links), i++) { if (i == cgdl->index) { strncpy(cgdl->periph_name, nperiph->periph_name, DEV_IDLEN); cgdl->unit_number = nperiph->unit_number; found = 1; } } if (found == 0) { cgdl->status = CAM_GDEVLIST_ERROR; break; } if (nperiph == NULL) cgdl->status = CAM_GDEVLIST_LAST_DEVICE; else cgdl->status = CAM_GDEVLIST_MORE_DEVS; cgdl->index++; cgdl->generation = device->generation; cgdl->ccb_h.status = CAM_REQ_CMP; break; } case XPT_DEV_MATCH: { dev_pos_type position_type; struct ccb_dev_match *cdm; cdm = &start_ccb->cdm; /* * There are two ways of getting at information in the EDT. * The first way is via the primary EDT tree. It starts * with a list of busses, then a list of targets on a bus, * then devices/luns on a target, and then peripherals on a * device/lun. The "other" way is by the peripheral driver * lists. 
The peripheral driver lists are organized by * peripheral driver. (obviously) So it makes sense to * use the peripheral driver list if the user is looking * for something like "da1", or all "da" devices. If the * user is looking for something on a particular bus/target * or lun, it's generally better to go through the EDT tree. */ if (cdm->pos.position_type != CAM_DEV_POS_NONE) position_type = cdm->pos.position_type; else { u_int i; position_type = CAM_DEV_POS_NONE; for (i = 0; i < cdm->num_patterns; i++) { if ((cdm->patterns[i].type == DEV_MATCH_BUS) ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ position_type = CAM_DEV_POS_EDT; break; } } if (cdm->num_patterns == 0) position_type = CAM_DEV_POS_EDT; else if (position_type == CAM_DEV_POS_NONE) position_type = CAM_DEV_POS_PDRV; } /* * Note that we drop the SIM lock here, because the EDT * traversal code needs to do its own locking. */ CAM_SIM_UNLOCK(xpt_path_sim(cdm->ccb_h.path)); switch(position_type & CAM_DEV_POS_TYPEMASK) { case CAM_DEV_POS_EDT: xptedtmatch(cdm); break; case CAM_DEV_POS_PDRV: xptperiphlistmatch(cdm); break; default: cdm->status = CAM_DEV_MATCH_ERROR; break; } CAM_SIM_LOCK(xpt_path_sim(cdm->ccb_h.path)); if (cdm->status == CAM_DEV_MATCH_ERROR) start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; else start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_SASYNC_CB: { struct ccb_setasync *csa; struct async_node *cur_entry; struct async_list *async_head; u_int32_t added; csa = &start_ccb->csa; added = csa->event_enable; async_head = &path->device->asyncs; /* * If there is already an entry for us, simply * update it. */ cur_entry = SLIST_FIRST(async_head); while (cur_entry != NULL) { if ((cur_entry->callback_arg == csa->callback_arg) && (cur_entry->callback == csa->callback)) break; cur_entry = SLIST_NEXT(cur_entry, links); } if (cur_entry != NULL) { /* * If the request has no flags set, * remove the entry. */ added &= ~cur_entry->event_enable; if (csa->event_enable == 0) { SLIST_REMOVE(async_head, cur_entry, async_node, links); xpt_release_device(path->device); free(cur_entry, M_CAMXPT); } else { cur_entry->event_enable = csa->event_enable; } csa->event_enable = added; } else { cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, M_NOWAIT); if (cur_entry == NULL) { csa->ccb_h.status = CAM_RESRC_UNAVAIL; break; } cur_entry->event_enable = csa->event_enable; cur_entry->callback_arg = csa->callback_arg; cur_entry->callback = csa->callback; SLIST_INSERT_HEAD(async_head, cur_entry, links); xpt_acquire_device(path->device); } start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_REL_SIMQ: { struct ccb_relsim *crs; struct cam_ed *dev; crs = &start_ccb->crs; dev = path->device; if (dev == NULL) { crs->ccb_h.status = CAM_DEV_NOT_THERE; break; } if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { /* Don't ever go below one opening */ if (crs->openings > 0) { xpt_dev_ccbq_resize(path, crs->openings); if (bootverbose) { xpt_print(path, "number of openings is now %d\n", crs->openings); } } } if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { /* * Just extend the old timeout and decrement * the freeze count so that a single timeout * is sufficient for releasing the queue. 
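 */

/*
 * Sketch (not part of the original source) of registering an asynchronous
 * event callback through the XPT_SASYNC_CB case handled above: fill a
 * ccb_setasync with the event mask, callback and argument and push it
 * through xpt_action() with the SIM lock held.  The example_* names are
 * hypothetical; the fields and callback signature are the ones used above.
 */
static void
example_async_cb(void *arg, u_int32_t code, struct cam_path *path,
    void *async_arg)
{

        /* React to events such as AC_FOUND_DEVICE here. */
}

static void
example_register_async(struct cam_path *path, void *softc)
{
        struct ccb_setasync csa;

        xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
        csa.ccb_h.func_code = XPT_SASYNC_CB;
        csa.event_enable = AC_FOUND_DEVICE;
        csa.callback = example_async_cb;
        csa.callback_arg = softc;
        xpt_action((union ccb *)&csa);
}

/*
 * Re-issuing the same ccb with event_enable set to 0 removes the
 * registration again, as the removal branch above shows.  End of sketch;
 * the XPT_REL_SIMQ timeout handling resumes below.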
*/ start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; callout_stop(&dev->callout); } else { start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } callout_reset(&dev->callout, (crs->release_timeout * hz) / 1000, xpt_release_devq_timeout, dev); dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; } if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { /* * Decrement the freeze count so that a single * completion is still sufficient to unfreeze * the queue. */ start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; } else { dev->flags |= CAM_DEV_REL_ON_COMPLETE; start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } } if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 || (dev->ccbq.dev_active == 0)) { start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; } else { dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } } if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) { xpt_release_devq_rl(path, /*runlevel*/ (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ? crs->release_timeout : 0, /*count*/1, /*run_queue*/TRUE); } start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt[0]; start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_DEBUG: { struct cam_path *oldpath; struct cam_sim *oldsim; /* Check that all request bits are supported. */ if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) { start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; } cam_dflags = CAM_DEBUG_NONE; if (cam_dpath != NULL) { /* To release the old path we must hold proper lock. */ oldpath = cam_dpath; cam_dpath = NULL; oldsim = xpt_path_sim(oldpath); CAM_SIM_UNLOCK(xpt_path_sim(start_ccb->ccb_h.path)); CAM_SIM_LOCK(oldsim); xpt_free_path(oldpath); CAM_SIM_UNLOCK(oldsim); CAM_SIM_LOCK(xpt_path_sim(start_ccb->ccb_h.path)); } if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) { if (xpt_create_path(&cam_dpath, xpt_periph, start_ccb->ccb_h.path_id, start_ccb->ccb_h.target_id, start_ccb->ccb_h.target_lun) != CAM_REQ_CMP) { start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; } else { cam_dflags = start_ccb->cdbg.flags; start_ccb->ccb_h.status = CAM_REQ_CMP; xpt_print(cam_dpath, "debugging flags now %x\n", cam_dflags); } } else start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_FREEZE_QUEUE: { struct ccb_relsim *crs = &start_ccb->crs; xpt_freeze_devq_rl(path, /*runlevel*/ (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ? crs->release_timeout : 0, /*count*/1); start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_NOOP: if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) xpt_freeze_devq(path, 1); start_ccb->ccb_h.status = CAM_REQ_CMP; break; default: case XPT_SDEV_TYPE: case XPT_TERM_IO: case XPT_ENG_INQ: /* XXX Implement */ printf("%s: CCB type %#x not supported\n", __func__, start_ccb->ccb_h.func_code); start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) { xpt_done(start_ccb); } break; } } void xpt_polled_action(union ccb *start_ccb) { u_int32_t timeout; struct cam_sim *sim; struct cam_devq *devq; struct cam_ed *dev; timeout = start_ccb->ccb_h.timeout * 10; sim = start_ccb->ccb_h.path->bus->sim; devq = sim->devq; dev = start_ccb->ccb_h.path->device; mtx_assert(sim->mtx, MA_OWNED); /* Don't use ISR for this SIM while polling. */ sim->flags |= CAM_SIM_POLLED; /* * Steal an opening so that no other queued requests * can get it before us while we simulate interrupts. 
*/ dev->ccbq.devq_openings--; dev->ccbq.dev_openings--; while(((devq != NULL && devq->send_openings <= 0) || dev->ccbq.dev_openings < 0) && (--timeout > 0)) { DELAY(100); (*(sim->sim_poll))(sim); camisr_runqueue(&sim->sim_doneq); } dev->ccbq.devq_openings++; dev->ccbq.dev_openings++; if (timeout != 0) { xpt_action(start_ccb); while(--timeout > 0) { (*(sim->sim_poll))(sim); camisr_runqueue(&sim->sim_doneq); if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) break; DELAY(100); } if (timeout == 0) { /* * XXX Is it worth adding a sim_timeout entry * point so we can attempt recovery? If * this is only used for dumps, I don't think * it is. */ start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; } } else { start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; } /* We will use CAM ISR for this SIM again. */ sim->flags &= ~CAM_SIM_POLLED; } /* * Schedule a peripheral driver to receive a ccb when its * target device has space for more transactions. */ void xpt_schedule(struct cam_periph *perph, u_int32_t new_priority) { struct cam_ed *device; int runq = 0; mtx_assert(perph->sim->mtx, MA_OWNED); CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); device = perph->path->device; if (periph_is_queued(perph)) { /* Simply reorder based on new priority */ CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, (" change priority to %d\n", new_priority)); if (new_priority < perph->pinfo.priority) { camq_change_priority(&device->drvq, perph->pinfo.index, new_priority); runq = xpt_schedule_dev_allocq(perph->path->bus, device); } } else { /* New entry on the queue */ CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, (" added periph to queue\n")); perph->pinfo.priority = new_priority; perph->pinfo.generation = ++device->drvq.generation; camq_insert(&device->drvq, &perph->pinfo); runq = xpt_schedule_dev_allocq(perph->path->bus, device); } if (runq != 0) { CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, (" calling xpt_run_devq\n")); xpt_run_dev_allocq(perph->path->bus); } } /* * Schedule a device to run on a given queue. * If the device was inserted as a new entry on the queue, * return 1 meaning the device queue should be run. If we * were already queued, implying someone else has already * started the queue, return 0 so the caller doesn't attempt * to run the queue. */ int xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, u_int32_t new_priority) { int retval; u_int32_t old_priority; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); old_priority = pinfo->priority; /* * Are we already queued?
*/ if (pinfo->index != CAM_UNQUEUED_INDEX) { /* Simply reorder based on new priority */ if (new_priority < old_priority) { camq_change_priority(queue, pinfo->index, new_priority); CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("changed priority to %d\n", new_priority)); retval = 1; } else retval = 0; } else { /* New entry on the queue */ if (new_priority < old_priority) pinfo->priority = new_priority; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("Inserting onto queue\n")); pinfo->generation = ++queue->generation; camq_insert(queue, pinfo); retval = 1; } return (retval); } static void xpt_run_dev_allocq(struct cam_eb *bus) { struct cam_devq *devq; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n")); devq = bus->sim->devq; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, (" qfrozen_cnt == 0x%x, entries == %d, " "openings == %d, active == %d\n", devq->alloc_queue.qfrozen_cnt[0], devq->alloc_queue.entries, devq->alloc_openings, devq->alloc_active)); devq->alloc_queue.qfrozen_cnt[0]++; while ((devq->alloc_queue.entries > 0) && (devq->alloc_openings > 0) && (devq->alloc_queue.qfrozen_cnt[0] <= 1)) { struct cam_ed_qinfo *qinfo; struct cam_ed *device; union ccb *work_ccb; struct cam_periph *drv; struct camq *drvq; qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue, CAMQ_HEAD); device = qinfo->device; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("running device %p\n", device)); drvq = &device->drvq; KASSERT(drvq->entries > 0, ("xpt_run_dev_allocq: " "Device on queue without any work to do")); if ((work_ccb = xpt_get_ccb(device)) != NULL) { devq->alloc_openings--; devq->alloc_active++; drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD); xpt_setup_ccb(&work_ccb->ccb_h, drv->path, drv->pinfo.priority); CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("calling periph start\n")); drv->periph_start(drv, work_ccb); } else { /* * Malloc failure in alloc_ccb */ /* * XXX add us to a list to be run from free_ccb * if we don't have any ccbs active on this * device queue otherwise we may never get run * again. */ break; } /* We may have more work. Attempt to reschedule. */ xpt_schedule_dev_allocq(bus, device); } devq->alloc_queue.qfrozen_cnt[0]--; } static void xpt_run_dev_sendq(struct cam_eb *bus) { struct cam_devq *devq; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n")); devq = bus->sim->devq; devq->send_queue.qfrozen_cnt[0]++; while ((devq->send_queue.entries > 0) && (devq->send_openings > 0) && (devq->send_queue.qfrozen_cnt[0] <= 1)) { struct cam_ed_qinfo *qinfo; struct cam_ed *device; union ccb *work_ccb; struct cam_sim *sim; qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue, CAMQ_HEAD); device = qinfo->device; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("running device %p\n", device)); work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); if (work_ccb == NULL) { printf("device on run queue with no ccbs???\n"); continue; } if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { mtx_lock(&xsoftc.xpt_lock); if (xsoftc.num_highpower <= 0) { /* * We got a high power command, but we * don't have any available slots. Freeze * the device queue until we have a slot * available. */ xpt_freeze_devq(work_ccb->ccb_h.path, 1); STAILQ_INSERT_TAIL(&xsoftc.highpowerq, &work_ccb->ccb_h, xpt_links.stqe); mtx_unlock(&xsoftc.xpt_lock); continue; } else { /* * Consume a high power slot while * this ccb runs. 
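 * The slot is returned in camisr_runqueue() once this CCB completes.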
*/ xsoftc.num_highpower--; } mtx_unlock(&xsoftc.xpt_lock); } cam_ccbq_remove_ccb(&device->ccbq, work_ccb); cam_ccbq_send_ccb(&device->ccbq, work_ccb); devq->send_openings--; devq->send_active++; xpt_schedule_dev_sendq(bus, device); if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){ /* * The client wants to freeze the queue * after this CCB is sent. */ xpt_freeze_devq(work_ccb->ccb_h.path, 1); } /* In Target mode, the peripheral driver knows best... */ if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { if ((device->inq_flags & SID_CmdQue) != 0 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; else /* * Clear this in case of a retried CCB that * failed due to a rejected tag. */ work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; } /* * Device queues can be shared among multiple sim instances * that reside on different busses. Use the SIM in the queue * CCB's path, rather than the one in the bus that was passed * into this function. */ sim = work_ccb->ccb_h.path->bus->sim; (*(sim->sim_action))(sim, work_ccb); } devq->send_queue.qfrozen_cnt[0]--; } /* * This function merges stuff from the slave ccb into the master ccb, while * keeping important fields in the master ccb constant. */ void xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb) { /* * Pull fields that are valid for peripheral drivers to set * into the master CCB along with the CCB "payload". */ master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); } void xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); ccb_h->pinfo.priority = priority; ccb_h->path = path; ccb_h->path_id = path->bus->path_id; if (path->target) ccb_h->target_id = path->target->target_id; else ccb_h->target_id = CAM_TARGET_WILDCARD; if (path->device) { ccb_h->target_lun = path->device->lun_id; ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; } else { ccb_h->target_lun = CAM_TARGET_WILDCARD; } ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; ccb_h->flags = 0; } /* Path manipulation functions */ cam_status xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { struct cam_path *path; cam_status status; path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); if (path == NULL) { status = CAM_RESRC_UNAVAIL; return(status); } status = xpt_compile_path(path, perph, path_id, target_id, lun_id); if (status != CAM_REQ_CMP) { free(path, M_CAMPATH); path = NULL; } *new_path_ptr = path; return (status); } cam_status xpt_create_path_unlocked(struct cam_path **new_path_ptr, struct cam_periph *periph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { struct cam_path *path; struct cam_eb *bus = NULL; cam_status status; path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_WAITOK); bus = xpt_find_bus(path_id); if (bus != NULL) CAM_SIM_LOCK(bus->sim); status = xpt_compile_path(path, periph, path_id, target_id, lun_id); if (bus != NULL) { CAM_SIM_UNLOCK(bus->sim); xpt_release_bus(bus); } if (status != CAM_REQ_CMP) { free(path, M_CAMPATH); path = NULL; } *new_path_ptr = path; return (status); } cam_status xpt_compile_path(struct cam_path *new_path, struct 
cam_periph *perph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { struct cam_eb *bus; struct cam_et *target; struct cam_ed *device; cam_status status; status = CAM_REQ_CMP; /* Completed without error */ target = NULL; /* Wildcarded */ device = NULL; /* Wildcarded */ /* * We will potentially modify the EDT, so block interrupts * that may attempt to create cam paths. */ bus = xpt_find_bus(path_id); if (bus == NULL) { status = CAM_PATH_INVALID; } else { target = xpt_find_target(bus, target_id); if (target == NULL) { /* Create one */ struct cam_et *new_target; new_target = xpt_alloc_target(bus, target_id); if (new_target == NULL) { status = CAM_RESRC_UNAVAIL; } else { target = new_target; } } if (target != NULL) { device = xpt_find_device(target, lun_id); if (device == NULL) { /* Create one */ struct cam_ed *new_device; new_device = (*(bus->xport->alloc_device))(bus, target, lun_id); if (new_device == NULL) { status = CAM_RESRC_UNAVAIL; } else { device = new_device; } } } } /* * Only touch the user's data if we are successful. */ if (status == CAM_REQ_CMP) { new_path->periph = perph; new_path->bus = bus; new_path->target = target; new_path->device = device; CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); } else { if (device != NULL) xpt_release_device(device); if (target != NULL) xpt_release_target(target); if (bus != NULL) xpt_release_bus(bus); } return (status); } void xpt_release_path(struct cam_path *path) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); if (path->device != NULL) { xpt_release_device(path->device); path->device = NULL; } if (path->target != NULL) { xpt_release_target(path->target); path->target = NULL; } if (path->bus != NULL) { xpt_release_bus(path->bus); path->bus = NULL; } } void xpt_free_path(struct cam_path *path) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); xpt_release_path(path); free(path, M_CAMPATH); } void xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref) { mtx_lock(&xsoftc.xpt_topo_lock); if (bus_ref) { if (path->bus) *bus_ref = path->bus->refcount; else *bus_ref = 0; } mtx_unlock(&xsoftc.xpt_topo_lock); if (periph_ref) { if (path->periph) *periph_ref = path->periph->refcount; else *periph_ref = 0; } if (target_ref) { if (path->target) *target_ref = path->target->refcount; else *target_ref = 0; } if (device_ref) { if (path->device) *device_ref = path->device->refcount; else *device_ref = 0; } } /* * Return -1 for failure, 0 for exact match, 1 for match with wildcards * in path1, 2 for match with wildcards in path2. 
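 * For example, comparing a path that wildcards the target and LUN against
 * a fully specified path on the same bus returns 1; with the arguments
 * swapped it returns 2, and a mismatch with no wildcards returns -1.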
*/ int xpt_path_comp(struct cam_path *path1, struct cam_path *path2) { int retval = 0; if (path1->bus != path2->bus) { if (path1->bus->path_id == CAM_BUS_WILDCARD) retval = 1; else if (path2->bus->path_id == CAM_BUS_WILDCARD) retval = 2; else return (-1); } if (path1->target != path2->target) { if (path1->target->target_id == CAM_TARGET_WILDCARD) { if (retval == 0) retval = 1; } else if (path2->target->target_id == CAM_TARGET_WILDCARD) retval = 2; else return (-1); } if (path1->device != path2->device) { if (path1->device->lun_id == CAM_LUN_WILDCARD) { if (retval == 0) retval = 1; } else if (path2->device->lun_id == CAM_LUN_WILDCARD) retval = 2; else return (-1); } return (retval); } void xpt_print_path(struct cam_path *path) { if (path == NULL) printf("(nopath): "); else { if (path->periph != NULL) printf("(%s%d:", path->periph->periph_name, path->periph->unit_number); else printf("(noperiph:"); if (path->bus != NULL) printf("%s%d:%d:", path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id); else printf("nobus:"); if (path->target != NULL) printf("%d:", path->target->target_id); else printf("X:"); if (path->device != NULL) printf("%d): ", path->device->lun_id); else printf("X): "); } } void xpt_print(struct cam_path *path, const char *fmt, ...) { va_list ap; xpt_print_path(path); va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); } int xpt_path_string(struct cam_path *path, char *str, size_t str_len) { struct sbuf sb; #ifdef INVARIANTS if (path != NULL && path->bus != NULL) mtx_assert(path->bus->sim->mtx, MA_OWNED); #endif sbuf_new(&sb, str, str_len, 0); if (path == NULL) sbuf_printf(&sb, "(nopath): "); else { if (path->periph != NULL) sbuf_printf(&sb, "(%s%d:", path->periph->periph_name, path->periph->unit_number); else sbuf_printf(&sb, "(noperiph:"); if (path->bus != NULL) sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id); else sbuf_printf(&sb, "nobus:"); if (path->target != NULL) sbuf_printf(&sb, "%d:", path->target->target_id); else sbuf_printf(&sb, "X:"); if (path->device != NULL) sbuf_printf(&sb, "%d): ", path->device->lun_id); else sbuf_printf(&sb, "X): "); } sbuf_finish(&sb); return(sbuf_len(&sb)); } path_id_t xpt_path_path_id(struct cam_path *path) { return(path->bus->path_id); } target_id_t xpt_path_target_id(struct cam_path *path) { if (path->target != NULL) return (path->target->target_id); else return (CAM_TARGET_WILDCARD); } lun_id_t xpt_path_lun_id(struct cam_path *path) { if (path->device != NULL) return (path->device->lun_id); else return (CAM_LUN_WILDCARD); } struct cam_sim * xpt_path_sim(struct cam_path *path) { return (path->bus->sim); } struct cam_periph* xpt_path_periph(struct cam_path *path) { mtx_assert(path->bus->sim->mtx, MA_OWNED); return (path->periph); } int xpt_path_legacy_ata_id(struct cam_path *path) { struct cam_eb *bus; int bus_id; if ((strcmp(path->bus->sim->sim_name, "ata") != 0) && strcmp(path->bus->sim->sim_name, "ahcich") != 0 && strcmp(path->bus->sim->sim_name, "mvsch") != 0 && strcmp(path->bus->sim->sim_name, "siisch") != 0) return (-1); if (strcmp(path->bus->sim->sim_name, "ata") == 0 && path->bus->sim->unit_number < 2) { bus_id = path->bus->sim->unit_number; } else { bus_id = 2; xpt_lock_buses(); TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) { if (bus == path->bus) break; if ((strcmp(bus->sim->sim_name, "ata") == 0 && bus->sim->unit_number >= 2) || strcmp(bus->sim->sim_name, "ahcich") == 0 || strcmp(bus->sim->sim_name, "mvsch") == 0 || strcmp(bus->sim->sim_name, 
"siisch") == 0) bus_id++; } xpt_unlock_buses(); } if (path->target != NULL) { if (path->target->target_id < 2) return (bus_id * 2 + path->target->target_id); else return (-1); } else return (bus_id * 2); } /* * Release a CAM control block for the caller. Remit the cost of the structure * to the device referenced by the path. If the this device had no 'credits' * and peripheral drivers have registered async callbacks for this notification * call them now. */ void xpt_release_ccb(union ccb *free_ccb) { struct cam_path *path; struct cam_ed *device; struct cam_eb *bus; struct cam_sim *sim; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); path = free_ccb->ccb_h.path; device = path->device; bus = path->bus; sim = bus->sim; mtx_assert(sim->mtx, MA_OWNED); cam_ccbq_release_opening(&device->ccbq); if (device->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) { device->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED; cam_ccbq_resize(&device->ccbq, device->ccbq.dev_openings + device->ccbq.dev_active); } if (sim->ccb_count > sim->max_ccbs) { xpt_free_ccb(free_ccb); sim->ccb_count--; } else { SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h, xpt_links.sle); } if (sim->devq == NULL) { return; } sim->devq->alloc_openings++; sim->devq->alloc_active--; if (device_is_alloc_queued(device) == 0) xpt_schedule_dev_allocq(bus, device); xpt_run_dev_allocq(bus); } /* Functions accessed by SIM drivers */ static struct xpt_xport xport_default = { .alloc_device = xpt_alloc_device_default, .action = xpt_action_default, .async = xpt_dev_async_default, }; /* * A sim structure, listing the SIM entry points and instance * identification info is passed to xpt_bus_register to hook the SIM * into the CAM framework. xpt_bus_register creates a cam_eb entry * for this new bus and places it in the array of busses and assigns * it a path_id. The path_id may be influenced by "hard wiring" * information specified by the user. Once interrupt services are * available, the bus will be probed. */ int32_t xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus) { struct cam_eb *new_bus; struct cam_eb *old_bus; struct ccb_pathinq cpi; struct cam_path *path; cam_status status; mtx_assert(sim->mtx, MA_OWNED); sim->bus_id = bus; new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), M_CAMXPT, M_NOWAIT); if (new_bus == NULL) { /* Couldn't satisfy request */ return (CAM_RESRC_UNAVAIL); } if (strcmp(sim->sim_name, "xpt") != 0) { sim->path_id = xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); } TAILQ_INIT(&new_bus->et_entries); new_bus->path_id = sim->path_id; cam_sim_hold(sim); new_bus->sim = sim; timevalclear(&new_bus->last_reset); new_bus->flags = 0; new_bus->refcount = 1; /* Held until a bus_deregister event */ new_bus->generation = 0; mtx_lock(&xsoftc.xpt_topo_lock); old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); while (old_bus != NULL && old_bus->path_id < new_bus->path_id) old_bus = TAILQ_NEXT(old_bus, links); if (old_bus != NULL) TAILQ_INSERT_BEFORE(old_bus, new_bus, links); else TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); xsoftc.bus_generation++; mtx_unlock(&xsoftc.xpt_topo_lock); /* * Set a default transport so that a PATH_INQ can be issued to * the SIM. This will then allow for probing and attaching of * a more appropriate transport. 
*/ new_bus->xport = &xport_default; status = xpt_create_path(&path, /*periph*/NULL, sim->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { xpt_release_bus(new_bus); free(path, M_CAMXPT); return (CAM_RESRC_UNAVAIL); } xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.ccb_h.status == CAM_REQ_CMP) { switch (cpi.transport) { case XPORT_SPI: case XPORT_SAS: case XPORT_FC: case XPORT_USB: case XPORT_ISCSI: case XPORT_PPB: new_bus->xport = scsi_get_xport(); break; case XPORT_ATA: case XPORT_SATA: new_bus->xport = ata_get_xport(); break; default: new_bus->xport = &xport_default; break; } } /* Notify interested parties */ if (sim->path_id != CAM_XPT_PATH_ID) { union ccb *scan_ccb; xpt_async(AC_PATH_REGISTERED, path, &cpi); /* Initiate bus rescan. */ scan_ccb = xpt_alloc_ccb_nowait(); scan_ccb->ccb_h.path = path; scan_ccb->ccb_h.func_code = XPT_SCAN_BUS; scan_ccb->crcn.flags = 0; xpt_rescan(scan_ccb); } else xpt_free_path(path); return (CAM_SUCCESS); } int32_t xpt_bus_deregister(path_id_t pathid) { struct cam_path bus_path; cam_status status; status = xpt_compile_path(&bus_path, NULL, pathid, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) return (status); xpt_async(AC_LOST_DEVICE, &bus_path, NULL); xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); /* Release the reference count held while registered. */ xpt_release_bus(bus_path.bus); xpt_release_path(&bus_path); return (CAM_REQ_CMP); } static path_id_t xptnextfreepathid(void) { struct cam_eb *bus; path_id_t pathid; const char *strval; pathid = 0; mtx_lock(&xsoftc.xpt_topo_lock); bus = TAILQ_FIRST(&xsoftc.xpt_busses); retry: /* Find an unoccupied pathid */ while (bus != NULL && bus->path_id <= pathid) { if (bus->path_id == pathid) pathid++; bus = TAILQ_NEXT(bus, links); } mtx_unlock(&xsoftc.xpt_topo_lock); /* * Ensure that this pathid is not reserved for * a bus that may be registered in the future. */ if (resource_string_value("scbus", pathid, "at", &strval) == 0) { ++pathid; /* Start the search over */ mtx_lock(&xsoftc.xpt_topo_lock); goto retry; } return (pathid); } static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus) { path_id_t pathid; int i, dunit, val; char buf[32]; const char *dname; pathid = CAM_XPT_PATH_ID; snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); i = 0; while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { if (strcmp(dname, "scbus")) { /* Avoid a bit of foot shooting. */ continue; } if (dunit < 0) /* unwired?! */ continue; if (resource_int_value("scbus", dunit, "bus", &val) == 0) { if (sim_bus == val) { pathid = dunit; break; } } else if (sim_bus == 0) { /* Unspecified matches bus 0 */ pathid = dunit; break; } else { printf("Ambiguous scbus configuration for %s%d " "bus %d, cannot wire down. 
The kernel " "config entry for scbus%d should " "specify a controller bus.\n" "Scbus will be assigned dynamically.\n", sim_name, sim_unit, sim_bus, dunit); break; } } if (pathid == CAM_XPT_PATH_ID) pathid = xptnextfreepathid(); return (pathid); } static const char * xpt_async_string(u_int32_t async_code) { switch (async_code) { case AC_BUS_RESET: return ("AC_BUS_RESET"); case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL"); case AC_SCSI_AEN: return ("AC_SCSI_AEN"); case AC_SENT_BDR: return ("AC_SENT_BDR"); case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED"); case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED"); case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE"); case AC_LOST_DEVICE: return ("AC_LOST_DEVICE"); case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG"); case AC_INQ_CHANGED: return ("AC_INQ_CHANGED"); case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED"); case AC_CONTRACT: return ("AC_CONTRACT"); case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED"); case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION"); } return ("AC_UNKNOWN"); } void xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) { struct cam_eb *bus; struct cam_et *target, *next_target; struct cam_ed *device, *next_device; mtx_assert(path->bus->sim->mtx, MA_OWNED); CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, ("xpt_async(%s)\n", xpt_async_string(async_code))); /* * Most async events come from a CAM interrupt context. In * a few cases, the error recovery code at the peripheral layer, * which may run from our SWI or a process context, may signal * deferred events with a call to xpt_async. */ bus = path->bus; if (async_code == AC_BUS_RESET) { /* Update our notion of when the last reset occurred */ microtime(&bus->last_reset); } for (target = TAILQ_FIRST(&bus->et_entries); target != NULL; target = next_target) { next_target = TAILQ_NEXT(target, links); if (path->target != target && path->target->target_id != CAM_TARGET_WILDCARD && target->target_id != CAM_TARGET_WILDCARD) continue; if (async_code == AC_SENT_BDR) { /* Update our notion of when the last reset occurred */ microtime(&path->target->last_reset); } for (device = TAILQ_FIRST(&target->ed_entries); device != NULL; device = next_device) { next_device = TAILQ_NEXT(device, links); if (path->device != device && path->device->lun_id != CAM_LUN_WILDCARD && device->lun_id != CAM_LUN_WILDCARD) continue; /* * The async callback could free the device. * If it is a broadcast async, it doesn't hold * device reference, so take our own reference. */ xpt_acquire_device(device); (*(bus->xport->async))(async_code, bus, target, device, async_arg); xpt_async_bcast(&device->asyncs, async_code, path, async_arg); xpt_release_device(device); } } /* * If this wasn't a fully wildcarded async, tell all * clients that want all async events. */ if (bus != xpt_periph->path->bus) xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code, path, async_arg); } static void xpt_async_bcast(struct async_list *async_head, u_int32_t async_code, struct cam_path *path, void *async_arg) { struct async_node *cur_entry; cur_entry = SLIST_FIRST(async_head); while (cur_entry != NULL) { struct async_node *next_entry; /* * Grab the next list entry before we call the current * entry's callback. This is because the callback function * can delete its async callback entry. 
*/ next_entry = SLIST_NEXT(cur_entry, links); if ((cur_entry->event_enable & async_code) != 0) cur_entry->callback(cur_entry->callback_arg, async_code, path, async_arg); cur_entry = next_entry; } } static void xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg) { printf("%s called\n", __func__); } u_int32_t xpt_freeze_devq_rl(struct cam_path *path, cam_rl rl, u_int count) { struct cam_ed *dev = path->device; mtx_assert(path->bus->sim->mtx, MA_OWNED); dev->sim->devq->alloc_openings += cam_ccbq_freeze(&dev->ccbq, rl, count); /* Remove frozen device from allocq. */ if (device_is_alloc_queued(dev) && cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL( CAMQ_GET_PRIO(&dev->drvq)))) { camq_remove(&dev->sim->devq->alloc_queue, dev->alloc_ccb_entry.pinfo.index); } /* Remove frozen device from sendq. */ if (device_is_send_queued(dev) && cam_ccbq_frozen_top(&dev->ccbq)) { camq_remove(&dev->sim->devq->send_queue, dev->send_ccb_entry.pinfo.index); } return (dev->ccbq.queue.qfrozen_cnt[rl]); } u_int32_t xpt_freeze_devq(struct cam_path *path, u_int count) { return (xpt_freeze_devq_rl(path, 0, count)); } u_int32_t xpt_freeze_simq(struct cam_sim *sim, u_int count) { mtx_assert(sim->mtx, MA_OWNED); sim->devq->send_queue.qfrozen_cnt[0] += count; return (sim->devq->send_queue.qfrozen_cnt[0]); } static void xpt_release_devq_timeout(void *arg) { struct cam_ed *device; device = (struct cam_ed *)arg; xpt_release_devq_device(device, /*rl*/0, /*count*/1, /*run_queue*/TRUE); } void xpt_release_devq(struct cam_path *path, u_int count, int run_queue) { mtx_assert(path->bus->sim->mtx, MA_OWNED); xpt_release_devq_device(path->device, /*rl*/0, count, run_queue); } void xpt_release_devq_rl(struct cam_path *path, cam_rl rl, u_int count, int run_queue) { mtx_assert(path->bus->sim->mtx, MA_OWNED); xpt_release_devq_device(path->device, rl, count, run_queue); } static void xpt_release_devq_device(struct cam_ed *dev, cam_rl rl, u_int count, int run_queue) { if (count > dev->ccbq.queue.qfrozen_cnt[rl]) { #ifdef INVARIANTS printf("xpt_release_devq(%d): requested %u > present %u\n", rl, count, dev->ccbq.queue.qfrozen_cnt[rl]); #endif count = dev->ccbq.queue.qfrozen_cnt[rl]; } dev->sim->devq->alloc_openings -= cam_ccbq_release(&dev->ccbq, rl, count); if (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL( CAMQ_GET_PRIO(&dev->drvq))) == 0) { if (xpt_schedule_dev_allocq(dev->target->bus, dev)) xpt_run_dev_allocq(dev->target->bus); } if (cam_ccbq_frozen_top(&dev->ccbq) == 0) { /* * No longer need to wait for a successful * command completion. */ dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; /* * Remove any timeouts that might be scheduled * to release this queue. */ if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { callout_stop(&dev->callout); dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; } if (run_queue == 0) return; /* * Now that we are unfrozen schedule the * device so any pending transactions are * run. */ if (xpt_schedule_dev_sendq(dev->target->bus, dev)) xpt_run_dev_sendq(dev->target->bus); } } void xpt_release_simq(struct cam_sim *sim, int run_queue) { struct camq *sendq; mtx_assert(sim->mtx, MA_OWNED); sendq = &(sim->devq->send_queue); if (sendq->qfrozen_cnt[0] <= 0) { #ifdef INVARIANTS printf("xpt_release_simq: requested 1 > present %u\n", sendq->qfrozen_cnt[0]); #endif } else sendq->qfrozen_cnt[0]--; if (sendq->qfrozen_cnt[0] == 0) { /* * If there is a timeout scheduled to release this * sim queue, remove it. The queue frozen count is * already at 0. 
*/ if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){ callout_stop(&sim->callout); sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING; } if (run_queue) { struct cam_eb *bus; /* * Now that we are unfrozen run the send queue. */ bus = xpt_find_bus(sim->path_id); xpt_run_dev_sendq(bus); xpt_release_bus(bus); } } } /* * XXX Appears to be unused. */ static void xpt_release_simq_timeout(void *arg) { struct cam_sim *sim; sim = (struct cam_sim *)arg; xpt_release_simq(sim, /* run_queue */ TRUE); } void xpt_done(union ccb *done_ccb) { struct cam_sim *sim; int first; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n")); if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) { /* * Queue up the request for handling by our SWI handler * for any of the "non-immediate" type of ccbs. */ sim = done_ccb->ccb_h.path->bus->sim; TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h, sim_links.tqe); done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; if ((sim->flags & (CAM_SIM_ON_DONEQ | CAM_SIM_POLLED | CAM_SIM_BATCH)) == 0) { mtx_lock(&cam_simq_lock); first = TAILQ_EMPTY(&cam_simq); TAILQ_INSERT_TAIL(&cam_simq, sim, links); mtx_unlock(&cam_simq_lock); sim->flags |= CAM_SIM_ON_DONEQ; if (first) swi_sched(cambio_ih, 0); } } } void xpt_batch_start(struct cam_sim *sim) { KASSERT((sim->flags & CAM_SIM_BATCH) == 0, ("Batch flag already set")); sim->flags |= CAM_SIM_BATCH; } void xpt_batch_done(struct cam_sim *sim) { KASSERT((sim->flags & CAM_SIM_BATCH) != 0, ("Batch flag was not set")); sim->flags &= ~CAM_SIM_BATCH; if (!TAILQ_EMPTY(&sim->sim_doneq) && (sim->flags & CAM_SIM_ON_DONEQ) == 0) camisr_runqueue(&sim->sim_doneq); } union ccb * xpt_alloc_ccb() { union ccb *new_ccb; new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); return (new_ccb); } union ccb * xpt_alloc_ccb_nowait() { union ccb *new_ccb; new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); return (new_ccb); } void xpt_free_ccb(union ccb *free_ccb) { free(free_ccb, M_CAMCCB); } /* Private XPT functions */ /* * Get a CAM control block for the caller. Charge the structure to the device * referenced by the path. If this device has no 'credits' then the * device already has the maximum number of outstanding operations under way * and we return NULL. If we don't have sufficient resources to allocate more * ccbs, we also return NULL.
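 * Callers must therefore be prepared for a NULL return; xpt_run_dev_allocq(),
 * for instance, simply stops draining its allocation queue when that happens.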
*/ static union ccb * xpt_get_ccb(struct cam_ed *device) { union ccb *new_ccb; struct cam_sim *sim; sim = device->sim; if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) { new_ccb = xpt_alloc_ccb_nowait(); if (new_ccb == NULL) { return (NULL); } if ((sim->flags & CAM_SIM_MPSAFE) == 0) callout_handle_init(&new_ccb->ccb_h.timeout_ch); SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h, xpt_links.sle); sim->ccb_count++; } cam_ccbq_take_opening(&device->ccbq); SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle); return (new_ccb); } static void xpt_release_bus(struct cam_eb *bus) { mtx_lock(&xsoftc.xpt_topo_lock); KASSERT(bus->refcount >= 1, ("bus->refcount >= 1")); if ((--bus->refcount == 0) && (TAILQ_FIRST(&bus->et_entries) == NULL)) { TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); xsoftc.bus_generation++; mtx_unlock(&xsoftc.xpt_topo_lock); cam_sim_release(bus->sim); free(bus, M_CAMXPT); } else mtx_unlock(&xsoftc.xpt_topo_lock); } static struct cam_et * xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) { struct cam_et *target; target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT|M_ZERO); if (target != NULL) { struct cam_et *cur_target; TAILQ_INIT(&target->ed_entries); target->bus = bus; target->target_id = target_id; target->refcount = 1; target->generation = 0; target->luns = NULL; timevalclear(&target->last_reset); /* * Hold a reference to our parent bus so it * will not go away before we do. */ mtx_lock(&xsoftc.xpt_topo_lock); bus->refcount++; mtx_unlock(&xsoftc.xpt_topo_lock); /* Insertion sort into our bus's target list */ cur_target = TAILQ_FIRST(&bus->et_entries); while (cur_target != NULL && cur_target->target_id < target_id) cur_target = TAILQ_NEXT(cur_target, links); if (cur_target != NULL) { TAILQ_INSERT_BEFORE(cur_target, target, links); } else { TAILQ_INSERT_TAIL(&bus->et_entries, target, links); } bus->generation++; } return (target); } static void xpt_release_target(struct cam_et *target) { if (target->refcount == 1) { if (TAILQ_FIRST(&target->ed_entries) == NULL) { TAILQ_REMOVE(&target->bus->et_entries, target, links); target->bus->generation++; xpt_release_bus(target->bus); if (target->luns) free(target->luns, M_CAMXPT); free(target, M_CAMXPT); } } else target->refcount--; } static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct cam_ed *device, *cur_device; device = xpt_alloc_device(bus, target, lun_id); if (device == NULL) return (NULL); device->mintags = 1; device->maxtags = 1; bus->sim->max_ccbs += device->ccbq.devq_openings; cur_device = TAILQ_FIRST(&target->ed_entries); while (cur_device != NULL && cur_device->lun_id < lun_id) cur_device = TAILQ_NEXT(cur_device, links); if (cur_device != NULL) { TAILQ_INSERT_BEFORE(cur_device, device, links); } else { TAILQ_INSERT_TAIL(&target->ed_entries, device, links); } target->generation++; return (device); } struct cam_ed * xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct cam_ed *device; struct cam_devq *devq; cam_status status; /* Make space for us in the device queue on our bus */ devq = bus->sim->devq; status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1); if (status != CAM_REQ_CMP) { device = NULL; } else { device = (struct cam_ed *)malloc(sizeof(*device), M_CAMDEV, M_NOWAIT|M_ZERO); } if (device != NULL) { cam_init_pinfo(&device->alloc_ccb_entry.pinfo); device->alloc_ccb_entry.device = device; cam_init_pinfo(&device->send_ccb_entry.pinfo); device->send_ccb_entry.device = device; 
device->target = target; device->lun_id = lun_id; device->sim = bus->sim; /* Initialize our queues */ if (camq_init(&device->drvq, 0) != 0) { free(device, M_CAMDEV); return (NULL); } if (cam_ccbq_init(&device->ccbq, bus->sim->max_dev_openings) != 0) { camq_fini(&device->drvq); free(device, M_CAMDEV); return (NULL); } SLIST_INIT(&device->asyncs); SLIST_INIT(&device->periphs); device->generation = 0; device->owner = NULL; device->flags = CAM_DEV_UNCONFIGURED; device->tag_delay_count = 0; device->tag_saved_openings = 0; device->refcount = 1; callout_init_mtx(&device->callout, bus->sim->mtx, 0); /* * Hold a reference to our parent target so it * will not go away before we do. */ target->refcount++; } return (device); } void xpt_acquire_device(struct cam_ed *device) { device->refcount++; } void xpt_release_device(struct cam_ed *device) { if (device->refcount == 1) { struct cam_devq *devq; if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX) panic("Removing device while still queued for ccbs"); if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) callout_stop(&device->callout); TAILQ_REMOVE(&device->target->ed_entries, device,links); device->target->generation++; device->target->bus->sim->max_ccbs -= device->ccbq.devq_openings; /* Release our slot in the devq */ devq = device->target->bus->sim->devq; cam_devq_resize(devq, devq->alloc_queue.array_size - 1); camq_fini(&device->drvq); cam_ccbq_fini(&device->ccbq); /* * Free allocated memory. free(9) does nothing if the * supplied pointer is NULL, so it is safe to call without * checking. */ free(device->supported_vpds, M_CAMXPT); free(device->device_id, M_CAMXPT); free(device->physpath, M_CAMXPT); free(device->rcap_buf, M_CAMXPT); free(device->serial_num, M_CAMXPT); xpt_release_target(device->target); free(device, M_CAMDEV); } else device->refcount--; } u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) { int diff; int result; struct cam_ed *dev; dev = path->device; diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings); result = cam_ccbq_resize(&dev->ccbq, newopenings); if (result == CAM_REQ_CMP && (diff < 0)) { dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED; } if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || (dev->inq_flags & SID_CmdQue) != 0) dev->tag_saved_openings = newopenings; /* Adjust the global limit */ dev->sim->max_ccbs += diff; return (result); } static struct cam_eb * xpt_find_bus(path_id_t path_id) { struct cam_eb *bus; mtx_lock(&xsoftc.xpt_topo_lock); for (bus = TAILQ_FIRST(&xsoftc.xpt_busses); bus != NULL; bus = TAILQ_NEXT(bus, links)) { if (bus->path_id == path_id) { bus->refcount++; break; } } mtx_unlock(&xsoftc.xpt_topo_lock); return (bus); } static struct cam_et * xpt_find_target(struct cam_eb *bus, target_id_t target_id) { struct cam_et *target; for (target = TAILQ_FIRST(&bus->et_entries); target != NULL; target = TAILQ_NEXT(target, links)) { if (target->target_id == target_id) { target->refcount++; break; } } return (target); } static struct cam_ed * xpt_find_device(struct cam_et *target, lun_id_t lun_id) { struct cam_ed *device; for (device = TAILQ_FIRST(&target->ed_entries); device != NULL; device = TAILQ_NEXT(device, links)) { if (device->lun_id == lun_id) { device->refcount++; break; } } return (device); } void xpt_start_tags(struct cam_path *path) { struct ccb_relsim crs; struct cam_ed *device; struct cam_sim *sim; int newopenings; device = path->device; sim = path->bus->sim; device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 
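/*
 * Freeze the device queue while the openings are resized below; the
 * XPT_REL_SIMQ CCB built at the end of this function (using
 * RELSIM_RELEASE_AFTER_QEMPTY) releases it again once the queue drains,
 * or immediately if nothing is outstanding.
 */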
xpt_freeze_devq(path, /*count*/1); device->inq_flags |= SID_CmdQue; if (device->tag_saved_openings != 0) newopenings = device->tag_saved_openings; else newopenings = min(device->maxtags, sim->max_tagged_dev_openings); xpt_dev_ccbq_resize(path, newopenings); xpt_async(AC_GETDEV_CHANGED, path, NULL); xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; crs.openings = crs.release_timeout = crs.qfrozen_cnt = 0; xpt_action((union ccb *)&crs); } void xpt_stop_tags(struct cam_path *path) { struct ccb_relsim crs; struct cam_ed *device; struct cam_sim *sim; device = path->device; sim = path->bus->sim; device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; device->tag_delay_count = 0; xpt_freeze_devq(path, /*count*/1); device->inq_flags &= ~SID_CmdQue; xpt_dev_ccbq_resize(path, sim->max_dev_openings); xpt_async(AC_GETDEV_CHANGED, path, NULL); xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; crs.openings = crs.release_timeout = crs.qfrozen_cnt = 0; xpt_action((union ccb *)&crs); } static void xpt_boot_delay(void *arg) { xpt_release_boot(); } static void xpt_config(void *arg) { /* * Now that interrupts are enabled, go find our devices */ /* Setup debugging path */ if (cam_dflags != CAM_DEBUG_NONE) { if (xpt_create_path_unlocked(&cam_dpath, xpt_periph, CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN) != CAM_REQ_CMP) { printf("xpt_config: xpt_create_path() failed for debug" " target %d:%d:%d, debugging disabled\n", CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); cam_dflags = CAM_DEBUG_NONE; } } else cam_dpath = NULL; periphdriver_init(1); xpt_hold_boot(); callout_init(&xsoftc.boot_callout, 1); callout_reset(&xsoftc.boot_callout, hz * xsoftc.boot_delay / 1000, xpt_boot_delay, NULL); /* Fire up rescan thread. */ if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) { printf("xpt_config: failed to create rescan thread.\n"); } } void xpt_hold_boot(void) { xpt_lock_buses(); xsoftc.buses_to_config++; xpt_unlock_buses(); } void xpt_release_boot(void) { xpt_lock_buses(); xsoftc.buses_to_config--; if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) { struct xpt_task *task; xsoftc.buses_config_done = 1; xpt_unlock_buses(); /* Call manually because we don't have any busses */ task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT); if (task != NULL) { TASK_INIT(&task->task, 0, xpt_finishconfig_task, task); taskqueue_enqueue(taskqueue_thread, &task->task); } } else xpt_unlock_buses(); } /* * If the given device only has one peripheral attached to it, and if that * peripheral is the passthrough driver, announce it. This insures that the * user sees some sort of announcement for every peripheral in their system. */ static int xptpassannouncefunc(struct cam_ed *device, void *arg) { struct cam_periph *periph; int i; for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; periph = SLIST_NEXT(periph, periph_links), i++); periph = SLIST_FIRST(&device->periphs); if ((i == 1) && (strncmp(periph->periph_name, "pass", 4) == 0)) xpt_announce_periph(periph, NULL); return(1); } static void xpt_finishconfig_task(void *context, int pending) { periphdriver_init(2); /* * Check for devices with no "standard" peripheral driver * attached. For any devices like that, announce the * passthrough driver so the user will see something. 
*/ if (!bootverbose) xpt_for_all_devices(xptpassannouncefunc, NULL); /* Release our hook so that the boot can continue. */ config_intrhook_disestablish(xsoftc.xpt_config_hook); free(xsoftc.xpt_config_hook, M_CAMXPT); xsoftc.xpt_config_hook = NULL; free(context, M_CAMXPT); } cam_status xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, struct cam_path *path) { struct ccb_setasync csa; cam_status status; int xptpath = 0; if (path == NULL) { mtx_lock(&xsoftc.xpt_lock); status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { mtx_unlock(&xsoftc.xpt_lock); return (status); } xptpath = 1; } xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = event; csa.callback = cbfunc; csa.callback_arg = cbarg; xpt_action((union ccb *)&csa); status = csa.ccb_h.status; if (xptpath) { xpt_free_path(path); mtx_unlock(&xsoftc.xpt_lock); } if ((status == CAM_REQ_CMP) && (csa.event_enable & AC_FOUND_DEVICE)) { /* * Get this peripheral up to date with all * the currently existing devices. */ xpt_for_all_devices(xptsetasyncfunc, &csa); } if ((status == CAM_REQ_CMP) && (csa.event_enable & AC_PATH_REGISTERED)) { /* * Get this peripheral up to date with all * the currently existing busses. */ xpt_for_all_busses(xptsetasyncbusfunc, &csa); } return (status); } static void xptaction(struct cam_sim *sim, union ccb *work_ccb) { CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); switch (work_ccb->ccb_h.func_code) { /* Common cases first */ case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi; cpi = &work_ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = 0; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 0; cpi->max_lun = 0; cpi->initiator_id = 0; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "", HBA_IDLEN); strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); cpi->unit_number = sim->unit_number; cpi->bus_id = sim->bus_id; cpi->base_transfer_speed = 0; cpi->protocol = PROTO_UNSPECIFIED; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; cpi->transport = XPORT_UNSPECIFIED; cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(work_ccb); break; } default: work_ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(work_ccb); break; } } /* * The xpt as a "controller" has no interrupt sources, so polling * is a no-op. 
*/ static void xptpoll(struct cam_sim *sim) { } void xpt_lock_buses(void) { mtx_lock(&xsoftc.xpt_topo_lock); } void xpt_unlock_buses(void) { mtx_unlock(&xsoftc.xpt_topo_lock); } static void camisr(void *dummy) { cam_simq_t queue; struct cam_sim *sim; mtx_lock(&cam_simq_lock); TAILQ_INIT(&queue); while (!TAILQ_EMPTY(&cam_simq)) { TAILQ_CONCAT(&queue, &cam_simq, links); mtx_unlock(&cam_simq_lock); while ((sim = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, sim, links); CAM_SIM_LOCK(sim); camisr_runqueue(&sim->sim_doneq); sim->flags &= ~CAM_SIM_ON_DONEQ; CAM_SIM_UNLOCK(sim); } mtx_lock(&cam_simq_lock); } mtx_unlock(&cam_simq_lock); } static void camisr_runqueue(void *V_queue) { cam_isrq_t *queue = V_queue; struct ccb_hdr *ccb_h; while ((ccb_h = TAILQ_FIRST(queue)) != NULL) { int runq; TAILQ_REMOVE(queue, ccb_h, sim_links.tqe); ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE, ("camisr\n")); runq = FALSE; if (ccb_h->flags & CAM_HIGH_POWER) { struct highpowerlist *hphead; union ccb *send_ccb; mtx_lock(&xsoftc.xpt_lock); hphead = &xsoftc.highpowerq; send_ccb = (union ccb *)STAILQ_FIRST(hphead); /* * Increment the count since this command is done. */ xsoftc.num_highpower++; /* * Any high powered commands queued up? */ if (send_ccb != NULL) { STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe); mtx_unlock(&xsoftc.xpt_lock); xpt_release_devq(send_ccb->ccb_h.path, /*count*/1, /*runqueue*/TRUE); } else mtx_unlock(&xsoftc.xpt_lock); } if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { struct cam_ed *dev; dev = ccb_h->path->device; cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); ccb_h->path->bus->sim->devq->send_active--; ccb_h->path->bus->sim->devq->send_openings++; runq = TRUE; if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 && (dev->ccbq.dev_active == 0))) { dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/FALSE); } if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/FALSE); } if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 && (--dev->tag_delay_count == 0)) xpt_start_tags(ccb_h->path); if (!device_is_send_queued(dev)) { (void)xpt_schedule_dev_sendq(ccb_h->path->bus, dev); } } if (ccb_h->status & CAM_RELEASE_SIMQ) { xpt_release_simq(ccb_h->path->bus->sim, /*run_queue*/TRUE); ccb_h->status &= ~CAM_RELEASE_SIMQ; runq = FALSE; } if ((ccb_h->flags & CAM_DEV_QFRZDIS) && (ccb_h->status & CAM_DEV_QFRZN)) { xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); ccb_h->status &= ~CAM_DEV_QFRZN; } else if (runq) { xpt_run_dev_sendq(ccb_h->path->bus); } /* Call the peripheral driver's callback */ (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); } } Index: projects/physbio/sys/cam/ctl/ctl_frontend_cam_sim.c =================================================================== --- projects/physbio/sys/cam/ctl/ctl_frontend_cam_sim.c (revision 243875) +++ projects/physbio/sys/cam/ctl/ctl_frontend_cam_sim.c (revision 243876) @@ -1,870 +1,869 @@ /*- * Copyright (c) 2009 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_cam_sim.c#4 $ */ /* * CTL frontend to CAM SIM interface. This allows access to CTL LUNs via * the da(4) and pass(4) drivers from inside the system. * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define io_ptr spriv_ptr1 struct cfcs_io { union ccb *ccb; }; struct cfcs_softc { struct ctl_frontend fe; char port_name[32]; struct cam_sim *sim; struct cam_devq *devq; struct cam_path *path; struct mtx lock; char lock_desc[32]; uint64_t wwnn; uint64_t wwpn; uint32_t cur_tag_num; int online; }; /* * We can't handle CCBs with these flags. For the most part, we just don't * handle physical addresses yet. That would require mapping things in * order to do the copy. */ -#define CFCS_BAD_CCB_FLAGS (CAM_DATA_PHYS | CAM_SG_LIST_PHYS | \ - CAM_MSG_BUF_PHYS | CAM_SNS_BUF_PHYS | CAM_CDB_PHYS | CAM_SENSE_PTR |\ +#define CFCS_BAD_CCB_FLAGS (CAM_DATA_ISPHYS | CAM_MSG_BUF_PHYS | \ + CAM_SNS_BUF_PHYS | CAM_CDB_PHYS | CAM_SENSE_PTR | \ CAM_SENSE_PHYS) int cfcs_init(void); void cfcs_shutdown(void); static void cfcs_poll(struct cam_sim *sim); static void cfcs_online(void *arg); static void cfcs_offline(void *arg); static int cfcs_targ_enable(void *arg, struct ctl_id targ_id); static int cfcs_targ_disable(void *arg, struct ctl_id targ_id); static int cfcs_lun_enable(void *arg, struct ctl_id target_id, int lun_id); static int cfcs_lun_disable(void *arg, struct ctl_id target_id, int lun_id); static void cfcs_datamove(union ctl_io *io); static void cfcs_done(union ctl_io *io); void cfcs_action(struct cam_sim *sim, union ccb *ccb); static void cfcs_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); struct cfcs_softc cfcs_softc; /* * This is primarly intended to allow for error injection to test the CAM * sense data and sense residual handling code. This sets the maximum * amount of SCSI sense data that we will report to CAM. 
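 * The read-write sysctl declared below (kern.cam.ctl2cam.max_sense) lets
 * this be tuned at runtime, e.g. "sysctl kern.cam.ctl2cam.max_sense=32"
 * (the value shown is only an example).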
*/ static int cfcs_max_sense = sizeof(struct scsi_sense_data); extern int ctl_disable; SYSINIT(cfcs_init, SI_SUB_CONFIGURE, SI_ORDER_FOURTH, cfcs_init, NULL); SYSCTL_NODE(_kern_cam, OID_AUTO, ctl2cam, CTLFLAG_RD, 0, "CAM Target Layer SIM frontend"); SYSCTL_INT(_kern_cam_ctl2cam, OID_AUTO, max_sense, CTLFLAG_RW, &cfcs_max_sense, 0, "Maximum sense data size"); int cfcs_init(void) { struct cfcs_softc *softc; struct ccb_setasync csa; struct ctl_frontend *fe; #ifdef NEEDTOPORT char wwnn[8]; #endif int retval; /* Don't continue if CTL is disabled */ if (ctl_disable != 0) return (0); softc = &cfcs_softc; retval = 0; bzero(softc, sizeof(*softc)); sprintf(softc->lock_desc, "ctl2cam"); mtx_init(&softc->lock, softc->lock_desc, NULL, MTX_DEF); fe = &softc->fe; fe->port_type = CTL_PORT_INTERNAL; /* XXX KDM what should the real number be here? */ fe->num_requested_ctl_io = 4096; snprintf(softc->port_name, sizeof(softc->port_name), "ctl2cam"); fe->port_name = softc->port_name; fe->port_online = cfcs_online; fe->port_offline = cfcs_offline; fe->onoff_arg = softc; fe->targ_enable = cfcs_targ_enable; fe->targ_disable = cfcs_targ_disable; fe->lun_enable = cfcs_lun_enable; fe->lun_disable = cfcs_lun_disable; fe->targ_lun_arg = softc; fe->fe_datamove = cfcs_datamove; fe->fe_done = cfcs_done; /* XXX KDM what should we report here? */ /* XXX These should probably be fetched from CTL. */ fe->max_targets = 1; fe->max_target_id = 15; retval = ctl_frontend_register(fe, /*master_SC*/ 1); if (retval != 0) { printf("%s: ctl_frontend_register() failed with error %d!\n", __func__, retval); mtx_destroy(&softc->lock); return (1); } /* * Get the WWNN out of the database, and create a WWPN as well. */ #ifdef NEEDTOPORT ddb_GetWWNN((char *)wwnn); softc->wwnn = be64dec(wwnn); softc->wwpn = softc->wwnn + (softc->fe.targ_port & 0xff); #endif /* * If the CTL frontend didn't tell us what our WWNN/WWPN is, go * ahead and set something random. 
*/ if (fe->wwnn == 0) { uint64_t random_bits; arc4rand(&random_bits, sizeof(random_bits), 0); softc->wwnn = (random_bits & 0x0000000fffffff00ULL) | /* Company ID */ 0x5000000000000000ULL | /* NL-Port */ 0x0300; softc->wwpn = softc->wwnn + fe->targ_port + 1; fe->wwnn = softc->wwnn; fe->wwpn = softc->wwpn; } else { softc->wwnn = fe->wwnn; softc->wwpn = fe->wwpn; } mtx_lock(&softc->lock); softc->devq = cam_simq_alloc(fe->num_requested_ctl_io); if (softc->devq == NULL) { printf("%s: error allocating devq\n", __func__); retval = ENOMEM; goto bailout; } softc->sim = cam_sim_alloc(cfcs_action, cfcs_poll, softc->port_name, softc, /*unit*/ 0, &softc->lock, 1, fe->num_requested_ctl_io, softc->devq); if (softc->sim == NULL) { printf("%s: error allocating SIM\n", __func__); retval = ENOMEM; goto bailout; } if (xpt_bus_register(softc->sim, NULL, 0) != CAM_SUCCESS) { printf("%s: error registering SIM\n", __func__); retval = ENOMEM; goto bailout; } if (xpt_create_path(&softc->path, /*periph*/NULL, cam_sim_path(softc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { printf("%s: error creating path\n", __func__); xpt_bus_deregister(cam_sim_path(softc->sim)); retval = 1; goto bailout; } xpt_setup_ccb(&csa.ccb_h, softc->path, CAM_PRIORITY_NONE); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE; csa.callback = cfcs_async; csa.callback_arg = softc->sim; xpt_action((union ccb *)&csa); mtx_unlock(&softc->lock); return (retval); bailout: if (softc->sim) cam_sim_free(softc->sim, /*free_devq*/ TRUE); else if (softc->devq) cam_simq_free(softc->devq); mtx_unlock(&softc->lock); mtx_destroy(&softc->lock); return (retval); } static void cfcs_poll(struct cam_sim *sim) { } void cfcs_shutdown(void) { } static void cfcs_online(void *arg) { struct cfcs_softc *softc; union ccb *ccb; softc = (struct cfcs_softc *)arg; mtx_lock(&softc->lock); softc->online = 1; mtx_unlock(&softc->lock); ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { printf("%s: unable to allocate CCB for rescan\n", __func__); return; } if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(softc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { printf("%s: can't allocate path for rescan\n", __func__); xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } static void cfcs_offline(void *arg) { struct cfcs_softc *softc; union ccb *ccb; softc = (struct cfcs_softc *)arg; mtx_lock(&softc->lock); softc->online = 0; mtx_unlock(&softc->lock); ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { printf("%s: unable to allocate CCB for rescan\n", __func__); return; } if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(softc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { printf("%s: can't allocate path for rescan\n", __func__); xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } static int cfcs_targ_enable(void *arg, struct ctl_id targ_id) { return (0); } static int cfcs_targ_disable(void *arg, struct ctl_id targ_id) { return (0); } static int cfcs_lun_enable(void *arg, struct ctl_id target_id, int lun_id) { return (0); } static int cfcs_lun_disable(void *arg, struct ctl_id target_id, int lun_id) { return (0); } /* * This function is very similar to ctl_ioctl_do_datamove(). Is there a * way to combine the functionality? * * XXX KDM may need to move this into a thread. We're doing a bcopy in the * caller's context, which will usually be the backend. That may not be a * good thing. 
*/ static void cfcs_datamove(union ctl_io *io) { union ccb *ccb; bus_dma_segment_t cam_sg_entry, *cam_sglist; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; int cam_sg_count, ctl_sg_count, cam_sg_start; int cam_sg_offset; int len_to_copy, len_copied; int ctl_watermark, cam_watermark; int i, j; cam_sg_offset = 0; cam_sg_start = 0; ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; /* * Note that we have a check in cfcs_action() to make sure that any * CCBs with "bad" flags are returned with CAM_REQ_INVALID. This * is just to make sure no one removes that check without updating * this code to provide the additional functionality necessary to * support those modes of operation. */ KASSERT(((ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS) == 0), ("invalid " "CAM flags %#x", (ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS))); /* * Simplify things on both sides by putting single buffers into a * single entry S/G list. */ - if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { - if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) { - /* We should filter this out on entry */ - panic("%s: physical S/G list, should not get here", - __func__); - } else { - int len_seen; + switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) { + case CAM_DATA_SG: { + int len_seen; - cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr; - cam_sg_count = ccb->csio.sglist_cnt; + cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr; + cam_sg_count = ccb->csio.sglist_cnt; - for (i = 0, len_seen = 0; i < cam_sg_count; i++) { - if ((len_seen + cam_sglist[i].ds_len) >= - io->scsiio.kern_rel_offset) { - cam_sg_start = i; - cam_sg_offset = - io->scsiio.kern_rel_offset - - len_seen; - break; - } - len_seen += cam_sglist[i].ds_len; + for (i = 0, len_seen = 0; i < cam_sg_count; i++) { + if ((len_seen + cam_sglist[i].ds_len) >= + io->scsiio.kern_rel_offset) { + cam_sg_start = i; + cam_sg_offset = io->scsiio.kern_rel_offset - + len_seen; + break; } + len_seen += cam_sglist[i].ds_len; } - } else { + break; + } + case CAM_DATA_VADDR: cam_sglist = &cam_sg_entry; cam_sglist[0].ds_len = ccb->csio.dxfer_len; cam_sglist[0].ds_addr = (bus_addr_t)ccb->csio.data_ptr; cam_sg_count = 1; cam_sg_start = 0; cam_sg_offset = io->scsiio.kern_rel_offset; + break; + default: + panic("Invalid CAM flags %#x", ccb->ccb_h.flags); } if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; ctl_sg_count = io->scsiio.kern_sg_entries; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = io->scsiio.kern_data_len; ctl_sg_count = 1; } ctl_watermark = 0; cam_watermark = cam_sg_offset; len_copied = 0; for (i = cam_sg_start, j = 0; i < cam_sg_count && j < ctl_sg_count;) { uint8_t *cam_ptr, *ctl_ptr; len_to_copy = ctl_min(cam_sglist[i].ds_len - cam_watermark, ctl_sglist[j].len - ctl_watermark); cam_ptr = (uint8_t *)cam_sglist[i].ds_addr; cam_ptr = cam_ptr + cam_watermark; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { /* * XXX KDM fix this! 
*/ panic("need to implement bus address support"); #if 0 kern_ptr = bus_to_virt(kern_sglist[j].addr); #endif } else ctl_ptr = (uint8_t *)ctl_sglist[j].addr; ctl_ptr = ctl_ptr + ctl_watermark; ctl_watermark += len_to_copy; cam_watermark += len_to_copy; if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) { CTL_DEBUG_PRINT(("%s: copying %d bytes to CAM\n", __func__, len_to_copy)); CTL_DEBUG_PRINT(("%s: from %p to %p\n", ctl_ptr, __func__, cam_ptr)); bcopy(ctl_ptr, cam_ptr, len_to_copy); } else { CTL_DEBUG_PRINT(("%s: copying %d bytes from CAM\n", __func__, len_to_copy)); CTL_DEBUG_PRINT(("%s: from %p to %p\n", cam_ptr, __func__, ctl_ptr)); bcopy(cam_ptr, ctl_ptr, len_to_copy); } len_copied += len_to_copy; if (cam_sglist[i].ds_len == cam_watermark) { i++; cam_watermark = 0; } if (ctl_sglist[j].len == ctl_watermark) { j++; ctl_watermark = 0; } } io->scsiio.ext_data_filled += len_copied; io->scsiio.be_move_done(io); } static void cfcs_done(union ctl_io *io) { union ccb *ccb; struct cfcs_softc *softc; struct cam_sim *sim; ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; sim = xpt_path_sim(ccb->ccb_h.path); softc = (struct cfcs_softc *)cam_sim_softc(sim); /* * At this point we should have status. If we don't, that's a bug. */ KASSERT(((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE), ("invalid CTL status %#x", io->io_hdr.status)); /* * Translate CTL status to CAM status. */ switch (io->io_hdr.status & CTL_STATUS_MASK) { case CTL_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case CTL_SCSI_ERROR: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; ccb->csio.scsi_status = io->scsiio.scsi_status; bcopy(&io->scsiio.sense_data, &ccb->csio.sense_data, min(io->scsiio.sense_len, ccb->csio.sense_len)); if (ccb->csio.sense_len > io->scsiio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - io->scsiio.sense_len; else ccb->csio.sense_resid = 0; if ((ccb->csio.sense_len - ccb->csio.sense_resid) > cfcs_max_sense) { ccb->csio.sense_resid = ccb->csio.sense_len - cfcs_max_sense; } break; case CTL_CMD_ABORTED: ccb->ccb_h.status = CAM_REQ_ABORTED; break; case CTL_ERROR: default: ccb->ccb_h.status = CAM_REQ_CMP_ERR; break; } mtx_lock(sim->mtx); xpt_done(ccb); mtx_unlock(sim->mtx); ctl_free_io(io); } void cfcs_action(struct cam_sim *sim, union ccb *ccb) { struct cfcs_softc *softc; int err; softc = (struct cfcs_softc *)cam_sim_softc(sim); mtx_assert(&softc->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: { union ctl_io *io; struct ccb_scsiio *csio; csio = &ccb->csio; /* * Catch CCB flags, like physical address flags, that * indicate situations we currently can't handle. */ if (ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS) { ccb->ccb_h.status = CAM_REQ_INVALID; printf("%s: bad CCB flags %#x (all flags %#x)\n", __func__, ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS, ccb->ccb_h.flags); xpt_done(ccb); return; } /* * If we aren't online, there are no devices to see. */ if (softc->online == 0) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } io = ctl_alloc_io(softc->fe.ctl_pool_ref); if (io == NULL) { printf("%s: can't allocate ctl_io\n", __func__); ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } ctl_zero_io(io); /* Save pointers on both sides */ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb; ccb->ccb_h.io_ptr = io; /* * Only SCSI I/O comes down this path, resets, etc. come * down via the XPT_RESET_BUS/LUN CCBs below. 
*/ io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid.id = 1; io->io_hdr.nexus.targ_port = softc->fe.targ_port; /* * XXX KDM how do we handle target IDs? */ io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id; io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun; /* * This tag scheme isn't the best, since we could in theory * have a very long-lived I/O and tag collision, especially * in a high I/O environment. But it should work well * enough for now. Since we're using unsigned ints, * they'll just wrap around. */ io->scsiio.tag_num = softc->cur_tag_num++; csio->tag_id = io->scsiio.tag_num; switch (csio->tag_action) { case CAM_TAG_ACTION_NONE: io->scsiio.tag_type = CTL_TAG_UNTAGGED; break; case MSG_SIMPLE_TASK: io->scsiio.tag_type = CTL_TAG_SIMPLE; break; case MSG_HEAD_OF_QUEUE_TASK: io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case MSG_ORDERED_TASK: io->scsiio.tag_type = CTL_TAG_ORDERED; break; case MSG_ACA_TASK: io->scsiio.tag_type = CTL_TAG_ACA; break; default: io->scsiio.tag_type = CTL_TAG_UNTAGGED; printf("%s: unhandled tag type %#x!!\n", __func__, csio->tag_action); break; } if (csio->cdb_len > sizeof(io->scsiio.cdb)) { printf("%s: WARNING: CDB len %d > ctl_io space %zd\n", __func__, csio->cdb_len, sizeof(io->scsiio.cdb)); } io->scsiio.cdb_len = min(csio->cdb_len, sizeof(io->scsiio.cdb)); bcopy(csio->cdb_io.cdb_bytes, io->scsiio.cdb, io->scsiio.cdb_len); err = ctl_queue(io); if (err != CTL_RETVAL_COMPLETE) { printf("%s: func %d: error %d returned by " "ctl_queue()!\n", __func__, ccb->ccb_h.func_code, err); ctl_free_io(io); } else { ccb->ccb_h.status |= CAM_SIM_QUEUED; } break; } case XPT_ABORT: { union ctl_io *io; union ccb *abort_ccb; abort_ccb = ccb->cab.abort_ccb; if (abort_ccb->ccb_h.func_code != XPT_SCSI_IO) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); } /* * If we aren't online, there are no devices to talk to. 
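/*
 * Illustrative sketch, not part of this change: the CAM tag action to
 * CTL tag type mapping used in the switch above appears in several
 * places in this frontend; a hypothetical helper makes the mapping
 * explicit.
 */
static ctl_tag_type
cfcs_tag_type(uint8_t tag_action)
{
	switch (tag_action) {
	case MSG_SIMPLE_TASK:
		return (CTL_TAG_SIMPLE);
	case MSG_HEAD_OF_QUEUE_TASK:
		return (CTL_TAG_HEAD_OF_QUEUE);
	case MSG_ORDERED_TASK:
		return (CTL_TAG_ORDERED);
	case MSG_ACA_TASK:
		return (CTL_TAG_ACA);
	case CAM_TAG_ACTION_NONE:
	default:
		return (CTL_TAG_UNTAGGED);
	}
}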
*/ if (softc->online == 0) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } io = ctl_alloc_io(softc->fe.ctl_pool_ref); if (io == NULL) { ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } ctl_zero_io(io); /* Save pointers on both sides */ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb; ccb->ccb_h.io_ptr = io; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid.id = 1; io->io_hdr.nexus.targ_port = softc->fe.targ_port; io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id; io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun; io->taskio.task_action = CTL_TASK_ABORT_TASK; io->taskio.tag_num = abort_ccb->csio.tag_id; switch (abort_ccb->csio.tag_action) { case CAM_TAG_ACTION_NONE: io->taskio.tag_type = CTL_TAG_UNTAGGED; break; case MSG_SIMPLE_TASK: io->taskio.tag_type = CTL_TAG_SIMPLE; break; case MSG_HEAD_OF_QUEUE_TASK: io->taskio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case MSG_ORDERED_TASK: io->taskio.tag_type = CTL_TAG_ORDERED; break; case MSG_ACA_TASK: io->taskio.tag_type = CTL_TAG_ACA; break; default: io->taskio.tag_type = CTL_TAG_UNTAGGED; printf("%s: unhandled tag type %#x!!\n", __func__, abort_ccb->csio.tag_action); break; } err = ctl_queue(io); if (err != CTL_RETVAL_COMPLETE) { printf("%s func %d: error %d returned by " "ctl_queue()!\n", __func__, ccb->ccb_h.func_code, err); ctl_free_io(io); } break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_fc *fc; cts = &ccb->cts; scsi = &cts->proto_specific.scsi; fc = &cts->xport_specific.fc; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_FC; cts->transport_version = 0; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; fc->valid = CTS_FC_VALID_SPEED; fc->bitrate = 800000; fc->wwnn = softc->wwnn; fc->wwpn = softc->wwpn; fc->port = softc->fe.targ_port; fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_SET_TRAN_SETTINGS: /* XXX KDM should we actually do something here? */ ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_RESET_BUS: case XPT_RESET_DEV: { union ctl_io *io; /* * If we aren't online, there are no devices to talk to. 
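/*
 * Illustrative sketch, not part of this change: the initiator-side view
 * of the XPT_GET_TRAN_SETTINGS handler above.  Hypothetical helper;
 * assumes the caller holds the SIM lock for the path.
 */
static void
example_get_tran_settings(struct cam_path *path)
{
	struct ccb_trans_settings cts;

	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb *)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		return;
	if (cts.transport == XPORT_FC &&
	    (cts.xport_specific.fc.valid & CTS_FC_VALID_WWPN) != 0)
		printf("reported WWPN %#jx\n",
		    (uintmax_t)cts.xport_specific.fc.wwpn);
}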
*/ if (softc->online == 0) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } io = ctl_alloc_io(softc->fe.ctl_pool_ref); if (io == NULL) { ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } ctl_zero_io(io); /* Save pointers on both sides */ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb; ccb->ccb_h.io_ptr = io; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid.id = 0; io->io_hdr.nexus.targ_port = softc->fe.targ_port; io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id; io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun; if (ccb->ccb_h.func_code == XPT_RESET_BUS) io->taskio.task_action = CTL_TASK_BUS_RESET; else io->taskio.task_action = CTL_TASK_LUN_RESET; err = ctl_queue(io); if (err != CTL_RETVAL_COMPLETE) { printf("%s func %d: error %d returned by " "ctl_queue()!\n", __func__, ccb->ccb_h.func_code, err); ctl_free_io(io); } break; } case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi; cpi = &ccb->cpi; cpi->version_num = 0; cpi->hba_inquiry = PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 1; cpi->max_lun = 1024; /* Do we really have a limit? */ cpi->maxio = 1024 * 1024; cpi->async_flags = 0; cpi->hpath_id = 0; cpi->initiator_id = 0; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "FreeBSD", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = 0; cpi->bus_id = 0; cpi->base_transfer_speed = 800000; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC2; /* * Pretend to be Fibre Channel. */ cpi->transport = XPORT_FC; cpi->transport_version = 0; cpi->xport_specific.fc.wwnn = softc->wwnn; cpi->xport_specific.fc.wwpn = softc->wwpn; cpi->xport_specific.fc.port = softc->fe.targ_port; cpi->xport_specific.fc.bitrate = 8 * 1000 * 1000; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_PROVIDE_FAIL; printf("%s: unsupported CCB type %#x\n", __func__, ccb->ccb_h.func_code); xpt_done(ccb); break; } } static void cfcs_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { } Index: projects/physbio/sys/cam/ctl/scsi_ctl.c =================================================================== --- projects/physbio/sys/cam/ctl/scsi_ctl.c (revision 243875) +++ projects/physbio/sys/cam/ctl/scsi_ctl.c (revision 243876) @@ -1,2197 +1,2199 @@ /*- * Copyright (c) 2008, 2009 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $ */ /* * Peripheral driver interface between CAM and CTL (CAM Target Layer). * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef enum { CTLFE_CCB_WAITING = 0x01 } ctlfe_ccb_types; struct ctlfe_softc { struct ctl_frontend fe; path_id_t path_id; struct cam_sim *sim; char port_name[DEV_IDLEN]; STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list; STAILQ_ENTRY(ctlfe_softc) links; }; STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list; struct mtx ctlfe_list_mtx; static char ctlfe_mtx_desc[] = "ctlfelist"; static int ctlfe_dma_enabled = 1; #ifdef CTLFE_INIT_ENABLE static int ctlfe_max_targets = 1; static int ctlfe_num_targets = 0; #endif typedef enum { CTLFE_LUN_NONE = 0x00, CTLFE_LUN_WILDCARD = 0x01 } ctlfe_lun_flags; struct ctlfe_lun_softc { struct ctlfe_softc *parent_softc; struct cam_periph *periph; ctlfe_lun_flags flags; struct callout dma_callout; uint64_t ccbs_alloced; uint64_t ccbs_freed; uint64_t ctios_sent; uint64_t ctios_returned; uint64_t atios_sent; uint64_t atios_returned; uint64_t inots_sent; uint64_t inots_returned; /* bus_dma_tag_t dma_tag; */ TAILQ_HEAD(, ccb_hdr) work_queue; STAILQ_ENTRY(ctlfe_lun_softc) links; }; typedef enum { CTLFE_CMD_NONE = 0x00, CTLFE_CMD_PIECEWISE = 0x01 } ctlfe_cmd_flags; /* * The size limit of this structure is CTL_PORT_PRIV_SIZE, from ctl_io.h. * Currently that is 600 bytes. */ struct ctlfe_lun_cmd_info { int cur_transfer_index; ctlfe_cmd_flags flags; /* * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16 * bytes on amd64. So with 32 elements, this is 256 bytes on * i386 and 512 bytes on amd64. */ bus_dma_segment_t cam_sglist[32]; }; /* * When we register the adapter/bus, request that this many ctl_ios be * allocated. This should be the maximum supported by the adapter, but we * currently don't have a way to get that back from the path inquiry. * XXX KDM add that to the path inquiry. */ #define CTLFE_REQ_CTL_IO 4096 /* * Number of Accept Target I/O CCBs to allocate and queue down to the * adapter per LUN. * XXX KDM should this be controlled by CTL? */ #define CTLFE_ATIO_PER_LUN 1024 /* * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to * allocate and queue down to the adapter per LUN. * XXX KDM should this be controlled by CTL? */ #define CTLFE_IN_PER_LUN 1024 /* * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending * status to the initiator. The SIM is expected to have its own timeouts, * so we're not putting this timeout around the CCB execution time. The * SIM should timeout and let us know if it has an issue. */ #define CTLFE_DMA_TIMEOUT 60 /* * Turn this on to enable extra debugging prints. */ #if 0 #define CTLFE_DEBUG #endif /* * Use randomly assigned WWNN/WWPN values. 
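/*
 * Illustrative sketch, not part of this change: the size constraint
 * described above for struct ctlfe_lun_cmd_info can also be expressed
 * as a compile-time check; ctlfestart() enforces the same invariant at
 * run time with a KASSERT.
 */
CTASSERT(sizeof(struct ctlfe_lun_cmd_info) <= CTL_PORT_PRIV_SIZE);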
This is to work around an issue * in the FreeBSD initiator that makes it unable to rescan the target if * the target gets rebooted and the WWNN/WWPN stay the same. */ #if 0 #define RANDOM_WWNN #endif SYSCTL_INT(_kern_cam_ctl, OID_AUTO, dma_enabled, CTLFLAG_RW, &ctlfe_dma_enabled, 0, "DMA enabled"); MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface"); #define ccb_type ppriv_field0 /* This is only used in the ATIO */ #define io_ptr ppriv_ptr1 /* This is only used in the CTIO */ #define ccb_atio ppriv_ptr1 int ctlfeinitialize(void); void ctlfeshutdown(void); static periph_init_t ctlfeinit; static void ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); static periph_ctor_t ctlferegister; static periph_oninv_t ctlfeoninvalidate; static periph_dtor_t ctlfecleanup; static periph_start_t ctlfestart; static void ctlfedone(struct cam_periph *periph, union ccb *done_ccb); static void ctlfe_onoffline(void *arg, int online); static void ctlfe_online(void *arg); static void ctlfe_offline(void *arg); static int ctlfe_targ_enable(void *arg, struct ctl_id targ_id); static int ctlfe_targ_disable(void *arg, struct ctl_id targ_id); static int ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id); static int ctlfe_lun_disable(void *arg, struct ctl_id targ_id, int lun_id); static void ctlfe_dump_sim(struct cam_sim *sim); static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc); static void ctlfe_dma_timeout(void *arg); static void ctlfe_datamove_done(union ctl_io *io); static void ctlfe_dump(void); static struct periph_driver ctlfe_driver = { ctlfeinit, "ctl", TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0 }; PERIPHDRIVER_DECLARE(ctl, ctlfe_driver); extern struct ctl_softc *control_softc; extern int ctl_disable; int ctlfeinitialize(void) { cam_status status; /* Don't initialize if we're disabled */ if (ctl_disable != 0) return (0); STAILQ_INIT(&ctlfe_softc_list); mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF); xpt_lock_buses(); periphdriver_register(&ctlfe_driver); xpt_unlock_buses(); status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED | AC_CONTRACT, ctlfeasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("ctl: Failed to attach async callback due to CAM " "status 0x%x!\n", status); } return (0); } void ctlfeshutdown(void) { return; } void ctlfeinit(void) { cam_status status; /* Don't initialize if we're disabled */ if (ctl_disable != 0) return; STAILQ_INIT(&ctlfe_softc_list); mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF); KASSERT(control_softc != NULL, ("CTL is not initialized!")); status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED | AC_CONTRACT, ctlfeasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("ctl: Failed to attach async callback due to CAM " "status 0x%x!\n", status); } } static void ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { #ifdef CTLFEDEBUG printf("%s: entered\n", __func__); #endif /* * When a new path gets registered, and it is capable of target * mode, go ahead and attach. Later on, we may need to be more * selective, but for now this will be sufficient. 
*/ switch (code) { case AC_PATH_REGISTERED: { struct ctl_frontend *fe; struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *lun_softc; struct cam_path *path; struct ccb_pathinq *cpi; cam_status status; int retval; cpi = (struct ccb_pathinq *)arg; /* Don't attach if it doesn't support target mode */ if ((cpi->target_sprt & PIT_PROCESSOR) == 0) { #ifdef CTLFEDEBUG printf("%s: SIM %s%d doesn't support target mode\n", __func__, cpi->dev_name, cpi->unit_number); #endif break; } #ifdef CTLFE_INIT_ENABLE if (ctlfe_num_targets >= ctlfe_max_targets) { union ccb *ccb; struct cam_sim *sim; ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, M_NOWAIT | M_ZERO); if (ccb == NULL) { printf("%s: unable to malloc CCB!\n", __func__); xpt_free_path(path); return; } xpt_setup_ccb(&ccb->ccb_h, cpi->ccb_h.path, CAM_PRIORITY_NONE); sim = xpt_path_sim(cpi->ccb_h.path); ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR; /* We should hold the SIM lock here */ mtx_assert(sim->mtx, MA_OWNED); xpt_action(ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { printf("%s: SIM %s%d (path id %d) initiator " "enable failed with status %#x\n", __func__, cpi->dev_name, cpi->unit_number, cpi->ccb_h.path_id, ccb->ccb_h.status); } else { printf("%s: SIM %s%d (path id %d) initiator " "enable succeeded\n", __func__, cpi->dev_name, cpi->unit_number, cpi->ccb_h.path_id); } free(ccb, M_TEMP); break; } else { ctlfe_num_targets++; } printf("%s: ctlfe_num_targets = %d\n", __func__, ctlfe_num_targets); #endif /* CTLFE_INIT_ENABLE */ /* * We're in an interrupt context here, so we have to * use M_NOWAIT. Of course this means trouble if we * can't allocate memory. */ bus_softc = malloc(sizeof(*bus_softc), M_CTLFE, M_NOWAIT | M_ZERO); if (bus_softc == NULL) { printf("%s: unable to malloc %zd bytes for softc\n", __func__, sizeof(*bus_softc)); return; } bus_softc->path_id = cpi->ccb_h.path_id; bus_softc->sim = xpt_path_sim(cpi->ccb_h.path); STAILQ_INIT(&bus_softc->lun_softc_list); fe = &bus_softc->fe; /* * XXX KDM should we be more accurate here ? */ if (cpi->transport == XPORT_FC) fe->port_type = CTL_PORT_FC; else fe->port_type = CTL_PORT_SCSI; /* XXX KDM what should the real number be here? */ fe->num_requested_ctl_io = 4096; snprintf(bus_softc->port_name, sizeof(bus_softc->port_name), "%s%d", cpi->dev_name, cpi->unit_number); /* * XXX KDM it would be nice to allocate storage in the * frontend structure itself. */ fe->port_name = bus_softc->port_name; fe->physical_port = cpi->unit_number; fe->virtual_port = cpi->bus_id; fe->port_online = ctlfe_online; fe->port_offline = ctlfe_offline; fe->onoff_arg = bus_softc; fe->targ_enable = ctlfe_targ_enable; fe->targ_disable = ctlfe_targ_disable; fe->lun_enable = ctlfe_lun_enable; fe->lun_disable = ctlfe_lun_disable; fe->targ_lun_arg = bus_softc; fe->fe_datamove = ctlfe_datamove_done; fe->fe_done = ctlfe_datamove_done; fe->fe_dump = ctlfe_dump; /* * XXX KDM the path inquiry doesn't give us the maximum * number of targets supported. */ fe->max_targets = cpi->max_target; fe->max_target_id = cpi->max_target; /* * XXX KDM need to figure out whether we're the master or * slave. 
*/ #ifdef CTLFEDEBUG printf("%s: calling ctl_frontend_register() for %s%d\n", __func__, cpi->dev_name, cpi->unit_number); #endif retval = ctl_frontend_register(fe, /*master_SC*/ 1); if (retval != 0) { printf("%s: ctl_frontend_register() failed with " "error %d!\n", __func__, retval); free(bus_softc, M_CTLFE); break; } else { mtx_lock(&ctlfe_list_mtx); STAILQ_INSERT_TAIL(&ctlfe_softc_list, bus_softc, links); mtx_unlock(&ctlfe_list_mtx); } status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id,CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path for wildcard " "periph\n", __func__); break; } lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, M_NOWAIT | M_ZERO); if (lun_softc == NULL) { xpt_print(path, "%s: unable to allocate softc for " "wildcard periph\n", __func__); xpt_free_path(path); break; } lun_softc->parent_softc = bus_softc; lun_softc->flags |= CTLFE_LUN_WILDCARD; status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate, ctlfecleanup, ctlfestart, "ctl", CAM_PERIPH_BIO, path, ctlfeasync, 0, lun_softc); xpt_free_path(path); break; } case AC_PATH_DEREGISTERED: /* ctl_frontend_deregister() */ break; case AC_CONTRACT: { struct ac_contract *ac; ac = (struct ac_contract *)arg; switch (ac->contract_number) { case AC_CONTRACT_DEV_CHG: { struct ac_device_changed *dev_chg; struct ctlfe_softc *softc; int retval, found; dev_chg = (struct ac_device_changed *)ac->contract_data; printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n", __func__, dev_chg->wwpn, dev_chg->port, xpt_path_path_id(path), dev_chg->target, (dev_chg->arrived == 0) ? "left" : "arrived"); found = 0; mtx_lock(&ctlfe_list_mtx); STAILQ_FOREACH(softc, &ctlfe_softc_list, links) { if (softc->path_id == xpt_path_path_id(path)) { found = 1; break; } } mtx_unlock(&ctlfe_list_mtx); if (found == 0) { printf("%s: CTL port for CAM path %u not " "found!\n", __func__, xpt_path_path_id(path)); break; } if (dev_chg->arrived != 0) { retval = ctl_add_initiator(dev_chg->wwpn, softc->fe.targ_port, dev_chg->target); } else { retval = ctl_remove_initiator( softc->fe.targ_port, dev_chg->target); } if (retval != 0) { printf("%s: could not %s port %d iid %u " "WWPN %#jx!\n", __func__, (dev_chg->arrived != 0) ? 
"add" : "remove", softc->fe.targ_port, dev_chg->target, (uintmax_t)dev_chg->wwpn); } break; } default: printf("%s: unsupported contract number %ju\n", __func__, (uintmax_t)ac->contract_number); break; } break; } default: break; } } static cam_status ctlferegister(struct cam_periph *periph, void *arg) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; struct cam_sim *sim; union ccb en_lun_ccb; cam_status status; int i; softc = (struct ctlfe_lun_softc *)arg; bus_softc = softc->parent_softc; sim = xpt_path_sim(periph->path); TAILQ_INIT(&softc->work_queue); softc->periph = periph; callout_init_mtx(&softc->dma_callout, sim->mtx, /*flags*/ 0); periph->softc = softc; xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; en_lun_ccb.cel.grp6_len = 0; en_lun_ccb.cel.grp7_len = 0; en_lun_ccb.cel.enable = 1; xpt_action(&en_lun_ccb); status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); if (status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n", __func__, en_lun_ccb.ccb_h.status); return (status); } status = CAM_REQ_CMP; for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) { union ccb *new_ccb; new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, M_ZERO|M_NOWAIT); if (new_ccb == NULL) { status = CAM_RESRC_UNAVAIL; break; } xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; new_ccb->ccb_h.cbfcnp = ctlfedone; xpt_action(new_ccb); softc->atios_sent++; status = new_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { free(new_ccb, M_CTLFE); break; } } status = cam_periph_acquire(periph); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: could not acquire reference " "count, status = %#x\n", __func__, status); return (status); } if (i == 0) { xpt_print(periph->path, "%s: could not allocate ATIO CCBs, " "status 0x%x\n", __func__, status); return (CAM_REQ_CMP_ERR); } for (i = 0; i < CTLFE_IN_PER_LUN; i++) { union ccb *new_ccb; new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, M_ZERO|M_NOWAIT); if (new_ccb == NULL) { status = CAM_RESRC_UNAVAIL; break; } xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; new_ccb->ccb_h.cbfcnp = ctlfedone; xpt_action(new_ccb); softc->inots_sent++; status = new_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { /* * Note that we don't free the CCB here. If the * status is not CAM_REQ_INPROG, then we're * probably talking to a SIM that says it is * target-capable but doesn't support the * XPT_IMMEDIATE_NOTIFY CCB. i.e. it supports the * older API. In that case, it'll call xpt_done() * on the CCB, and we need to free it in our done * routine as a result. 
*/ break; } } if ((i == 0) || (status != CAM_REQ_INPROG)) { xpt_print(periph->path, "%s: could not allocate immediate " "notify CCBs, status 0x%x\n", __func__, status); return (CAM_REQ_CMP_ERR); } return (CAM_REQ_CMP); } static void ctlfeoninvalidate(struct cam_periph *periph) { union ccb en_lun_ccb; cam_status status; struct ctlfe_lun_softc *softc; softc = (struct ctlfe_lun_softc *)periph->softc; xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; en_lun_ccb.cel.grp6_len = 0; en_lun_ccb.cel.grp7_len = 0; en_lun_ccb.cel.enable = 0; xpt_action(&en_lun_ccb); status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); if (status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n", __func__, en_lun_ccb.ccb_h.status); /* * XXX KDM what do we do now? */ } xpt_print(periph->path, "LUN removed, %ju ATIOs outstanding, %ju " "INOTs outstanding, %d refs\n", softc->atios_sent - softc->atios_returned, softc->inots_sent - softc->inots_returned, periph->refcount); } static void ctlfecleanup(struct cam_periph *periph) { struct ctlfe_lun_softc *softc; struct ctlfe_softc *bus_softc; xpt_print(periph->path, "%s: Called\n", __func__); softc = (struct ctlfe_lun_softc *)periph->softc; bus_softc = softc->parent_softc; STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc,links); /* * XXX KDM is there anything else that needs to be done here? */ free(softc, M_CTLFE); } static void ctlfestart(struct cam_periph *periph, union ccb *start_ccb) { struct ctlfe_lun_softc *softc; struct ccb_hdr *ccb_h; softc = (struct ctlfe_lun_softc *)periph->softc; softc->ccbs_alloced++; ccb_h = TAILQ_FIRST(&softc->work_queue); if (periph->immediate_priority <= periph->pinfo.priority) { panic("shouldn't get to the CCB waiting case!"); start_ccb->ccb_h.ccb_type = CTLFE_CCB_WAITING; SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, periph_links.sle); periph->immediate_priority = CAM_PRIORITY_NONE; wakeup(&periph->ccb_list); } else if (ccb_h == NULL) { softc->ccbs_freed++; xpt_release_ccb(start_ccb); } else { struct ccb_accept_tio *atio; struct ccb_scsiio *csio; uint8_t *data_ptr; uint32_t dxfer_len; ccb_flags flags; union ctl_io *io; uint8_t scsi_status; /* Take the ATIO off the work queue */ TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe); atio = (struct ccb_accept_tio *)ccb_h; io = (union ctl_io *)ccb_h->io_ptr; csio = &start_ccb->csio; flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK); if ((io == NULL) || (io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) { /* * We're done, send status back. */ flags |= CAM_SEND_STATUS; if (io == NULL) { scsi_status = SCSI_STATUS_BUSY; csio->sense_len = 0; } else if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_CMD_ABORTED) { io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED; /* * If this command was aborted, we don't * need to send status back to the SIM. * Just free the CTIO and ctl_io, and * recycle the ATIO back to the SIM. */ xpt_print(periph->path, "%s: aborted " "command 0x%04x discarded\n", __func__, io->scsiio.tag_num); ctl_free_io(io); /* * For a wildcard attachment, commands can * come in with a specific target/lun. Reset * the target and LUN fields back to the * wildcard values before we send them back * down to the SIM. The SIM has a wildcard * LUN enabled, not whatever target/lun * these happened to be. 
*/ if (softc->flags & CTLFE_LUN_WILDCARD) { atio->ccb_h.target_id = CAM_TARGET_WILDCARD; atio->ccb_h.target_lun = CAM_LUN_WILDCARD; } if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(periph->path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); atio->ccb_h.status &= ~CAM_DEV_QFRZN; } ccb_h = TAILQ_FIRST(&softc->work_queue); if (atio->ccb_h.func_code != XPT_ACCEPT_TARGET_IO) { xpt_print(periph->path, "%s: func_code " "is %#x\n", __func__, atio->ccb_h.func_code); } start_ccb->ccb_h.func_code = XPT_ABORT; start_ccb->cab.abort_ccb = (union ccb *)atio; start_ccb->ccb_h.cbfcnp = ctlfedone; /* Tell the SIM that we've aborted this ATIO */ xpt_action(start_ccb); softc->ccbs_freed++; xpt_release_ccb(start_ccb); /* * Send the ATIO back down to the SIM. */ xpt_action((union ccb *)atio); softc->atios_sent++; /* * If we still have work to do, ask for * another CCB. Otherwise, deactivate our * callout. */ if (ccb_h != NULL) xpt_schedule(periph, /*priority*/ 1); else callout_stop(&softc->dma_callout); return; } else { io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED; scsi_status = io->scsiio.scsi_status; csio->sense_len = io->scsiio.sense_len; } data_ptr = NULL; dxfer_len = 0; if (io == NULL) { printf("%s: tag %04x io is NULL\n", __func__, atio->tag_id); } else { #ifdef CTLFEDEBUG printf("%s: tag %04x status %x\n", __func__, atio->tag_id, io->io_hdr.status); #endif } csio->sglist_cnt = 0; if (csio->sense_len != 0) { csio->sense_data = io->scsiio.sense_data; flags |= CAM_SEND_SENSE; } else if (scsi_status == SCSI_STATUS_CHECK_COND) { xpt_print(periph->path, "%s: check condition " "with no sense\n", __func__); } } else { struct ctlfe_lun_cmd_info *cmd_info; /* * Datamove call, we need to setup the S/G list. * If we pass in a S/G list, the isp(4) driver at * least expects physical/bus addresses. */ cmd_info = (struct ctlfe_lun_cmd_info *) io->io_hdr.port_priv; KASSERT(sizeof(*cmd_info) < CTL_PORT_PRIV_SIZE, ("%s: sizeof(struct ctlfe_lun_cmd_info) %zd < " "CTL_PORT_PRIV_SIZE %d", __func__, sizeof(*cmd_info), CTL_PORT_PRIV_SIZE)); io->io_hdr.flags &= ~CTL_FLAG_DMA_QUEUED; /* * Need to zero this, in case it has been used for * a previous datamove for this particular I/O. */ bzero(cmd_info, sizeof(*cmd_info)); scsi_status = 0; /* * Set the direction, relative to the initiator. */ flags &= ~CAM_DIR_MASK; if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) flags |= CAM_DIR_IN; else flags |= CAM_DIR_OUT; csio->cdb_len = atio->cdb_len; + flags &= ~CAM_DATA_MASK; if (io->scsiio.kern_sg_entries == 0) { /* No S/G list */ data_ptr = io->scsiio.kern_data_ptr; dxfer_len = io->scsiio.kern_data_len; csio->sglist_cnt = 0; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) - flags |= CAM_DATA_PHYS; + flags |= CAM_DATA_PADDR; + else + flags |= CAM_DATA_VADDR; } else if (io->scsiio.kern_sg_entries <= (sizeof(cmd_info->cam_sglist)/ sizeof(cmd_info->cam_sglist[0]))) { /* * S/G list with physical or virtual pointers. * Just populate the CAM S/G list with the * pointers. 
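/*
 * Illustrative sketch, not part of this change: how the direction and
 * data-type bits are derived from the ctl_io for the flat-buffer (no
 * S/G list) case handled above.  The helper name is hypothetical.
 */
static ccb_flags
ctlfe_flat_data_flags(union ctl_io *io, ccb_flags flags)
{
	flags &= ~(CAM_DIR_MASK | CAM_DATA_MASK);
	/* Direction is expressed relative to the initiator. */
	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
		flags |= CAM_DIR_IN;
	else
		flags |= CAM_DIR_OUT;
	/* Virtual buffer unless the backend handed down a bus address. */
	if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
		flags |= CAM_DATA_PADDR;
	else
		flags |= CAM_DATA_VADDR;
	return (flags);
}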
*/ int i; struct ctl_sg_entry *ctl_sglist; bus_dma_segment_t *cam_sglist; ctl_sglist = (struct ctl_sg_entry *) io->scsiio.kern_data_ptr; cam_sglist = cmd_info->cam_sglist; for (i = 0; i < io->scsiio.kern_sg_entries;i++){ cam_sglist[i].ds_addr = (bus_addr_t)ctl_sglist[i].addr; cam_sglist[i].ds_len = ctl_sglist[i].len; } csio->sglist_cnt = io->scsiio.kern_sg_entries; - flags |= CAM_SCATTER_VALID; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) - flags |= CAM_SG_LIST_PHYS; + flags |= CAM_DATA_SG_PADDR; else - flags &= ~CAM_SG_LIST_PHYS; + flags &= ~CAM_DATA_SG; data_ptr = (uint8_t *)cam_sglist; dxfer_len = io->scsiio.kern_data_len; } else { /* S/G list with virtual pointers */ struct ctl_sg_entry *sglist; int *ti; /* * XXX KDM this is a temporary hack. The * isp(4) driver can't deal with S/G lists * with virtual pointers, so we need to * go through and send down one virtual * pointer at a time. */ sglist = (struct ctl_sg_entry *) io->scsiio.kern_data_ptr; ti = &cmd_info->cur_transfer_index; data_ptr = sglist[*ti].addr; dxfer_len = sglist[*ti].len; csio->sglist_cnt = 0; cmd_info->flags |= CTLFE_CMD_PIECEWISE; (*ti)++; } io->scsiio.ext_data_filled += dxfer_len; if (io->scsiio.ext_data_filled > io->scsiio.kern_total_len) { xpt_print(periph->path, "%s: tag 0x%04x " "fill len %u > total %u\n", __func__, io->scsiio.tag_num, io->scsiio.ext_data_filled, io->scsiio.kern_total_len); } } #ifdef CTLFEDEBUG printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__, (flags & CAM_SEND_STATUS) ? "done" : "datamove", atio->tag_id, flags, data_ptr, dxfer_len); #endif /* * Valid combinations: * - CAM_SEND_STATUS, SCATTER_VALID = 0, dxfer_len = 0, * sglist_cnt = 0 * - CAM_SEND_STATUS = 0, SCATTER_VALID = 0, dxfer_len != 0, * sglist_cnt = 0 * - CAM_SEND_STATUS = 0, SCATTER_VALID, dxfer_len != 0, * sglist_cnt != 0 */ #ifdef CTLFEDEBUG if (((flags & CAM_SEND_STATUS) && (((flags & CAM_SCATTER_VALID) != 0) || (dxfer_len != 0) || (csio->sglist_cnt != 0))) || (((flags & CAM_SEND_STATUS) == 0) && (dxfer_len == 0)) || ((flags & CAM_SCATTER_VALID) && (csio->sglist_cnt == 0)) || (((flags & CAM_SCATTER_VALID) == 0) && (csio->sglist_cnt != 0))) { printf("%s: tag %04x cdb %02x flags %#x dxfer_len " "%d sg %u\n", __func__, atio->tag_id, atio->cdb_io.cdb_bytes[0], flags, dxfer_len, csio->sglist_cnt); if (io != NULL) { printf("%s: tag %04x io status %#x\n", __func__, atio->tag_id, io->io_hdr.status); } else { printf("%s: tag %04x no associated io\n", __func__, atio->tag_id); } } #endif cam_fill_ctio(csio, /*retries*/ 2, ctlfedone, flags, (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, atio->tag_id, atio->init_id, scsi_status, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*timeout*/ 5 * 1000); start_ccb->ccb_h.ccb_atio = atio; if (((flags & CAM_SEND_STATUS) == 0) && (io != NULL)) io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; softc->ctios_sent++; xpt_action(start_ccb); if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(periph->path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); atio->ccb_h.status &= ~CAM_DEV_QFRZN; } ccb_h = TAILQ_FIRST(&softc->work_queue); } /* * If we still have work to do, ask for another CCB. Otherwise, * deactivate our callout. 
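/*
 * Illustrative sketch, not part of this change: one plausible shape of
 * the first "valid combination" listed above, a status-only CTIO with
 * no data phase and no S/G list (hypothetical helper).
 */
static void
example_status_ctio(struct ccb_scsiio *csio, struct ccb_accept_tio *atio)
{
	cam_fill_ctio(csio,
		      /*retries*/ 2,
		      ctlfedone,
		      CAM_DIR_NONE | CAM_SEND_STATUS,
		      (atio->ccb_h.flags & CAM_TAG_ACTION_VALID) ?
		      MSG_SIMPLE_Q_TAG : 0,
		      atio->tag_id,
		      atio->init_id,
		      SCSI_STATUS_OK,
		      /*data_ptr*/ NULL,
		      /*dxfer_len*/ 0,
		      /*timeout*/ 5 * 1000);
	csio->sglist_cnt = 0;
}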
*/ if (ccb_h != NULL) xpt_schedule(periph, /*priority*/ 1); else callout_stop(&softc->dma_callout); } static void ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb) { struct ctlfe_lun_softc *softc; softc = (struct ctlfe_lun_softc *)periph->softc; switch (ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: softc->atios_returned++; break; case XPT_IMMEDIATE_NOTIFY: case XPT_NOTIFY_ACKNOWLEDGE: softc->inots_returned++; break; default: break; } free(ccb, M_CTLFE); KASSERT(softc->atios_returned <= softc->atios_sent, ("%s: " "atios_returned %ju > atios_sent %ju", __func__, softc->atios_returned, softc->atios_sent)); KASSERT(softc->inots_returned <= softc->inots_sent, ("%s: " "inots_returned %ju > inots_sent %ju", __func__, softc->inots_returned, softc->inots_sent)); /* * If we have received all of our CCBs, we can release our * reference on the peripheral driver. It will probably go away * now. */ if ((softc->atios_returned == softc->atios_sent) && (softc->inots_returned == softc->inots_sent)) { cam_periph_release_locked(periph); } } static int ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset) { uint64_t lba; uint32_t num_blocks, nbc; uint8_t *cmdbyt = (atio->ccb_h.flags & CAM_CDB_POINTER)? atio->cdb_io.cdb_ptr : atio->cdb_io.cdb_bytes; nbc = offset >> 9; /* ASSUMING 512 BYTE BLOCKS */ switch (cmdbyt[0]) { case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt; lba = scsi_3btoul(cdb->addr); lba &= 0x1fffff; num_blocks = cdb->length; if (num_blocks == 0) num_blocks = 256; lba += nbc; num_blocks -= nbc; scsi_ulto3b(lba, cdb->addr); cdb->length = num_blocks; break; } case READ_10: case WRITE_10: { struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_ulto4b(lba, cdb->addr); scsi_ulto2b(num_blocks, cdb->length); break; } case READ_12: case WRITE_12: { struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_ulto4b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); break; } case READ_16: case WRITE_16: { struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_u64to8b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); break; } default: return -1; } return (0); } static void ctlfedone(struct cam_periph *periph, union ccb *done_ccb) { struct ctlfe_lun_softc *softc; struct ctlfe_softc *bus_softc; struct ccb_accept_tio *atio = NULL; union ctl_io *io = NULL; #ifdef CTLFE_DEBUG printf("%s: entered, func_code = %#x, type = %#lx\n", __func__, done_ccb->ccb_h.func_code, done_ccb->ccb_h.ccb_type); #endif softc = (struct ctlfe_lun_softc *)periph->softc; bus_softc = softc->parent_softc; if (done_ccb->ccb_h.ccb_type == CTLFE_CCB_WAITING) { panic("shouldn't get to the CCB waiting case!"); wakeup(&done_ccb->ccb_h.cbfcnp); return; } /* * If the peripheral is invalid, ATIOs and immediate notify CCBs * need to be freed. Most of the ATIOs and INOTs that come back * will be CCBs that are being returned from the SIM as a result of * our disabling the LUN. * * Other CCB types are handled in their respective cases below. 
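/*
 * Illustrative sketch, not part of this change: the READ(10) branch of
 * ctlfe_adjust_cdb() above, done on a plain byte array.  With 512-byte
 * blocks an SRR offset of 4096 bytes advances the LBA by 8 and shrinks
 * the transfer length by 8 blocks.
 */
#include <stdint.h>

static void
adjust_read10(uint8_t cdb[10], uint32_t offset_bytes)
{
	uint32_t nbc = offset_bytes >> 9;	/* 512-byte blocks */
	uint32_t lba = ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
	    ((uint32_t)cdb[4] << 8) | cdb[5];
	uint16_t len = ((uint16_t)cdb[7] << 8) | cdb[8];

	lba += nbc;				/* skip what already moved */
	len -= nbc;
	cdb[2] = lba >> 24;
	cdb[3] = lba >> 16;
	cdb[4] = lba >> 8;
	cdb[5] = lba;
	cdb[7] = len >> 8;
	cdb[8] = len;
}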
*/ if (periph->flags & CAM_PERIPH_INVALID) { switch (done_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMEDIATE_NOTIFY: case XPT_NOTIFY_ACKNOWLEDGE: ctlfe_free_ccb(periph, done_ccb); return; default: break; } } switch (done_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: { atio = &done_ccb->atio; softc->atios_returned++; resubmit: /* * Allocate a ctl_io, pass it to CTL, and wait for the * datamove or done. */ io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref); if (io == NULL) { atio->ccb_h.flags &= ~CAM_DIR_MASK; atio->ccb_h.flags |= CAM_DIR_NONE; printf("%s: ctl_alloc_io failed!\n", __func__); /* * XXX KDM need to set SCSI_STATUS_BUSY, but there * is no field in the ATIO structure to do that, * and we aren't able to allocate a ctl_io here. * What to do? */ atio->sense_len = 0; done_ccb->ccb_h.io_ptr = NULL; TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h, periph_links.tqe); xpt_schedule(periph, /*priority*/ 1); break; } ctl_zero_io(io); /* Save pointers on both sides */ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb; done_ccb->ccb_h.io_ptr = io; /* * Only SCSI I/O comes down this path, resets, etc. come * down the immediate notify path below. */ io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid.id = atio->init_id; io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port; io->io_hdr.nexus.targ_target.id = atio->ccb_h.target_id; io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun; io->scsiio.tag_num = atio->tag_id; switch (atio->tag_action) { case CAM_TAG_ACTION_NONE: io->scsiio.tag_type = CTL_TAG_UNTAGGED; break; case MSG_SIMPLE_TASK: io->scsiio.tag_type = CTL_TAG_SIMPLE; break; case MSG_HEAD_OF_QUEUE_TASK: io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case MSG_ORDERED_TASK: io->scsiio.tag_type = CTL_TAG_ORDERED; break; case MSG_ACA_TASK: io->scsiio.tag_type = CTL_TAG_ACA; break; default: io->scsiio.tag_type = CTL_TAG_UNTAGGED; printf("%s: unhandled tag type %#x!!\n", __func__, atio->tag_action); break; } if (atio->cdb_len > sizeof(io->scsiio.cdb)) { printf("%s: WARNING: CDB len %d > ctl_io space %zd\n", __func__, atio->cdb_len, sizeof(io->scsiio.cdb)); } io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb)); bcopy(atio->cdb_io.cdb_bytes, io->scsiio.cdb, io->scsiio.cdb_len); #ifdef CTLFEDEBUG printf("%s: %ju:%d:%ju:%d: tag %04x CDB %02x\n", __func__, (uintmax_t)io->io_hdr.nexus.initid.id, io->io_hdr.nexus.targ_port, (uintmax_t)io->io_hdr.nexus.targ_target.id, io->io_hdr.nexus.targ_lun, io->scsiio.tag_num, io->scsiio.cdb[0]); #endif ctl_queue(io); break; } case XPT_CONT_TARGET_IO: { int srr = 0; uint32_t srr_off = 0; atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio; io = (union ctl_io *)atio->ccb_h.io_ptr; softc->ctios_returned++; #ifdef CTLFEDEBUG printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n", __func__, atio->tag_id, done_ccb->ccb_h.flags); #endif /* * Handle SRR case were the data pointer is pushed back hack */ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV && done_ccb->csio.msg_ptr != NULL && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED && done_ccb->csio.msg_ptr[1] == 5 && done_ccb->csio.msg_ptr[2] == 0) { srr = 1; srr_off = (done_ccb->csio.msg_ptr[3] << 24) | (done_ccb->csio.msg_ptr[4] << 16) | (done_ccb->csio.msg_ptr[5] << 8) | (done_ccb->csio.msg_ptr[6]); } if (srr && (done_ccb->ccb_h.flags & CAM_SEND_STATUS)) { /* * If status was being sent, the back end data is now * history. Hack it up and resubmit a new command with * the CDB adjusted. 
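/*
 * Illustrative sketch, not part of this change: decoding the 4-byte,
 * big-endian offset carried in the extended message that the SRR check
 * above matches (byte 0 MSG_EXTENDED, length 5, code 0), as a
 * standalone function.
 */
#include <stdint.h>

static int
parse_srr_offset(const uint8_t *msg, uint32_t *offp)
{
	if (msg[0] != 0x01 /* MSG_EXTENDED */ || msg[1] != 5 || msg[2] != 0)
		return (-1);
	*offp = ((uint32_t)msg[3] << 24) | ((uint32_t)msg[4] << 16) |
	    ((uint32_t)msg[5] << 8) | msg[6];
	return (0);
}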
If the SIM does the right thing, * all of the resid math should work. */ softc->ccbs_freed++; xpt_release_ccb(done_ccb); ctl_free_io(io); if (ctlfe_adjust_cdb(atio, srr_off) == 0) { done_ccb = (union ccb *)atio; goto resubmit; } /* * Fall through to doom.... */ } else if (srr) { /* * If we have an srr and we're still sending data, we * should be able to adjust offsets and cycle again. */ io->scsiio.kern_rel_offset = io->scsiio.ext_data_filled = srr_off; io->scsiio.ext_data_len = io->scsiio.kern_total_len - io->scsiio.kern_rel_offset; softc->ccbs_freed++; io->scsiio.io_hdr.status = CTL_STATUS_NONE; xpt_release_ccb(done_ccb); TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, periph_links.tqe); xpt_schedule(periph, /*priority*/ 1); return; } /* * If we were sending status back to the initiator, free up * resources. If we were doing a datamove, call the * datamove done routine. */ if (done_ccb->ccb_h.flags & CAM_SEND_STATUS) { softc->ccbs_freed++; xpt_release_ccb(done_ccb); ctl_free_io(io); /* * For a wildcard attachment, commands can come in * with a specific target/lun. Reset the target * and LUN fields back to the wildcard values before * we send them back down to the SIM. The SIM has * a wildcard LUN enabled, not whatever target/lun * these happened to be. */ if (softc->flags & CTLFE_LUN_WILDCARD) { atio->ccb_h.target_id = CAM_TARGET_WILDCARD; atio->ccb_h.target_lun = CAM_LUN_WILDCARD; } if (periph->flags & CAM_PERIPH_INVALID) { ctlfe_free_ccb(periph, (union ccb *)atio); return; } else { xpt_action((union ccb *)atio); softc->atios_sent++; } } else { struct ctlfe_lun_cmd_info *cmd_info; struct ccb_scsiio *csio; csio = &done_ccb->csio; cmd_info = (struct ctlfe_lun_cmd_info *) io->io_hdr.port_priv; io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; io->scsiio.ext_data_len += csio->dxfer_len; if (io->scsiio.ext_data_len > io->scsiio.kern_total_len) { xpt_print(periph->path, "%s: tag 0x%04x " "done len %u > total %u sent %u\n", __func__, io->scsiio.tag_num, io->scsiio.ext_data_len, io->scsiio.kern_total_len, io->scsiio.ext_data_filled); } /* * Translate CAM status to CTL status. Success * does not change the overall, ctl_io status. In * that case we just set port_status to 0. If we * have a failure, though, set a data phase error * for the overall ctl_io. */ switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_REQ_CMP: io->io_hdr.port_status = 0; break; default: /* * XXX KDM the isp(4) driver doesn't really * seem to send errors back for data * transfers that I can tell. There is one * case where it'll send CAM_REQ_CMP_ERR, * but probably not that many more cases. * So set a generic data phase error here, * like the SXP driver sets. */ io->io_hdr.port_status = 0xbad1; ctl_set_data_phase_error(&io->scsiio); /* * XXX KDM figure out residual. */ break; } /* * If we had to break this S/G list into multiple * pieces, figure out where we are in the list, and * continue sending pieces if necessary. */ if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) && (io->io_hdr.port_status == 0) && (cmd_info->cur_transfer_index < io->scsiio.kern_sg_entries)) { struct ctl_sg_entry *sglist; ccb_flags flags; uint8_t scsi_status; uint8_t *data_ptr; uint32_t dxfer_len; int *ti; sglist = (struct ctl_sg_entry *) io->scsiio.kern_data_ptr; ti = &cmd_info->cur_transfer_index; flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT| CAM_TAG_ACTION_VALID| CAM_DIR_MASK); /* * Set the direction, relative to the initiator. 
*/ flags &= ~CAM_DIR_MASK; if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) flags |= CAM_DIR_IN; else flags |= CAM_DIR_OUT; data_ptr = sglist[*ti].addr; dxfer_len = sglist[*ti].len; (*ti)++; scsi_status = 0; if (((flags & CAM_SEND_STATUS) == 0) && (dxfer_len == 0)) { printf("%s: tag %04x no status or " "len cdb = %02x\n", __func__, atio->tag_id, atio->cdb_io.cdb_bytes[0]); printf("%s: tag %04x io status %#x\n", __func__, atio->tag_id, io->io_hdr.status); } cam_fill_ctio(csio, /*retries*/ 2, ctlfedone, flags, (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, atio->tag_id, atio->init_id, scsi_status, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*timeout*/ 5 * 1000); csio->resid = 0; csio->ccb_h.ccb_atio = atio; io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; softc->ctios_sent++; xpt_action((union ccb *)csio); } else { /* * Release the CTIO. The ATIO will be sent back * down to the SIM once we send status. */ softc->ccbs_freed++; xpt_release_ccb(done_ccb); /* Call the backend move done callback */ io->scsiio.be_move_done(io); } } break; } case XPT_IMMEDIATE_NOTIFY: { union ctl_io *io; struct ccb_immediate_notify *inot; cam_status status; int frozen; inot = &done_ccb->cin1; softc->inots_returned++; frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x " "seq %#x\n", __func__, inot->ccb_h.status, inot->tag_id, inot->seq_id); io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref); if (io != NULL) { int send_ctl_io; send_ctl_io = 1; ctl_zero_io(io); io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =done_ccb; inot->ccb_h.io_ptr = io; io->io_hdr.nexus.initid.id = inot->initiator_id; io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port; io->io_hdr.nexus.targ_target.id = inot->ccb_h.target_id; io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun; /* XXX KDM should this be the tag_id? */ io->taskio.tag_num = inot->seq_id; status = inot->ccb_h.status & CAM_STATUS_MASK; switch (status) { case CAM_SCSI_BUS_RESET: io->taskio.task_action = CTL_TASK_BUS_RESET; break; case CAM_BDR_SENT: io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case CAM_MESSAGE_RECV: switch (inot->arg) { case MSG_ABORT_TASK_SET: /* * XXX KDM this isn't currently * supported by CTL. It ends up * being a no-op. */ io->taskio.task_action = CTL_TASK_ABORT_TASK_SET; break; case MSG_TARGET_RESET: io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case MSG_ABORT_TASK: io->taskio.task_action = CTL_TASK_ABORT_TASK; break; case MSG_LOGICAL_UNIT_RESET: io->taskio.task_action = CTL_TASK_LUN_RESET; break; case MSG_CLEAR_TASK_SET: /* * XXX KDM this isn't currently * supported by CTL. It ends up * being a no-op. */ io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET; break; case MSG_CLEAR_ACA: io->taskio.task_action = CTL_TASK_CLEAR_ACA; break; case MSG_NOOP: send_ctl_io = 0; break; default: xpt_print(periph->path, "%s: " "unsupported message 0x%x\n", __func__, inot->arg); send_ctl_io = 0; break; } break; case CAM_REQ_ABORTED: /* * This request was sent back by the driver. * XXX KDM what do we do here? */ send_ctl_io = 0; break; case CAM_REQ_INVALID: case CAM_PROVIDE_FAIL: default: /* * We should only get here if we're talking * to a talking to a SIM that is target * capable but supports the old API. In * that case, we need to just free the CCB. * If we actually send a notify acknowledge, * it will send that back with an error as * well. 
*/ if ((status != CAM_REQ_INVALID) && (status != CAM_PROVIDE_FAIL)) xpt_print(periph->path, "%s: " "unsupported CAM status " "0x%x\n", __func__, status); ctl_free_io(io); ctlfe_free_ccb(periph, done_ccb); return; } if (send_ctl_io != 0) { ctl_queue(io); } else { ctl_free_io(io); done_ccb->ccb_h.status = CAM_REQ_INPROG; done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; xpt_action(done_ccb); } } else { xpt_print(periph->path, "%s: could not allocate " "ctl_io for immediate notify!\n", __func__); /* requeue this to the adapter */ done_ccb->ccb_h.status = CAM_REQ_INPROG; done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; xpt_action(done_ccb); } if (frozen != 0) { cam_release_devq(periph->path, /*relsim_flags*/ 0, /*opening reduction*/ 0, /*timeout*/ 0, /*getcount_only*/ 0); } break; } case XPT_NOTIFY_ACKNOWLEDGE: /* * Queue this back down to the SIM as an immediate notify. */ done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; xpt_action(done_ccb); softc->inots_sent++; break; case XPT_ABORT: /* * XPT_ABORT is an immediate CCB, we shouldn't get here. */ panic("%s: XPT_ABORT CCB returned!", __func__); break; case XPT_SET_SIM_KNOB: case XPT_GET_SIM_KNOB: break; default: panic("%s: unexpected CCB type %#x", __func__, done_ccb->ccb_h.func_code); break; } } static void ctlfe_onoffline(void *arg, int online) { struct ctlfe_softc *bus_softc; union ccb *ccb; cam_status status; struct cam_path *path; struct cam_sim *sim; int set_wwnn; bus_softc = (struct ctlfe_softc *)arg; set_wwnn = 0; status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path!\n", __func__); return; } ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, M_WAITOK | M_ZERO); xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); sim = xpt_path_sim(path); /* * Copan WWN format: * * Bits 63-60: 0x5 NAA, IEEE registered name * Bits 59-36: 0x000ED5 IEEE Company name assigned to Copan * Bits 35-12: Copan SSN (Sequential Serial Number) * Bits 11-8: Type of port: * 1 == N-Port * 2 == F-Port * 3 == NL-Port * Bits 7-0: 0 == Node Name, >0 == Port Number */ if (online != 0) { ccb->ccb_h.func_code = XPT_GET_SIM_KNOB; CAM_SIM_LOCK(sim); xpt_action(ccb); CAM_SIM_UNLOCK(sim); if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){ #ifdef RANDOM_WWNN uint64_t random_bits; #endif printf("%s: %s current WWNN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwnn); printf("%s: %s current WWPN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwpn); #ifdef RANDOM_WWNN arc4rand(&random_bits, sizeof(random_bits), 0); #endif /* * XXX KDM this is a bit of a kludge for now. We * take the current WWNN/WWPN from the card, and * replace the company identifier and the NL-Port * indicator and the port number (for the WWPN). * This should be replaced later with ddb_GetWWNN, * or possibly a more centralized scheme. (It * would be nice to have the WWNN/WWPN for each * port stored in the ctl_frontend structure.) */ #ifdef RANDOM_WWNN ccb->knob.xport_specific.fc.wwnn = (random_bits & 0x0000000fffffff00ULL) | /* Company ID */ 0x5000ED5000000000ULL | /* NL-Port */ 0x0300; ccb->knob.xport_specific.fc.wwpn = (random_bits & 0x0000000fffffff00ULL) | /* Company ID */ 0x5000ED5000000000ULL | /* NL-Port */ 0x3000 | /* Port Num */ (bus_softc->fe.targ_port & 0xff); /* * This is a bit of an API break/reversal, but if * we're doing the random WWNN that's a little * different anyway. 
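/*
 * Illustrative sketch, not part of this change: composing a WWN from the
 * Copan layout documented above (hypothetical helper, field values are
 * examples only).
 */
static uint64_t
copan_wwn(uint32_t ssn, uint8_t port_type, uint8_t port_num)
{
	return (((uint64_t)0x5 << 60) |			/* NAA, IEEE registered */
	    ((uint64_t)0x000ED5 << 36) |		/* IEEE company ID */
	    (((uint64_t)ssn & 0xffffff) << 12) |	/* sequential serial number */
	    (((uint64_t)port_type & 0xf) << 8) |	/* 1=N, 2=F, 3=NL port */
	    port_num);					/* 0 == node name */
}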
So record what we're actually * using with the frontend code so it's reported * accurately. */ bus_softc->fe.wwnn = ccb->knob.xport_specific.fc.wwnn; bus_softc->fe.wwpn = ccb->knob.xport_specific.fc.wwpn; set_wwnn = 1; #else /* RANDOM_WWNN */ /* * If the user has specified a WWNN/WWPN, send them * down to the SIM. Otherwise, record what the SIM * has reported. */ if ((bus_softc->fe.wwnn != 0) && (bus_softc->fe.wwpn != 0)) { ccb->knob.xport_specific.fc.wwnn = bus_softc->fe.wwnn; ccb->knob.xport_specific.fc.wwpn = bus_softc->fe.wwpn; set_wwnn = 1; } else { bus_softc->fe.wwnn = ccb->knob.xport_specific.fc.wwnn; bus_softc->fe.wwpn = ccb->knob.xport_specific.fc.wwpn; } #endif /* RANDOM_WWNN */ if (set_wwnn != 0) { printf("%s: %s new WWNN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwnn); printf("%s: %s new WWPN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwpn); } } else { printf("%s: %s has no valid WWNN/WWPN\n", __func__, bus_softc->port_name); } } ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; if (set_wwnn != 0) ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS; if (online != 0) ccb->knob.xport_specific.fc.role = KNOB_ROLE_TARGET; else ccb->knob.xport_specific.fc.role = KNOB_ROLE_NONE; CAM_SIM_LOCK(sim); xpt_action(ccb); CAM_SIM_UNLOCK(sim); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { printf("%s: SIM %s (path id %d) target %s failed with " "status %#x\n", __func__, bus_softc->port_name, bus_softc->path_id, (online != 0) ? "enable" : "disable", ccb->ccb_h.status); } else { printf("%s: SIM %s (path id %d) target %s succeeded\n", __func__, bus_softc->port_name, bus_softc->path_id, (online != 0) ? "enable" : "disable"); } free(ccb, M_TEMP); xpt_free_path(path); return; } static void ctlfe_online(void *arg) { ctlfe_onoffline(arg, /*online*/ 1); } static void ctlfe_offline(void *arg) { ctlfe_onoffline(arg, /*online*/ 0); } static int ctlfe_targ_enable(void *arg, struct ctl_id targ_id) { return (0); } static int ctlfe_targ_disable(void *arg, struct ctl_id targ_id) { return (0); } /* * This will get called to enable a LUN on every bus that is attached to * CTL. So we only need to create a path/periph for this particular bus. */ static int ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; struct cam_path *path; struct cam_periph *periph; struct cam_sim *sim; cam_status status; bus_softc = (struct ctlfe_softc *)arg; status = xpt_create_path_unlocked(&path, /*periph*/ NULL, bus_softc->path_id, targ_id.id, lun_id); /* XXX KDM need some way to return status to CTL here? */ if (status != CAM_REQ_CMP) { printf("%s: could not create path, status %#x\n", __func__, status); return (1); } softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO); sim = xpt_path_sim(path); mtx_lock(sim->mtx); periph = cam_periph_find(path, "ctl"); if (periph != NULL) { /* We've already got a periph, no need to alloc a new one. */ xpt_free_path(path); free(softc, M_CTLFE); mtx_unlock(sim->mtx); return (0); } softc->parent_softc = bus_softc; STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links); status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate, ctlfecleanup, ctlfestart, "ctl", CAM_PERIPH_BIO, path, ctlfeasync, 0, softc); mtx_unlock(sim->mtx); xpt_free_path(path); return (0); } /* * XXX KDM we disable LUN removal here. The problem is that the isp(4) * driver doesn't currently handle LUN removal properly. 
We need to keep * enough state here at the peripheral level even after LUNs have been * removed inside CTL. * * Once the isp(4) driver is fixed, this can be re-enabled. */ static int ctlfe_lun_disable(void *arg, struct ctl_id targ_id, int lun_id) { #ifdef NOTYET struct ctlfe_softc *softc; struct ctlfe_lun_softc *lun_softc; softc = (struct ctlfe_softc *)arg; mtx_lock(softc->sim->mtx); STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) { struct cam_path *path; path = lun_softc->periph->path; if ((xpt_path_target_id(path) == targ_id.id) && (xpt_path_lun_id(path) == lun_id)) { break; } } if (lun_softc == NULL) { mtx_unlock(softc->sim->mtx); printf("%s: can't find target %d lun %d\n", __func__, targ_id.id, lun_id); return (1); } cam_periph_invalidate(lun_softc->periph); mtx_unlock(softc->sim->mtx); #endif return (0); } static void ctlfe_dump_sim(struct cam_sim *sim) { int i; printf("%s%d: max tagged openings: %d, max dev openings: %d\n", sim->sim_name, sim->unit_number, sim->max_tagged_dev_openings, sim->max_dev_openings); printf("%s%d: max_ccbs: %u, ccb_count: %u\n", sim->sim_name, sim->unit_number, sim->max_ccbs, sim->ccb_count); printf("%s%d: ccb_freeq is %sempty\n", sim->sim_name, sim->unit_number, (SLIST_FIRST(&sim->ccb_freeq) == NULL) ? "" : "NOT "); printf("%s%d: alloc_queue.entries %d, alloc_openings %d\n", sim->sim_name, sim->unit_number, sim->devq->alloc_queue.entries, sim->devq->alloc_openings); printf("%s%d: qfrozen_cnt:", sim->sim_name, sim->unit_number); for (i = 0; i < CAM_RL_VALUES; i++) { printf("%s%u", (i != 0) ? ":" : "", sim->devq->alloc_queue.qfrozen_cnt[i]); } printf("\n"); } /* * Assumes that the SIM lock is held. */ static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc) { struct ccb_hdr *hdr; struct cam_periph *periph; int num_items; periph = softc->periph; num_items = 0; TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) { union ctl_io *io; io = hdr->io_ptr; num_items++; /* * This can happen when we get an ATIO but can't allocate * a ctl_io. See the XPT_ACCEPT_TARGET_IO case in ctlfedone(). */ if (io == NULL) { struct ccb_scsiio *csio; csio = (struct ccb_scsiio *)hdr; xpt_print(periph->path, "CCB %#x ctl_io allocation " "failed\n", csio->tag_id); continue; } /* * Only regular SCSI I/O is put on the work * queue, so we can print sense here. There may be no * sense if it's no the queue for a DMA, but this serves to * print out the CCB as well. * * XXX KDM switch this over to scsi_sense_print() when * CTL is merged in with CAM. */ ctl_io_error_print(io, NULL); /* * We're sending status back to the * initiator, so we're on the queue waiting * for a CTIO to do that. */ if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) continue; /* * Otherwise, we're on the queue waiting to * do a data transfer. */ xpt_print(periph->path, "Total %u, Current %u, Resid %u\n", io->scsiio.kern_total_len, io->scsiio.kern_data_len, io->scsiio.kern_data_resid); } xpt_print(periph->path, "%d requests total waiting for CCBs\n", num_items); xpt_print(periph->path, "%ju CCBs oustanding (%ju allocated, %ju " "freed)\n", (uintmax_t)(softc->ccbs_alloced - softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced, (uintmax_t)softc->ccbs_freed); xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju " "returned\n", (uintmax_t)(softc->ctios_sent - softc->ctios_returned), softc->ctios_sent, softc->ctios_returned); } /* * This function is called when we fail to get a CCB for a DMA or status return * to the initiator within the specified time period. 
* * The callout code should insure that we hold the sim mutex here. */ static void ctlfe_dma_timeout(void *arg) { struct ctlfe_lun_softc *softc; struct cam_periph *periph; struct cam_sim *sim; int num_queued; softc = (struct ctlfe_lun_softc *)arg; periph = softc->periph; sim = xpt_path_sim(periph->path); num_queued = 0; /* * Nothing to do... */ if (TAILQ_FIRST(&softc->work_queue) == NULL) { xpt_print(periph->path, "TIMEOUT triggered after %d " "seconds, but nothing on work queue??\n", CTLFE_DMA_TIMEOUT); return; } xpt_print(periph->path, "TIMEOUT (%d seconds) waiting for DMA to " "start\n", CTLFE_DMA_TIMEOUT); ctlfe_dump_queue(softc); ctlfe_dump_sim(sim); xpt_print(periph->path, "calling xpt_schedule() to attempt to " "unstick our queue\n"); xpt_schedule(periph, /*priority*/ 1); xpt_print(periph->path, "xpt_schedule() call complete\n"); } /* * Datamove/done routine called by CTL. Put ourselves on the queue to * receive a CCB from CAM so we can queue the continue I/O request down * to the adapter. */ static void ctlfe_datamove_done(union ctl_io *io) { union ccb *ccb; struct cam_sim *sim; struct cam_periph *periph; struct ctlfe_lun_softc *softc; ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; sim = xpt_path_sim(ccb->ccb_h.path); mtx_lock(sim->mtx); periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct ctlfe_lun_softc *)periph->softc; if (io->io_hdr.io_type == CTL_IO_TASK) { /* * Task management commands don't require any further * communication back to the adapter. Requeue the CCB * to the adapter, and free the CTL I/O. */ xpt_print(ccb->ccb_h.path, "%s: returning task I/O " "tag %#x seq %#x\n", __func__, ccb->cin1.tag_id, ccb->cin1.seq_id); /* * Send the notify acknowledge down to the SIM, to let it * know we processed the task management command. */ ccb->ccb_h.status = CAM_REQ_INPROG; ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; xpt_action(ccb); ctl_free_io(io); } else { if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; else io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, periph_links.tqe); /* * Reset the timeout for our latest active DMA. */ callout_reset(&softc->dma_callout, CTLFE_DMA_TIMEOUT * hz, ctlfe_dma_timeout, softc); /* * Ask for the CAM transport layer to send us a CCB to do * the DMA or send status, unless ctlfe_dma_enabled is set * to 0. */ if (ctlfe_dma_enabled != 0) xpt_schedule(periph, /*priority*/ 1); } mtx_unlock(sim->mtx); } static void ctlfe_dump(void) { struct ctlfe_softc *bus_softc; STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) { struct ctlfe_lun_softc *lun_softc; ctlfe_dump_sim(bus_softc->sim); STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) { ctlfe_dump_queue(lun_softc); } } } Index: projects/physbio/sys/cam/scsi/scsi_pass.c =================================================================== --- projects/physbio/sys/cam/scsi/scsi_pass.c (revision 243875) +++ projects/physbio/sys/cam/scsi/scsi_pass.c (revision 243876) @@ -1,708 +1,711 @@ /*- * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. 
The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef enum { PASS_FLAG_OPEN = 0x01, PASS_FLAG_LOCKED = 0x02, PASS_FLAG_INVALID = 0x04, PASS_FLAG_INITIAL_PHYSPATH = 0x08 } pass_flags; typedef enum { PASS_STATE_NORMAL } pass_state; typedef enum { PASS_CCB_BUFFER_IO, PASS_CCB_WAITING } pass_ccb_types; #define ccb_type ppriv_field0 #define ccb_bp ppriv_ptr1 struct pass_softc { pass_state state; pass_flags flags; u_int8_t pd_type; union ccb saved_ccb; struct devstat *device_stats; struct cdev *dev; struct cdev *alias_dev; struct task add_physpath_task; }; static d_open_t passopen; static d_close_t passclose; static d_ioctl_t passioctl; static periph_init_t passinit; static periph_ctor_t passregister; static periph_oninv_t passoninvalidate; static periph_dtor_t passcleanup; static periph_start_t passstart; static void pass_add_physpath(void *context, int pending); static void passasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void passdone(struct cam_periph *periph, union ccb *done_ccb); static int passerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb); static struct periph_driver passdriver = { passinit, "pass", TAILQ_HEAD_INITIALIZER(passdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(pass, passdriver); static struct cdevsw pass_cdevsw = { .d_version = D_VERSION, .d_flags = D_TRACKCLOSE, .d_open = passopen, .d_close = passclose, .d_ioctl = passioctl, .d_name = "pass", }; static void passinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, passasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("pass: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void passdevgonecb(void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)arg; xpt_print(periph->path, "%s: devfs entry is gone\n", __func__); cam_periph_release(periph); } static void passoninvalidate(struct cam_periph *periph) { struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, passasync, periph, periph->path); softc->flags |= PASS_FLAG_INVALID; /* * Tell devfs this device has gone away, and ask for a callback * when it has cleaned up its state. 
*/ destroy_dev_sched_cb(softc->dev, passdevgonecb, periph); /* * XXX Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ if (bootverbose) { xpt_print(periph->path, "lost device\n"); } } static void passcleanup(struct cam_periph *periph) { struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; if (bootverbose) xpt_print(periph->path, "removing device entry\n"); devstat_remove_entry(softc->device_stats); cam_periph_unlock(periph); taskqueue_drain(taskqueue_thread, &softc->add_physpath_task); cam_periph_lock(periph); free(softc, M_DEVBUF); } static void pass_add_physpath(void *context, int pending) { struct cam_periph *periph; struct pass_softc *softc; char *physpath; /* * If we have one, create a devfs alias for our * physical path. */ periph = context; softc = periph->softc; physpath = malloc(MAXPATHLEN, M_DEVBUF, M_WAITOK); cam_periph_lock(periph); if (periph->flags & CAM_PERIPH_INVALID) { cam_periph_unlock(periph); goto out; } if (xpt_getattr(physpath, MAXPATHLEN, "GEOM::physpath", periph->path) == 0 && strlen(physpath) != 0) { cam_periph_unlock(periph); make_dev_physpath_alias(MAKEDEV_WAITOK, &softc->alias_dev, softc->dev, softc->alias_dev, physpath); cam_periph_lock(periph); } /* * Now that we've made our alias, we no longer have to have a * reference to the device. */ if ((softc->flags & PASS_FLAG_INITIAL_PHYSPATH) == 0) { softc->flags |= PASS_FLAG_INITIAL_PHYSPATH; cam_periph_unlock(periph); dev_rel(softc->dev); } else cam_periph_unlock(periph); out: free(physpath, M_DEVBUF); } static void passasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(passregister, passoninvalidate, passcleanup, passstart, "pass", CAM_PERIPH_BIO, cgd->ccb_h.path, passasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("passasync: Unable to attach new device " "due to status %#x: %s\n", status, entry ? entry->status_text : "Unknown"); } break; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; taskqueue_enqueue(taskqueue_thread, &softc->add_physpath_task); } break; } default: cam_periph_async(periph, code, path, arg); break; } } static cam_status passregister(struct cam_periph *periph, void *arg) { struct pass_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; int no_tags; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("%s: no getdev CCB, can't register device\n", __func__); return(CAM_REQ_CMP_ERR); } softc = (struct pass_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT); if (softc == NULL) { printf("%s: Unable to probe new device. 
" "Unable to allocate softc\n", __func__); return(CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); softc->state = PASS_STATE_NORMAL; if (cgd->protocol == PROTO_SCSI || cgd->protocol == PROTO_ATAPI) softc->pd_type = SID_TYPE(&cgd->inq_data); else if (cgd->protocol == PROTO_SATAPM) softc->pd_type = T_ENCLOSURE; else softc->pd_type = T_DIRECT; periph->softc = softc; bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* * We pass in 0 for a blocksize, since we don't * know what the blocksize of this device is, if * it even has a blocksize. */ mtx_unlock(periph->sim->mtx); no_tags = (cgd->inq_data.flags & SID_CmdQue) == 0; softc->device_stats = devstat_new_entry("pass", periph->unit_number, 0, DEVSTAT_NO_BLOCKSIZE | (no_tags ? DEVSTAT_NO_ORDERED_TAGS : 0), softc->pd_type | XPORT_DEVSTAT_TYPE(cpi.transport) | DEVSTAT_TYPE_PASS, DEVSTAT_PRIORITY_PASS); /* * Acquire a reference to the periph before we create the devfs * instance for it. We'll release this reference once the devfs * instance has been freed. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); mtx_lock(periph->sim->mtx); return (CAM_REQ_CMP_ERR); } /* Register the device */ softc->dev = make_dev(&pass_cdevsw, periph->unit_number, UID_ROOT, GID_OPERATOR, 0600, "%s%d", periph->periph_name, periph->unit_number); /* * Now that we have made the devfs instance, hold a reference to it * until the task queue has run to setup the physical path alias. * That way devfs won't get rid of the device before we add our * alias. */ dev_ref(softc->dev); mtx_lock(periph->sim->mtx); softc->dev->si_drv1 = periph; TASK_INIT(&softc->add_physpath_task, /*priority*/0, pass_add_physpath, periph); /* * See if physical path information is already available. */ taskqueue_enqueue(taskqueue_thread, &softc->add_physpath_task); /* * Add an async callback so that we get notified if * this device goes away or its physical path * (stored in the advanced info data of the EDT) has * changed. */ xpt_register_async(AC_LOST_DEVICE | AC_ADVINFO_CHANGED, passasync, periph, periph->path); if (bootverbose) xpt_announce_periph(periph, NULL); return(CAM_REQ_CMP); } static int passopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) return (ENXIO); cam_periph_lock(periph); softc = (struct pass_softc *)periph->softc; if (softc->flags & PASS_FLAG_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(ENXIO); } /* * Don't allow access when we're running at a high securelevel. */ error = securelevel_gt(td->td_ucred, 1); if (error) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(error); } /* * Only allow read-write access. */ if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(EPERM); } /* * We don't allow nonblocking access. 
*/ if ((flags & O_NONBLOCK) != 0) { xpt_print(periph->path, "can't do nonblocking access\n"); cam_periph_release_locked(periph); cam_periph_unlock(periph); return(EINVAL); } cam_periph_unlock(periph); return (error); } static int passclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; periph = (struct cam_periph *)dev->si_drv1; if (periph == NULL) return (ENXIO); cam_periph_release(periph); return (0); } static void passstart(struct cam_periph *periph, union ccb *start_ccb) { struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; switch (softc->state) { case PASS_STATE_NORMAL: start_ccb->ccb_h.ccb_type = PASS_CCB_WAITING; SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, periph_links.sle); periph->immediate_priority = CAM_PRIORITY_NONE; wakeup(&periph->ccb_list); break; } } static void passdone(struct cam_periph *periph, union ccb *done_ccb) { struct pass_softc *softc; struct ccb_scsiio *csio; softc = (struct pass_softc *)periph->softc; csio = &done_ccb->csio; switch (csio->ccb_h.ccb_type) { case PASS_CCB_WAITING: /* Caller will release the CCB */ wakeup(&done_ccb->ccb_h.cbfcnp); return; } xpt_release_ccb(done_ccb); } static int passioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; int error; uint32_t priority; periph = (struct cam_periph *)dev->si_drv1; if (periph == NULL) return(ENXIO); cam_periph_lock(periph); softc = (struct pass_softc *)periph->softc; error = 0; switch (cmd) { case CAMIOCOMMAND: { union ccb *inccb; union ccb *ccb; int ccb_malloced; inccb = (union ccb *)addr; /* * Some CCB types, like scan bus and scan lun can only go * through the transport layer device. */ if (inccb->ccb_h.func_code & XPT_FC_XPT_ONLY) { xpt_print(periph->path, "CCB function code %#x is " "restricted to the XPT device\n", inccb->ccb_h.func_code); error = ENODEV; break; } /* Compatibility for RL/priority-unaware code. */ priority = inccb->ccb_h.pinfo.priority; if (priority < CAM_RL_TO_PRIORITY(CAM_RL_NORMAL)) priority += CAM_RL_TO_PRIORITY(CAM_RL_NORMAL); /* * Non-immediate CCBs need a CCB from the per-device pool * of CCBs, which is scheduled by the transport layer. * Immediate CCBs and user-supplied CCBs should just be * malloced. */ if ((inccb->ccb_h.func_code & XPT_FC_QUEUED) && ((inccb->ccb_h.func_code & XPT_FC_USER_CCB) == 0)) { ccb = cam_periph_getccb(periph, priority); ccb_malloced = 0; } else { ccb = xpt_alloc_ccb_nowait(); if (ccb != NULL) xpt_setup_ccb(&ccb->ccb_h, periph->path, priority); ccb_malloced = 1; } if (ccb == NULL) { xpt_print(periph->path, "unable to allocate CCB\n"); error = ENOMEM; break; } error = passsendccb(periph, ccb, inccb); if (ccb_malloced) xpt_free_ccb(ccb); else xpt_release_ccb(ccb); break; } default: error = cam_periph_ioctl(periph, cmd, addr, passerror); break; } cam_periph_unlock(periph); return(error); } /* * Generally, "ccb" should be the CCB supplied by the kernel. "inccb" * should be the CCB that is copied in from the user. */ static int passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb) { struct pass_softc *softc; struct cam_periph_map_info mapinfo; int error, need_unmap; softc = (struct pass_softc *)periph->softc; need_unmap = 0; /* * There are some fields in the CCB header that need to be * preserved, the rest we get from the user. */ xpt_merge_ccb(ccb, inccb); /* * There's no way for the user to have a completion * function, so we put our own completion function in here. 
*/ ccb->ccb_h.cbfcnp = passdone; /* * We only attempt to map the user memory into kernel space * if they haven't passed in a physical memory pointer, * and if there is actually an I/O operation to perform. * cam_periph_mapmem() supports SCSI, ATA, SMP, ADVINFO and device * match CCBs. For the SCSI, ATA and ADVINFO CCBs, we only pass the * CCB in if there's actually data to map. cam_periph_mapmem() will * do the right thing, even if there isn't data to map, but since CCBs * without data are a reasonably common occurance (e.g. test unit * ready), it will save a few cycles if we check for it here. + * + * XXX What happens if a sg list is supplied? We don't filter that + * out. */ - if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) + if (((ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) && (((ccb->ccb_h.func_code == XPT_SCSI_IO || ccb->ccb_h.func_code == XPT_ATA_IO) && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE)) || (ccb->ccb_h.func_code == XPT_DEV_MATCH) || (ccb->ccb_h.func_code == XPT_SMP_IO) || ((ccb->ccb_h.func_code == XPT_DEV_ADVINFO) && (ccb->cdai.bufsiz > 0)))) { bzero(&mapinfo, sizeof(mapinfo)); /* * cam_periph_mapmem calls into proc and vm functions that can * sleep as well as trigger I/O, so we can't hold the lock. * Dropping it here is reasonably safe. */ cam_periph_unlock(periph); error = cam_periph_mapmem(ccb, &mapinfo); cam_periph_lock(periph); /* * cam_periph_mapmem returned an error, we can't continue. * Return the error to the user. */ if (error) return(error); /* * We successfully mapped the memory in, so we need to * unmap it when the transaction is done. */ need_unmap = 1; } /* * If the user wants us to perform any error recovery, then honor * that request. Otherwise, it's up to the user to perform any * error recovery. */ cam_periph_runccb(ccb, passerror, /* cam_flags */ CAM_RETRY_SELTO, /* sense_flags */ ((ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) ? SF_RETRY_UA : SF_NO_RECOVERY) | SF_NO_PRINT, softc->device_stats); if (need_unmap != 0) cam_periph_unmapmem(ccb, &mapinfo); ccb->ccb_h.cbfcnp = NULL; ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv; bcopy(ccb, inccb, sizeof(union ccb)); return(0); } static int passerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct cam_periph *periph; struct pass_softc *softc; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct pass_softc *)periph->softc; return(cam_periph_error(ccb, cam_flags, sense_flags, &softc->saved_ccb)); } Index: projects/physbio/sys/dev/aac/aac_cam.c =================================================================== --- projects/physbio/sys/dev/aac/aac_cam.c (revision 243875) +++ projects/physbio/sys/dev/aac/aac_cam.c (revision 243876) @@ -1,686 +1,688 @@ /*- * Copyright (c) 2002 Adaptec, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * CAM front-end for communicating with non-DASD devices */ #include "opt_aac.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct aac_cam { device_t dev; struct aac_sim *inf; struct cam_sim *sim; struct cam_path *path; }; static int aac_cam_probe(device_t dev); static int aac_cam_attach(device_t dev); static int aac_cam_detach(device_t dev); static void aac_cam_action(struct cam_sim *, union ccb *); static void aac_cam_poll(struct cam_sim *); static void aac_cam_complete(struct aac_command *); static void aac_cam_rescan(struct aac_softc *sc, uint32_t channel, uint32_t target_id); static u_int32_t aac_cam_reset_bus(struct cam_sim *, union ccb *); static u_int32_t aac_cam_abort_ccb(struct cam_sim *, union ccb *); static u_int32_t aac_cam_term_io(struct cam_sim *, union ccb *); static devclass_t aac_pass_devclass; static device_method_t aac_pass_methods[] = { DEVMETHOD(device_probe, aac_cam_probe), DEVMETHOD(device_attach, aac_cam_attach), DEVMETHOD(device_detach, aac_cam_detach), { 0, 0 } }; static driver_t aac_pass_driver = { "aacp", aac_pass_methods, sizeof(struct aac_cam) }; DRIVER_MODULE(aacp, aac, aac_pass_driver, aac_pass_devclass, 0, 0); MODULE_DEPEND(aacp, cam, 1, 1, 1); static MALLOC_DEFINE(M_AACCAM, "aaccam", "AAC CAM info"); static void aac_cam_rescan(struct aac_softc *sc, uint32_t channel, uint32_t target_id) { union ccb *ccb; struct aac_sim *sim; struct aac_cam *camsc; if (target_id == AAC_CAM_TARGET_WILDCARD) target_id = CAM_TARGET_WILDCARD; TAILQ_FOREACH(sim, &sc->aac_sim_tqh, sim_link) { camsc = sim->aac_cam; if (camsc == NULL || camsc->inf == NULL || camsc->inf->BusNumber != channel) continue; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { device_printf(sc->aac_dev, "Cannot allocate ccb for bus rescan.\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(camsc->sim), target_id, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); device_printf(sc->aac_dev, "Cannot create path for bus rescan.\n"); return; } xpt_rescan(ccb); break; } } static void aac_cam_event(struct aac_softc *sc, struct aac_event *event, void *arg) { union ccb *ccb; struct aac_cam *camsc; switch (event->ev_type) { case AAC_EVENT_CMFREE: ccb = arg; camsc = ccb->ccb_h.sim_priv.entries[0].ptr; free(event, M_AACCAM); xpt_release_simq(camsc->sim, 1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); break; default: device_printf(sc->aac_dev, "unknown event %d in aac_cam\n", event->ev_type); break; } return; } static int aac_cam_probe(device_t dev) { fwprintf(NULL, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return (0); } static int aac_cam_detach(device_t dev) { struct aac_softc *sc; struct aac_cam *camsc; fwprintf(NULL, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); camsc = (struct aac_cam *)device_get_softc(dev); sc = camsc->inf->aac_sc; 
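	/*
	 * Detach this bus from CAM: announce the loss of the wildcard
	 * path and free it, deregister the SIM from the transport layer
	 * and free it together with its device queue, and clear the
	 * controller's rescan callback.
	 */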
camsc->inf->aac_cam = NULL; mtx_lock(&sc->aac_io_lock); xpt_async(AC_LOST_DEVICE, camsc->path, NULL); xpt_free_path(camsc->path); xpt_bus_deregister(cam_sim_path(camsc->sim)); cam_sim_free(camsc->sim, /*free_devq*/TRUE); sc->cam_rescan_cb = NULL; mtx_unlock(&sc->aac_io_lock); return (0); } /* * Register the driver as a CAM SIM */ static int aac_cam_attach(device_t dev) { struct cam_devq *devq; struct cam_sim *sim; struct cam_path *path; struct aac_cam *camsc; struct aac_sim *inf; fwprintf(NULL, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); camsc = (struct aac_cam *)device_get_softc(dev); inf = (struct aac_sim *)device_get_ivars(dev); camsc->inf = inf; camsc->inf->aac_cam = camsc; devq = cam_simq_alloc(inf->TargetsPerBus); if (devq == NULL) return (EIO); sim = cam_sim_alloc(aac_cam_action, aac_cam_poll, "aacp", camsc, device_get_unit(dev), &inf->aac_sc->aac_io_lock, 1, 1, devq); if (sim == NULL) { cam_simq_free(devq); return (EIO); } /* Since every bus has it's own sim, every bus 'appears' as bus 0 */ mtx_lock(&inf->aac_sc->aac_io_lock); if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS) { cam_sim_free(sim, TRUE); mtx_unlock(&inf->aac_sc->aac_io_lock); return (EIO); } if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sim)); cam_sim_free(sim, TRUE); mtx_unlock(&inf->aac_sc->aac_io_lock); return (EIO); } inf->aac_sc->cam_rescan_cb = aac_cam_rescan; mtx_unlock(&inf->aac_sc->aac_io_lock); camsc->sim = sim; camsc->path = path; return (0); } static void aac_cam_action(struct cam_sim *sim, union ccb *ccb) { struct aac_cam *camsc; struct aac_softc *sc; struct aac_srb *srb; struct aac_fib *fib; struct aac_command *cm; camsc = (struct aac_cam *)cam_sim_softc(sim); sc = camsc->inf->aac_sc; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* Synchronous ops, and ops that don't require communication with the * controller */ switch(ccb->ccb_h.func_code) { case XPT_SCSI_IO: case XPT_RESET_DEV: /* These are handled down below */ break; case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; u_int32_t size_mb; u_int32_t secs_per_cylinder; ccg = &ccb->ccg; size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); if (size_mb >= (2 * 1024)) { /* 2GB */ ccg->heads = 255; ccg->secs_per_track = 63; } else if (size_mb >= (1 * 1024)) { /* 1GB */ ccg->heads = 128; ccg->secs_per_track = 32; } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_WIDE_16; cpi->target_sprt = 0; /* * Resetting via the passthrough or parallel bus scan * causes problems. 
*/ cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN; cpi->hba_eng_cnt = 0; cpi->max_target = camsc->inf->TargetsPerBus; cpi->max_lun = 8; /* Per the controller spec */ cpi->initiator_id = camsc->inf->InitiatorBusId; cpi->bus_id = camsc->inf->BusNumber; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings_scsi *scsi = &ccb->cts.proto_specific.scsi; struct ccb_trans_settings_spi *spi = &ccb->cts.xport_specific.spi; ccb->cts.protocol = PROTO_SCSI; ccb->cts.protocol_version = SCSI_REV_2; ccb->cts.transport = XPORT_SPI; ccb->cts.transport_version = 2; if (ccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; spi->valid |= CTS_SPI_VALID_DISC; } else { scsi->valid = 0; } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); return; case XPT_RESET_BUS: if (!(sc->flags & AAC_FLAGS_CAM_NORESET)) { ccb->ccb_h.status = aac_cam_reset_bus(sim, ccb); } else { ccb->ccb_h.status = CAM_REQ_CMP; } xpt_done(ccb); return; case XPT_ABORT: ccb->ccb_h.status = aac_cam_abort_ccb(sim, ccb); xpt_done(ccb); return; case XPT_TERM_IO: ccb->ccb_h.status = aac_cam_term_io(sim, ccb); xpt_done(ccb); return; default: device_printf(sc->aac_dev, "Unsupported command 0x%x\n", ccb->ccb_h.func_code); ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); return; } /* Async ops that require communcation with the controller */ if (aac_alloc_command(sc, &cm)) { struct aac_event *event; xpt_freeze_simq(sim, 1); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; ccb->ccb_h.sim_priv.entries[0].ptr = camsc; event = malloc(sizeof(struct aac_event), M_AACCAM, M_NOWAIT | M_ZERO); if (event == NULL) { device_printf(sc->aac_dev, "Warning, out of memory for event\n"); return; } event->ev_callback = aac_cam_event; event->ev_arg = ccb; event->ev_type = AAC_EVENT_CMFREE; aac_add_event(sc, event); return; } fib = cm->cm_fib; srb = (struct aac_srb *)&fib->data[0]; cm->cm_datalen = 0; switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: srb->flags = AAC_SRB_FLAGS_DATA_IN; cm->cm_flags |= AAC_CMD_DATAIN; break; case CAM_DIR_OUT: srb->flags = AAC_SRB_FLAGS_DATA_OUT; cm->cm_flags |= AAC_CMD_DATAOUT; break; case CAM_DIR_NONE: srb->flags = AAC_SRB_FLAGS_NO_DATA_XFER; break; default: srb->flags = AAC_SRB_FLAGS_UNSPECIFIED_DIRECTION; cm->cm_flags |= AAC_CMD_DATAIN | AAC_CMD_DATAOUT; break; } switch(ccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct ccb_scsiio *csio = &ccb->csio; srb->function = AAC_SRB_FUNC_EXECUTE_SCSI; /* * Copy the CDB into the SRB. It's only 6-16 bytes, * so a copy is not too expensive. */ srb->cdb_len = csio->cdb_len; if (ccb->ccb_h.flags & CAM_CDB_POINTER) bcopy(csio->cdb_io.cdb_ptr, (u_int8_t *)&srb->cdb[0], srb->cdb_len); else bcopy(csio->cdb_io.cdb_bytes, (u_int8_t *)&srb->cdb[0], srb->cdb_len); /* Set command */ fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ? ScsiPortCommandU64 : ScsiPortCommand; /* Map the s/g list. XXX 32bit addresses only! 
*/ if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { + switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) { + case CAM_DATA_VADDR: srb->data_len = csio->dxfer_len; - if (ccb->ccb_h.flags & CAM_DATA_PHYS) { - /* Send a 32bit command */ - fib->Header.Command = ScsiPortCommand; - srb->sg_map.SgCount = 1; - srb->sg_map.SgEntry[0].SgAddress = - (uint32_t)(uintptr_t)csio->data_ptr; - srb->sg_map.SgEntry[0].SgByteCount = - csio->dxfer_len; - } else { - /* - * Arrange things so that the S/G - * map will get set up automagically - */ - cm->cm_data = (void *)csio->data_ptr; - cm->cm_datalen = csio->dxfer_len; - cm->cm_sgtable = &srb->sg_map; - } - } else { + /* + * Arrange things so that the S/G + * map will get set up automagically + */ + cm->cm_data = (void *)csio->data_ptr; + cm->cm_datalen = csio->dxfer_len; + cm->cm_sgtable = &srb->sg_map; + break; + case CAM_DATA_PADDR: + /* Send a 32bit command */ + fib->Header.Command = ScsiPortCommand; + srb->sg_map.SgCount = 1; + srb->sg_map.SgEntry[0].SgAddress = + (uint32_t)(uintptr_t)csio->data_ptr; + srb->sg_map.SgEntry[0].SgByteCount = + csio->dxfer_len; + srb->data_len = csio->dxfer_len; + break; + default: /* XXX Need to handle multiple s/g elements */ panic("aac_cam: multiple s/g elements"); } } else { srb->sg_map.SgCount = 0; srb->sg_map.SgEntry[0].SgByteCount = 0; srb->data_len = 0; } break; } case XPT_RESET_DEV: if (!(sc->flags & AAC_FLAGS_CAM_NORESET)) { srb->function = AAC_SRB_FUNC_RESET_DEVICE; break; } else { ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } default: break; } srb->bus = camsc->inf->BusNumber; /* Bus number relative to the card */ srb->target = ccb->ccb_h.target_id; srb->lun = ccb->ccb_h.target_lun; srb->timeout = ccb->ccb_h.timeout; /* XXX */ srb->retry_limit = 0; cm->cm_complete = aac_cam_complete; cm->cm_private = ccb; cm->cm_timestamp = time_uptime; fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM; fib->Header.Size = sizeof(struct aac_fib_header) + sizeof(struct aac_srb); aac_enqueue_ready(cm); aac_startio(cm->cm_sc); return; } static void aac_cam_poll(struct cam_sim *sim) { /* * Pinging the interrupt routine isn't very safe, nor is it * really necessary. Do nothing. */ } static void aac_cam_fix_inquiry(struct aac_softc *sc, union ccb *ccb) { struct scsi_inquiry_data *inq; uint8_t *data; uint8_t device, qual; /* If this is an inquiry command, fake things out */ if (ccb->ccb_h.flags & CAM_CDB_POINTER) data = ccb->csio.cdb_io.cdb_ptr; else data = ccb->csio.cdb_io.cdb_bytes; if (data[0] != INQUIRY) return; if (ccb->ccb_h.status == CAM_REQ_CMP) { inq = (struct scsi_inquiry_data *)ccb->csio.data_ptr; device = SID_TYPE(inq); qual = SID_QUAL(inq); /* * We want DASD and PROC devices to only be * visible through the pass device. */ if (((device == T_DIRECT) || (device == T_PROCESSOR) || (sc->flags & AAC_FLAGS_CAM_PASSONLY))) { /* * Some aac(4) adapters will always report that a direct * access device is offline in response to a INQUIRY * command that does not retreive vital product data. * Force the qualifier to connected so that upper layers * correctly recognize that a disk is present. 
*/ if ((data[1] & SI_EVPD) == 0 && device == T_DIRECT && qual == SID_QUAL_LU_OFFLINE) qual = SID_QUAL_LU_CONNECTED; ccb->csio.data_ptr[0] = (qual << 5) | T_NODEVICE; } } else if (ccb->ccb_h.status == CAM_SEL_TIMEOUT && ccb->ccb_h.target_lun != 0) { /* fix for INQUIRYs on Lun>0 */ ccb->ccb_h.status = CAM_DEV_NOT_THERE; } } static void aac_cam_complete(struct aac_command *cm) { union ccb *ccb; struct aac_srb_response *srbr; struct aac_softc *sc; int sense_returned; sc = cm->cm_sc; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); ccb = cm->cm_private; srbr = (struct aac_srb_response *)&cm->cm_fib->data[0]; if (srbr->fib_status != 0) { device_printf(sc->aac_dev, "Passthru FIB failed!\n"); ccb->ccb_h.status = CAM_REQ_ABORTED; } else { /* * The SRB error codes just happen to match the CAM error * codes. How convienient! */ ccb->ccb_h.status = srbr->srb_status; /* Take care of SCSI_IO ops. */ if (ccb->ccb_h.func_code == XPT_SCSI_IO) { ccb->csio.scsi_status = srbr->scsi_status; /* Take care of autosense */ if (srbr->sense_len) { sense_returned = srbr->sense_len; if (sense_returned < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - sense_returned; else ccb->csio.sense_resid = 0; bzero(&ccb->csio.sense_data, sizeof(struct scsi_sense_data)); bcopy(&srbr->sense[0], &ccb->csio.sense_data, min(ccb->csio.sense_len, sense_returned)); ccb->ccb_h.status |= CAM_AUTOSNS_VALID; // scsi_sense_print(&ccb->csio); } aac_cam_fix_inquiry(sc, ccb); } } aac_release_command(cm); xpt_done(ccb); return; } static u_int32_t aac_cam_reset_bus(struct cam_sim *sim, union ccb *ccb) { struct aac_fib *fib; struct aac_softc *sc; struct aac_cam *camsc; struct aac_vmioctl *vmi; struct aac_resetbus *rbc; int e; camsc = (struct aac_cam *)cam_sim_softc(sim); sc = camsc->inf->aac_sc; if (sc == NULL) { printf("aac: Null sc?\n"); return (CAM_REQ_ABORTED); } aac_alloc_sync_fib(sc, &fib); vmi = (struct aac_vmioctl *)&fib->data[0]; bzero(vmi, sizeof(struct aac_vmioctl)); vmi->Command = VM_Ioctl; vmi->ObjType = FT_DRIVE; vmi->MethId = sc->scsi_method_id; vmi->ObjId = 0; vmi->IoctlCmd = ResetBus; rbc = (struct aac_resetbus *)&vmi->IoctlBuf[0]; rbc->BusNumber = camsc->inf->BusNumber; e = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_vmioctl)); if (e) { device_printf(sc->aac_dev,"Error %d sending ResetBus command\n", e); aac_release_sync_fib(sc); return (CAM_REQ_ABORTED); } aac_release_sync_fib(sc); return (CAM_REQ_CMP); } static u_int32_t aac_cam_abort_ccb(struct cam_sim *sim, union ccb *ccb) { return (CAM_UA_ABORT); } static u_int32_t aac_cam_term_io(struct cam_sim *sim, union ccb *ccb) { return (CAM_UA_TERMIO); } Index: projects/physbio/sys/dev/ahci/ahci.c =================================================================== --- projects/physbio/sys/dev/ahci/ahci.c (revision 243875) +++ projects/physbio/sys/dev/ahci/ahci.c (revision 243876) @@ -1,2933 +1,2936 @@ /*- * Copyright (c) 2009-2012 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ahci.h" #include #include #include #include #include /* local prototypes */ static int ahci_setup_interrupt(device_t dev); static void ahci_intr(void *data); static void ahci_intr_one(void *data); static int ahci_suspend(device_t dev); static int ahci_resume(device_t dev); static int ahci_ch_init(device_t dev); static int ahci_ch_deinit(device_t dev); static int ahci_ch_suspend(device_t dev); static int ahci_ch_resume(device_t dev); static void ahci_ch_pm(void *arg); static void ahci_ch_intr_locked(void *data); static void ahci_ch_intr(void *data); static int ahci_ctlr_reset(device_t dev); static int ahci_ctlr_setup(device_t dev); static void ahci_begin_transaction(device_t dev, union ccb *ccb); static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static void ahci_execute_transaction(struct ahci_slot *slot); static void ahci_timeout(struct ahci_slot *slot); static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et); static int ahci_setup_fis(device_t dev, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag); static void ahci_dmainit(device_t dev); static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); static void ahci_dmafini(device_t dev); static void ahci_slotsalloc(device_t dev); static void ahci_slotsfree(device_t dev); static void ahci_reset(device_t dev); static void ahci_start(device_t dev, int fbs); static void ahci_stop(device_t dev); static void ahci_clo(device_t dev); static void ahci_start_fr(device_t dev); static void ahci_stop_fr(device_t dev); static int ahci_sata_connect(struct ahci_channel *ch); static int ahci_sata_phy_reset(device_t dev); static int ahci_wait_ready(device_t dev, int t, int t0); static void ahci_issue_recovery(device_t dev); static void ahci_process_read_log(device_t dev, union ccb *ccb); static void ahci_process_request_sense(device_t dev, union ccb *ccb); static void ahciaction(struct cam_sim *sim, union ccb *ccb); static void ahcipoll(struct cam_sim *sim); static MALLOC_DEFINE(M_AHCI, "AHCI driver", "AHCI driver data buffers"); static struct { uint32_t id; uint8_t rev; const char *name; int quirks; #define AHCI_Q_NOFORCE 1 #define AHCI_Q_NOPMP 2 #define AHCI_Q_NONCQ 4 #define AHCI_Q_1CH 8 #define AHCI_Q_2CH 16 #define AHCI_Q_4CH 32 #define AHCI_Q_EDGEIS 64 #define AHCI_Q_SATA2 128 #define AHCI_Q_NOBSYRES 256 #define AHCI_Q_NOAA 512 #define AHCI_Q_NOCOUNT 1024 #define AHCI_Q_ALTSIG 2048 } ahci_ids[] = { {0x43801002, 0x00, "ATI IXP600", 0}, {0x43901002, 0x00, "ATI IXP700", 0}, {0x43911002, 0x00, "ATI IXP700", 0}, {0x43921002, 0x00, "ATI IXP700", 0}, {0x43931002, 0x00, "ATI 
IXP700", 0}, {0x43941002, 0x00, "ATI IXP800", 0}, {0x43951002, 0x00, "ATI IXP800", 0}, {0x06121b21, 0x00, "ASMedia ASM1061", 0}, {0x26528086, 0x00, "Intel ICH6", AHCI_Q_NOFORCE}, {0x26538086, 0x00, "Intel ICH6M", AHCI_Q_NOFORCE}, {0x26818086, 0x00, "Intel ESB2", 0}, {0x26828086, 0x00, "Intel ESB2", 0}, {0x26838086, 0x00, "Intel ESB2", 0}, {0x27c18086, 0x00, "Intel ICH7", 0}, {0x27c38086, 0x00, "Intel ICH7", 0}, {0x27c58086, 0x00, "Intel ICH7M", 0}, {0x27c68086, 0x00, "Intel ICH7M", 0}, {0x28218086, 0x00, "Intel ICH8", 0}, {0x28228086, 0x00, "Intel ICH8", 0}, {0x28248086, 0x00, "Intel ICH8", 0}, {0x28298086, 0x00, "Intel ICH8M", 0}, {0x282a8086, 0x00, "Intel ICH8M", 0}, {0x29228086, 0x00, "Intel ICH9", 0}, {0x29238086, 0x00, "Intel ICH9", 0}, {0x29248086, 0x00, "Intel ICH9", 0}, {0x29258086, 0x00, "Intel ICH9", 0}, {0x29278086, 0x00, "Intel ICH9", 0}, {0x29298086, 0x00, "Intel ICH9M", 0}, {0x292a8086, 0x00, "Intel ICH9M", 0}, {0x292b8086, 0x00, "Intel ICH9M", 0}, {0x292c8086, 0x00, "Intel ICH9M", 0}, {0x292f8086, 0x00, "Intel ICH9M", 0}, {0x294d8086, 0x00, "Intel ICH9", 0}, {0x294e8086, 0x00, "Intel ICH9M", 0}, {0x3a058086, 0x00, "Intel ICH10", 0}, {0x3a228086, 0x00, "Intel ICH10", 0}, {0x3a258086, 0x00, "Intel ICH10", 0}, {0x3b228086, 0x00, "Intel 5 Series/3400 Series", 0}, {0x3b238086, 0x00, "Intel 5 Series/3400 Series", 0}, {0x3b258086, 0x00, "Intel 5 Series/3400 Series", 0}, {0x3b298086, 0x00, "Intel 5 Series/3400 Series", 0}, {0x3b2c8086, 0x00, "Intel 5 Series/3400 Series", 0}, {0x3b2f8086, 0x00, "Intel 5 Series/3400 Series", 0}, {0x1c028086, 0x00, "Intel Cougar Point", 0}, {0x1c038086, 0x00, "Intel Cougar Point", 0}, {0x1c048086, 0x00, "Intel Cougar Point", 0}, {0x1c058086, 0x00, "Intel Cougar Point", 0}, {0x1d028086, 0x00, "Intel Patsburg", 0}, {0x1d048086, 0x00, "Intel Patsburg", 0}, {0x1d068086, 0x00, "Intel Patsburg", 0}, {0x28268086, 0x00, "Intel Patsburg (RAID)", 0}, {0x1e028086, 0x00, "Intel Panther Point", 0}, {0x1e038086, 0x00, "Intel Panther Point", 0}, {0x1e048086, 0x00, "Intel Panther Point", 0}, {0x1e058086, 0x00, "Intel Panther Point", 0}, {0x1e068086, 0x00, "Intel Panther Point", 0}, {0x1e078086, 0x00, "Intel Panther Point", 0}, {0x1e0e8086, 0x00, "Intel Panther Point", 0}, {0x1e0f8086, 0x00, "Intel Panther Point", 0}, {0x23238086, 0x00, "Intel DH89xxCC", 0}, {0x2360197b, 0x00, "JMicron JMB360", 0}, {0x2361197b, 0x00, "JMicron JMB361", AHCI_Q_NOFORCE}, {0x2362197b, 0x00, "JMicron JMB362", 0}, {0x2363197b, 0x00, "JMicron JMB363", AHCI_Q_NOFORCE}, {0x2365197b, 0x00, "JMicron JMB365", AHCI_Q_NOFORCE}, {0x2366197b, 0x00, "JMicron JMB366", AHCI_Q_NOFORCE}, {0x2368197b, 0x00, "JMicron JMB368", AHCI_Q_NOFORCE}, {0x611111ab, 0x00, "Marvell 88SE6111", AHCI_Q_NOFORCE | AHCI_Q_1CH | AHCI_Q_EDGEIS}, {0x612111ab, 0x00, "Marvell 88SE6121", AHCI_Q_NOFORCE | AHCI_Q_2CH | AHCI_Q_EDGEIS | AHCI_Q_NONCQ | AHCI_Q_NOCOUNT}, {0x614111ab, 0x00, "Marvell 88SE6141", AHCI_Q_NOFORCE | AHCI_Q_4CH | AHCI_Q_EDGEIS | AHCI_Q_NONCQ | AHCI_Q_NOCOUNT}, {0x614511ab, 0x00, "Marvell 88SE6145", AHCI_Q_NOFORCE | AHCI_Q_4CH | AHCI_Q_EDGEIS | AHCI_Q_NONCQ | AHCI_Q_NOCOUNT}, {0x91201b4b, 0x00, "Marvell 88SE912x", AHCI_Q_EDGEIS|AHCI_Q_NOBSYRES}, {0x91231b4b, 0x11, "Marvell 88SE912x", AHCI_Q_NOBSYRES|AHCI_Q_ALTSIG}, {0x91231b4b, 0x00, "Marvell 88SE912x", AHCI_Q_EDGEIS|AHCI_Q_SATA2|AHCI_Q_NOBSYRES}, {0x91251b4b, 0x00, "Marvell 88SE9125", AHCI_Q_NOBSYRES}, {0x91281b4b, 0x00, "Marvell 88SE9128", AHCI_Q_NOBSYRES|AHCI_Q_ALTSIG}, {0x91301b4b, 0x00, "Marvell 88SE9130", AHCI_Q_NOBSYRES|AHCI_Q_ALTSIG}, {0x91721b4b, 0x00, 
"Marvell 88SE9172", AHCI_Q_NOBSYRES}, {0x91821b4b, 0x00, "Marvell 88SE9182", AHCI_Q_NOBSYRES}, {0x92201b4b, 0x00, "Marvell 88SE9220", AHCI_Q_NOBSYRES|AHCI_Q_ALTSIG}, {0x92301b4b, 0x00, "Marvell 88SE9230", AHCI_Q_NOBSYRES|AHCI_Q_ALTSIG}, {0x92351b4b, 0x00, "Marvell 88SE9235", AHCI_Q_NOBSYRES}, {0x06201103, 0x00, "HighPoint RocketRAID 620", AHCI_Q_NOBSYRES}, {0x06201b4b, 0x00, "HighPoint RocketRAID 620", AHCI_Q_NOBSYRES}, {0x06221103, 0x00, "HighPoint RocketRAID 622", AHCI_Q_NOBSYRES}, {0x06221b4b, 0x00, "HighPoint RocketRAID 622", AHCI_Q_NOBSYRES}, {0x06401103, 0x00, "HighPoint RocketRAID 640", AHCI_Q_NOBSYRES}, {0x06401b4b, 0x00, "HighPoint RocketRAID 640", AHCI_Q_NOBSYRES}, {0x06441103, 0x00, "HighPoint RocketRAID 644", AHCI_Q_NOBSYRES}, {0x06441b4b, 0x00, "HighPoint RocketRAID 644", AHCI_Q_NOBSYRES}, {0x044c10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x044d10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x044e10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x044f10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x045c10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x045d10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x045e10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x045f10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x055010de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055110de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055210de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055310de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055410de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055510de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055610de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055710de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055810de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055910de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055A10de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055B10de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x058410de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x07f010de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f110de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f210de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f310de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f410de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f510de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f610de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f710de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f810de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f910de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07fa10de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07fb10de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x0ad010de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad110de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad210de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad310de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad410de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad510de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad610de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad710de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad810de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad910de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ada10de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0adb10de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ab410de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0ab510de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0ab610de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0ab710de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0ab810de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0ab910de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0aba10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0abb10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0abc10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0abd10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0abe10de, 0x00, "NVIDIA MCP79", 
AHCI_Q_NOAA}, {0x0abf10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0d8410de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8510de, 0x00, "NVIDIA MCP89", AHCI_Q_NOFORCE|AHCI_Q_NOAA}, {0x0d8610de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8710de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8810de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8910de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8a10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8b10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8c10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8d10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8e10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8f10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x33491106, 0x00, "VIA VT8251", AHCI_Q_NOPMP|AHCI_Q_NONCQ}, {0x62871106, 0x00, "VIA VT8251", AHCI_Q_NOPMP|AHCI_Q_NONCQ}, {0x11841039, 0x00, "SiS 966", 0}, {0x11851039, 0x00, "SiS 968", 0}, {0x01861039, 0x00, "SiS 968", 0}, {0x00000000, 0x00, NULL, 0} }; #define recovery_type spriv_field0 #define RECOVERY_NONE 0 #define RECOVERY_READ_LOG 1 #define RECOVERY_REQUEST_SENSE 2 #define recovery_slot spriv_field1 static int force_ahci = 1; TUNABLE_INT("hw.ahci.force", &force_ahci); static int ahci_probe(device_t dev) { char buf[64]; int i, valid = 0; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); /* Is this a possible AHCI candidate? */ if (pci_get_class(dev) == PCIC_STORAGE && pci_get_subclass(dev) == PCIS_STORAGE_SATA && pci_get_progif(dev) == PCIP_STORAGE_SATA_AHCI_1_0) valid = 1; /* Is this a known AHCI chip? */ for (i = 0; ahci_ids[i].id != 0; i++) { if (ahci_ids[i].id == devid && ahci_ids[i].rev <= revid && (valid || (force_ahci == 1 && !(ahci_ids[i].quirks & AHCI_Q_NOFORCE)))) { /* Do not attach JMicrons with single PCI function. */ if (pci_get_vendor(dev) == 0x197b && (pci_read_config(dev, 0xdf, 1) & 0x40) == 0) return (ENXIO); snprintf(buf, sizeof(buf), "%s AHCI SATA controller", ahci_ids[i].name); device_set_desc_copy(dev, buf); return (BUS_PROBE_VENDOR); } } if (!valid) return (ENXIO); device_set_desc_copy(dev, "AHCI SATA controller"); return (BUS_PROBE_VENDOR); } static int ahci_ata_probe(device_t dev) { char buf[64]; int i; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); if ((intptr_t)device_get_ivars(dev) >= 0) return (ENXIO); /* Is this a known AHCI chip? */ for (i = 0; ahci_ids[i].id != 0; i++) { if (ahci_ids[i].id == devid && ahci_ids[i].rev <= revid) { snprintf(buf, sizeof(buf), "%s AHCI SATA controller", ahci_ids[i].name); device_set_desc_copy(dev, buf); return (BUS_PROBE_VENDOR); } } device_set_desc_copy(dev, "AHCI SATA controller"); return (BUS_PROBE_VENDOR); } static int ahci_attach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); device_t child; int error, unit, speed, i; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); u_int32_t version; ctlr->dev = dev; i = 0; while (ahci_ids[i].id != 0 && (ahci_ids[i].id != devid || ahci_ids[i].rev > revid)) i++; ctlr->quirks = ahci_ids[i].quirks; resource_int_value(device_get_name(dev), device_get_unit(dev), "ccc", &ctlr->ccc); /* if we have a memory BAR(5) we are likely on an AHCI part */ ctlr->r_rid = PCIR_BAR(5); if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE))) return ENXIO; /* Setup our own memory management for channels. 
*/ ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); ctlr->sc_iomem.rm_type = RMAN_ARRAY; ctlr->sc_iomem.rm_descr = "I/O memory addresses"; if ((error = rman_init(&ctlr->sc_iomem)) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); return (error); } if ((error = rman_manage_region(&ctlr->sc_iomem, rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return (error); } pci_enable_busmaster(dev); /* Reset controller */ if ((error = ahci_ctlr_reset(dev)) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return (error); }; /* Get the HW capabilities */ version = ATA_INL(ctlr->r_mem, AHCI_VS); ctlr->caps = ATA_INL(ctlr->r_mem, AHCI_CAP); if (version >= 0x00010200) ctlr->caps2 = ATA_INL(ctlr->r_mem, AHCI_CAP2); if (ctlr->caps & AHCI_CAP_EMS) ctlr->capsem = ATA_INL(ctlr->r_mem, AHCI_EM_CTL); ctlr->ichannels = ATA_INL(ctlr->r_mem, AHCI_PI); /* Identify and set separate quirks for HBA and RAID f/w Marvells. */ if ((ctlr->quirks & AHCI_Q_NOBSYRES) && (ctlr->quirks & AHCI_Q_ALTSIG) && (ctlr->caps & AHCI_CAP_SPM) == 0) ctlr->quirks &= ~AHCI_Q_NOBSYRES; if (ctlr->quirks & AHCI_Q_1CH) { ctlr->caps &= ~AHCI_CAP_NPMASK; ctlr->ichannels &= 0x01; } if (ctlr->quirks & AHCI_Q_2CH) { ctlr->caps &= ~AHCI_CAP_NPMASK; ctlr->caps |= 1; ctlr->ichannels &= 0x03; } if (ctlr->quirks & AHCI_Q_4CH) { ctlr->caps &= ~AHCI_CAP_NPMASK; ctlr->caps |= 3; ctlr->ichannels &= 0x0f; } ctlr->channels = MAX(flsl(ctlr->ichannels), (ctlr->caps & AHCI_CAP_NPMASK) + 1); if (ctlr->quirks & AHCI_Q_NOPMP) ctlr->caps &= ~AHCI_CAP_SPM; if (ctlr->quirks & AHCI_Q_NONCQ) ctlr->caps &= ~AHCI_CAP_SNCQ; if ((ctlr->caps & AHCI_CAP_CCCS) == 0) ctlr->ccc = 0; ctlr->emloc = ATA_INL(ctlr->r_mem, AHCI_EM_LOC); ahci_ctlr_setup(dev); /* Setup interrupts. */ if (ahci_setup_interrupt(dev)) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return ENXIO; } /* Announce HW capabilities. */ speed = (ctlr->caps & AHCI_CAP_ISS) >> AHCI_CAP_ISS_SHIFT; device_printf(dev, "AHCI v%x.%02x with %d %sGbps ports, Port Multiplier %s%s\n", ((version >> 20) & 0xf0) + ((version >> 16) & 0x0f), ((version >> 4) & 0xf0) + (version & 0x0f), (ctlr->caps & AHCI_CAP_NPMASK) + 1, ((speed == 1) ? "1.5":((speed == 2) ? "3": ((speed == 3) ? "6":"?"))), (ctlr->caps & AHCI_CAP_SPM) ? "supported" : "not supported", (ctlr->caps & AHCI_CAP_FBSS) ? " with FBS" : ""); if (bootverbose) { device_printf(dev, "Caps:%s%s%s%s%s%s%s%s %sGbps", (ctlr->caps & AHCI_CAP_64BIT) ? " 64bit":"", (ctlr->caps & AHCI_CAP_SNCQ) ? " NCQ":"", (ctlr->caps & AHCI_CAP_SSNTF) ? " SNTF":"", (ctlr->caps & AHCI_CAP_SMPS) ? " MPS":"", (ctlr->caps & AHCI_CAP_SSS) ? " SS":"", (ctlr->caps & AHCI_CAP_SALP) ? " ALP":"", (ctlr->caps & AHCI_CAP_SAL) ? " AL":"", (ctlr->caps & AHCI_CAP_SCLO) ? " CLO":"", ((speed == 1) ? "1.5":((speed == 2) ? "3": ((speed == 3) ? "6":"?")))); printf("%s%s%s%s%s%s %dcmd%s%s%s %dports\n", (ctlr->caps & AHCI_CAP_SAM) ? " AM":"", (ctlr->caps & AHCI_CAP_SPM) ? " PM":"", (ctlr->caps & AHCI_CAP_FBSS) ? " FBS":"", (ctlr->caps & AHCI_CAP_PMD) ? " PMD":"", (ctlr->caps & AHCI_CAP_SSC) ? " SSC":"", (ctlr->caps & AHCI_CAP_PSC) ? " PSC":"", ((ctlr->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1, (ctlr->caps & AHCI_CAP_CCCS) ? " CCC":"", (ctlr->caps & AHCI_CAP_EMS) ? " EM":"", (ctlr->caps & AHCI_CAP_SXS) ? 
" eSATA":"", (ctlr->caps & AHCI_CAP_NPMASK) + 1); } if (bootverbose && version >= 0x00010200) { device_printf(dev, "Caps2:%s%s%s\n", (ctlr->caps2 & AHCI_CAP2_APST) ? " APST":"", (ctlr->caps2 & AHCI_CAP2_NVMP) ? " NVMP":"", (ctlr->caps2 & AHCI_CAP2_BOH) ? " BOH":""); } /* Attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { child = device_add_child(dev, "ahcich", -1); if (child == NULL) { device_printf(dev, "failed to add channel device\n"); continue; } device_set_ivars(child, (void *)(intptr_t)unit); if ((ctlr->ichannels & (1 << unit)) == 0) device_disable(child); } if (ctlr->caps & AHCI_CAP_EMS) { child = device_add_child(dev, "ahciem", -1); if (child == NULL) device_printf(dev, "failed to add enclosure device\n"); else device_set_ivars(child, (void *)(intptr_t)-1); } bus_generic_attach(dev); return 0; } static int ahci_detach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int i; /* Detach & delete all children */ device_delete_children(dev); /* Free interrupts. */ for (i = 0; i < ctlr->numirqs; i++) { if (ctlr->irqs[i].r_irq) { bus_teardown_intr(dev, ctlr->irqs[i].r_irq, ctlr->irqs[i].handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irqs[i].r_irq_rid, ctlr->irqs[i].r_irq); } } pci_release_msi(dev); /* Free memory. */ rman_fini(&ctlr->sc_iomem); if (ctlr->r_mem) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); return (0); } static int ahci_ctlr_reset(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int timeout; if (pci_read_config(dev, PCIR_DEVVENDOR, 4) == 0x28298086 && (pci_read_config(dev, 0x92, 1) & 0xfe) == 0x04) pci_write_config(dev, 0x92, 0x01, 1); /* Enable AHCI mode */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE); /* Reset AHCI controller */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE|AHCI_GHC_HR); for (timeout = 1000; timeout > 0; timeout--) { DELAY(1000); if ((ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_HR) == 0) break; } if (timeout == 0) { device_printf(dev, "AHCI controller reset failure\n"); return ENXIO; } /* Reenable AHCI mode */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE); return (0); } static int ahci_ctlr_setup(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); /* Clear interrupts */ ATA_OUTL(ctlr->r_mem, AHCI_IS, ATA_INL(ctlr->r_mem, AHCI_IS)); /* Configure CCC */ if (ctlr->ccc) { ATA_OUTL(ctlr->r_mem, AHCI_CCCP, ATA_INL(ctlr->r_mem, AHCI_PI)); ATA_OUTL(ctlr->r_mem, AHCI_CCCC, (ctlr->ccc << AHCI_CCCC_TV_SHIFT) | (4 << AHCI_CCCC_CC_SHIFT) | AHCI_CCCC_EN); ctlr->cccv = (ATA_INL(ctlr->r_mem, AHCI_CCCC) & AHCI_CCCC_INT_MASK) >> AHCI_CCCC_INT_SHIFT; if (bootverbose) { device_printf(dev, "CCC with %dms/4cmd enabled on vector %d\n", ctlr->ccc, ctlr->cccv); } } /* Enable AHCI interrupts */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, ATA_INL(ctlr->r_mem, AHCI_GHC) | AHCI_GHC_IE); return (0); } static int ahci_suspend(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); bus_generic_suspend(dev); /* Disable interupts, so the state change(s) doesn't trigger */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, ATA_INL(ctlr->r_mem, AHCI_GHC) & (~AHCI_GHC_IE)); return 0; } static int ahci_resume(device_t dev) { int res; if ((res = ahci_ctlr_reset(dev)) != 0) return (res); ahci_ctlr_setup(dev); return (bus_generic_resume(dev)); } static int ahci_setup_interrupt(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int i, msi = 1; /* Process hints. 
*/ resource_int_value(device_get_name(dev), device_get_unit(dev), "msi", &msi); if (msi < 0) msi = 0; else if (msi == 1) msi = min(1, pci_msi_count(dev)); else if (msi > 1) msi = pci_msi_count(dev); /* Allocate MSI if needed/present. */ if (msi && pci_alloc_msi(dev, &msi) == 0) { ctlr->numirqs = msi; } else { msi = 0; ctlr->numirqs = 1; } /* Check for single MSI vector fallback. */ if (ctlr->numirqs > 1 && (ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_MRSM) != 0) { device_printf(dev, "Falling back to one MSI\n"); ctlr->numirqs = 1; } /* Allocate all IRQs. */ for (i = 0; i < ctlr->numirqs; i++) { ctlr->irqs[i].ctlr = ctlr; ctlr->irqs[i].r_irq_rid = i + (msi ? 1 : 0); if (ctlr->numirqs == 1 || i >= ctlr->channels || (ctlr->ccc && i == ctlr->cccv)) ctlr->irqs[i].mode = AHCI_IRQ_MODE_ALL; else if (i == ctlr->numirqs - 1) ctlr->irqs[i].mode = AHCI_IRQ_MODE_AFTER; else ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; if (!(ctlr->irqs[i].r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->irqs[i].r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return ENXIO; } if ((bus_setup_intr(dev, ctlr->irqs[i].r_irq, ATA_INTR_FLAGS, NULL, (ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE) ? ahci_intr_one : ahci_intr, &ctlr->irqs[i], &ctlr->irqs[i].handle))) { /* SOS XXX release r_irq */ device_printf(dev, "unable to setup interrupt\n"); return ENXIO; } if (ctlr->numirqs > 1) { bus_describe_intr(dev, ctlr->irqs[i].r_irq, ctlr->irqs[i].handle, ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE ? "ch%d" : "%d", i); } } return (0); } /* * Common case interrupt handler. */ static void ahci_intr(void *data) { struct ahci_controller_irq *irq = data; struct ahci_controller *ctlr = irq->ctlr; u_int32_t is, ise = 0; void *arg; int unit; if (irq->mode == AHCI_IRQ_MODE_ALL) { unit = 0; if (ctlr->ccc) is = ctlr->ichannels; else is = ATA_INL(ctlr->r_mem, AHCI_IS); } else { /* AHCI_IRQ_MODE_AFTER */ unit = irq->r_irq_rid - 1; is = ATA_INL(ctlr->r_mem, AHCI_IS); } /* CCC interrupt is edge triggered. */ if (ctlr->ccc) ise = 1 << ctlr->cccv; /* Some controllers have edge triggered IS. */ if (ctlr->quirks & AHCI_Q_EDGEIS) ise |= is; if (ise != 0) ATA_OUTL(ctlr->r_mem, AHCI_IS, ise); for (; unit < ctlr->channels; unit++) { if ((is & (1 << unit)) != 0 && (arg = ctlr->interrupt[unit].argument)) { ctlr->interrupt[unit].function(arg); } } /* AHCI declares level triggered IS. */ if (!(ctlr->quirks & AHCI_Q_EDGEIS)) ATA_OUTL(ctlr->r_mem, AHCI_IS, is); } /* * Simplified interrupt handler for multivector MSI mode. */ static void ahci_intr_one(void *data) { struct ahci_controller_irq *irq = data; struct ahci_controller *ctlr = irq->ctlr; void *arg; int unit; unit = irq->r_irq_rid - 1; /* Some controllers have edge triggered IS. */ if (ctlr->quirks & AHCI_Q_EDGEIS) ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); if ((arg = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(arg); /* AHCI declares level triggered IS. 
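* For spec-compliant controllers the per-port bit in the global IS register is cleared only after the channel handler has run; edge-triggered parts (AHCI_Q_EDGEIS) had it cleared before dispatch instead.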
*/ if (!(ctlr->quirks & AHCI_Q_EDGEIS)) ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); } static struct resource * ahci_alloc_resource(device_t dev, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct ahci_controller *ctlr = device_get_softc(dev); struct resource *res; long st; int offset, size, unit; unit = (intptr_t)device_get_ivars(child); res = NULL; switch (type) { case SYS_RES_MEMORY: if (unit >= 0) { offset = AHCI_OFFSET + (unit << 7); size = 128; } else if (*rid == 0) { offset = AHCI_EM_CTL; size = 4; } else { offset = (ctlr->emloc & 0xffff0000) >> 14; size = (ctlr->emloc & 0x0000ffff) << 2; if (*rid != 1) { if (*rid == 2 && (ctlr->capsem & (AHCI_EM_XMT | AHCI_EM_SMB)) == 0) offset += size; else break; } } st = rman_get_start(ctlr->r_mem); res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, st + offset + size - 1, size, RF_ACTIVE, child); if (res) { bus_space_handle_t bsh; bus_space_tag_t bst; bsh = rman_get_bushandle(ctlr->r_mem); bst = rman_get_bustag(ctlr->r_mem); bus_space_subregion(bst, bsh, offset, 128, &bsh); rman_set_bushandle(res, bsh); rman_set_bustag(res, bst); } break; case SYS_RES_IRQ: if (*rid == ATA_IRQ_RID) res = ctlr->irqs[0].r_irq; break; } return (res); } static int ahci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { switch (type) { case SYS_RES_MEMORY: rman_release_resource(r); return (0); case SYS_RES_IRQ: if (rid != ATA_IRQ_RID) return ENOENT; return (0); } return (EINVAL); } static int ahci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct ahci_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); if (filter != NULL) { printf("ahci.c: we cannot use a filter here\n"); return (EINVAL); } ctlr->interrupt[unit].function = function; ctlr->interrupt[unit].argument = argument; return (0); } static int ahci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct ahci_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); ctlr->interrupt[unit].function = NULL; ctlr->interrupt[unit].argument = NULL; return (0); } static int ahci_print_child(device_t dev, device_t child) { int retval, channel; retval = bus_print_child_header(dev, child); channel = (int)(intptr_t)device_get_ivars(child); if (channel >= 0) retval += printf(" at channel %d", channel); retval += bus_print_child_footer(dev, child); return (retval); } static int ahci_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) { int channel; channel = (int)(intptr_t)device_get_ivars(child); if (channel >= 0) snprintf(buf, buflen, "channel=%d", channel); return (0); } devclass_t ahci_devclass; static device_method_t ahci_methods[] = { DEVMETHOD(device_probe, ahci_probe), DEVMETHOD(device_attach, ahci_attach), DEVMETHOD(device_detach, ahci_detach), DEVMETHOD(device_suspend, ahci_suspend), DEVMETHOD(device_resume, ahci_resume), DEVMETHOD(bus_print_child, ahci_print_child), DEVMETHOD(bus_alloc_resource, ahci_alloc_resource), DEVMETHOD(bus_release_resource, ahci_release_resource), DEVMETHOD(bus_setup_intr, ahci_setup_intr), DEVMETHOD(bus_teardown_intr,ahci_teardown_intr), DEVMETHOD(bus_child_location_str, ahci_child_location_str), { 0, 0 } }; static driver_t ahci_driver = { "ahci", ahci_methods, sizeof(struct ahci_controller) }; DRIVER_MODULE(ahci, pci, ahci_driver, ahci_devclass, 0, 0); static 
device_method_t ahci_ata_methods[] = { DEVMETHOD(device_probe, ahci_ata_probe), DEVMETHOD(device_attach, ahci_attach), DEVMETHOD(device_detach, ahci_detach), DEVMETHOD(device_suspend, ahci_suspend), DEVMETHOD(device_resume, ahci_resume), DEVMETHOD(bus_print_child, ahci_print_child), DEVMETHOD(bus_alloc_resource, ahci_alloc_resource), DEVMETHOD(bus_release_resource, ahci_release_resource), DEVMETHOD(bus_setup_intr, ahci_setup_intr), DEVMETHOD(bus_teardown_intr,ahci_teardown_intr), DEVMETHOD(bus_child_location_str, ahci_child_location_str), { 0, 0 } }; static driver_t ahci_ata_driver = { "ahci", ahci_ata_methods, sizeof(struct ahci_controller) }; DRIVER_MODULE(ahci, atapci, ahci_ata_driver, ahci_devclass, 0, 0); MODULE_VERSION(ahci, 1); MODULE_DEPEND(ahci, cam, 1, 1, 1); static int ahci_ch_probe(device_t dev) { device_set_desc_copy(dev, "AHCI channel"); return (0); } static int ahci_ch_attach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ahci_channel *ch = device_get_softc(dev); struct cam_devq *devq; int rid, error, i, sata_rev = 0; u_int32_t version; ch->dev = dev; ch->unit = (intptr_t)device_get_ivars(dev); ch->caps = ctlr->caps; ch->caps2 = ctlr->caps2; ch->quirks = ctlr->quirks; ch->numslots = ((ch->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1; mtx_init(&ch->mtx, "AHCI channel lock", NULL, MTX_DEF); resource_int_value(device_get_name(dev), device_get_unit(dev), "pm_level", &ch->pm_level); if (ch->pm_level > 3) callout_init_mtx(&ch->pm_timer, &ch->mtx, 0); callout_init_mtx(&ch->reset_timer, &ch->mtx, 0); /* Limit speed for my onboard JMicron external port. * It is not eSATA really. */ if (pci_get_devid(ctlr->dev) == 0x2363197b && pci_get_subvendor(ctlr->dev) == 0x1043 && pci_get_subdevice(ctlr->dev) == 0x81e4 && ch->unit == 0) sata_rev = 1; if (ch->quirks & AHCI_Q_SATA2) sata_rev = 2; resource_int_value(device_get_name(dev), device_get_unit(dev), "sata_rev", &sata_rev); for (i = 0; i < 16; i++) { ch->user[i].revision = sata_rev; ch->user[i].mode = 0; ch->user[i].bytecount = 8192; ch->user[i].tags = ch->numslots; ch->user[i].caps = 0; ch->curr[i] = ch->user[i]; if (ch->pm_level) { ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ | CTS_SATA_CAPS_H_APST | CTS_SATA_CAPS_D_PMREQ | CTS_SATA_CAPS_D_APST; } ch->user[i].caps |= CTS_SATA_CAPS_H_DMAAA | CTS_SATA_CAPS_H_AN; } rid = 0; if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE))) return (ENXIO); ahci_dmainit(dev); ahci_slotsalloc(dev); ahci_ch_init(dev); mtx_lock(&ch->mtx); rid = ATA_IRQ_RID; if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "Unable to map interrupt\n"); error = ENXIO; goto err0; } if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, ahci_ch_intr_locked, dev, &ch->ih))) { device_printf(dev, "Unable to setup interrupt\n"); error = ENXIO; goto err1; } ch->chcaps = ATA_INL(ch->r_mem, AHCI_P_CMD); version = ATA_INL(ctlr->r_mem, AHCI_VS); if (version < 0x00010200 && (ctlr->caps & AHCI_CAP_FBSS)) ch->chcaps |= AHCI_P_CMD_FBSCP; if (bootverbose) { device_printf(dev, "Caps:%s%s%s%s%s\n", (ch->chcaps & AHCI_P_CMD_HPCP) ? " HPCP":"", (ch->chcaps & AHCI_P_CMD_MPSP) ? " MPSP":"", (ch->chcaps & AHCI_P_CMD_CPD) ? " CPD":"", (ch->chcaps & AHCI_P_CMD_ESP) ? " ESP":"", (ch->chcaps & AHCI_P_CMD_FBSCP) ? " FBSCP":""); } /* Create the device queue for our SIM. 
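* The queue is sized to the number of command slots the port supports, so CAM never has more requests outstanding than the hardware can hold.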
*/ devq = cam_simq_alloc(ch->numslots); if (devq == NULL) { device_printf(dev, "Unable to allocate simq\n"); error = ENOMEM; goto err1; } /* Construct SIM entry */ ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch, device_get_unit(dev), &ch->mtx, min(2, ch->numslots), (ch->caps & AHCI_CAP_SNCQ) ? ch->numslots : 0, devq); if (ch->sim == NULL) { cam_simq_free(devq); device_printf(dev, "unable to allocate sim\n"); error = ENOMEM; goto err1; } if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) { device_printf(dev, "unable to register xpt bus\n"); error = ENXIO; goto err2; } if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "unable to create path\n"); error = ENXIO; goto err3; } if (ch->pm_level > 3) { callout_reset(&ch->pm_timer, (ch->pm_level == 4) ? hz / 1000 : hz / 8, ahci_ch_pm, dev); } mtx_unlock(&ch->mtx); return (0); err3: xpt_bus_deregister(cam_sim_path(ch->sim)); err2: cam_sim_free(ch->sim, /*free_devq*/TRUE); err1: bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); err0: bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_unlock(&ch->mtx); mtx_destroy(&ch->mtx); return (error); } static int ahci_ch_detach(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); xpt_async(AC_LOST_DEVICE, ch->path, NULL); /* Forget about reset. */ if (ch->resetting) { ch->resetting = 0; xpt_release_simq(ch->sim, TRUE); } xpt_free_path(ch->path); xpt_bus_deregister(cam_sim_path(ch->sim)); cam_sim_free(ch->sim, /*free_devq*/TRUE); mtx_unlock(&ch->mtx); if (ch->pm_level > 3) callout_drain(&ch->pm_timer); callout_drain(&ch->reset_timer); bus_teardown_intr(dev, ch->r_irq, ch->ih); bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); ahci_ch_deinit(dev); ahci_slotsfree(dev); ahci_dmafini(dev); bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_destroy(&ch->mtx); return (0); } static int ahci_ch_init(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); uint64_t work; /* Disable port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); /* Setup work areas */ work = ch->dma.work_bus + AHCI_CL_OFFSET; ATA_OUTL(ch->r_mem, AHCI_P_CLB, work & 0xffffffff); ATA_OUTL(ch->r_mem, AHCI_P_CLBU, work >> 32); work = ch->dma.rfis_bus; ATA_OUTL(ch->r_mem, AHCI_P_FB, work & 0xffffffff); ATA_OUTL(ch->r_mem, AHCI_P_FBU, work >> 32); /* Activate the channel and power/spin up device */ ATA_OUTL(ch->r_mem, AHCI_P_CMD, (AHCI_P_CMD_ACTIVE | AHCI_P_CMD_POD | AHCI_P_CMD_SUD | ((ch->pm_level == 2 || ch->pm_level == 3) ? AHCI_P_CMD_ALPE : 0) | ((ch->pm_level > 2) ? AHCI_P_CMD_ASP : 0 ))); ahci_start_fr(dev); ahci_start(dev, 1); return (0); } static int ahci_ch_deinit(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); /* Disable port interrupts. */ ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); /* Reset command register. */ ahci_stop(dev); ahci_stop_fr(dev); ATA_OUTL(ch->r_mem, AHCI_P_CMD, 0); /* Allow everything, including partial and slumber modes. */ ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 0); /* Request slumber mode transition and give some time to get there. */ ATA_OUTL(ch->r_mem, AHCI_P_CMD, AHCI_P_CMD_SLUMBER); DELAY(100); /* Disable PHY. */ ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); return (0); } static int ahci_ch_suspend(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); xpt_freeze_simq(ch->sim, 1); /* Forget about reset. 
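* A pending reset holds the SIM queue frozen; stop its timer and drop that hold before suspending, then wait below for all occupied slots to drain.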
*/ if (ch->resetting) { ch->resetting = 0; callout_stop(&ch->reset_timer); xpt_release_simq(ch->sim, TRUE); } while (ch->oslots) msleep(ch, &ch->mtx, PRIBIO, "ahcisusp", hz/100); ahci_ch_deinit(dev); mtx_unlock(&ch->mtx); return (0); } static int ahci_ch_resume(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); ahci_ch_init(dev); ahci_reset(dev); xpt_release_simq(ch->sim, TRUE); mtx_unlock(&ch->mtx); return (0); } devclass_t ahcich_devclass; static device_method_t ahcich_methods[] = { DEVMETHOD(device_probe, ahci_ch_probe), DEVMETHOD(device_attach, ahci_ch_attach), DEVMETHOD(device_detach, ahci_ch_detach), DEVMETHOD(device_suspend, ahci_ch_suspend), DEVMETHOD(device_resume, ahci_ch_resume), { 0, 0 } }; static driver_t ahcich_driver = { "ahcich", ahcich_methods, sizeof(struct ahci_channel) }; DRIVER_MODULE(ahcich, ahci, ahcich_driver, ahcich_devclass, 0, 0); struct ahci_dc_cb_args { bus_addr_t maddr; int error; }; static void ahci_dmainit(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); struct ahci_dc_cb_args dcba; size_t rfsize; if (ch->caps & AHCI_CAP_64BIT) ch->dma.max_address = BUS_SPACE_MAXADDR; else ch->dma.max_address = BUS_SPACE_MAXADDR_32BIT; /* Command area. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0, ch->dma.max_address, BUS_SPACE_MAXADDR, NULL, NULL, AHCI_WORK_SIZE, 1, AHCI_WORK_SIZE, 0, NULL, NULL, &ch->dma.work_tag)) goto error; if (bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work, 0, &ch->dma.work_map)) goto error; if (bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work, AHCI_WORK_SIZE, ahci_dmasetupc_cb, &dcba, 0) || dcba.error) { bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); goto error; } ch->dma.work_bus = dcba.maddr; /* FIS receive area. */ if (ch->chcaps & AHCI_P_CMD_FBSCP) rfsize = 4096; else rfsize = 256; if (bus_dma_tag_create(bus_get_dma_tag(dev), rfsize, 0, ch->dma.max_address, BUS_SPACE_MAXADDR, NULL, NULL, rfsize, 1, rfsize, 0, NULL, NULL, &ch->dma.rfis_tag)) goto error; if (bus_dmamem_alloc(ch->dma.rfis_tag, (void **)&ch->dma.rfis, 0, &ch->dma.rfis_map)) goto error; if (bus_dmamap_load(ch->dma.rfis_tag, ch->dma.rfis_map, ch->dma.rfis, rfsize, ahci_dmasetupc_cb, &dcba, 0) || dcba.error) { bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); goto error; } ch->dma.rfis_bus = dcba.maddr; /* Data area. 
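* Per-request data buffers: up to AHCI_SG_ENTRIES scatter/gather entries per command, each at most AHCI_PRD_MAX bytes, with the 2-byte alignment the PRD format requires.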
*/ if (bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0, ch->dma.max_address, BUS_SPACE_MAXADDR, NULL, NULL, AHCI_SG_ENTRIES * PAGE_SIZE * ch->numslots, AHCI_SG_ENTRIES, AHCI_PRD_MAX, 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag)) { goto error; } return; error: device_printf(dev, "WARNING - DMA initialization failed\n"); ahci_dmafini(dev); } static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct ahci_dc_cb_args *dcba = (struct ahci_dc_cb_args *)xsc; if (!(dcba->error = error)) dcba->maddr = segs[0].ds_addr; } static void ahci_dmafini(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); if (ch->dma.data_tag) { bus_dma_tag_destroy(ch->dma.data_tag); ch->dma.data_tag = NULL; } if (ch->dma.rfis_bus) { bus_dmamap_unload(ch->dma.rfis_tag, ch->dma.rfis_map); bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); ch->dma.rfis_bus = 0; ch->dma.rfis_map = NULL; ch->dma.rfis = NULL; } if (ch->dma.work_bus) { bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map); bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); ch->dma.work_bus = 0; ch->dma.work_map = NULL; ch->dma.work = NULL; } if (ch->dma.work_tag) { bus_dma_tag_destroy(ch->dma.work_tag); ch->dma.work_tag = NULL; } } static void ahci_slotsalloc(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); int i; /* Alloc and setup command/dma slots */ bzero(ch->slot, sizeof(ch->slot)); for (i = 0; i < ch->numslots; i++) { struct ahci_slot *slot = &ch->slot[i]; slot->dev = dev; slot->slot = i; slot->state = AHCI_SLOT_EMPTY; slot->ccb = NULL; callout_init_mtx(&slot->timeout, &ch->mtx, 0); if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map)) device_printf(ch->dev, "FAILURE - create data_map\n"); } } static void ahci_slotsfree(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); int i; /* Free all dma slots */ for (i = 0; i < ch->numslots; i++) { struct ahci_slot *slot = &ch->slot[i]; callout_drain(&slot->timeout); if (slot->dma.data_map) { bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map); slot->dma.data_map = NULL; } } } static int ahci_phy_check_events(device_t dev, u_int32_t serr) { struct ahci_channel *ch = device_get_softc(dev); if (((ch->pm_level == 0) && (serr & ATA_SE_PHY_CHANGED)) || ((ch->pm_level != 0 || ch->listening) && (serr & ATA_SE_EXCHANGED))) { u_int32_t status = ATA_INL(ch->r_mem, AHCI_P_SSTS); union ccb *ccb; if (bootverbose) { if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) device_printf(dev, "CONNECT requested\n"); else device_printf(dev, "DISCONNECT requested\n"); } ahci_reset(dev); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) return (0); if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return (0); } xpt_rescan(ccb); return (1); } return (0); } static void ahci_cpd_check_events(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); u_int32_t status; union ccb *ccb; if (ch->pm_level == 0) return; status = ATA_INL(ch->r_mem, AHCI_P_CMD); if ((status & AHCI_P_CMD_CPD) == 0) return; if (bootverbose) { if (status & AHCI_P_CMD_CPS) { device_printf(dev, "COLD CONNECT requested\n"); } else device_printf(dev, "COLD DISCONNECT requested\n"); } ahci_reset(dev); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) return; if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } static void 
ahci_notify_events(device_t dev, u_int32_t status) { struct ahci_channel *ch = device_get_softc(dev); struct cam_path *dpath; int i; if (ch->caps & AHCI_CAP_SSNTF) ATA_OUTL(ch->r_mem, AHCI_P_SNTF, status); if (bootverbose) device_printf(dev, "SNTF 0x%04x\n", status); for (i = 0; i < 16; i++) { if ((status & (1 << i)) == 0) continue; if (xpt_create_path(&dpath, NULL, xpt_path_path_id(ch->path), i, 0) == CAM_REQ_CMP) { xpt_async(AC_SCSI_AEN, dpath, NULL); xpt_free_path(dpath); } } } static void ahci_ch_intr_locked(void *data) { device_t dev = (device_t)data; struct ahci_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); xpt_batch_start(ch->sim); ahci_ch_intr(data); xpt_batch_done(ch->sim); mtx_unlock(&ch->mtx); } static void ahci_ch_pm(void *arg) { device_t dev = (device_t)arg; struct ahci_channel *ch = device_get_softc(dev); uint32_t work; if (ch->numrslots != 0) return; work = ATA_INL(ch->r_mem, AHCI_P_CMD); if (ch->pm_level == 4) work |= AHCI_P_CMD_PARTIAL; else work |= AHCI_P_CMD_SLUMBER; ATA_OUTL(ch->r_mem, AHCI_P_CMD, work); } static void ahci_ch_intr(void *data) { device_t dev = (device_t)data; struct ahci_channel *ch = device_get_softc(dev); uint32_t istatus, sstatus, cstatus, serr = 0, sntf = 0, ok, err; enum ahci_err_type et; int i, ccs, port, reset = 0; /* Read and clear interrupt statuses. */ istatus = ATA_INL(ch->r_mem, AHCI_P_IS); if (istatus == 0) return; ATA_OUTL(ch->r_mem, AHCI_P_IS, istatus); /* Read command statuses. */ sstatus = ATA_INL(ch->r_mem, AHCI_P_SACT); cstatus = ATA_INL(ch->r_mem, AHCI_P_CI); if (istatus & AHCI_P_IX_SDB) { if (ch->caps & AHCI_CAP_SSNTF) sntf = ATA_INL(ch->r_mem, AHCI_P_SNTF); else if (ch->fbs_enabled) { u_int8_t *fis = ch->dma.rfis + 0x58; for (i = 0; i < 16; i++) { if (fis[1] & 0x80) { fis[1] &= 0x7f; sntf |= 1 << i; } fis += 256; } } else { u_int8_t *fis = ch->dma.rfis + 0x58; if (fis[1] & 0x80) sntf = (1 << (fis[1] & 0x0f)); } } /* Process PHY events */ if (istatus & (AHCI_P_IX_PC | AHCI_P_IX_PRC | AHCI_P_IX_OF | AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) { serr = ATA_INL(ch->r_mem, AHCI_P_SERR); if (serr) { ATA_OUTL(ch->r_mem, AHCI_P_SERR, serr); reset = ahci_phy_check_events(dev, serr); } } /* Process cold presence detection events */ if ((istatus & AHCI_P_IX_CPD) && !reset) ahci_cpd_check_events(dev); /* Process command errors */ if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) { ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT; //device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n", // __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), // serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs); port = -1; if (ch->fbs_enabled) { uint32_t fbs = ATA_INL(ch->r_mem, AHCI_P_FBS); if (fbs & AHCI_P_FBS_SDE) { port = (fbs & AHCI_P_FBS_DWE) >> AHCI_P_FBS_DWE_SHIFT; } else { for (i = 0; i < 16; i++) { if (ch->numrslotspd[i] == 0) continue; if (port == -1) port = i; else if (port != i) { port = -2; break; } } } } err = ch->rslots & (cstatus | sstatus); } else { ccs = 0; err = 0; port = -1; } /* Complete all successfull commands. */ ok = ch->rslots & ~(cstatus | sstatus); for (i = 0; i < ch->numslots; i++) { if ((ok >> i) & 1) ahci_end_transaction(&ch->slot[i], AHCI_ERR_NONE); } /* On error, complete the rest of commands with error statuses. 
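* Untagged commands other than the one the controller points at are finished as innocent and requeued; tagged commands are marked as NCQ errors so the failing tag can be recovered via READ LOG later.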
*/ if (err) { if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } xpt_done(fccb); } for (i = 0; i < ch->numslots; i++) { /* XXX: reqests in loading state. */ if (((err >> i) & 1) == 0) continue; if (port >= 0 && ch->slot[i].ccb->ccb_h.target_id != port) continue; if (istatus & AHCI_P_IX_TFE) { if (port != -2) { /* Task File Error */ if (ch->numtslotspd[ ch->slot[i].ccb->ccb_h.target_id] == 0) { /* Untagged operation. */ if (i == ccs) et = AHCI_ERR_TFE; else et = AHCI_ERR_INNOCENT; } else { /* Tagged operation. */ et = AHCI_ERR_NCQ; } } else { et = AHCI_ERR_TFE; ch->fatalerr = 1; } } else if (istatus & AHCI_P_IX_IF) { if (ch->numtslots == 0 && i != ccs && port != -2) et = AHCI_ERR_INNOCENT; else et = AHCI_ERR_SATA; } else et = AHCI_ERR_INVALID; ahci_end_transaction(&ch->slot[i], et); } /* * We can't reinit port if there are some other * commands active, use resume to complete them. */ if (ch->rslots != 0 && !ch->recoverycmd) ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | AHCI_P_FBS_DEC); } /* Process NOTIFY events */ if (sntf) ahci_notify_events(dev, sntf); } /* Must be called with channel locked. */ static int ahci_check_collision(device_t dev, union ccb *ccb) { struct ahci_channel *ch = device_get_softc(dev); int t = ccb->ccb_h.target_id; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { /* Tagged command while we have no supported tag free. */ if (((~ch->oslots) & (0xffffffff >> (32 - ch->curr[t].tags))) == 0) return (1); /* If we have FBS */ if (ch->fbs_enabled) { /* Tagged command while untagged are active. */ if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] == 0) return (1); } else { /* Tagged command while untagged are active. */ if (ch->numrslots != 0 && ch->numtslots == 0) return (1); /* Tagged command while tagged to other target is active. */ if (ch->numtslots != 0 && ch->taggedtarget != ccb->ccb_h.target_id) return (1); } } else { /* If we have FBS */ if (ch->fbs_enabled) { /* Untagged command while tagged are active. */ if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] != 0) return (1); } else { /* Untagged command while tagged are active. */ if (ch->numrslots != 0 && ch->numtslots != 0) return (1); } } if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) { /* Atomic command while anything active. */ if (ch->numrslots != 0) return (1); } /* We have some atomic command running. */ if (ch->aslots != 0) return (1); return (0); } /* Must be called with channel locked. */ static void ahci_begin_transaction(device_t dev, union ccb *ccb) { struct ahci_channel *ch = device_get_softc(dev); struct ahci_slot *slot; int tag, tags; /* Choose empty slot. */ tags = ch->numslots; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) tags = ch->curr[ccb->ccb_h.target_id].tags; tag = ch->lastslot; while (1) { if (tag >= tags) tag = 0; if (ch->slot[tag].state == AHCI_SLOT_EMPTY) break; tag++; }; ch->lastslot = tag; /* Occupy chosen slot. */ slot = &ch->slot[tag]; slot->ccb = ccb; /* Stop PM timer. */ if (ch->numrslots == 0 && ch->pm_level > 3) callout_stop(&ch->pm_timer); /* Update channel stats. 
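* Account the new request in the per-channel and per-port counters that the collision checks and FBS logic depend on.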
*/ ch->oslots |= (1 << slot->slot); ch->numrslots++; ch->numrslotspd[ccb->ccb_h.target_id]++; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ch->numtslots++; ch->numtslotspd[ccb->ccb_h.target_id]++; ch->taggedtarget = ccb->ccb_h.target_id; } if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) ch->aslots |= (1 << slot->slot); slot->dma.nsegs = 0; - bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb, - ahci_dmasetprd, slot, 0); + if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { + bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb, + ahci_dmasetprd, slot, 0); + } else + ahci_execute_transaction(slot); } /* Locked by busdma engine. */ static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct ahci_slot *slot = arg; struct ahci_channel *ch = device_get_softc(slot->dev); struct ahci_cmd_tab *ctp; struct ahci_dma_prd *prd; int i; if (error) { device_printf(slot->dev, "DMA load error\n"); ahci_end_transaction(slot, AHCI_ERR_INVALID); return; } KASSERT(nsegs <= AHCI_SG_ENTRIES, ("too many DMA segment entries\n")); /* Get a piece of the workspace for this request */ ctp = (struct ahci_cmd_tab *) (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); /* Fill S/G table */ prd = &ctp->prd_tab[0]; for (i = 0; i < nsegs; i++) { prd[i].dba = htole64(segs[i].ds_addr); prd[i].dbc = htole32((segs[i].ds_len - 1) & AHCI_PRD_MASK); } slot->dma.nsegs = nsegs; bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); ahci_execute_transaction(slot); } /* Must be called with channel locked. */ static void ahci_execute_transaction(struct ahci_slot *slot) { device_t dev = slot->dev; struct ahci_channel *ch = device_get_softc(dev); struct ahci_cmd_tab *ctp; struct ahci_cmd_list *clp; union ccb *ccb = slot->ccb; int port = ccb->ccb_h.target_id & 0x0f; int fis_size, i, softreset; uint8_t *fis = ch->dma.rfis + 0x40; uint8_t val; /* Get a piece of the workspace for this request */ ctp = (struct ahci_cmd_tab *) (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); /* Setup the FIS for this request */ if (!(fis_size = ahci_setup_fis(dev, ctp, ccb, slot->slot))) { device_printf(ch->dev, "Setting up SATA FIS failed\n"); ahci_end_transaction(slot, AHCI_ERR_INVALID); return; } /* Setup the command list entry */ clp = (struct ahci_cmd_list *) (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot)); clp->cmd_flags = htole16( (ccb->ccb_h.flags & CAM_DIR_OUT ? AHCI_CMD_WRITE : 0) | (ccb->ccb_h.func_code == XPT_SCSI_IO ? (AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH) : 0) | (fis_size / sizeof(u_int32_t)) | (port << 12)); clp->prd_length = htole16(slot->dma.nsegs); /* Special handling for Soft Reset command. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) { if (ccb->ataio.cmd.control & ATA_A_RESET) { softreset = 1; /* Kick controller into sane state */ ahci_stop(dev); ahci_clo(dev); ahci_start(dev, 0); clp->cmd_flags |= AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY; } else { softreset = 2; /* Prepare FIS receive area for check. 
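* Fill the received-FIS area with 0xff so the signature FIS returned by the device after the second phase of the soft reset can be told apart from stale data.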
*/ for (i = 0; i < 20; i++) fis[i] = 0xff; } } else softreset = 0; clp->bytecount = 0; clp->cmd_table_phys = htole64(ch->dma.work_bus + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, BUS_DMASYNC_PREREAD); /* Set ACTIVE bit for NCQ commands. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ATA_OUTL(ch->r_mem, AHCI_P_SACT, 1 << slot->slot); } /* If FBS is enabled, set PMP port. */ if (ch->fbs_enabled) { ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | (port << AHCI_P_FBS_DEV_SHIFT)); } /* Issue command to the controller. */ slot->state = AHCI_SLOT_RUNNING; ch->rslots |= (1 << slot->slot); ATA_OUTL(ch->r_mem, AHCI_P_CI, (1 << slot->slot)); /* Device reset commands doesn't interrupt. Poll them. */ if (ccb->ccb_h.func_code == XPT_ATA_IO && (ccb->ataio.cmd.command == ATA_DEVICE_RESET || softreset)) { int count, timeout = ccb->ccb_h.timeout * 100; enum ahci_err_type et = AHCI_ERR_NONE; for (count = 0; count < timeout; count++) { DELAY(10); if (!(ATA_INL(ch->r_mem, AHCI_P_CI) & (1 << slot->slot))) break; if ((ATA_INL(ch->r_mem, AHCI_P_TFD) & ATA_S_ERROR) && softreset != 1) { #if 0 device_printf(ch->dev, "Poll error on slot %d, TFD: %04x\n", slot->slot, ATA_INL(ch->r_mem, AHCI_P_TFD)); #endif et = AHCI_ERR_TFE; break; } /* Workaround for ATI SB600/SB700 chipsets. */ if (ccb->ccb_h.target_id == 15 && pci_get_vendor(device_get_parent(dev)) == 0x1002 && (ATA_INL(ch->r_mem, AHCI_P_IS) & AHCI_P_IX_IPM)) { et = AHCI_ERR_TIMEOUT; break; } } /* Marvell controllers do not wait for readyness. */ if ((ch->quirks & AHCI_Q_NOBSYRES) && softreset == 2 && et == AHCI_ERR_NONE) { while ((val = fis[2]) & ATA_S_BUSY) { DELAY(10); if (count++ >= timeout) break; } } if (timeout && (count >= timeout)) { device_printf(dev, "Poll timeout on slot %d port %d\n", slot->slot, port); device_printf(dev, "is %08x cs %08x ss %08x " "rs %08x tfd %02x serr %08x cmd %08x\n", ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI), ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR), ATA_INL(ch->r_mem, AHCI_P_CMD)); et = AHCI_ERR_TIMEOUT; } /* Kick controller into sane state and enable FBS. */ if (softreset == 2) ch->eslots |= (1 << slot->slot); ahci_end_transaction(slot, et); return; } /* Start command execution timeout */ callout_reset(&slot->timeout, (int)ccb->ccb_h.timeout * hz / 2000, (timeout_t*)ahci_timeout, slot); return; } /* Must be called with channel locked. */ static void ahci_process_timeout(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); int i; mtx_assert(&ch->mtx, MA_OWNED); /* Handle the rest of commands. */ for (i = 0; i < ch->numslots; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < AHCI_SLOT_RUNNING) continue; ahci_end_transaction(&ch->slot[i], AHCI_ERR_TIMEOUT); } } /* Must be called with channel locked. */ static void ahci_rearm_timeout(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); int i; mtx_assert(&ch->mtx, MA_OWNED); for (i = 0; i < ch->numslots; i++) { struct ahci_slot *slot = &ch->slot[i]; /* Do we have a running request on slot? */ if (slot->state < AHCI_SLOT_RUNNING) continue; if ((ch->toslots & (1 << i)) == 0) continue; callout_reset(&slot->timeout, (int)slot->ccb->ccb_h.timeout * hz / 2000, (timeout_t*)ahci_timeout, slot); } } /* Locked by callout mechanism. 
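* The timeout callout was initialized with the channel mutex, so ch->mtx is already held when this handler runs.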
*/ static void ahci_timeout(struct ahci_slot *slot) { device_t dev = slot->dev; struct ahci_channel *ch = device_get_softc(dev); uint32_t sstatus; int ccs; int i; /* Check for stale timeout. */ if (slot->state < AHCI_SLOT_RUNNING) return; /* Check if slot was not being executed last time we checked. */ if (slot->state < AHCI_SLOT_EXECUTING) { /* Check if slot started executing. */ sstatus = ATA_INL(ch->r_mem, AHCI_P_SACT); ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT; if ((sstatus & (1 << slot->slot)) != 0 || ccs == slot->slot || ch->fbs_enabled || ch->wrongccs) slot->state = AHCI_SLOT_EXECUTING; else if ((ch->rslots & (1 << ccs)) == 0) { ch->wrongccs = 1; slot->state = AHCI_SLOT_EXECUTING; } callout_reset(&slot->timeout, (int)slot->ccb->ccb_h.timeout * hz / 2000, (timeout_t*)ahci_timeout, slot); return; } device_printf(dev, "Timeout on slot %d port %d\n", slot->slot, slot->ccb->ccb_h.target_id & 0x0f); device_printf(dev, "is %08x cs %08x ss %08x rs %08x tfd %02x " "serr %08x cmd %08x\n", ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI), ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR), ATA_INL(ch->r_mem, AHCI_P_CMD)); /* Handle frozen command. */ if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } xpt_done(fccb); } if (!ch->fbs_enabled && !ch->wrongccs) { /* Without FBS we know real timeout source. */ ch->fatalerr = 1; /* Handle command with timeout. */ ahci_end_transaction(&ch->slot[slot->slot], AHCI_ERR_TIMEOUT); /* Handle the rest of commands. */ for (i = 0; i < ch->numslots; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < AHCI_SLOT_RUNNING) continue; ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT); } } else { /* With FBS we wait for other commands timeout and pray. */ if (ch->toslots == 0) xpt_freeze_simq(ch->sim, 1); ch->toslots |= (1 << slot->slot); if ((ch->rslots & ~ch->toslots) == 0) ahci_process_timeout(dev); else device_printf(dev, " ... waiting for slots %08x\n", ch->rslots & ~ch->toslots); } } /* Must be called with channel locked. */ static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et) { device_t dev = slot->dev; struct ahci_channel *ch = device_get_softc(dev); union ccb *ccb = slot->ccb; struct ahci_cmd_list *clp; int lastto; uint32_t sig; bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); clp = (struct ahci_cmd_list *) (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot)); /* Read result registers to the result struct * May be incorrect if several commands finished same time, * so read only when sure or have to. 
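* In practice results are copied out only for task file errors and for commands that explicitly asked for them with CAM_ATAIO_NEEDRESULT.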
*/ if (ccb->ccb_h.func_code == XPT_ATA_IO) { struct ata_res *res = &ccb->ataio.res; if ((et == AHCI_ERR_TFE) || (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) { u_int8_t *fis = ch->dma.rfis + 0x40; bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, BUS_DMASYNC_POSTREAD); if (ch->fbs_enabled) { fis += ccb->ccb_h.target_id * 256; res->status = fis[2]; res->error = fis[3]; } else { uint16_t tfd = ATA_INL(ch->r_mem, AHCI_P_TFD); res->status = tfd; res->error = tfd >> 8; } res->lba_low = fis[4]; res->lba_mid = fis[5]; res->lba_high = fis[6]; res->device = fis[7]; res->lba_low_exp = fis[8]; res->lba_mid_exp = fis[9]; res->lba_high_exp = fis[10]; res->sector_count = fis[12]; res->sector_count_exp = fis[13]; /* * Some weird controllers do not return signature in * FIS receive area. Read it from PxSIG register. */ if ((ch->quirks & AHCI_Q_ALTSIG) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && (ccb->ataio.cmd.control & ATA_A_RESET) == 0) { sig = ATA_INL(ch->r_mem, AHCI_P_SIG); res->lba_high = sig >> 24; res->lba_mid = sig >> 16; res->lba_low = sig >> 8; res->sector_count = sig; } } else bzero(res, sizeof(*res)); if ((ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) == 0 && (ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && (ch->quirks & AHCI_Q_NOCOUNT) == 0) { ccb->ataio.resid = ccb->ataio.dxfer_len - le32toh(clp->bytecount); } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && (ch->quirks & AHCI_Q_NOCOUNT) == 0) { ccb->csio.resid = ccb->csio.dxfer_len - le32toh(clp->bytecount); } } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, (ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map); } if (et != AHCI_ERR_NONE) ch->eslots |= (1 << slot->slot); /* In case of error, freeze device for proper recovery. */ if ((et != AHCI_ERR_NONE) && (!ch->recoverycmd) && !(ccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } /* Set proper result status. */ ccb->ccb_h.status &= ~CAM_STATUS_MASK; switch (et) { case AHCI_ERR_NONE: ccb->ccb_h.status |= CAM_REQ_CMP; if (ccb->ccb_h.func_code == XPT_SCSI_IO) ccb->csio.scsi_status = SCSI_STATUS_OK; break; case AHCI_ERR_INVALID: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_REQ_INVALID; break; case AHCI_ERR_INNOCENT: ccb->ccb_h.status |= CAM_REQUEUE_REQ; break; case AHCI_ERR_TFE: case AHCI_ERR_NCQ: if (ccb->ccb_h.func_code == XPT_SCSI_IO) { ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; } else { ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR; } break; case AHCI_ERR_SATA: ch->fatalerr = 1; if (!ch->recoverycmd) { xpt_freeze_simq(ch->sim, 1); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } ccb->ccb_h.status |= CAM_UNCOR_PARITY; break; case AHCI_ERR_TIMEOUT: if (!ch->recoverycmd) { xpt_freeze_simq(ch->sim, 1); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } ccb->ccb_h.status |= CAM_CMD_TIMEOUT; break; default: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_REQ_CMP_ERR; } /* Free slot. */ ch->oslots &= ~(1 << slot->slot); ch->rslots &= ~(1 << slot->slot); ch->aslots &= ~(1 << slot->slot); slot->state = AHCI_SLOT_EMPTY; slot->ccb = NULL; /* Update channel stats. 
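* Undo the accounting done when the request was started.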
*/ ch->numrslots--; ch->numrslotspd[ccb->ccb_h.target_id]--; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ch->numtslots--; ch->numtslotspd[ccb->ccb_h.target_id]--; } /* Cancel timeout state if request completed normally. */ if (et != AHCI_ERR_TIMEOUT) { lastto = (ch->toslots == (1 << slot->slot)); ch->toslots &= ~(1 << slot->slot); if (lastto) xpt_release_simq(ch->sim, TRUE); } /* If it was first request of reset sequence and there is no error, * proceed to second request. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && (ccb->ataio.cmd.control & ATA_A_RESET) && et == AHCI_ERR_NONE) { ccb->ataio.cmd.control &= ~ATA_A_RESET; ahci_begin_transaction(dev, ccb); return; } /* If it was our READ LOG command - process it. */ if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) { ahci_process_read_log(dev, ccb); /* If it was our REQUEST SENSE command - process it. */ } else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) { ahci_process_request_sense(dev, ccb); /* If it was NCQ or ATAPI command error, put result on hold. */ } else if (et == AHCI_ERR_NCQ || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR && (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) { ch->hold[slot->slot] = ccb; ch->numhslots++; } else xpt_done(ccb); /* If we have no other active commands, ... */ if (ch->rslots == 0) { /* if there was fatal error - reset port. */ if (ch->toslots != 0 || ch->fatalerr) { ahci_reset(dev); } else { /* if we have slots in error, we can reinit port. */ if (ch->eslots != 0) { ahci_stop(dev); ahci_clo(dev); ahci_start(dev, 1); } /* if there commands on hold, we can do READ LOG. */ if (!ch->recoverycmd && ch->numhslots) ahci_issue_recovery(dev); } /* If all the rest of commands are in timeout - give them chance. */ } else if ((ch->rslots & ~ch->toslots) == 0 && et != AHCI_ERR_TIMEOUT) ahci_rearm_timeout(dev); /* Unfreeze frozen command. */ if (ch->frozen && !ahci_check_collision(dev, ch->frozen)) { union ccb *fccb = ch->frozen; ch->frozen = NULL; ahci_begin_transaction(dev, fccb); xpt_release_simq(ch->sim, TRUE); } /* Start PM timer. */ if (ch->numrslots == 0 && ch->pm_level > 3 && (ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) { callout_schedule(&ch->pm_timer, (ch->pm_level == 4) ? hz / 1000 : hz / 8); } } static void ahci_issue_recovery(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); union ccb *ccb; struct ccb_ataio *ataio; struct ccb_scsiio *csio; int i; /* Find some held command. */ for (i = 0; i < ch->numslots; i++) { if (ch->hold[i]) break; } ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { device_printf(dev, "Unable to allocate recovery command\n"); completeall: /* We can't do anything -- complete held commands. */ for (i = 0; i < ch->numslots; i++) { if (ch->hold[i] == NULL) continue; ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL; xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } ahci_reset(dev); return; } ccb->ccb_h = ch->hold[i]->ccb_h; /* Reuse old header. */ if (ccb->ccb_h.func_code == XPT_ATA_IO) { /* READ LOG */ ccb->ccb_h.recovery_type = RECOVERY_READ_LOG; ccb->ccb_h.func_code = XPT_ATA_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.timeout = 1000; /* 1s should be enough. 
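* The recovery command issued here is READ LOG EXT of log page 10h, which identifies the NCQ tag that failed and its task file state.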
*/ ataio = &ccb->ataio; ataio->data_ptr = malloc(512, M_AHCI, M_NOWAIT); if (ataio->data_ptr == NULL) { xpt_free_ccb(ccb); device_printf(dev, "Unable to allocate memory for READ LOG command\n"); goto completeall; } ataio->dxfer_len = 512; bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = CAM_ATAIO_48BIT; ataio->cmd.command = 0x2F; /* READ LOG EXT */ ataio->cmd.sector_count = 1; ataio->cmd.sector_count_exp = 0; ataio->cmd.lba_low = 0x10; ataio->cmd.lba_mid = 0; ataio->cmd.lba_mid_exp = 0; } else { /* REQUEST SENSE */ ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE; ccb->ccb_h.recovery_slot = i; ccb->ccb_h.func_code = XPT_SCSI_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.status = 0; ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ csio = &ccb->csio; csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data; csio->dxfer_len = ch->hold[i]->csio.sense_len; csio->cdb_len = 6; bzero(&csio->cdb_io, sizeof(csio->cdb_io)); csio->cdb_io.cdb_bytes[0] = 0x03; csio->cdb_io.cdb_bytes[4] = csio->dxfer_len; } /* Freeze SIM while doing recovery. */ ch->recoverycmd = 1; xpt_freeze_simq(ch->sim, 1); ahci_begin_transaction(dev, ccb); } static void ahci_process_read_log(device_t dev, union ccb *ccb) { struct ahci_channel *ch = device_get_softc(dev); uint8_t *data; struct ata_res *res; int i; ch->recoverycmd = 0; data = ccb->ataio.data_ptr; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (data[0] & 0x80) == 0) { for (i = 0; i < ch->numslots; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) continue; if ((data[0] & 0x1F) == i) { res = &ch->hold[i]->ataio.res; res->status = data[2]; res->error = data[3]; res->lba_low = data[4]; res->lba_mid = data[5]; res->lba_high = data[6]; res->device = data[7]; res->lba_low_exp = data[8]; res->lba_mid_exp = data[9]; res->lba_high_exp = data[10]; res->sector_count = data[12]; res->sector_count_exp = data[13]; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ; } xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } else { if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) device_printf(dev, "Error while READ LOG EXT\n"); else if ((data[0] & 0x80) == 0) { device_printf(dev, "Non-queued command error in READ LOG EXT\n"); } for (i = 0; i < ch->numslots; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) continue; xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } free(ccb->ataio.data_ptr, M_AHCI); xpt_free_ccb(ccb); xpt_release_simq(ch->sim, TRUE); } static void ahci_process_request_sense(device_t dev, union ccb *ccb) { struct ahci_channel *ch = device_get_softc(dev); int i; ch->recoverycmd = 0; i = ccb->ccb_h.recovery_slot; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL; } xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; xpt_free_ccb(ccb); xpt_release_simq(ch->sim, TRUE); } static void ahci_start(device_t dev, int fbs) { struct ahci_channel *ch = device_get_softc(dev); u_int32_t cmd; /* Clear SATA error register */ ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xFFFFFFFF); /* Clear any interrupts pending on this channel */ ATA_OUTL(ch->r_mem, AHCI_P_IS, 0xFFFFFFFF); /* Configure FIS-based switching if supported. */ if (ch->chcaps & AHCI_P_CMD_FBSCP) { ch->fbs_enabled = (fbs && ch->pm_present) ? 1 : 0; ATA_OUTL(ch->r_mem, AHCI_P_FBS, ch->fbs_enabled ? 
AHCI_P_FBS_EN : 0); } /* Start operations on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); cmd &= ~AHCI_P_CMD_PMA; ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_ST | (ch->pm_present ? AHCI_P_CMD_PMA : 0)); } static void ahci_stop(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); u_int32_t cmd; int timeout; /* Kill all activity on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_ST); /* Wait for activity stop. */ timeout = 0; do { DELAY(10); if (timeout++ > 50000) { device_printf(dev, "stopping AHCI engine failed\n"); break; } } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CR); ch->eslots = 0; } static void ahci_clo(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); u_int32_t cmd; int timeout; /* Issue Command List Override if supported */ if (ch->caps & AHCI_CAP_SCLO) { cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); cmd |= AHCI_P_CMD_CLO; ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd); timeout = 0; do { DELAY(10); if (timeout++ > 50000) { device_printf(dev, "executing CLO failed\n"); break; } } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CLO); } } static void ahci_stop_fr(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); u_int32_t cmd; int timeout; /* Kill all FIS reception on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_FRE); /* Wait for FIS reception stop. */ timeout = 0; do { DELAY(10); if (timeout++ > 50000) { device_printf(dev, "stopping AHCI FR engine failed\n"); break; } } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_FR); } static void ahci_start_fr(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); u_int32_t cmd; /* Start FIS reception on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_FRE); } static int ahci_wait_ready(device_t dev, int t, int t0) { struct ahci_channel *ch = device_get_softc(dev); int timeout = 0; uint32_t val; while ((val = ATA_INL(ch->r_mem, AHCI_P_TFD)) & (ATA_S_BUSY | ATA_S_DRQ)) { if (timeout > t) { if (t != 0) { device_printf(dev, "AHCI reset: device not ready after %dms " "(tfd = %08x)\n", MAX(t, 0) + t0, val); } return (EBUSY); } DELAY(1000); timeout++; } if (bootverbose) device_printf(dev, "AHCI reset: device ready after %dms\n", timeout + t0); return (0); } static void ahci_reset_to(void *arg) { device_t dev = arg; struct ahci_channel *ch = device_get_softc(dev); if (ch->resetting == 0) return; ch->resetting--; if (ahci_wait_ready(dev, ch->resetting == 0 ? -1 : 0, (310 - ch->resetting) * 100) == 0) { ch->resetting = 0; ahci_start(dev, 1); xpt_release_simq(ch->sim, TRUE); return; } if (ch->resetting == 0) { ahci_clo(dev); ahci_start(dev, 1); xpt_release_simq(ch->sim, TRUE); return; } callout_schedule(&ch->reset_timer, hz / 10); } static void ahci_reset(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); struct ahci_controller *ctlr = device_get_softc(device_get_parent(dev)); int i; xpt_freeze_simq(ch->sim, 1); if (bootverbose) device_printf(dev, "AHCI reset...\n"); /* Forget about previous reset. */ if (ch->resetting) { ch->resetting = 0; callout_stop(&ch->reset_timer); xpt_release_simq(ch->sim, TRUE); } /* Requeue freezed command. 
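* A command held back because of a collision is returned to CAM with CAM_REQUEUE_REQ so it will be retried once the port has been reinitialized.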
*/ if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } xpt_done(fccb); } /* Kill the engine and requeue all running commands. */ ahci_stop(dev); for (i = 0; i < ch->numslots; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < AHCI_SLOT_RUNNING) continue; /* XXX; Commands in loading state. */ ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT); } for (i = 0; i < ch->numslots; i++) { if (!ch->hold[i]) continue; xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } if (ch->toslots != 0) xpt_release_simq(ch->sim, TRUE); ch->eslots = 0; ch->toslots = 0; ch->wrongccs = 0; ch->fatalerr = 0; /* Tell the XPT about the event */ xpt_async(AC_BUS_RESET, ch->path, NULL); /* Disable port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); /* Reset and reconnect PHY, */ if (!ahci_sata_phy_reset(dev)) { if (bootverbose) device_printf(dev, "AHCI reset: device not found\n"); ch->devices = 0; /* Enable wanted port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) | AHCI_P_IX_PRC | AHCI_P_IX_PC)); xpt_release_simq(ch->sim, TRUE); return; } if (bootverbose) device_printf(dev, "AHCI reset: device found\n"); /* Wait for clearing busy status. */ if (ahci_wait_ready(dev, dumping ? 31000 : 0, 0)) { if (dumping) ahci_clo(dev); else ch->resetting = 310; } ch->devices = 1; /* Enable wanted port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) | AHCI_P_IX_TFE | AHCI_P_IX_HBF | AHCI_P_IX_HBD | AHCI_P_IX_IF | AHCI_P_IX_OF | ((ch->pm_level == 0) ? AHCI_P_IX_PRC : 0) | AHCI_P_IX_PC | AHCI_P_IX_DP | AHCI_P_IX_UF | (ctlr->ccc ? 0 : AHCI_P_IX_SDB) | AHCI_P_IX_DS | AHCI_P_IX_PS | (ctlr->ccc ? 0 : AHCI_P_IX_DHR))); if (ch->resetting) callout_reset(&ch->reset_timer, hz / 10, ahci_reset_to, dev); else { ahci_start(dev, 1); xpt_release_simq(ch->sim, TRUE); } } static int ahci_setup_fis(device_t dev, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag) { struct ahci_channel *ch = device_get_softc(dev); u_int8_t *fis = &ctp->cfis[0]; bzero(ctp->cfis, 64); fis[0] = 0x27; /* host to device */ fis[1] = (ccb->ccb_h.target_id & 0x0f); if (ccb->ccb_h.func_code == XPT_SCSI_IO) { fis[1] |= 0x80; fis[2] = ATA_PACKET_CMD; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) fis[3] = ATA_F_DMA; else { fis[5] = ccb->csio.dxfer_len; fis[6] = ccb->csio.dxfer_len >> 8; } fis[7] = ATA_D_LBA; fis[15] = ATA_A_4BIT; bzero(ctp->acmd, 32); bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ? 
ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, ctp->acmd, ccb->csio.cdb_len); } else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) { fis[1] |= 0x80; fis[2] = ccb->ataio.cmd.command; fis[3] = ccb->ataio.cmd.features; fis[4] = ccb->ataio.cmd.lba_low; fis[5] = ccb->ataio.cmd.lba_mid; fis[6] = ccb->ataio.cmd.lba_high; fis[7] = ccb->ataio.cmd.device; fis[8] = ccb->ataio.cmd.lba_low_exp; fis[9] = ccb->ataio.cmd.lba_mid_exp; fis[10] = ccb->ataio.cmd.lba_high_exp; fis[11] = ccb->ataio.cmd.features_exp; if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { fis[12] = tag << 3; fis[13] = 0; } else { fis[12] = ccb->ataio.cmd.sector_count; fis[13] = ccb->ataio.cmd.sector_count_exp; } fis[15] = ATA_A_4BIT; } else { fis[15] = ccb->ataio.cmd.control; } return (20); } static int ahci_sata_connect(struct ahci_channel *ch) { u_int32_t status; int timeout, found = 0; /* Wait up to 100ms for "connect well" */ for (timeout = 0; timeout < 1000 ; timeout++) { status = ATA_INL(ch->r_mem, AHCI_P_SSTS); if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) found = 1; if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) && ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) && ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE)) break; if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) { if (bootverbose) { device_printf(ch->dev, "SATA offline status=%08x\n", status); } return (0); } if (found == 0 && timeout >= 100) break; DELAY(100); } if (timeout >= 1000 || !found) { if (bootverbose) { device_printf(ch->dev, "SATA connect timeout time=%dus status=%08x\n", timeout * 100, status); } return (0); } if (bootverbose) { device_printf(ch->dev, "SATA connect time=%dus status=%08x\n", timeout * 100, status); } /* Clear SATA error register */ ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xffffffff); return (1); } static int ahci_sata_phy_reset(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); int sata_rev; uint32_t val; if (ch->listening) { val = ATA_INL(ch->r_mem, AHCI_P_CMD); val |= AHCI_P_CMD_SUD; ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); ch->listening = 0; } sata_rev = ch->user[ch->pm_present ? 15 : 0].revision; if (sata_rev == 1) val = ATA_SC_SPD_SPEED_GEN1; else if (sata_rev == 2) val = ATA_SC_SPD_SPEED_GEN2; else if (sata_rev == 3) val = ATA_SC_SPD_SPEED_GEN3; else val = 0; ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_RESET | val | ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER); DELAY(1000); ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_IDLE | val | ((ch->pm_level > 0) ? 0 : (ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER))); if (!ahci_sata_connect(ch)) { if (ch->caps & AHCI_CAP_SSS) { val = ATA_INL(ch->r_mem, AHCI_P_CMD); val &= ~AHCI_P_CMD_SUD; ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); ch->listening = 1; } else if (ch->pm_level > 0) ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); return (0); } return (1); } static int ahci_check_ids(device_t dev, union ccb *ccb) { struct ahci_channel *ch = device_get_softc(dev); if (ccb->ccb_h.target_id > ((ch->caps & AHCI_CAP_SPM) ? 
15 : 0)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return (-1); } if (ccb->ccb_h.target_lun != 0) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return (-1); } return (0); } static void ahciaction(struct cam_sim *sim, union ccb *ccb) { device_t dev, parent; struct ahci_channel *ch; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahciaction func_code=%x\n", ccb->ccb_h.func_code)); ch = (struct ahci_channel *)cam_sim_softc(sim); dev = ch->dev; switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_ATA_IO: /* Execute the requested I/O operation */ case XPT_SCSI_IO: if (ahci_check_ids(dev, ccb)) return; if (ch->devices == 0 || (ch->pm_present == 0 && ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; } ccb->ccb_h.recovery_type = RECOVERY_NONE; /* Check for command collision. */ if (ahci_check_collision(dev, ccb)) { /* Freeze command. */ ch->frozen = ccb; /* We have only one frozen slot, so freeze simq also. */ xpt_freeze_simq(ch->sim, 1); return; } ahci_begin_transaction(dev, ccb); return; case XPT_EN_LUN: /* Enable LUN as a target */ case XPT_TARGET_IO: /* Execute target I/O request */ case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ahci_device *d; if (ahci_check_ids(dev, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) d->revision = cts->xport_specific.sata.revision; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) d->mode = cts->xport_specific.sata.mode; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) d->bytecount = min(8192, cts->xport_specific.sata.bytecount); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS) d->tags = min(ch->numslots, cts->xport_specific.sata.tags); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM) ch->pm_present = cts->xport_specific.sata.pm_present; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI) d->atapi = cts->xport_specific.sata.atapi; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) d->caps = cts->xport_specific.sata.caps; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct ahci_device *d; uint32_t status; if (ahci_check_ids(dev, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; cts->protocol = PROTO_UNSPECIFIED; cts->protocol_version = PROTO_VERSION_UNSPECIFIED; cts->transport = XPORT_SATA; cts->transport_version = XPORT_VERSION_UNSPECIFIED; cts->proto_specific.valid = 0; cts->xport_specific.sata.valid = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (ccb->ccb_h.target_id == 15 || (ccb->ccb_h.target_id == 0 && !ch->pm_present))) { status = ATA_INL(ch->r_mem, AHCI_P_SSTS) & ATA_SS_SPD_MASK; if (status & 0x0f0) { cts->xport_specific.sata.revision = (status & 0x0f0) >> 4; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; } cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D; if (ch->pm_level) { if (ch->caps & (AHCI_CAP_PSC | AHCI_CAP_SSC)) cts->xport_specific.sata.caps |= 
CTS_SATA_CAPS_H_PMREQ; if (ch->caps2 & AHCI_CAP2_APST) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_APST; } if ((ch->caps & AHCI_CAP_SNCQ) && (ch->quirks & AHCI_Q_NOAA) == 0) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_DMAAA; cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN; cts->xport_specific.sata.caps &= ch->user[ccb->ccb_h.target_id].caps; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } else { cts->xport_specific.sata.revision = d->revision; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; cts->xport_specific.sata.caps = d->caps; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } cts->xport_specific.sata.mode = d->mode; cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; cts->xport_specific.sata.bytecount = d->bytecount; cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; cts->xport_specific.sata.pm_present = ch->pm_present; cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM; cts->xport_specific.sata.tags = d->tags; cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS; cts->xport_specific.sata.atapi = d->atapi; cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ ahci_reset(dev); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; parent = device_get_parent(dev); cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE; if (ch->caps & AHCI_CAP_SNCQ) cpi->hba_inquiry |= PI_TAG_ABLE; if (ch->caps & AHCI_CAP_SPM) cpi->hba_inquiry |= PI_SATAPM; cpi->target_sprt = 0; cpi->hba_misc = PIM_SEQSCAN; cpi->hba_eng_cnt = 0; if (ch->caps & AHCI_CAP_SPM) cpi->max_target = 15; else cpi->max_target = 0; cpi->max_lun = 0; cpi->initiator_id = 0; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 150000; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "AHCI", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SATA; cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->protocol = PROTO_ATA; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; cpi->maxio = MAXPHYS; /* ATI SB600 can't handle 256 sectors with FPDMA (NCQ). */ if (pci_get_devid(parent) == 0x43801002) cpi->maxio = min(cpi->maxio, 128 * 512); cpi->hba_vendor = pci_get_vendor(parent); cpi->hba_device = pci_get_device(parent); cpi->hba_subvendor = pci_get_subvendor(parent); cpi->hba_subdevice = pci_get_subdevice(parent); cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } static void ahcipoll(struct cam_sim *sim) { struct ahci_channel *ch = (struct ahci_channel *)cam_sim_softc(sim); ahci_ch_intr(ch->dev); if (ch->resetting != 0 && (--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) { ch->resetpolldiv = 1000; ahci_reset_to(ch->dev); } } Index: projects/physbio/sys/dev/aic/aic.c =================================================================== --- projects/physbio/sys/dev/aic/aic.c (revision 243875) +++ projects/physbio/sys/dev/aic/aic.c (revision 243876) @@ -1,1598 +1,1598 @@ /*- * Copyright (c) 1999 Luoqi Chen. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void aic_action(struct cam_sim *sim, union ccb *ccb); static void aic_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error); static void aic_intr_locked(struct aic_softc *aic); static void aic_start(struct aic_softc *aic); static void aic_select(struct aic_softc *aic); static void aic_selected(struct aic_softc *aic); static void aic_reselected(struct aic_softc *aic); static void aic_reconnect(struct aic_softc *aic, int tag); static void aic_cmd(struct aic_softc *aic); static void aic_msgin(struct aic_softc *aic); static void aic_handle_msgin(struct aic_softc *aic); static void aic_msgout(struct aic_softc *aic); static void aic_datain(struct aic_softc *aic); static void aic_dataout(struct aic_softc *aic); static void aic_done(struct aic_softc *aic, struct aic_scb *scb); static void aic_poll(struct cam_sim *sim); static void aic_timeout(void *arg); static void aic_scsi_reset(struct aic_softc *aic); static void aic_chip_reset(struct aic_softc *aic); static void aic_reset(struct aic_softc *aic, int initiate_reset); devclass_t aic_devclass; static struct aic_scb * aic_get_scb(struct aic_softc *aic) { struct aic_scb *scb; if (!dumping) mtx_assert(&aic->lock, MA_OWNED); if ((scb = SLIST_FIRST(&aic->free_scbs)) != NULL) SLIST_REMOVE_HEAD(&aic->free_scbs, link); return (scb); } static void aic_free_scb(struct aic_softc *aic, struct aic_scb *scb) { if (!dumping) mtx_assert(&aic->lock, MA_OWNED); if ((aic->flags & AIC_RESOURCE_SHORTAGE) != 0 && (scb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { scb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; aic->flags &= ~AIC_RESOURCE_SHORTAGE; } scb->flags = 0; SLIST_INSERT_HEAD(&aic->free_scbs, scb, link); } static void aic_action(struct cam_sim *sim, union ccb *ccb) { struct aic_softc *aic; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("aic_action\n")); aic = (struct aic_softc *)cam_sim_softc(sim); mtx_assert(&aic->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ { struct aic_scb *scb; if 
((scb = aic_get_scb(aic)) == NULL) { aic->flags |= AIC_RESOURCE_SHORTAGE; xpt_freeze_simq(aic->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } scb->ccb = ccb; ccb->ccb_h.ccb_scb_ptr = scb; ccb->ccb_h.ccb_aic_ptr = aic; scb->target = ccb->ccb_h.target_id; scb->lun = ccb->ccb_h.target_lun; if (ccb->ccb_h.func_code == XPT_SCSI_IO) { scb->cmd_len = ccb->csio.cdb_len; if (ccb->ccb_h.flags & CAM_CDB_POINTER) { if (ccb->ccb_h.flags & CAM_CDB_PHYS) { ccb->ccb_h.status = CAM_REQ_INVALID; aic_free_scb(aic, scb); xpt_done(ccb); return; } scb->cmd_ptr = ccb->csio.cdb_io.cdb_ptr; } else { scb->cmd_ptr = ccb->csio.cdb_io.cdb_bytes; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) || - (ccb->ccb_h.flags & CAM_DATA_PHYS)) { + if ((ccb->ccb_h.flags & CAM_DATA_MASK) != + CAM_DATA_VADDR) { ccb->ccb_h.status = CAM_REQ_INVALID; aic_free_scb(aic, scb); xpt_done(ccb); return; } scb->data_ptr = ccb->csio.data_ptr; scb->data_len = ccb->csio.dxfer_len; } else { scb->data_ptr = NULL; scb->data_len = 0; } aic_execute_scb(scb, NULL, 0, 0); } else { scb->flags |= SCB_DEVICE_RESET; aic_execute_scb(scb, NULL, 0, 0); } break; } case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct aic_tinfo *ti = &aic->tinfo[ccb->ccb_h.target_id]; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_DISC) != 0 && (aic->flags & AIC_DISC_ENABLE) != 0) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ti->flags |= TINFO_DISC_ENB; else ti->flags &= ~TINFO_DISC_ENB; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ti->flags |= TINFO_TAG_ENB; else ti->flags &= ~TINFO_TAG_ENB; } if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { ti->goal.period = spi->sync_period; if (ti->goal.period > aic->min_period) { ti->goal.period = 0; ti->goal.offset = 0; } else if (ti->goal.period < aic->max_period) ti->goal.period = aic->max_period; } if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) { ti->goal.offset = spi->sync_offset; if (ti->goal.offset == 0) ti->goal.period = 0; else if (ti->goal.offset > AIC_SYNC_OFFSET) ti->goal.offset = AIC_SYNC_OFFSET; } if ((ti->goal.period != ti->current.period) || (ti->goal.offset != ti->current.offset)) ti->flags |= TINFO_SDTR_NEGO; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct aic_tinfo *ti = &aic->tinfo[ccb->ccb_h.target_id]; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if ((ti->flags & TINFO_DISC_ENB) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((ti->flags & TINFO_TAG_ENB) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { spi->sync_period = ti->current.period; spi->sync_offset = ti->current.offset; } else { spi->sync_period = ti->user.period; spi->sync_offset = ti->user.offset; } spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { 
cam_calc_geometry(&ccb->ccg, /*extended*/1); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ aic_reset(aic, /*initiate_reset*/TRUE); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 7; cpi->max_lun = 7; cpi->initiator_id = aic->initiator; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void aic_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct aic_scb *scb = (struct aic_scb *)arg; union ccb *ccb = scb->ccb; struct aic_softc *aic = (struct aic_softc *)ccb->ccb_h.ccb_aic_ptr; if (!dumping) mtx_assert(&aic->lock, MA_OWNED); if (ccb->ccb_h.status != CAM_REQ_INPROG) { aic_free_scb(aic, scb); xpt_done(ccb); return; } scb->flags |= SCB_ACTIVE; ccb->ccb_h.status |= CAM_SIM_QUEUED; TAILQ_INSERT_TAIL(&aic->pending_ccbs, &ccb->ccb_h, sim_links.tqe); callout_reset(&scb->timer, (ccb->ccb_h.timeout * hz) / 1000, aic_timeout, scb); aic_start(aic); } /* * Start another command if the controller is not busy. */ static void aic_start(struct aic_softc *aic) { struct ccb_hdr *ccb_h; struct aic_tinfo *ti; if (aic->state != AIC_IDLE) return; TAILQ_FOREACH(ccb_h, &aic->pending_ccbs, sim_links.tqe) { ti = &aic->tinfo[ccb_h->target_id]; if ((ti->lubusy & (1 << ccb_h->target_lun)) == 0) { TAILQ_REMOVE(&aic->pending_ccbs, ccb_h, sim_links.tqe); aic->nexus = (struct aic_scb *)ccb_h->ccb_scb_ptr; aic_select(aic); return; } } CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_start: idle\n")); aic_outb(aic, SIMODE0, ENSELDI); aic_outb(aic, SIMODE1, ENSCSIRST); aic_outb(aic, SCSISEQ, ENRESELI); } /* * Start a selection. */ static void aic_select(struct aic_softc *aic) { struct aic_scb *scb = aic->nexus; CAM_DEBUG(scb->ccb->ccb_h.path, CAM_DEBUG_TRACE, ("aic_select - ccb %p\n", scb->ccb)); aic->state = AIC_SELECTING; aic_outb(aic, DMACNTRL1, 0); aic_outb(aic, SCSIID, aic->initiator << OID_S | scb->target); aic_outb(aic, SXFRCTL1, STIMO_256ms | ENSTIMER | (aic->flags & AIC_PARITY_ENABLE ? ENSPCHK : 0)); aic_outb(aic, SIMODE0, ENSELDI|ENSELDO); aic_outb(aic, SIMODE1, ENSCSIRST|ENSELTIMO); aic_outb(aic, SCSISEQ, ENRESELI|ENSELO|ENAUTOATNO); } /* * We have successfully selected a target, prepare for the information * transfer phases. 
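The aic_action() hunk above is the heart of this change for a PIO-only SIM: the old pair of tests on CAM_SCATTER_VALID and CAM_DATA_PHYS collapses into one comparison of the data-type field against CAM_DATA_VADDR. A minimal sketch of that check, assuming the CAM headers the driver already includes; my_validate_data() is a hypothetical helper, not part of the patch:

/*
 * Illustrative only: reject any CCB whose data is not a single plain
 * kernel-virtual buffer, using the new CAM_DATA_MASK field.
 */
static int
my_validate_data(union ccb *ccb)
{
	/* Commands without a data phase have nothing to validate. */
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
		return (0);
	/* Anything but a virtual-address buffer is unsupported here. */
	if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) {
		ccb->ccb_h.status = CAM_REQ_INVALID;
		return (-1);	/* caller fails the CCB and calls xpt_done() */
	}
	return (0);
}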
*/ static void aic_selected(struct aic_softc *aic) { struct aic_scb *scb = aic->nexus; union ccb *ccb = scb->ccb; struct aic_tinfo *ti = &aic->tinfo[scb->target]; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("aic_selected - ccb %p\n", ccb)); aic->state = AIC_HASNEXUS; if (scb->flags & SCB_DEVICE_RESET) { aic->msg_buf[0] = MSG_BUS_DEV_RESET; aic->msg_len = 1; aic->msg_outq = AIC_MSG_MSGBUF; } else { aic->msg_outq = AIC_MSG_IDENTIFY; if ((ti->flags & TINFO_TAG_ENB) != 0 && (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) aic->msg_outq |= AIC_MSG_TAG_Q; else ti->lubusy |= 1 << scb->lun; if ((ti->flags & TINFO_SDTR_NEGO) != 0) aic->msg_outq |= AIC_MSG_SDTR; } aic_outb(aic, CLRSINT0, CLRSELDO); aic_outb(aic, CLRSINT1, CLRBUSFREE); aic_outb(aic, SCSISEQ, ENAUTOATNP); aic_outb(aic, SIMODE0, 0); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); aic_outb(aic, SCSIRATE, ti->scsirate); } /* * We are re-selected by a target, save the target id and wait for the * target to further identify itself. */ static void aic_reselected(struct aic_softc *aic) { u_int8_t selid; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_reselected\n")); /* * If we have started a selection, it must have lost out in * the arbitration, put the command back to the pending queue. */ if (aic->nexus) { TAILQ_INSERT_HEAD(&aic->pending_ccbs, &aic->nexus->ccb->ccb_h, sim_links.tqe); aic->nexus = NULL; } selid = aic_inb(aic, SELID) & ~(1 << aic->initiator); if (selid & (selid - 1)) { /* this should never have happened */ printf("aic_reselected: invalid selid %x\n", selid); aic_reset(aic, /*initiate_reset*/TRUE); return; } aic->state = AIC_RESELECTED; aic->target = ffs(selid) - 1; aic->lun = -1; aic_outb(aic, CLRSINT0, CLRSELDI); aic_outb(aic, CLRSINT1, CLRBUSFREE); aic_outb(aic, SIMODE0, 0); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); aic_outb(aic, SCSISEQ, ENAUTOATNP); aic_outb(aic, SCSIRATE, aic->tinfo[aic->target].scsirate); } /* * Raise ATNO to signal the target that we have a message for it. */ static __inline void aic_sched_msgout(struct aic_softc *aic, u_int8_t msg) { if (msg) { aic->msg_buf[0] = msg; aic->msg_len = 1; } aic->msg_outq |= AIC_MSG_MSGBUF; aic_outb(aic, SCSISIGO, aic_inb(aic, SCSISIGI) | ATNO); } /* * Wait for SPIORDY (SCSI PIO ready) flag, or a phase change. */ static __inline int aic_spiordy(struct aic_softc *aic) { while (!(aic_inb(aic, DMASTAT) & INTSTAT) && !(aic_inb(aic, SSTAT0) & SPIORDY)) ; return !(aic_inb(aic, DMASTAT) & INTSTAT); } /* * Reestablish a disconnected nexus. */ static void aic_reconnect(struct aic_softc *aic, int tag) { struct aic_scb *scb; struct ccb_hdr *ccb_h; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_reconnect\n")); /* Find the nexus */ scb = NULL; TAILQ_FOREACH(ccb_h, &aic->nexus_ccbs, sim_links.tqe) { scb = (struct aic_scb *)ccb_h->ccb_scb_ptr; if (scb->target == aic->target && scb->lun == aic->lun && (tag == -1 || scb->tag == tag)) break; } /* ABORT if nothing is found */ if (!ccb_h) { if (tag == -1) aic_sched_msgout(aic, MSG_ABORT); else aic_sched_msgout(aic, MSG_ABORT_TAG); xpt_async(AC_UNSOL_RESEL, aic->path, NULL); return; } /* Reestablish the nexus */ TAILQ_REMOVE(&aic->nexus_ccbs, ccb_h, sim_links.tqe); aic->nexus = scb; scb->flags &= ~SCB_DISCONNECTED; aic->state = AIC_HASNEXUS; } /* * Read messages. 
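One detail in aic_reselected() above is easy to miss: after masking off our own initiator bit, SELID must have exactly one bit left, which the selid & (selid - 1) test enforces, and ffs(selid) - 1 then yields the reselecting target's ID. A standalone plain-C illustration with made-up values, no hardware access:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int
main(void)
{
	unsigned char selid = 0x08;	/* only target 3's ID bit is set */

	if (selid == 0 || (selid & (selid - 1)) != 0)
		printf("invalid selid 0x%x\n", (unsigned)selid);	/* zero or >1 bits */
	else
		printf("reselected by target %d\n", ffs(selid) - 1);
	return (0);
}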
*/ static void aic_msgin(struct aic_softc *aic) { int msglen; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_msgin\n")); aic_outb(aic, SIMODE1, ENSCSIRST|ENPHASEMIS|ENBUSFREE); aic_outb(aic, SXFRCTL0, CHEN|SPIOEN); aic->flags &= ~AIC_DROP_MSGIN; aic->msg_len = 0; do { /* * If a parity error is detected, drop the remaining * bytes and inform the target so it could resend * the messages. */ if (aic_inb(aic, SSTAT1) & SCSIPERR) { aic_outb(aic, CLRSINT1, CLRSCSIPERR); aic->flags |= AIC_DROP_MSGIN; aic_sched_msgout(aic, MSG_PARITY_ERROR); } if ((aic->flags & AIC_DROP_MSGIN)) { aic_inb(aic, SCSIDAT); continue; } /* read the message byte without ACKing on it */ aic->msg_buf[aic->msg_len++] = aic_inb(aic, SCSIBUS); if (aic->msg_buf[0] == MSG_EXTENDED) { if (aic->msg_len < 2) { (void) aic_inb(aic, SCSIDAT); continue; } switch (aic->msg_buf[2]) { case MSG_EXT_SDTR: msglen = MSG_EXT_SDTR_LEN; break; case MSG_EXT_WDTR: msglen = MSG_EXT_WDTR_LEN; break; default: msglen = 0; break; } if (aic->msg_buf[1] != msglen) { aic->flags |= AIC_DROP_MSGIN; aic_sched_msgout(aic, MSG_MESSAGE_REJECT); } msglen += 2; } else if (aic->msg_buf[0] >= 0x20 && aic->msg_buf[0] <= 0x2f) msglen = 2; else msglen = 1; /* * If we have a complete message, handle it before the final * ACK (in case we decide to reject the message). */ if (aic->msg_len == msglen) { aic_handle_msgin(aic); aic->msg_len = 0; } /* ACK on the message byte */ (void) aic_inb(aic, SCSIDAT); } while (aic_spiordy(aic)); aic_outb(aic, SXFRCTL0, CHEN); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); } /* * Handle a message. */ static void aic_handle_msgin(struct aic_softc *aic) { struct aic_scb *scb; struct ccb_hdr *ccb_h; struct aic_tinfo *ti; struct ccb_trans_settings neg; struct ccb_trans_settings_spi *spi = &neg.xport_specific.spi; if (aic->state == AIC_RESELECTED) { if (!MSG_ISIDENTIFY(aic->msg_buf[0])) { aic_sched_msgout(aic, MSG_MESSAGE_REJECT); return; } aic->lun = aic->msg_buf[0] & MSG_IDENTIFY_LUNMASK; if (aic->tinfo[aic->target].lubusy & (1 << aic->lun)) aic_reconnect(aic, -1); else aic->state = AIC_RECONNECTING; return; } if (aic->state == AIC_RECONNECTING) { if (aic->msg_buf[0] != MSG_SIMPLE_Q_TAG) { aic_sched_msgout(aic, MSG_MESSAGE_REJECT); return; } aic_reconnect(aic, aic->msg_buf[1]); return; } switch (aic->msg_buf[0]) { case MSG_CMDCOMPLETE: { struct ccb_scsiio *csio; scb = aic->nexus; ccb_h = &scb->ccb->ccb_h; csio = &scb->ccb->csio; if ((scb->flags & SCB_SENSE) != 0) { /* auto REQUEST SENSE command */ scb->flags &= ~SCB_SENSE; csio->sense_resid = scb->data_len; if (scb->status == SCSI_STATUS_OK) { ccb_h->status |= CAM_SCSI_STATUS_ERROR|CAM_AUTOSNS_VALID; /*scsi_sense_print(csio);*/ } else { ccb_h->status |= CAM_AUTOSENSE_FAIL; printf("ccb %p sense failed %x\n", ccb_h, scb->status); } } else { csio->scsi_status = scb->status; csio->resid = scb->data_len; if (scb->status == SCSI_STATUS_OK) { /* everything goes well */ ccb_h->status |= CAM_REQ_CMP; } else if ((ccb_h->flags & CAM_DIS_AUTOSENSE) == 0 && (csio->scsi_status == SCSI_STATUS_CHECK_COND || csio->scsi_status == SCSI_STATUS_CMD_TERMINATED)) { /* try to retrieve sense information */ scb->flags |= SCB_SENSE; aic->flags |= AIC_BUSFREE_OK; return; } else ccb_h->status |= CAM_SCSI_STATUS_ERROR; } aic_done(aic, scb); aic->flags |= AIC_BUSFREE_OK; break; } case MSG_EXTENDED: switch (aic->msg_buf[2]) { case MSG_EXT_SDTR: scb = aic->nexus; ti = &aic->tinfo[scb->target]; if (ti->flags & TINFO_SDTR_SENT) { ti->current.period = aic->msg_buf[3]; ti->current.offset = aic->msg_buf[4]; } else { 
ti->current.period = aic->msg_buf[3] = max(ti->goal.period, aic->msg_buf[3]); ti->current.offset = aic->msg_buf[4] = min(ti->goal.offset, aic->msg_buf[4]); /* * The target initiated the negotiation, * send back a response. */ aic_sched_msgout(aic, 0); } ti->flags &= ~(TINFO_SDTR_SENT|TINFO_SDTR_NEGO); ti->scsirate = ti->current.offset ? ti->current.offset | ((ti->current.period * 4 + 49) / 50 - 2) << 4 : 0; aic_outb(aic, SCSIRATE, ti->scsirate); memset(&neg, 0, sizeof (neg)); neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; spi->sync_period = ti->goal.period = ti->current.period; spi->sync_offset = ti->goal.offset = ti->current.offset; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; ccb_h = &scb->ccb->ccb_h; xpt_setup_ccb(&neg.ccb_h, ccb_h->path, 1); xpt_async(AC_TRANSFER_NEG, ccb_h->path, &neg); break; case MSG_EXT_WDTR: default: aic_sched_msgout(aic, MSG_MESSAGE_REJECT); break; } break; case MSG_DISCONNECT: scb = aic->nexus; ccb_h = &scb->ccb->ccb_h; TAILQ_INSERT_TAIL(&aic->nexus_ccbs, ccb_h, sim_links.tqe); scb->flags |= SCB_DISCONNECTED; aic->flags |= AIC_BUSFREE_OK; aic->nexus = NULL; CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE, ("disconnected\n")); break; case MSG_MESSAGE_REJECT: switch (aic->msg_outq & -aic->msg_outq) { case AIC_MSG_TAG_Q: scb = aic->nexus; ti = &aic->tinfo[scb->target]; ti->flags &= ~TINFO_TAG_ENB; ti->lubusy |= 1 << scb->lun; break; case AIC_MSG_SDTR: scb = aic->nexus; ti = &aic->tinfo[scb->target]; ti->current.period = ti->goal.period = 0; ti->current.offset = ti->goal.offset = 0; ti->flags &= ~(TINFO_SDTR_SENT|TINFO_SDTR_NEGO); ti->scsirate = 0; aic_outb(aic, SCSIRATE, ti->scsirate); memset(&neg, 0, sizeof (neg)); neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; spi->sync_period = ti->current.period; spi->sync_offset = ti->current.offset; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; ccb_h = &scb->ccb->ccb_h; xpt_setup_ccb(&neg.ccb_h, ccb_h->path, 1); xpt_async(AC_TRANSFER_NEG, ccb_h->path, &neg); break; default: break; } break; case MSG_SAVEDATAPOINTER: break; case MSG_RESTOREPOINTERS: break; case MSG_NOOP: break; default: aic_sched_msgout(aic, MSG_MESSAGE_REJECT); break; } } /* * Send messages. */ static void aic_msgout(struct aic_softc *aic) { struct aic_scb *scb; union ccb *ccb; struct aic_tinfo *ti; int msgidx = 0; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_msgout\n")); aic_outb(aic, SIMODE1, ENSCSIRST|ENPHASEMIS|ENBUSFREE); aic_outb(aic, SXFRCTL0, CHEN|SPIOEN); /* * If the previous phase is also the message out phase, * we need to retransmit all the messages, probably * because the target has detected a parity error during * the past transmission. 
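The SCSIRATE assignment in the SDTR case above packs two things at once: the synchronous offset in the low bits and the period, converted from the SDTR factor (4 ns units) into 50 ns chip cycles minus a bias of 2, in the upper nibble. The register layout is the AIC-6260/6360 data sheet's and is assumed here, not restated by the patch; a sketch of the same arithmetic:

/*
 * Sketch: SCSIRATE packing after a successful SDTR exchange.
 * period_factor is the SDTR value in 4 ns units; offset 0 means async.
 */
static unsigned char
encode_scsirate(unsigned period_factor, unsigned offset)
{
	if (offset == 0)
		return (0);	/* fall back to asynchronous transfers */
	/* Round the period up to whole 50 ns cycles, subtract the bias of 2. */
	return (offset | (((period_factor * 4 + 49) / 50 - 2) << 4));
}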
*/ if (aic->prev_phase == PH_MSGOUT) aic->msg_outq = aic->msg_sent; do { int q = aic->msg_outq; if (msgidx > 0 && msgidx == aic->msg_len) { /* complete message sent, start the next one */ q &= -q; aic->msg_sent |= q; aic->msg_outq ^= q; q = aic->msg_outq; msgidx = 0; } if (msgidx == 0) { /* setup the message */ switch (q & -q) { case AIC_MSG_IDENTIFY: scb = aic->nexus; ccb = scb->ccb; ti = &aic->tinfo[scb->target]; aic->msg_buf[0] = MSG_IDENTIFY(scb->lun, (ti->flags & TINFO_DISC_ENB) && !(ccb->ccb_h.flags & CAM_DIS_DISCONNECT)); aic->msg_len = 1; break; case AIC_MSG_TAG_Q: scb = aic->nexus; ccb = scb->ccb; aic->msg_buf[0] = ccb->csio.tag_action; aic->msg_buf[1] = scb->tag; aic->msg_len = 2; break; case AIC_MSG_SDTR: scb = aic->nexus; ti = &aic->tinfo[scb->target]; aic->msg_buf[0] = MSG_EXTENDED; aic->msg_buf[1] = MSG_EXT_SDTR_LEN; aic->msg_buf[2] = MSG_EXT_SDTR; aic->msg_buf[3] = ti->goal.period; aic->msg_buf[4] = ti->goal.offset; aic->msg_len = MSG_EXT_SDTR_LEN + 2; ti->flags |= TINFO_SDTR_SENT; break; case AIC_MSG_MSGBUF: /* a single message already in the buffer */ if (aic->msg_buf[0] == MSG_BUS_DEV_RESET || aic->msg_buf[0] == MSG_ABORT || aic->msg_buf[0] == MSG_ABORT_TAG) aic->flags |= AIC_BUSFREE_OK; break; } } /* * If this is the last message byte of all messages, * clear ATNO to signal transmission complete. */ if ((q & (q - 1)) == 0 && msgidx == aic->msg_len - 1) aic_outb(aic, CLRSINT1, CLRATNO); /* transmit the message byte */ aic_outb(aic, SCSIDAT, aic->msg_buf[msgidx++]); } while (aic_spiordy(aic)); aic_outb(aic, SXFRCTL0, CHEN); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); } /* * Read data bytes. */ static void aic_datain(struct aic_softc *aic) { struct aic_scb *scb = aic->nexus; u_int8_t dmastat, dmacntrl0; int n; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_datain\n")); aic_outb(aic, SIMODE1, ENSCSIRST|ENPHASEMIS|ENBUSFREE); aic_outb(aic, SXFRCTL0, SCSIEN|DMAEN|CHEN); dmacntrl0 = ENDMA; if (aic->flags & AIC_DWIO_ENABLE) dmacntrl0 |= DWORDPIO; aic_outb(aic, DMACNTRL0, dmacntrl0); while (scb->data_len > 0) { for (;;) { /* wait for the fifo to fill up or a phase change */ dmastat = aic_inb(aic, DMASTAT); if (dmastat & (INTSTAT|DFIFOFULL)) break; } if (dmastat & DFIFOFULL) { n = FIFOSIZE; } else { /* * No more data, wait for the remaining bytes in * the scsi fifo to be transfer to the host fifo. */ while (!(aic_inb(aic, SSTAT2) & SEMPTY)) ; n = aic_inb(aic, FIFOSTAT); } n = imin(scb->data_len, n); if (aic->flags & AIC_DWIO_ENABLE) { if (n >= 12) { aic_insl(aic, DMADATALONG, scb->data_ptr, n>>2); scb->data_ptr += n & ~3; scb->data_len -= n & ~3; n &= 3; } } else { if (n >= 8) { aic_insw(aic, DMADATA, scb->data_ptr, n >> 1); scb->data_ptr += n & ~1; scb->data_len -= n & ~1; n &= 1; } } if (n) { aic_outb(aic, DMACNTRL0, ENDMA|B8MODE); aic_insb(aic, DMADATA, scb->data_ptr, n); scb->data_ptr += n; scb->data_len -= n; aic_outb(aic, DMACNTRL0, dmacntrl0); } if (dmastat & INTSTAT) break; } aic_outb(aic, SXFRCTL0, CHEN); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); } /* * Send data bytes. 
*/ static void aic_dataout(struct aic_softc *aic) { struct aic_scb *scb = aic->nexus; u_int8_t dmastat, dmacntrl0, sstat2; int n; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_dataout\n")); aic_outb(aic, SIMODE1, ENSCSIRST|ENPHASEMIS|ENBUSFREE); aic_outb(aic, SXFRCTL0, SCSIEN|DMAEN|CHEN); dmacntrl0 = ENDMA|WRITE; if (aic->flags & AIC_DWIO_ENABLE) dmacntrl0 |= DWORDPIO; aic_outb(aic, DMACNTRL0, dmacntrl0); while (scb->data_len > 0) { for (;;) { /* wait for the fifo to clear up or a phase change */ dmastat = aic_inb(aic, DMASTAT); if (dmastat & (INTSTAT|DFIFOEMP)) break; } if (dmastat & INTSTAT) break; n = imin(scb->data_len, FIFOSIZE); if (aic->flags & AIC_DWIO_ENABLE) { if (n >= 12) { aic_outsl(aic, DMADATALONG, scb->data_ptr,n>>2); scb->data_ptr += n & ~3; scb->data_len -= n & ~3; n &= 3; } } else { if (n >= 8) { aic_outsw(aic, DMADATA, scb->data_ptr, n >> 1); scb->data_ptr += n & ~1; scb->data_len -= n & ~1; n &= 1; } } if (n) { aic_outb(aic, DMACNTRL0, ENDMA|WRITE|B8MODE); aic_outsb(aic, DMADATA, scb->data_ptr, n); scb->data_ptr += n; scb->data_len -= n; aic_outb(aic, DMACNTRL0, dmacntrl0); } } for (;;) { /* wait until all bytes in the fifos are transmitted */ dmastat = aic_inb(aic, DMASTAT); sstat2 = aic_inb(aic, SSTAT2); if ((dmastat & DFIFOEMP) && (sstat2 & SEMPTY)) break; if (dmastat & INTSTAT) { /* adjust for untransmitted bytes */ n = aic_inb(aic, FIFOSTAT) + (sstat2 & 0xf); scb->data_ptr -= n; scb->data_len += n; /* clear the fifo */ aic_outb(aic, SXFRCTL0, CHEN|CLRCH); aic_outb(aic, DMACNTRL0, RSTFIFO); break; } } aic_outb(aic, SXFRCTL0, CHEN); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); } /* * Send the scsi command. */ static void aic_cmd(struct aic_softc *aic) { struct aic_scb *scb = aic->nexus; struct scsi_request_sense sense_cmd; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_cmd\n")); if (scb->flags & SCB_SENSE) { /* autosense request */ sense_cmd.opcode = REQUEST_SENSE; sense_cmd.byte2 = scb->lun << 5; sense_cmd.length = scb->ccb->csio.sense_len; sense_cmd.control = 0; sense_cmd.unused[0] = 0; sense_cmd.unused[1] = 0; scb->cmd_ptr = (u_int8_t *)&sense_cmd; scb->cmd_len = sizeof(sense_cmd); scb->data_ptr = (u_int8_t *)&scb->ccb->csio.sense_data; scb->data_len = scb->ccb->csio.sense_len; } aic_outb(aic, SIMODE1, ENSCSIRST|ENPHASEMIS|ENBUSFREE); aic_outb(aic, DMACNTRL0, ENDMA|WRITE); aic_outb(aic, SXFRCTL0, SCSIEN|DMAEN|CHEN); aic_outsw(aic, DMADATA, (u_int16_t *)scb->cmd_ptr, scb->cmd_len >> 1); while ((aic_inb(aic, SSTAT2) & SEMPTY) == 0 && (aic_inb(aic, DMASTAT) & INTSTAT) == 0) ; aic_outb(aic, SXFRCTL0, CHEN); aic_outb(aic, SIMODE1, ENSCSIRST|ENBUSFREE|ENREQINIT); } /* * Finish off a command. The caller is responsible to remove the ccb * from any queue. 
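aic_datain() and aic_dataout() above carve each burst the same way: with DWIO enabled and at least 12 bytes pending, 32-bit transfers move n & ~3 bytes and leave n & 3 behind; without DWIO, 16-bit transfers handle n & ~1 bytes from 8 upward; any remainder goes through the 8-bit data port. A plain-C sketch of that bookkeeping for the DWIO case, counts only, no I/O:

#include <stdio.h>

int
main(void)
{
	int n = 27;		/* bytes available in this burst */
	int dwords = 0, leftover;

	if (n >= 12) {		/* large enough to bother with 32-bit PIO */
		dwords = n >> 2;	/* what aic_insl()/aic_outsl() would move */
		n &= 3;			/* residue after the dword transfers */
	}
	leftover = n;		/* finished through the 8-bit data port */
	printf("%d dword transfers, %d trailing bytes\n", dwords, leftover);
	return (0);
}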
*/ static void aic_done(struct aic_softc *aic, struct aic_scb *scb) { union ccb *ccb = scb->ccb; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("aic_done - ccb %p status %x resid %d\n", ccb, ccb->ccb_h.status, ccb->csio.resid)); callout_stop(&scb->timer); if ((scb->flags & SCB_DEVICE_RESET) != 0 && ccb->ccb_h.func_code != XPT_RESET_DEV) { struct cam_path *path; struct ccb_hdr *ccb_h; cam_status error; error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(aic->sim), scb->target, CAM_LUN_WILDCARD); if (error == CAM_REQ_CMP) { xpt_async(AC_SENT_BDR, path, NULL); xpt_free_path(path); } ccb_h = TAILQ_FIRST(&aic->pending_ccbs); while (ccb_h != NULL) { struct aic_scb *pending_scb; pending_scb = (struct aic_scb *)ccb_h->ccb_scb_ptr; if (ccb_h->target_id == scb->target) { ccb_h->status |= CAM_BDR_SENT; ccb_h = TAILQ_NEXT(ccb_h, sim_links.tqe); TAILQ_REMOVE(&aic->pending_ccbs, &pending_scb->ccb->ccb_h, sim_links.tqe); aic_done(aic, pending_scb); } else { callout_reset(&pending_scb->timer, (ccb_h->timeout * hz) / 1000, aic_timeout, pending_scb); ccb_h = TAILQ_NEXT(ccb_h, sim_links.tqe); } } ccb_h = TAILQ_FIRST(&aic->nexus_ccbs); while (ccb_h != NULL) { struct aic_scb *nexus_scb; nexus_scb = (struct aic_scb *)ccb_h->ccb_scb_ptr; if (ccb_h->target_id == scb->target) { ccb_h->status |= CAM_BDR_SENT; ccb_h = TAILQ_NEXT(ccb_h, sim_links.tqe); TAILQ_REMOVE(&aic->nexus_ccbs, &nexus_scb->ccb->ccb_h, sim_links.tqe); aic_done(aic, nexus_scb); } else { callout_reset(&nexus_scb->timer, (ccb_h->timeout * hz) / 1000, aic_timeout, nexus_scb); ccb_h = TAILQ_NEXT(ccb_h, sim_links.tqe); } } } if (aic->nexus == scb || scb->flags & SCB_DISCONNECTED) aic->tinfo[scb->target].lubusy &= ~(1 << scb->lun); if (aic->nexus == scb) { aic->nexus = NULL; } aic_free_scb(aic, scb); xpt_done(ccb); } static void aic_poll(struct cam_sim *sim) { aic_intr_locked(cam_sim_softc(sim)); } static void aic_timeout(void *arg) { struct aic_scb *scb = (struct aic_scb *)arg; union ccb *ccb = scb->ccb; struct aic_softc *aic = (struct aic_softc *)ccb->ccb_h.ccb_aic_ptr; mtx_assert(&aic->lock, MA_OWNED); xpt_print_path(ccb->ccb_h.path); printf("ccb %p - timed out", ccb); if (aic->nexus && aic->nexus != scb) printf(", nexus %p", aic->nexus->ccb); printf(", phase 0x%x, state %d\n", aic_inb(aic, SCSISIGI), aic->state); if ((scb->flags & SCB_ACTIVE) == 0) { xpt_print_path(ccb->ccb_h.path); printf("ccb %p - timed out already completed\n", ccb); return; } if ((scb->flags & SCB_DEVICE_RESET) == 0 && aic->nexus == scb) { struct ccb_hdr *ccb_h = &scb->ccb->ccb_h; struct aic_scb *pending_scb; if ((ccb_h->status & CAM_RELEASE_SIMQ) == 0) { xpt_freeze_simq(aic->sim, /*count*/1); ccb_h->status |= CAM_RELEASE_SIMQ; } TAILQ_FOREACH(ccb_h, &aic->pending_ccbs, sim_links.tqe) { pending_scb = ccb_h->ccb_scb_ptr; callout_stop(&pending_scb->timer); } TAILQ_FOREACH(ccb_h, &aic->nexus_ccbs, sim_links.tqe) { pending_scb = ccb_h->ccb_scb_ptr; callout_stop(&pending_scb->timer); } scb->flags |= SCB_DEVICE_RESET; callout_reset(&scb->timer, 5 * hz, aic_timeout, scb); aic_sched_msgout(aic, MSG_BUS_DEV_RESET); } else { if (aic->nexus == scb) { ccb->ccb_h.status |= CAM_CMD_TIMEOUT; aic_done(aic, scb); } aic_reset(aic, /*initiate_reset*/TRUE); } } void aic_intr(void *arg) { struct aic_softc *aic = (struct aic_softc *)arg; mtx_lock(&aic->lock); aic_intr_locked(aic); mtx_unlock(&aic->lock); } void aic_intr_locked(struct aic_softc *aic) { u_int8_t sstat0, sstat1; union ccb *ccb; struct aic_scb *scb; if (!(aic_inb(aic, DMASTAT) & INTSTAT)) return; aic_outb(aic, DMACNTRL0, 0); sstat0 
= aic_inb(aic, SSTAT0); sstat1 = aic_inb(aic, SSTAT1); if ((sstat1 & SCSIRSTI) != 0) { /* a device-initiated bus reset */ aic_outb(aic, CLRSINT1, CLRSCSIRSTI); aic_reset(aic, /*initiate_reset*/FALSE); return; } if ((sstat1 & SCSIPERR) != 0) { aic_outb(aic, CLRSINT1, CLRSCSIPERR); aic_sched_msgout(aic, MSG_PARITY_ERROR); aic_outb(aic, DMACNTRL0, INTEN); return; } if (aic_inb(aic, SSTAT4)) { aic_outb(aic, CLRSERR, CLRSYNCERR|CLRFWERR|CLRFRERR); aic_reset(aic, /*initiate_reset*/TRUE); return; } if (aic->state <= AIC_SELECTING) { if ((sstat0 & SELDI) != 0) { aic_reselected(aic); aic_outb(aic, DMACNTRL0, INTEN); return; } if ((sstat0 & SELDO) != 0) { aic_selected(aic); aic_outb(aic, DMACNTRL0, INTEN); return; } if ((sstat1 & SELTO) != 0) { scb = aic->nexus; ccb = scb->ccb; ccb->ccb_h.status = CAM_SEL_TIMEOUT; aic_done(aic, scb); while ((sstat1 & BUSFREE) == 0) sstat1 = aic_inb(aic, SSTAT1); aic->flags |= AIC_BUSFREE_OK; } } if ((sstat1 & BUSFREE) != 0) { aic_outb(aic, SCSISEQ, 0); aic_outb(aic, CLRSINT0, sstat0); aic_outb(aic, CLRSINT1, sstat1); if ((scb = aic->nexus)) { if ((aic->flags & AIC_BUSFREE_OK) == 0) { ccb = scb->ccb; ccb->ccb_h.status = CAM_UNEXP_BUSFREE; aic_done(aic, scb); } else if (scb->flags & SCB_DEVICE_RESET) { ccb = scb->ccb; if (ccb->ccb_h.func_code == XPT_RESET_DEV) { xpt_async(AC_SENT_BDR, ccb->ccb_h.path, NULL); ccb->ccb_h.status |= CAM_REQ_CMP; } else ccb->ccb_h.status |= CAM_CMD_TIMEOUT; aic_done(aic, scb); } else if (scb->flags & SCB_SENSE) { /* autosense request */ aic->flags &= ~AIC_BUSFREE_OK; aic->tinfo[scb->target].lubusy &= ~(1 << scb->lun); aic_select(aic); aic_outb(aic, DMACNTRL0, INTEN); return; } } aic->flags &= ~AIC_BUSFREE_OK; aic->state = AIC_IDLE; aic_start(aic); aic_outb(aic, DMACNTRL0, INTEN); return; } if ((sstat1 & REQINIT) != 0) { u_int8_t phase = aic_inb(aic, SCSISIGI) & PH_MASK; aic_outb(aic, SCSISIGO, phase); aic_outb(aic, CLRSINT1, CLRPHASECHG); switch (phase) { case PH_MSGOUT: aic_msgout(aic); break; case PH_MSGIN: aic_msgin(aic); break; case PH_STAT: scb = aic->nexus; ccb = scb->ccb; aic_outb(aic, DMACNTRL0, 0); aic_outb(aic, SXFRCTL0, CHEN|SPIOEN); scb->status = aic_inb(aic, SCSIDAT); aic_outb(aic, SXFRCTL0, CHEN); break; case PH_CMD: aic_cmd(aic); break; case PH_DATAIN: aic_datain(aic); break; case PH_DATAOUT: aic_dataout(aic); break; } aic->prev_phase = phase; aic_outb(aic, DMACNTRL0, INTEN); return; } printf("aic_intr: unexpected intr sstat0 %x sstat1 %x\n", sstat0, sstat1); aic_outb(aic, DMACNTRL0, INTEN); } /* * Reset ourselves. */ static void aic_chip_reset(struct aic_softc *aic) { /* * Doc. 
recommends clearing these two registers before * operations commence */ aic_outb(aic, SCSITEST, 0); aic_outb(aic, TEST, 0); /* Reset SCSI-FIFO and abort any transfers */ aic_outb(aic, SXFRCTL0, CHEN|CLRCH|CLRSTCNT); /* Reset HOST-FIFO */ aic_outb(aic, DMACNTRL0, RSTFIFO); aic_outb(aic, DMACNTRL1, 0); /* Disable all selection features */ aic_outb(aic, SCSISEQ, 0); aic_outb(aic, SXFRCTL1, 0); /* Disable interrupts */ aic_outb(aic, SIMODE0, 0); aic_outb(aic, SIMODE1, 0); /* Clear interrupts */ aic_outb(aic, CLRSINT0, 0x7f); aic_outb(aic, CLRSINT1, 0xef); /* Disable synchronous transfers */ aic_outb(aic, SCSIRATE, 0); /* Haven't seen any errors (yet) */ aic_outb(aic, CLRSERR, 0x07); /* Set our SCSI-ID */ aic_outb(aic, SCSIID, aic->initiator << OID_S); aic_outb(aic, BRSTCNTRL, EISA_BRST_TIM); } /* * Reset the SCSI bus */ static void aic_scsi_reset(struct aic_softc *aic) { aic_outb(aic, SCSISEQ, SCSIRSTO); DELAY(500); aic_outb(aic, SCSISEQ, 0); DELAY(50); } /* * Reset. Abort all pending commands. */ static void aic_reset(struct aic_softc *aic, int initiate_reset) { struct ccb_hdr *ccb_h; CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("aic_reset\n")); if (initiate_reset) aic_scsi_reset(aic); aic_chip_reset(aic); xpt_async(AC_BUS_RESET, aic->path, NULL); while ((ccb_h = TAILQ_FIRST(&aic->pending_ccbs)) != NULL) { TAILQ_REMOVE(&aic->pending_ccbs, ccb_h, sim_links.tqe); ccb_h->status |= CAM_SCSI_BUS_RESET; aic_done(aic, (struct aic_scb *)ccb_h->ccb_scb_ptr); } while ((ccb_h = TAILQ_FIRST(&aic->nexus_ccbs)) != NULL) { TAILQ_REMOVE(&aic->nexus_ccbs, ccb_h, sim_links.tqe); ccb_h->status |= CAM_SCSI_BUS_RESET; aic_done(aic, (struct aic_scb *)ccb_h->ccb_scb_ptr); } if (aic->nexus) { ccb_h = &aic->nexus->ccb->ccb_h; ccb_h->status |= CAM_SCSI_BUS_RESET; aic_done(aic, aic->nexus); } aic->state = AIC_IDLE; aic_outb(aic, DMACNTRL0, INTEN); } static char *aic_chip_names[] = { "AIC6260", "AIC6360", "AIC6370", "GM82C700", }; static struct { int type; char *idstring; } aic_chip_ids[] = { { AIC6360, IDSTRING_AIC6360 }, { AIC6370, IDSTRING_AIC6370 }, { GM82C700, IDSTRING_GM82C700 }, }; static void aic_init(struct aic_softc *aic) { struct aic_scb *scb; struct aic_tinfo *ti; u_int8_t porta, portb; char chip_id[33]; int i; TAILQ_INIT(&aic->pending_ccbs); TAILQ_INIT(&aic->nexus_ccbs); SLIST_INIT(&aic->free_scbs); aic->nexus = NULL; aic->state = AIC_IDLE; aic->prev_phase = -1; aic->flags = 0; aic_chip_reset(aic); aic_scsi_reset(aic); /* determine the chip type from its ID string */ aic->chip_type = AIC6260; aic_insb(aic, ID, chip_id, sizeof(chip_id) - 1); chip_id[sizeof(chip_id) - 1] = '\0'; for (i = 0; i < sizeof(aic_chip_ids) / sizeof(aic_chip_ids[0]); i++) { if (!strcmp(chip_id, aic_chip_ids[i].idstring)) { aic->chip_type = aic_chip_ids[i].type; break; } } porta = aic_inb(aic, PORTA); portb = aic_inb(aic, PORTB); aic->initiator = PORTA_ID(porta); if (PORTA_PARITY(porta)) aic->flags |= AIC_PARITY_ENABLE; if (PORTB_DISC(portb)) aic->flags |= AIC_DISC_ENABLE; if (PORTB_DMA(portb)) aic->flags |= AIC_DMA_ENABLE; /* * We can do fast SCSI (10MHz clock rate) if bit 4 of portb * is set and we've got a 6360. The 6260 can only do standard * 5MHz SCSI.
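aic_reset() above drains both CCB queues with one idiom: pop the head, mark it CAM_SCSI_BUS_RESET, and hand it to aic_done() so nothing keeps referencing the dead bus. A self-contained sketch of that drain loop over a sys/queue.h TAILQ; the entry type and status value are stand-ins, not driver structures:

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	unsigned status;
	TAILQ_ENTRY(entry) link;
};
static TAILQ_HEAD(, entry) q = TAILQ_HEAD_INITIALIZER(q);

int
main(void)
{
	struct entry *e;
	int i;

	for (i = 0; i < 3; i++) {	/* stand-ins for queued CCBs */
		if ((e = calloc(1, sizeof(*e))) == NULL)
			return (1);
		TAILQ_INSERT_TAIL(&q, e, link);
	}
	while ((e = TAILQ_FIRST(&q)) != NULL) {	/* same shape as aic_reset() */
		TAILQ_REMOVE(&q, e, link);
		e->status |= 0x1;	/* stand-in for CAM_SCSI_BUS_RESET */
		printf("completed %p, status %#x\n", (void *)e, e->status);
		free(e);		/* aic_done() analogue */
	}
	return (0);
}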
*/ if (aic->chip_type > AIC6260 || aic_inb(aic, REV)) { if (PORTB_FSYNC(portb)) aic->flags |= AIC_FAST_ENABLE; aic->flags |= AIC_DWIO_ENABLE; } if (aic->flags & AIC_FAST_ENABLE) aic->max_period = AIC_FAST_SYNC_PERIOD; else aic->max_period = AIC_SYNC_PERIOD; aic->min_period = AIC_MIN_SYNC_PERIOD; for (i = 255; i >= 0; i--) { scb = &aic->scbs[i]; scb->tag = i; callout_init_mtx(&scb->timer, &aic->lock, 0); aic_free_scb(aic, scb); } for (i = 0; i < 8; i++) { if (i == aic->initiator) continue; ti = &aic->tinfo[i]; bzero(ti, sizeof(*ti)); ti->flags = TINFO_TAG_ENB; if (aic->flags & AIC_DISC_ENABLE) ti->flags |= TINFO_DISC_ENB; ti->user.period = aic->max_period; ti->user.offset = AIC_SYNC_OFFSET; ti->scsirate = 0; } aic_outb(aic, DMACNTRL0, INTEN); } int aic_probe(struct aic_softc *aic) { int i; /* Remove aic6360 from possible powerdown mode */ aic_outb(aic, DMACNTRL0, 0); #define STSIZE 16 aic_outb(aic, DMACNTRL1, 0); /* Reset stack pointer */ for (i = 0; i < STSIZE; i++) aic_outb(aic, STACK, i); /* See if we can pull out the same sequence */ aic_outb(aic, DMACNTRL1, 0); for (i = 0; i < STSIZE && aic_inb(aic, STACK) == i; i++) ; if (i != STSIZE) return (ENXIO); #undef STSIZE return (0); } int aic_attach(struct aic_softc *aic) { struct cam_devq *devq; /* * Create the device queue for our SIM. */ devq = cam_simq_alloc(256); if (devq == NULL) return (ENOMEM); /* * Construct our SIM entry */ aic->sim = cam_sim_alloc(aic_action, aic_poll, "aic", aic, device_get_unit(aic->dev), &aic->lock, 2, 256, devq); if (aic->sim == NULL) { cam_simq_free(devq); return (ENOMEM); } mtx_lock(&aic->lock); if (xpt_bus_register(aic->sim, aic->dev, 0) != CAM_SUCCESS) { cam_sim_free(aic->sim, /*free_devq*/TRUE); mtx_unlock(&aic->lock); return (ENXIO); } if (xpt_create_path(&aic->path, /*periph*/NULL, cam_sim_path(aic->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(aic->sim)); cam_sim_free(aic->sim, /*free_devq*/TRUE); mtx_unlock(&aic->lock); return (ENXIO); } aic_init(aic); device_printf(aic->dev, "%s", aic_chip_names[aic->chip_type]); if (aic->flags & AIC_DMA_ENABLE) printf(", dma"); if (aic->flags & AIC_DISC_ENABLE) printf(", disconnection"); if (aic->flags & AIC_PARITY_ENABLE) printf(", parity check"); if (aic->flags & AIC_FAST_ENABLE) printf(", fast SCSI"); printf("\n"); mtx_unlock(&aic->lock); return (0); } int aic_detach(struct aic_softc *aic) { struct aic_scb *scb; int i; mtx_lock(&aic->lock); xpt_async(AC_LOST_DEVICE, aic->path, NULL); xpt_free_path(aic->path); xpt_bus_deregister(cam_sim_path(aic->sim)); cam_sim_free(aic->sim, /*free_devq*/TRUE); mtx_unlock(&aic->lock); for (i = 255; i >= 0; i--) { scb = &aic->scbs[i]; callout_drain(&scb->timer); } return (0); } Index: projects/physbio/sys/dev/arcmsr/arcmsr.c =================================================================== --- projects/physbio/sys/dev/arcmsr/arcmsr.c (revision 243875) +++ projects/physbio/sys/dev/arcmsr/arcmsr.c (revision 243876) @@ -1,3905 +1,3905 @@ /* ***************************************************************************************** ** O.S : FreeBSD ** FILE NAME : arcmsr.c ** BY : Erich Chen, Ching Huang ** Description: SCSI RAID Device Driver for ** ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x) SATA/SAS RAID HOST Adapter ** ARCMSR RAID Host adapter ** [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set] ****************************************************************************************** ************************************************************************ ** ** 
Copyright (c) 2004-2010 ARECA Co. Ltd. ** Erich Chen, Taipei Taiwan All rights reserved. ** ** Redistribution and use in source and binary forms, with or without ** modification, are permitted provided that the following conditions ** are met: ** 1. Redistributions of source code must retain the above copyright ** notice, this list of conditions and the following disclaimer. ** 2. Redistributions in binary form must reproduce the above copyright ** notice, this list of conditions and the following disclaimer in the ** documentation and/or other materials provided with the distribution. ** 3. The name of the author may not be used to endorse or promote products ** derived from this software without specific prior written permission. ** ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ************************************************************************** ** History ** ** REV# DATE NAME DESCRIPTION ** 1.00.00.00 03/31/2004 Erich Chen First release ** 1.20.00.02 11/29/2004 Erich Chen bug fix with arcmsr_bus_reset when PHY error ** 1.20.00.03 04/19/2005 Erich Chen add SATA 24 Ports adapter type support ** clean unused function ** 1.20.00.12 09/12/2005 Erich Chen bug fix with abort command handling, ** firmware version check ** and firmware update notify for hardware bug fix ** handling if none zero high part physical address ** of srb resource ** 1.20.00.13 08/18/2006 Erich Chen remove pending srb and report busy ** add iop message xfer ** with scsi pass-through command ** add new device id of sas raid adapters ** code fit for SPARC64 & PPC ** 1.20.00.14 02/05/2007 Erich Chen bug fix for incorrect ccb_h.status report ** and cause g_vfs_done() read write error ** 1.20.00.15 10/10/2007 Erich Chen support new RAID adapter type ARC120x ** 1.20.00.16 10/10/2009 Erich Chen Bug fix for RAID adapter type ARC120x ** bus_dmamem_alloc() with BUS_DMA_ZERO ** 1.20.00.17 07/15/2010 Ching Huang Added support ARC1880 ** report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed, ** prevent cam_periph_error removing all LUN devices of one Target id ** for any one LUN device failed ** 1.20.00.18 10/14/2010 Ching Huang Fixed "inquiry data fails comparion at DV1 step" ** 10/25/2010 Ching Huang Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B ** 1.20.00.19 11/11/2010 Ching Huang Fixed arcmsr driver prevent arcsas support for Areca SAS HBA ARC13x0 ** 1.20.00.20 12/08/2010 Ching Huang Avoid calling atomic_set_int function ** 1.20.00.21 02/08/2011 Ching Huang Implement I/O request timeout ** 02/14/2011 Ching Huang Modified pktRequestCount ** 1.20.00.21 03/03/2011 Ching Huang if a command timeout, then wait its ccb back before free it ** 1.20.00.22 07/04/2011 Ching Huang Fixed multiple MTX panic ** 1.20.00.23 10/28/2011 Ching Huang Added TIMEOUT_DELAY in case of too many HDDs need to start ** 1.20.00.23 11/08/2011 Ching 
Huang Added report device transfer speed ** 1.20.00.23 01/30/2012 Ching Huang Fixed Request requeued and Retrying command ** 1.20.00.24 06/11/2012 Ching Huang Fixed return sense data condition ** 1.20.00.25 08/17/2012 Ching Huang Fixed hotplug device no function on type A adapter ****************************************************************************************** */ #include __FBSDID("$FreeBSD$"); #if 0 #define ARCMSR_DEBUG1 1 #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version >= 500005 #include #include #include #include #include #define ARCMSR_LOCK_INIT(l, s) mtx_init(l, s, NULL, MTX_DEF) #define ARCMSR_LOCK_DESTROY(l) mtx_destroy(l) #define ARCMSR_LOCK_ACQUIRE(l) mtx_lock(l) #define ARCMSR_LOCK_RELEASE(l) mtx_unlock(l) #define ARCMSR_LOCK_TRY(l) mtx_trylock(l) #define arcmsr_htole32(x) htole32(x) typedef struct mtx arcmsr_lock_t; #else #include #include #include #define ARCMSR_LOCK_INIT(l, s) simple_lock_init(l) #define ARCMSR_LOCK_DESTROY(l) #define ARCMSR_LOCK_ACQUIRE(l) simple_lock(l) #define ARCMSR_LOCK_RELEASE(l) simple_unlock(l) #define ARCMSR_LOCK_TRY(l) simple_lock_try(l) #define arcmsr_htole32(x) (x) typedef struct simplelock arcmsr_lock_t; #endif #if !defined(CAM_NEW_TRAN_CODE) && __FreeBSD_version >= 700025 #define CAM_NEW_TRAN_CODE 1 #endif #if __FreeBSD_version > 500000 #define arcmsr_callout_init(a) callout_init(a, /*mpsafe*/1); #else #define arcmsr_callout_init(a) callout_init(a); #endif #define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.25 2012-08-17" #include #define SRB_SIZE ((sizeof(struct CommandControlBlock)+0x1f) & 0xffe0) #define ARCMSR_SRBS_POOL_SIZE (SRB_SIZE * ARCMSR_MAX_FREESRB_NUM) /* ************************************************************************** ************************************************************************** */ #define CHIP_REG_READ32(s, b, r) bus_space_read_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r)) #define CHIP_REG_WRITE32(s, b, r, d) bus_space_write_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r), d) /* ************************************************************************** ************************************************************************** */ static void arcmsr_free_srb(struct CommandControlBlock *srb); static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb); static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb); static int arcmsr_probe(device_t dev); static int arcmsr_attach(device_t dev); static int arcmsr_detach(device_t dev); static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg); static void arcmsr_iop_parking(struct AdapterControlBlock *acb); static int arcmsr_shutdown(device_t dev); static void arcmsr_interrupt(struct AdapterControlBlock *acb); static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb); static void arcmsr_free_resource(struct AdapterControlBlock *acb); static void arcmsr_bus_reset(struct AdapterControlBlock *acb); static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); static void 
arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); static void arcmsr_iop_init(struct AdapterControlBlock *acb); static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb); static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb); static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb); static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag); static void arcmsr_iop_reset(struct AdapterControlBlock *acb); static void arcmsr_report_sense_info(struct CommandControlBlock *srb); static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg); static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb); static int arcmsr_resume(device_t dev); static int arcmsr_suspend(device_t dev); static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb); static void arcmsr_polling_devmap(void* arg); static void arcmsr_srb_timeout(void* arg); #ifdef ARCMSR_DEBUG1 static void arcmsr_dump_data(struct AdapterControlBlock *acb); #endif /* ************************************************************************** ************************************************************************** */ static void UDELAY(u_int32_t us) { DELAY(us); } /* ************************************************************************** ************************************************************************** */ static bus_dmamap_callback_t arcmsr_map_free_srb; static bus_dmamap_callback_t arcmsr_execute_srb; /* ************************************************************************** ************************************************************************** */ static d_open_t arcmsr_open; static d_close_t arcmsr_close; static d_ioctl_t arcmsr_ioctl; static device_method_t arcmsr_methods[]={ DEVMETHOD(device_probe, arcmsr_probe), DEVMETHOD(device_attach, arcmsr_attach), DEVMETHOD(device_detach, arcmsr_detach), DEVMETHOD(device_shutdown, arcmsr_shutdown), DEVMETHOD(device_suspend, arcmsr_suspend), DEVMETHOD(device_resume, arcmsr_resume), DEVMETHOD_END }; static driver_t arcmsr_driver={ "arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock) }; static devclass_t arcmsr_devclass; DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, 0, 0); MODULE_DEPEND(arcmsr, pci, 1, 1, 1); MODULE_DEPEND(arcmsr, cam, 1, 1, 1); #ifndef BUS_DMA_COHERENT #define BUS_DMA_COHERENT 0x04 /* hint: map memory in a coherent way */ #endif #if __FreeBSD_version >= 501000 static struct cdevsw arcmsr_cdevsw={ #if __FreeBSD_version >= 503000 .d_version = D_VERSION, #endif #if (__FreeBSD_version>=503000 && __FreeBSD_version<600034) .d_flags = D_NEEDGIANT, #endif .d_open = arcmsr_open, /* open */ .d_close = arcmsr_close, /* close */ .d_ioctl = arcmsr_ioctl, /* ioctl */ .d_name = "arcmsr", /* name */ }; #else #define ARCMSR_CDEV_MAJOR 180 static struct cdevsw arcmsr_cdevsw = { arcmsr_open, /* open */ arcmsr_close, /* close */ noread, /* read */ nowrite, /* write */ arcmsr_ioctl, /* ioctl */ nopoll, /* poll */ nommap, /* mmap */ nostrategy, /* strategy */ "arcmsr", /* name */ ARCMSR_CDEV_MAJOR, /* major */ nodump, /* dump */ nopsize, /* psize */ 0 /* flags */ }; #endif /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version < 500005 static int arcmsr_open(dev_t dev, int flags, int fmt, struct proc *proc) #else #if __FreeBSD_version < 503000 static int arcmsr_open(dev_t dev, int flags, int 
fmt, struct thread *proc) #else static int arcmsr_open(struct cdev *dev, int flags, int fmt, struct thread *proc) #endif #endif { #if __FreeBSD_version < 503000 struct AdapterControlBlock *acb=dev->si_drv1; #else int unit = dev2unit(dev); struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); #endif if(acb==NULL) { return ENXIO; } return (0); } /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version < 500005 static int arcmsr_close(dev_t dev, int flags, int fmt, struct proc *proc) #else #if __FreeBSD_version < 503000 static int arcmsr_close(dev_t dev, int flags, int fmt, struct thread *proc) #else static int arcmsr_close(struct cdev *dev, int flags, int fmt, struct thread *proc) #endif #endif { #if __FreeBSD_version < 503000 struct AdapterControlBlock *acb=dev->si_drv1; #else int unit = dev2unit(dev); struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); #endif if(acb==NULL) { return ENXIO; } return 0; } /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version < 500005 static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct proc *proc) #else #if __FreeBSD_version < 503000 static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc) #else static int arcmsr_ioctl(struct cdev *dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc) #endif #endif { #if __FreeBSD_version < 503000 struct AdapterControlBlock *acb=dev->si_drv1; #else int unit = dev2unit(dev); struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); #endif if(acb==NULL) { return ENXIO; } return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg)); } /* ********************************************************************** ********************************************************************** */ static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb) { u_int32_t intmask_org=0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* disable all outbound interrupt */ intmask_org=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); } break; case ACB_ADAPTER_TYPE_B: { /* disable all outbound interrupt */ intmask_org=CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */ CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, 0); /* disable all interrupt */ } break; case ACB_ADAPTER_TYPE_C: { /* disable all outbound interrupt */ intmask_org=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask) ; /* disable outbound message0 int */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE); } break; } return (intmask_org); } /* ********************************************************************** ********************************************************************** */ static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org) { u_int32_t mask; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* enable outbound Post Queue, outbound doorbell Interrupt */ 
mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask); acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; } break; case ACB_ADAPTER_TYPE_B: { /* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */ mask=(ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/ acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; } break; case ACB_ADAPTER_TYPE_C: { /* enable outbound Post Queue, outbound doorbell Interrupt */ mask=~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK); CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask); acb->outbound_int_enable= ~(intmask_org & mask) & 0x0000000f; } break; } } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries=0x00; do { for(Index=0; Index < 100; Index++) { if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/ return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return (FALSE); } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries=0x00; do { for(Index=0; Index < 100; Index++) { if(CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/ CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return (FALSE); } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries=0x00; do { for(Index=0; Index < 100; Index++) { if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/ return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return (FALSE); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) { int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); do { if(arcmsr_hba_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count!=0); } /* 
************************************************************************ ************************************************************************ */ static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb) { int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */ CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE); do { if(arcmsr_hbb_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count!=0); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb) { int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); do { if(arcmsr_hbc_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count!=0); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_flush_hba_cache(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_flush_hbb_cache(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_flush_hbc_cache(acb); } break; } } /* ******************************************************************************* ******************************************************************************* */ static int arcmsr_suspend(device_t dev) { struct AdapterControlBlock *acb = device_get_softc(dev); /* flush controller */ arcmsr_iop_parking(acb); /* disable all outbound interrupt */ arcmsr_disable_allintr(acb); return(0); } /* ******************************************************************************* ******************************************************************************* */ static int arcmsr_resume(device_t dev) { struct AdapterControlBlock *acb = device_get_softc(dev); arcmsr_iop_init(acb); return(0); } /* ********************************************************************************* ********************************************************************************* */ static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg) { struct AdapterControlBlock *acb; u_int8_t target_id, target_lun; struct cam_sim * sim; sim=(struct cam_sim *) cb_arg; acb =(struct AdapterControlBlock *) cam_sim_softc(sim); switch (code) { case AC_LOST_DEVICE: target_id=xpt_path_target_id(path); target_lun=xpt_path_lun_id(path); if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) { break; } // printf("%s:scsi id=%d lun=%d device lost \n", device_get_name(acb->pci_dev), target_id, target_lun); break; default: break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_report_sense_info(struct CommandControlBlock *srb) { union ccb * pccb=srb->pccb; pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; if(pccb->csio.sense_len) { memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data)); memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData, get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data))); ((u_int8_t 
*)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ pccb->ccb_h.status |= CAM_AUTOSNS_VALID; } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) { CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_abort_hba_allcmd(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_abort_hbb_allcmd(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_abort_hbc_allcmd(acb); } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag) { struct AdapterControlBlock *acb=srb->acb; union ccb * pccb=srb->pccb; if(srb->srb_flags & SRB_FLAG_TIMER_START) callout_stop(&srb->ccb_callout); if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_POSTREAD; } else { op = BUS_DMASYNC_POSTWRITE; } bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); } if(stand_flag==1) { atomic_subtract_int(&acb->srboutstandingcount, 1); if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && ( acb->srboutstandingcount < ARCMSR_RELEASE_SIMQ_LEVEL)) { acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN; pccb->ccb_h.status |= CAM_RELEASE_SIMQ; } } if(srb->srb_state != ARCMSR_SRB_TIMEOUT) arcmsr_free_srb(srb); #ifdef ARCMSR_DEBUG1 acb->pktReturnCount++; #endif xpt_done(pccb); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error) { int target, lun; target=srb->pccb->ccb_h.target_id; lun=srb->pccb->ccb_h.target_lun; if(error == FALSE) { if(acb->devstate[target][lun]==ARECA_RAID_GONE) { acb->devstate[target][lun]=ARECA_RAID_GOOD; } srb->pccb->ccb_h.status |= 
CAM_REQ_CMP; arcmsr_srb_complete(srb, 1); } else { switch(srb->arcmsr_cdb.DeviceStatus) { case ARCMSR_DEV_SELECT_TIMEOUT: { if(acb->devstate[target][lun]==ARECA_RAID_GOOD) { printf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun); } acb->devstate[target][lun]=ARECA_RAID_GONE; srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; arcmsr_srb_complete(srb, 1); } break; case ARCMSR_DEV_ABORTED: case ARCMSR_DEV_INIT_FAIL: { acb->devstate[target][lun]=ARECA_RAID_GONE; srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; arcmsr_srb_complete(srb, 1); } break; case SCSISTAT_CHECK_CONDITION: { acb->devstate[target][lun]=ARECA_RAID_GOOD; arcmsr_report_sense_info(srb); arcmsr_srb_complete(srb, 1); } break; default: printf("arcmsr%d: scsi id=%d lun=%d isr got command error done,but got unknown DeviceStatus=0x%x \n" , acb->pci_unit, target, lun ,srb->arcmsr_cdb.DeviceStatus); acb->devstate[target][lun]=ARECA_RAID_GONE; srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY; /*unknown error or crc error just for retry*/ arcmsr_srb_complete(srb, 1); break; } } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error) { struct CommandControlBlock *srb; /* check if command done with no error*/ switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_C: srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/ break; case ACB_ADAPTER_TYPE_A: case ACB_ADAPTER_TYPE_B: default: srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ break; } if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_TIMEOUT) { arcmsr_free_srb(srb); printf("arcmsr%d: srb='%p' return srb has been timeouted\n", acb->pci_unit, srb); return; } printf("arcmsr%d: return srb has been completed\n" "srb='%p' srb_state=0x%x outstanding srb count=%d \n", acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount); return; } arcmsr_report_srb_state(acb, srb, error); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_srb_timeout(void* arg) { struct CommandControlBlock *srb = (struct CommandControlBlock *)arg; struct AdapterControlBlock *acb; int target, lun; u_int8_t cmd; target=srb->pccb->ccb_h.target_id; lun=srb->pccb->ccb_h.target_lun; acb = srb->acb; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); if(srb->srb_state == ARCMSR_SRB_START) { cmd = srb->pccb->csio.cdb_io.cdb_bytes[0]; srb->srb_state = ARCMSR_SRB_TIMEOUT; srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT; arcmsr_srb_complete(srb, 1); printf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command time out!\n", acb->pci_unit, target, lun, cmd, srb); } ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); #ifdef ARCMSR_DEBUG1 arcmsr_dump_data(acb); #endif } /* ********************************************************************** ********************************************************************** */ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) { int i=0; u_int32_t flag_srb; u_int16_t error; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { u_int32_t outbound_intstatus; /*clear and abort all outbound posted Q*/ outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, 
outbound_intstatus) & acb->outbound_int_enable; CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/ while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; /*clear all outbound posted Q*/ CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */ for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { if((flag_srb=phbbmu->done_qbuffer[i])!=0) { phbbmu->done_qbuffer[i]=0; error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } phbbmu->post_qbuffer[i]=0; }/*drain reply FIFO*/ phbbmu->doneq_index=0; phbbmu->postq_index=0; } break; case ACB_ADAPTER_TYPE_C: { while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } } break; } } /* **************************************************************************** **************************************************************************** */ static void arcmsr_iop_reset(struct AdapterControlBlock *acb) { struct CommandControlBlock *srb; u_int32_t intmask_org; u_int32_t i=0; if(acb->srboutstandingcount>0) { /* disable all outbound interrupt */ intmask_org=arcmsr_disable_allintr(acb); /*clear and abort all outbound posted Q*/ arcmsr_done4abort_postqueue(acb); /* talk to iop 331 outstanding command aborted*/ arcmsr_abort_allcmd(acb); for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { srb=acb->psrb_pool[i]; if(srb->srb_state==ARCMSR_SRB_START) { srb->srb_state=ARCMSR_SRB_ABORTED; srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); printf("arcmsr%d: scsi id=%d lun=%d srb='%p' aborted\n" , acb->pci_unit, srb->pccb->ccb_h.target_id , srb->pccb->ccb_h.target_lun, srb); } } /* enable all outbound interrupt */ arcmsr_enable_allintr(acb, intmask_org); } acb->srboutstandingcount=0; acb->workingsrb_doneindex=0; acb->workingsrb_startindex=0; #ifdef ARCMSR_DEBUG1 acb->pktRequestCount = 0; acb->pktReturnCount = 0; #endif } /* ********************************************************************** ********************************************************************** */ static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg) { struct ARCMSR_CDB * arcmsr_cdb= &srb->arcmsr_cdb; u_int8_t * psge=(u_int8_t *)&arcmsr_cdb->u; u_int32_t address_lo, address_hi; union ccb * pccb=srb->pccb; struct ccb_scsiio * pcsio= &pccb->csio; u_int32_t arccdbsize=0x30; memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB)); arcmsr_cdb->Bus=0; arcmsr_cdb->TargetID=pccb->ccb_h.target_id; arcmsr_cdb->LUN=pccb->ccb_h.target_lun; arcmsr_cdb->Function=1; arcmsr_cdb->CdbLength=(u_int8_t)pcsio->cdb_len; arcmsr_cdb->Context=0; bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len); if(nseg != 0) { struct AdapterControlBlock *acb=srb->acb; bus_dmasync_op_t op; u_int32_t length, i, cdb_sgcount=0; if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op=BUS_DMASYNC_PREREAD; } else { op=BUS_DMASYNC_PREWRITE; arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_WRITE; srb->srb_flags|=SRB_FLAG_WRITE; }
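		/*
		** Descriptive note on the loop below: each DMA segment becomes one SG entry
		** of the firmware CDB - a SG32ENTRY when the segment address fits in 32 bits
		** (address_hi == 0), otherwise a SG64ENTRY whose length is tagged with
		** IS_SG64_ADDR.  A 64-bit segment that would cross a 4GB boundary is split
		** into two entries, e.g. 12KB at 0x1_FFFF_F000 becomes a 4KB entry ending at
		** 0x2_0000_0000 plus an 8KB entry starting there, so no single entry spans
		** the boundary.
		*/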
bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); for(i=0;i<nseg;i++) { /* Get the physical address of the current data pointer */ length=arcmsr_htole32(dm_segs[i].ds_len); address_lo=arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr)); address_hi=arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr)); if(address_hi==0) { struct SG32ENTRY * pdma_sg=(struct SG32ENTRY *)psge; pdma_sg->address=address_lo; pdma_sg->length=length; psge += sizeof(struct SG32ENTRY); arccdbsize += sizeof(struct SG32ENTRY); } else { u_int32_t sg64s_size=0, tmplength=length; while(1) { u_int64_t span4G, length0; struct SG64ENTRY * pdma_sg=(struct SG64ENTRY *)psge; span4G=(u_int64_t)address_lo + tmplength; pdma_sg->addresshigh=address_hi; pdma_sg->address=address_lo; if(span4G > 0x100000000) { /*see if cross 4G boundary*/ length0=0x100000000-address_lo; pdma_sg->length=(u_int32_t)length0|IS_SG64_ADDR; address_hi=address_hi+1; address_lo=0; tmplength=tmplength-(u_int32_t)length0; sg64s_size += sizeof(struct SG64ENTRY); psge += sizeof(struct SG64ENTRY); cdb_sgcount++; } else { pdma_sg->length=tmplength|IS_SG64_ADDR; sg64s_size += sizeof(struct SG64ENTRY); psge += sizeof(struct SG64ENTRY); break; } } arccdbsize += sg64s_size; } cdb_sgcount++; } arcmsr_cdb->sgcount=(u_int8_t)cdb_sgcount; arcmsr_cdb->DataLength=pcsio->dxfer_len; if( arccdbsize > 256) { arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_SGL_BSIZE; } } else { arcmsr_cdb->DataLength = 0; } srb->arc_cdb_size=arccdbsize; } /* ************************************************************************** ************************************************************************** */ static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb) { u_int32_t cdb_shifted_phyaddr=(u_int32_t) srb->cdb_shifted_phyaddr; struct ARCMSR_CDB * arcmsr_cdb=(struct ARCMSR_CDB *)&srb->arcmsr_cdb; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD); atomic_add_int(&acb->srboutstandingcount, 1); srb->srb_state=ARCMSR_SRB_START; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE); } else { CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr); } } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; int ending_index, index; index=phbbmu->postq_index; ending_index=((index+1)%ARCMSR_MAX_HBB_POSTQUEUE); phbbmu->post_qbuffer[ending_index]=0; if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE; } else { phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr; } index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ phbbmu->postq_index=index; CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED); } break; case ACB_ADAPTER_TYPE_C: { u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32; arc_cdb_size=(srb->arc_cdb_size>0x300)?0x300:srb->arc_cdb_size; ccb_post_stamp=(cdb_shifted_phyaddr | ((arc_cdb_size-1) >> 6) | 1); cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high; if(cdb_phyaddr_hi32) { CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32); CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp); } else { CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp); } } break; } } /* ************************************************************************ ************************************************************************ */ static struct QBUFFER * arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb) { struct QBUFFER *qbuffer=NULL; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct
HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu; qbuffer=(struct QBUFFER *)&phbamu->message_rbuffer; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer; } break; case ACB_ADAPTER_TYPE_C: { struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu; qbuffer=(struct QBUFFER *)&phbcmu->message_rbuffer; } break; } return(qbuffer); } /* ************************************************************************ ************************************************************************ */ static struct QBUFFER * arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb) { struct QBUFFER *qbuffer=NULL; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu; qbuffer=(struct QBUFFER *)&phbamu->message_wbuffer; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer; } break; case ACB_ADAPTER_TYPE_C: { struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu; qbuffer=(struct QBUFFER *)&phbcmu->message_wbuffer; } break; } return(qbuffer); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* let IOP know data has been read */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_B: { /* let IOP know data has been read */ CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_C: { /* let IOP know data has been read */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK); } } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK); } break; case ACB_ADAPTER_TYPE_B: { /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK); } break; case ACB_ADAPTER_TYPE_C: { /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK); } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb) { u_int8_t *pQbuffer; struct QBUFFER *pwbuffer; u_int8_t * iop_data; int32_t allxfer_len=0; pwbuffer=arcmsr_get_iop_wqbuffer(acb); iop_data=(u_int8_t *)pwbuffer->data; if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) { acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ); while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex) && 
(allxfer_len<124)) { pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex]; memcpy(iop_data, pQbuffer, 1); acb->wqbuf_firstindex++; acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ iop_data++; allxfer_len++; } pwbuffer->data_len=allxfer_len; /* ** push inbound doorbell and wait reply at hwinterrupt routine for next Qbuffer post */ arcmsr_iop_message_wrote(acb); } } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags &=~ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n" , acb->pci_unit); } } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags &= ~ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: wait 'stop adapter background rebulid' timeout \n" , acb->pci_unit); } } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags &=~ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n", acb->pci_unit); } } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_stop_hba_bgrb(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_stop_hbb_bgrb(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_stop_hbc_bgrb(acb); } break; } } /* ************************************************************************ ************************************************************************ */ static void arcmsr_poll(struct cam_sim * psim) { struct AdapterControlBlock *acb; int mutex; acb = (struct AdapterControlBlock *)cam_sim_softc(psim); mutex = mtx_owned(&acb->qbuffer_lock); if( mutex == 0 ) ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); arcmsr_interrupt(acb); if( mutex == 0 ) ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) { struct QBUFFER *prbuffer; u_int8_t *pQbuffer; u_int8_t *iop_data; int my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex; /*check this iop data if overflow my rqbuffer*/ rqbuf_lastindex=acb->rqbuf_lastindex; rqbuf_firstindex=acb->rqbuf_firstindex; prbuffer=arcmsr_get_iop_rqbuffer(acb); iop_data=(u_int8_t *)prbuffer->data; iop_len=prbuffer->data_len; my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1); if(my_empty_len>=iop_len) { 
while(iop_len > 0) { pQbuffer=&acb->rqbuffer[rqbuf_lastindex]; memcpy(pQbuffer, iop_data, 1); rqbuf_lastindex++; rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;/*if last index number set it to 0 */ iop_data++; iop_len--; } acb->rqbuf_lastindex=rqbuf_lastindex; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been read */ } else { acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW; } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ; /* ***************************************************************** ** check if there are any mail packages from user space program ** in my post bag, now is the time to send them into Areca's firmware ***************************************************************** */ if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) { u_int8_t *pQbuffer; struct QBUFFER *pwbuffer; u_int8_t *iop_data; int allxfer_len=0; acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ); pwbuffer=arcmsr_get_iop_wqbuffer(acb); iop_data=(u_int8_t *)pwbuffer->data; while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex) && (allxfer_len<124)) { pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex]; memcpy(iop_data, pQbuffer, 1); acb->wqbuf_firstindex++; acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ iop_data++; allxfer_len++; } pwbuffer->data_len=allxfer_len; /* ** push inbound doorbell tell iop driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ arcmsr_iop_message_wrote(acb); } if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) { acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; } } static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb) { /* if (ccb->ccb_h.status != CAM_REQ_CMP) printf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x, failure status=%x\n",ccb->ccb_h.target_id,ccb->ccb_h.target_lun,ccb->ccb_h.status); else printf("arcmsr_rescanLun_cb: Rescan lun successfully!\n"); */ xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); } static void arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun) { struct cam_path *path; union ccb *ccb; if ((ccb = (union ccb *)xpt_alloc_ccb_nowait()) == NULL) return; if (xpt_create_path(&path, xpt_periph, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } /* printf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */ bzero(ccb, sizeof(union ccb)); xpt_setup_ccb(&ccb->ccb_h, path, 5); ccb->ccb_h.func_code = XPT_SCAN_LUN; ccb->ccb_h.cbfcnp = arcmsr_rescanLun_cb; ccb->crcn.flags = CAM_FLAG_NONE; xpt_action(ccb); } static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun) { struct CommandControlBlock *srb; u_int32_t intmask_org; int i; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); /* disable all outbound interrupts */ intmask_org = arcmsr_disable_allintr(acb); for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++) { srb = acb->psrb_pool[i]; if (srb->srb_state == ARCMSR_SRB_START) { if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun)) { srb->srb_state = ARCMSR_SRB_ABORTED; srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); printf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb); } } } /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); 
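	/*
	** Descriptive note: all outbound interrupts stay masked while the SRB pool is
	** walked above so the ISR cannot complete these commands underneath us;
	** arcmsr_dr_handle() calls this routine for a unit the firmware reports as
	** departed before asking CAM to rescan that LUN.
	*/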
ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_dr_handle(struct AdapterControlBlock *acb) { u_int32_t devicemap; u_int32_t target, lun; u_int32_t deviceMapCurrent[4]={0}; u_int8_t *pDevMap; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target= 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap); devicemap += 4; } break; case ACB_ADAPTER_TYPE_B: devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target= 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap); devicemap += 4; } break; case ACB_ADAPTER_TYPE_C: devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target= 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap); devicemap += 4; } break; } if(acb->acb_flags & ACB_F_BUS_HANG_ON) { acb->acb_flags &= ~ACB_F_BUS_HANG_ON; } /* ** adapter posted CONFIG message ** copy the new map, note if there are differences with the current map */ pDevMap = (u_int8_t *)&deviceMapCurrent[0]; for (target= 0; target < ARCMSR_MAX_TARGETID - 1; target++) { if (*pDevMap != acb->device_map[target]) { u_int8_t difference, bit_check; difference= *pDevMap ^ acb->device_map[target]; for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++) { bit_check=(1 << lun); /*check bit from 0....31*/ if(difference & bit_check) { if(acb->device_map[target] & bit_check) {/* unit departed */ printf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun); arcmsr_abort_dr_ccbs(acb, target, lun); arcmsr_rescan_lun(acb, target, lun); acb->devstate[target][lun] = ARECA_RAID_GONE; } else {/* unit arrived */ printf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun); arcmsr_rescan_lun(acb, target, lun); acb->devstate[target][lun] = ARECA_RAID_GOOD; } } } /* printf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */ acb->device_map[target]= *pDevMap; } pDevMap++; } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT); outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; /* clear interrupts */ CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN); outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void 
arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR); outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_doorbell; /* ******************************************************************* ** Maybe here we need to check wrqbuffer_lock is lock or not ** DOORBELL: din! don! ** check if there are any mail need to pack from firmware ******************************************************************* */ outbound_doorbell=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /* clear doorbell interrupt */ if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_doorbell; /* ******************************************************************* ** Maybe here we need to check wrqbuffer_lock is lock or not ** DOORBELL: din! don! ** check if there are any mail need to pack from firmware ******************************************************************* */ outbound_doorbell=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /* clear doorbell interrupt */ if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbc_message_isr(acb); /* messenger of "driver to iop commands" */ } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) { u_int32_t flag_srb; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) { /* check if command done with no error*/ error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } /*drain reply FIFO*/ } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; u_int32_t flag_srb; int index; u_int16_t error; /* 
***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); index=phbbmu->doneq_index; while((flag_srb=phbbmu->done_qbuffer[index]) != 0) { phbbmu->done_qbuffer[index]=0; index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ phbbmu->doneq_index=index; /* check if command done with no error*/ error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } /*drain reply FIFO*/ } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb) { u_int32_t flag_srb,throttling=0; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) { flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); /* check if command done with no error*/ error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); if(throttling==ARCMSR_HBC_ISR_THROTTLING_LEVEL) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING); break; } throttling++; } /*drain reply FIFO*/ } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb) { u_int32_t outbound_intStatus; /* ********************************************* ** check outbound intstatus ********************************************* */ outbound_intStatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; if(!outbound_intStatus) { /*it must be share irq*/ return; } CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intStatus);/*clear interrupt*/ /* MU doorbell interrupts*/ if(outbound_intStatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) { arcmsr_hba_doorbell_isr(acb); } /* MU post queue interrupts*/ if(outbound_intStatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { arcmsr_hba_postqueue_isr(acb); } if(outbound_intStatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { arcmsr_hba_message_isr(acb); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb) { u_int32_t outbound_doorbell; /* ********************************************* ** check outbound intstatus ********************************************* */ outbound_doorbell=CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & acb->outbound_int_enable; if(!outbound_doorbell) { /*it must be share irq*/ return; } CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */ CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell); CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); /* MU ioctl transfer doorbell interrupts*/ if(outbound_doorbell 
& ARCMSR_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } /* MU post queue interrupts*/ if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { arcmsr_hbb_postqueue_isr(acb); } if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbb_message_isr(acb); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb) { u_int32_t host_interrupt_status; /* ********************************************* ** check outbound intstatus ********************************************* */ host_interrupt_status=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status); if(!host_interrupt_status) { /*it must be share irq*/ return; } /* MU doorbell interrupts*/ if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) { arcmsr_hbc_doorbell_isr(acb); } /* MU post queue interrupts*/ if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) { arcmsr_hbc_postqueue_isr(acb); } } /* ****************************************************************************** ****************************************************************************** */ static void arcmsr_interrupt(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: arcmsr_handle_hba_isr(acb); break; case ACB_ADAPTER_TYPE_B: arcmsr_handle_hbb_isr(acb); break; case ACB_ADAPTER_TYPE_C: arcmsr_handle_hbc_isr(acb); break; default: printf("arcmsr%d: interrupt service," " unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type); break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_intr_handler(void *arg) { struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); arcmsr_interrupt(acb); ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } /* ****************************************************************************** ****************************************************************************** */ static void arcmsr_polling_devmap(void* arg) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); break; case ACB_ADAPTER_TYPE_B: CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG); break; case ACB_ADAPTER_TYPE_C: CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); break; } if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0) { callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb); /* polling per 5 seconds */ } } /* ******************************************************************************* ** ******************************************************************************* */ static void arcmsr_iop_parking(struct AdapterControlBlock *acb) { u_int32_t intmask_org; if(acb!=NULL) { /* stop adapter background rebuild */ if(acb->acb_flags & ACB_F_MSG_START_BGRB) { intmask_org = arcmsr_disable_allintr(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); arcmsr_enable_allintr(acb, intmask_org); } } } /* 
*********************************************************************** ** ************************************************************************ */ u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg) { struct CMD_MESSAGE_FIELD * pcmdmessagefld; u_int32_t retvalue=EINVAL; pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg; if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) { return retvalue; } ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); switch(ioctl_cmd) { case ARCMSR_MESSAGE_READ_RQBUFFER: { u_int8_t * pQbuffer; u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer; u_int32_t allxfer_len=0; while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex) && (allxfer_len<1031)) { /*copy READ QBUFFER to srb*/ pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex]; memcpy(ptmpQbuffer, pQbuffer, 1); acb->rqbuf_firstindex++; acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ ptmpQbuffer++; allxfer_len++; } if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { struct QBUFFER * prbuffer; u_int8_t * iop_data; u_int32_t iop_len; acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; prbuffer=arcmsr_get_iop_rqbuffer(acb); iop_data=(u_int8_t *)prbuffer->data; iop_len=(u_int32_t)prbuffer->data_len; /*this iop data does no chance to make me overflow again here, so just do it*/ while(iop_len>0) { pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; memcpy(pQbuffer, iop_data, 1); acb->rqbuf_lastindex++; acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ iop_data++; iop_len--; } arcmsr_iop_message_read(acb); /*signature, let IOP know data has been readed */ } pcmdmessagefld->cmdmessage.Length=allxfer_len; pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; retvalue=ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_WRITE_WQBUFFER: { u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; u_int8_t * pQbuffer; u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer; user_len=pcmdmessagefld->cmdmessage.Length; /*check if data xfer length of this request will overflow my array qbuffer */ wqbuf_lastindex=acb->wqbuf_lastindex; wqbuf_firstindex=acb->wqbuf_firstindex; if(wqbuf_lastindex!=wqbuf_firstindex) { arcmsr_post_ioctldata2iop(acb); pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; } else { my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1); if(my_empty_len>=user_len) { while(user_len>0) { /*copy srb data to wqbuffer*/ pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex]; memcpy(pQbuffer, ptmpuserbuffer, 1); acb->wqbuf_lastindex++; acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ ptmpuserbuffer++; user_len--; } /*post fist Qbuffer*/ if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED; arcmsr_post_ioctldata2iop(acb); } pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; } else { pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; } } retvalue=ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { u_int8_t * pQbuffer=acb->rqbuffer; if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been readed */ } acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; acb->rqbuf_firstindex=0; acb->rqbuf_lastindex=0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 
retvalue=ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { u_int8_t * pQbuffer=acb->wqbuffer; if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been readed */ } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ); acb->wqbuf_firstindex=0; acb->wqbuf_lastindex=0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; retvalue=ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { u_int8_t * pQbuffer; if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been readed */ } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |ACB_F_MESSAGE_RQBUFFER_CLEARED |ACB_F_MESSAGE_WQBUFFER_READ); acb->rqbuf_firstindex=0; acb->rqbuf_lastindex=0; acb->wqbuf_firstindex=0; acb->wqbuf_lastindex=0; pQbuffer=acb->rqbuffer; memset(pQbuffer, 0, sizeof(struct QBUFFER)); pQbuffer=acb->wqbuffer; memset(pQbuffer, 0, sizeof(struct QBUFFER)); pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; retvalue=ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F; retvalue=ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_SAY_HELLO: { u_int8_t * hello_string="Hello! I am ARCMSR"; u_int8_t * puserbuffer=(u_int8_t *)pcmdmessagefld->messagedatabuffer; if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) { pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); return ENOIOCTL; } pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; retvalue=ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_SAY_GOODBYE: { arcmsr_iop_parking(acb); retvalue=ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: { arcmsr_flush_adapter_cache(acb); retvalue=ARCMSR_MESSAGE_SUCCESS; } break; } ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); return (retvalue); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_free_srb(struct CommandControlBlock *srb) { struct AdapterControlBlock *acb; int mutex; acb = srb->acb; mutex = mtx_owned(&acb->qbuffer_lock); if( mutex == 0 ) ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); srb->srb_state=ARCMSR_SRB_DONE; srb->srb_flags=0; acb->srbworkingQ[acb->workingsrb_doneindex]=srb; acb->workingsrb_doneindex++; acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM; if( mutex == 0 ) ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } /* ************************************************************************** ************************************************************************** */ struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb) { struct CommandControlBlock *srb=NULL; u_int32_t workingsrb_startindex, workingsrb_doneindex; int mutex; mutex = mtx_owned(&acb->qbuffer_lock); if( mutex == 0 ) ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); workingsrb_doneindex=acb->workingsrb_doneindex; workingsrb_startindex=acb->workingsrb_startindex; srb=acb->srbworkingQ[workingsrb_startindex]; workingsrb_startindex++; workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM; if(workingsrb_doneindex!=workingsrb_startindex) { acb->workingsrb_startindex=workingsrb_startindex; } else { srb=NULL; } 
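	/*
	** Descriptive note: srbworkingQ is a ring of free SRBs.  arcmsr_free_srb()
	** returns completed SRBs at workingsrb_doneindex and this routine hands them
	** out from workingsrb_startindex; when the advanced start index catches up
	** with the done index the pool is exhausted and NULL is returned.
	*/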
if( mutex == 0 ) ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); return(srb); } /* ************************************************************************** ************************************************************************** */ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb) { struct CMD_MESSAGE_FIELD * pcmdmessagefld; int retvalue = 0, transfer_len = 0; char *buffer; u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 | (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 | (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 | (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8]; /* 4 bytes: Areca io control code */ /* XXX Does not handle alternate data formats. */ - if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { + if ((pccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) { buffer = pccb->csio.data_ptr; transfer_len = pccb->csio.dxfer_len; } else { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer; switch(controlcode) { case ARCMSR_MESSAGE_READ_RQBUFFER: { u_int8_t *pQbuffer; u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer; int32_t allxfer_len = 0; while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) && (allxfer_len < 1031)) { pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; memcpy(ptmpQbuffer, pQbuffer, 1); acb->rqbuf_firstindex++; acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; ptmpQbuffer++; allxfer_len++; } if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { struct QBUFFER *prbuffer; u_int8_t *iop_data; int32_t iop_len; acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; prbuffer=arcmsr_get_iop_rqbuffer(acb); iop_data = (u_int8_t *)prbuffer->data; iop_len =(u_int32_t)prbuffer->data_len; while (iop_len > 0) { pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; memcpy(pQbuffer, iop_data, 1); acb->rqbuf_lastindex++; acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; iop_data++; iop_len--; } arcmsr_iop_message_read(acb); } pcmdmessagefld->cmdmessage.Length = allxfer_len; pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue=ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_WRITE_WQBUFFER: { int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; u_int8_t *pQbuffer; u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer; user_len = pcmdmessagefld->cmdmessage.Length; wqbuf_lastindex = acb->wqbuf_lastindex; wqbuf_firstindex = acb->wqbuf_firstindex; if (wqbuf_lastindex != wqbuf_firstindex) { arcmsr_post_ioctldata2iop(acb); /* has error report sensedata */ if(pccb->csio.sense_len) { ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; /* AdditionalSenseLength */ ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; /* AdditionalSenseCode */ } retvalue = ARCMSR_MESSAGE_FAIL; } else { my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) &(ARCMSR_MAX_QBUFFER - 1); if (my_empty_len >= user_len) { while (user_len > 0) { pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; memcpy(pQbuffer, ptmpuserbuffer, 1); acb->wqbuf_lastindex++; acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; ptmpuserbuffer++; user_len--; } if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED; arcmsr_post_ioctldata2iop(acb); } } else { /* has error report sensedata */
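				/*
				** Descriptive note on the sense bytes built below: this is
				** fixed-format sense data reporting ILLEGAL REQUEST - byte 0
				** is 0xF0 (VALID bit plus response code 0x70), byte 2 carries
				** sense key 0x05, byte 7 the additional sense length (0x0A),
				** and byte 12 the additional sense code 0x20 (invalid command
				** operation code).
				*/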
if(pccb->csio.sense_len) { ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; /* AdditionalSenseLength */ ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; /* AdditionalSenseCode */ } retvalue = ARCMSR_MESSAGE_FAIL; } } } break; case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { u_int8_t *pQbuffer = acb->rqbuffer; if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; } break; case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { u_int8_t *pQbuffer = acb->wqbuffer; if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ); acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; } break; case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { u_int8_t *pQbuffer; if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ); acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; pQbuffer = acb->rqbuffer; memset(pQbuffer, 0, sizeof (struct QBUFFER)); pQbuffer = acb->wqbuffer; memset(pQbuffer, 0, sizeof (struct QBUFFER)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; } break; case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; } break; case ARCMSR_MESSAGE_SAY_HELLO: { int8_t * hello_string = "Hello! 
I am ARCMSR"; memcpy(pcmdmessagefld->messagedatabuffer, hello_string , (int16_t)strlen(hello_string)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; } break; case ARCMSR_MESSAGE_SAY_GOODBYE: arcmsr_iop_parking(acb); break; case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: arcmsr_flush_adapter_cache(acb); break; default: retvalue = ARCMSR_MESSAGE_FAIL; } message_out: return (retvalue); } /* ********************************************************************* ********************************************************************* */ static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct CommandControlBlock *srb=(struct CommandControlBlock *)arg; struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb; union ccb * pccb; int target, lun; pccb=srb->pccb; target=pccb->ccb_h.target_id; lun=pccb->ccb_h.target_lun; #ifdef ARCMSR_DEBUG1 acb->pktRequestCount++; #endif if(error != 0) { if(error != EFBIG) { printf("arcmsr%d: unexpected error %x" " returned from 'bus_dmamap_load' \n" , acb->pci_unit, error); } if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { pccb->ccb_h.status |= CAM_REQ_TOO_BIG; } arcmsr_srb_complete(srb, 0); return; } if(nseg > ARCMSR_MAX_SG_ENTRIES) { pccb->ccb_h.status |= CAM_REQ_TOO_BIG; arcmsr_srb_complete(srb, 0); return; } if(acb->acb_flags & ACB_F_BUS_RESET) { printf("arcmsr%d: bus reset and return busy \n", acb->pci_unit); pccb->ccb_h.status |= CAM_SCSI_BUS_RESET; arcmsr_srb_complete(srb, 0); return; } if(acb->devstate[target][lun]==ARECA_RAID_GONE) { u_int8_t block_cmd, cmd; cmd = pccb->csio.cdb_io.cdb_bytes[0]; block_cmd= cmd & 0x0f; if(block_cmd==0x08 || block_cmd==0x0a) { printf("arcmsr%d:block 'read/write' command " "with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n" , acb->pci_unit, cmd, target, lun); pccb->ccb_h.status |= CAM_DEV_NOT_THERE; arcmsr_srb_complete(srb, 0); return; } } if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if(nseg != 0) { bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); } arcmsr_srb_complete(srb, 0); return; } if(acb->srboutstandingcount > ARCMSR_MAX_OUTSTANDING_CMD) { xpt_freeze_simq(acb->psim, 1); pccb->ccb_h.status = CAM_REQUEUE_REQ; acb->acb_flags |= ACB_F_CAM_DEV_QFRZN; arcmsr_srb_complete(srb, 0); return; } pccb->ccb_h.status |= CAM_SIM_QUEUED; arcmsr_build_srb(srb, dm_segs, nseg); arcmsr_post_srb(acb, srb); if (pccb->ccb_h.timeout != CAM_TIME_INFINITY) { arcmsr_callout_init(&srb->ccb_callout); callout_reset(&srb->ccb_callout, ((pccb->ccb_h.timeout + (ARCMSR_TIMEOUT_DELAY * 1000)) * hz) / 1000, arcmsr_srb_timeout, srb); srb->srb_flags |= SRB_FLAG_TIMER_START; } } /* ***************************************************************************************** ***************************************************************************************** */ static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb) { struct CommandControlBlock *srb; struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr; u_int32_t intmask_org; int i=0; acb->num_aborts++; /* *************************************************************************** ** It is the upper layer do abort command this lock just prior to calling us. ** First determine if we currently own this command. ** Start by searching the device queue. If not found ** at all, and the system wanted us to just abort the ** command return success. 
*************************************************************************** */ if(acb->srboutstandingcount!=0) { /* disable all outbound interrupt */ intmask_org=arcmsr_disable_allintr(acb); for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { srb=acb->psrb_pool[i]; if(srb->srb_state==ARCMSR_SRB_START) { if(srb->pccb==abortccb) { srb->srb_state=ARCMSR_SRB_ABORTED; printf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'" "outstanding command \n" , acb->pci_unit, abortccb->ccb_h.target_id , abortccb->ccb_h.target_lun, srb); arcmsr_polling_srbdone(acb, srb); /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); return (TRUE); } } } /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); } return(FALSE); }
/* **************************************************************************** **************************************************************************** */ static void arcmsr_bus_reset(struct AdapterControlBlock *acb) { int retry=0; acb->num_resets++; acb->acb_flags |=ACB_F_BUS_RESET; while(acb->srboutstandingcount!=0 && retry < 400) { arcmsr_interrupt(acb); UDELAY(25000); retry++; } arcmsr_iop_reset(acb); acb->acb_flags &= ~ACB_F_BUS_RESET; }
/* ************************************************************************** ************************************************************************** */ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, union ccb * pccb) { pccb->ccb_h.status |= CAM_REQ_CMP; switch (pccb->csio.cdb_io.cdb_bytes[0]) { case INQUIRY: { unsigned char inqdata[36]; char *buffer=pccb->csio.data_ptr; if (pccb->ccb_h.target_lun) { pccb->ccb_h.status |= CAM_SEL_TIMEOUT; xpt_done(pccb); return; } inqdata[0] = T_PROCESSOR; /* Periph Qualifier & Periph Dev Type */ inqdata[1] = 0; /* rem media bit & Dev Type Modifier */ inqdata[2] = 0; /* ISO, ECMA, & ANSI versions */ inqdata[3] = 0; inqdata[4] = 31; /* length of additional data */ inqdata[5] = 0; inqdata[6] = 0; inqdata[7] = 0; strncpy(&inqdata[8], "Areca ", 8); /* Vendor Identification */ strncpy(&inqdata[16], "RAID controller ", 16); /* Product Identification */ strncpy(&inqdata[32], "R001", 4); /* Product Revision */ memcpy(buffer, inqdata, sizeof(inqdata)); xpt_done(pccb); } break; case WRITE_BUFFER: case READ_BUFFER: { if (arcmsr_iop_message_xfer(acb, pccb)) { pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; } xpt_done(pccb); } break; default: xpt_done(pccb); } }
/* ********************************************************************* ********************************************************************* */ static void arcmsr_action(struct cam_sim * psim, union ccb * pccb) { struct AdapterControlBlock * acb; acb=(struct AdapterControlBlock *) cam_sim_softc(psim); if(acb==NULL) { pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); return; } switch (pccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct CommandControlBlock *srb; int target=pccb->ccb_h.target_id; int error; if(target == 16) { /* virtual device for iop message transfer */ arcmsr_handle_virtual_command(acb, pccb); return; } if((srb=arcmsr_get_freesrb(acb)) == NULL) { pccb->ccb_h.status |= CAM_RESRC_UNAVAIL; xpt_done(pccb); return; } pccb->ccb_h.arcmsr_ccbsrb_ptr=srb; pccb->ccb_h.arcmsr_ccbacb_ptr=acb; srb->pccb=pccb; error = bus_dmamap_load_ccb(acb->dm_segs_dmat , srb->dm_segs_dmamap , pccb , arcmsr_execute_srb, srb, /*flags*/0); if(error == EINPROGRESS) { xpt_freeze_simq(acb->psim, 1); pccb->ccb_h.status |= CAM_RELEASE_SIMQ; } break; } case XPT_TARGET_IO:
{ /* target mode not yet support vendor specific commands. */ pccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_PATH_INQ: { struct ccb_pathinq *cpi= &pccb->cpi; cpi->version_num=1; cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE; cpi->target_sprt=0; cpi->hba_misc=0; cpi->hba_eng_cnt=0; cpi->max_target=ARCMSR_MAX_TARGETID; /* 0-16 */ cpi->max_lun=ARCMSR_MAX_TARGETLUN; /* 0-7 */ cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */ cpi->bus_id=cam_sim_bus(psim); strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN); cpi->unit_number=cam_sim_unit(psim); #ifdef CAM_NEW_TRAN_CODE if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G) cpi->base_transfer_speed = 600000; else cpi->base_transfer_speed = 300000; if((acb->vendor_device_id == PCIDevVenIDARC1880) || (acb->vendor_device_id == PCIDevVenIDARC1680)) { cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol_version = SCSI_REV_SPC2; } else { cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol_version = SCSI_REV_2; } cpi->protocol = PROTO_SCSI; #endif cpi->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_ABORT: { union ccb *pabort_ccb; pabort_ccb=pccb->cab.abort_ccb; switch (pabort_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMED_NOTIFY: case XPT_CONT_TARGET_IO: if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) { pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED; xpt_done(pabort_ccb); pccb->ccb_h.status |= CAM_REQ_CMP; } else { xpt_print_path(pabort_ccb->ccb_h.path); printf("Not found\n"); pccb->ccb_h.status |= CAM_PATH_INVALID; } break; case XPT_SCSI_IO: pccb->ccb_h.status |= CAM_UA_ABORT; break; default: pccb->ccb_h.status |= CAM_REQ_INVALID; break; } xpt_done(pccb); break; } case XPT_RESET_BUS: case XPT_RESET_DEV: { u_int32_t i; arcmsr_bus_reset(acb); for (i=0; i < 500; i++) { DELAY(1000); } pccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_TERM_IO: { pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; if(pccb->ccb_h.target_id == 16) { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } cts= &pccb->cts; #ifdef CAM_NEW_TRAN_CODE { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings_sas *sas; scsi = &cts->proto_specific.scsi; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; scsi->valid = CTS_SCSI_VALID_TQ; cts->protocol = PROTO_SCSI; if((acb->vendor_device_id == PCIDevVenIDARC1880) || (acb->vendor_device_id == PCIDevVenIDARC1680)) { cts->protocol_version = SCSI_REV_SPC2; cts->transport_version = 0; cts->transport = XPORT_SAS; sas = &cts->xport_specific.sas; sas->valid = CTS_SAS_VALID_SPEED; if(acb->vendor_device_id == PCIDevVenIDARC1880) sas->bitrate = 600000; else if(acb->vendor_device_id == PCIDevVenIDARC1680) sas->bitrate = 300000; } else { cts->protocol_version = SCSI_REV_2; cts->transport_version = 2; cts->transport = XPORT_SPI; spi = &cts->xport_specific.spi; spi->flags = CTS_SPI_FLAGS_DISC_ENB; spi->sync_period=2; spi->sync_offset=32; spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT; spi->valid = CTS_SPI_VALID_DISC | CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH; } } #else { cts->flags=(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB); cts->sync_period=2; cts->sync_offset=32; cts->bus_width=MSG_EXT_WDTR_BUS_16_BIT; cts->valid=CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID | CCB_TRANS_DISC_VALID | 
CCB_TRANS_TQ_VALID; } #endif pccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_SET_TRAN_SETTINGS: { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } case XPT_CALC_GEOMETRY: if(pccb->ccb_h.target_id == 16) { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } #if __FreeBSD_version >= 500000 cam_calc_geometry(&pccb->ccg, 1); #else { struct ccb_calc_geometry *ccg; u_int32_t size_mb; u_int32_t secs_per_cylinder; ccg= &pccb->ccg; if (ccg->block_size == 0) { pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; } if(((1024L * 1024L)/ccg->block_size) < 0) { pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; } size_mb=ccg->volume_size/((1024L * 1024L)/ccg->block_size); if(size_mb > 1024 ) { ccg->heads=255; ccg->secs_per_track=63; } else { ccg->heads=64; ccg->secs_per_track=32; } secs_per_cylinder=ccg->heads * ccg->secs_per_track; ccg->cylinders=ccg->volume_size / secs_per_cylinder; pccb->ccb_h.status |= CAM_REQ_CMP; } #endif xpt_done(pccb); break; default: pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_BGRB); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: arcmsr_start_hba_bgrb(acb); break; case ACB_ADAPTER_TYPE_B: arcmsr_start_hbb_bgrb(acb); break; case ACB_ADAPTER_TYPE_C: arcmsr_start_hbc_bgrb(acb); break; } } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct CommandControlBlock *srb; u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0; u_int16_t error; polling_ccb_retry: poll_count++; outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & 
acb->outbound_int_enable; CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus); /*clear interrupt*/ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { if((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport))==0xFFFFFFFF) { if(poll_srb_done) { break;/*chip FIFO no ccb for completion already*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } goto polling_ccb_retry; } } /* check if command done with no error*/ srb=(struct CommandControlBlock *) (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; poll_srb_done = (srb==poll_srb) ? 1:0; if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) { if(srb->srb_state==ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%d srb='%p'" "poll command abort successfully \n" , acb->pci_unit , srb->pccb->ccb_h.target_id , srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling get an illegal srb command done srb='%p'" "srboutstandingcount=%d \n" , acb->pci_unit , srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; struct CommandControlBlock *srb; u_int32_t flag_srb, poll_srb_done=0, poll_count=0; int index; u_int16_t error; polling_ccb_retry: poll_count++; CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { index=phbbmu->doneq_index; if((flag_srb=phbbmu->done_qbuffer[index]) == 0) { if(poll_srb_done) { break;/*chip FIFO no ccb for completion already*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } goto polling_ccb_retry; } } phbbmu->done_qbuffer[index]=0; index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ phbbmu->doneq_index=index; /* check if command done with no error*/ srb=(struct CommandControlBlock *) (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; poll_srb_done = (srb==poll_srb) ? 
1:0; if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) { if(srb->srb_state==ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%d srb='%p'" "poll command abort successfully \n" , acb->pci_unit , srb->pccb->ccb_h.target_id , srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling get an illegal srb command done srb='%p'" "srboutstandingcount=%d \n" , acb->pci_unit , srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct CommandControlBlock *srb; u_int32_t flag_srb, poll_srb_done=0, poll_count=0; u_int16_t error; polling_ccb_retry: poll_count++; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) { if(poll_srb_done) { break;/*chip FIFO no ccb for completion already*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } if (acb->srboutstandingcount == 0) { break; } goto polling_ccb_retry; } } flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); /* check if command done with no error*/ srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/ error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; if (poll_srb != NULL) poll_srb_done = (srb==poll_srb) ? 1:0; if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) { if(srb->srb_state==ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%d srb='%p'poll command abort successfully \n" , acb->pci_unit, srb->pccb->ccb_h.target_id, srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n" , acb->pci_unit, srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ********************************************************************** */ static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_polling_hba_srbdone(acb, poll_srb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_polling_hbb_srbdone(acb, poll_srb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_polling_hbc_srbdone(acb, poll_srb); } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) { char *acb_firm_model=acb->firm_model; char *acb_firm_version=acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; 
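/* Post GET_CONFIG to the type A message unit, wait for the doorbell handshake to complete, then copy the firmware model/version strings and the device map out of the message RW buffer. */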
CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i=0; while(i<8) { *acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i=0; while(i<16) { *acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i=0; while(i<16) { *acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); acb_device_map++; i++; } printf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION); printf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version); acb->firm_request_len=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ } /* ********************************************************************** ********************************************************************** */ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) { char *acb_firm_model=acb->firm_model; char *acb_firm_version=acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: wait" "'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i=0; while(i<8) { *acb_firm_model=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i=0; while(i<16) { *acb_firm_version=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i=0; while(i<16) { *acb_device_map=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i); acb_device_map++; i++; } printf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION); printf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version); acb->firm_request_len=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ } /* 
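** arcmsr_get_hbc_config: the same firmware-information handshake for the type C
** (SAS 6G) adapters; these additionally require a DRV2IOP_MESSAGE_CMD_DONE
** doorbell write to post the message.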
********************************************************************** ********************************************************************** */ static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb) { char *acb_firm_model=acb->firm_model; char *acb_firm_version=acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i=0; while(i<8) { *acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i=0; while(i<16) { *acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i=0; while(i<16) { *acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); acb_device_map++; i++; } printf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION); printf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version); acb->firm_request_len =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ } /* ********************************************************************** ********************************************************************** */ static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_get_hba_config(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_get_hbb_config(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_get_hbc_config(acb); } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb) { int timeout=0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d:timed out waiting for firmware \n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ } } break; case ACB_ADAPTER_TYPE_B: { while ((CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ 
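/* firmware-ready bit not set yet; poll the iop2drv doorbell again (up to the 30 second limit above) */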
} CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); } break; case ACB_ADAPTER_TYPE_C: { while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ } } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb) { u_int32_t outbound_doorbell; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_B: { CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/ CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK); /* let IOP know data has been read */ } break; case ACB_ADAPTER_TYPE_C: { /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /*clear doorbell interrupt */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK); } break; } } /* ************************************************************************ ************************************************************************ */ static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb) { unsigned long srb_phyaddr; u_int32_t srb_phyaddr_hi32; /* ******************************************************************** ** here we need to tell iop 331 our freesrb.HighPart ** if freesrb.HighPart is not zero ******************************************************************** */ srb_phyaddr= (unsigned long) acb->srb_phyaddr.phyaddr; // srb_phyaddr_hi32=(u_int32_t) ((srb_phyaddr>>16)>>16); srb_phyaddr_hi32=acb->srb_phyaddr.B.phyadd_high; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { if(srb_phyaddr_hi32!=0) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); return FALSE; } } } break; /* *********************************************************************** ** if adapter type B, set window of "post command Q" *********************************************************************** */ case ACB_ADAPTER_TYPE_B: { u_int32_t post_queue_phyaddr; struct HBB_MessageUnit *phbbmu; phbbmu=(struct HBB_MessageUnit *)acb->pmu; phbbmu->postq_index=0; phbbmu->doneq_index=0; CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit); return FALSE; } post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE + 
offsetof(struct HBB_MessageUnit, post_qbuffer); CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normal should be zero */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */ CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit); return FALSE; } CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'start driver mode' timeout \n", acb->pci_unit); return FALSE; } } break; case ACB_ADAPTER_TYPE_C: { if(srb_phyaddr_hi32!=0) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); return FALSE; } } } break; } return (TRUE); }
/* ************************************************************************ ************************************************************************ */ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: case ACB_ADAPTER_TYPE_C: break; case ACB_ADAPTER_TYPE_B: { CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,ARCMSR_MESSAGE_ACTIVE_EOI_MODE); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit); return; } } break; } }
/* ********************************************************************** ********************************************************************** */ static void arcmsr_iop_init(struct AdapterControlBlock *acb) { u_int32_t intmask_org; /* disable all outbound interrupt */ intmask_org=arcmsr_disable_allintr(acb); arcmsr_wait_firmware_ready(acb); arcmsr_iop_confirm(acb); arcmsr_get_firmware_spec(acb); /*start background rebuild*/ arcmsr_start_adapter_bgrb(acb); /* empty doorbell Qbuffer if door bell ringed */ arcmsr_clear_doorbell_queue_buffer(acb); arcmsr_enable_eoi_mode(acb); /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); acb->acb_flags |=ACB_F_IOP_INITED; }
/* ********************************************************************** ********************************************************************** */ static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct AdapterControlBlock *acb=arg; struct CommandControlBlock *srb_tmp; u_int8_t * dma_memptr; u_int32_t i; unsigned long srb_phyaddr=(unsigned long)segs->ds_addr; dma_memptr=acb->uncacheptr; acb->srb_phyaddr.phyaddr=srb_phyaddr; srb_tmp=(struct CommandControlBlock *)dma_memptr; for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { if(bus_dmamap_create(acb->dm_segs_dmat, /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) { acb->acb_flags |= ACB_F_MAPFREESRB_FAILD; printf("arcmsr%d:" " srb dmamap bus_dmamap_create error\n",
acb->pci_unit); return; } srb_tmp->cdb_shifted_phyaddr=(acb->adapter_type==ACB_ADAPTER_TYPE_C)?srb_phyaddr:(srb_phyaddr >> 5); srb_tmp->acb=acb; acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp; srb_phyaddr=srb_phyaddr+SRB_SIZE; srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp+SRB_SIZE); } acb->vir2phy_offset=(unsigned long)srb_tmp-(unsigned long)srb_phyaddr; } /* ************************************************************************ ** ** ************************************************************************ */ static void arcmsr_free_resource(struct AdapterControlBlock *acb) { /* remove the control device */ if(acb->ioctl_dev != NULL) { destroy_dev(acb->ioctl_dev); } bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap); bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap); bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); } /* ************************************************************************ ************************************************************************ */ static u_int32_t arcmsr_initialize(device_t dev) { struct AdapterControlBlock *acb=device_get_softc(dev); u_int16_t pci_command; int i, j,max_coherent_size; u_int32_t vendor_dev_id; vendor_dev_id = pci_get_devid(dev); acb->vendor_device_id = vendor_dev_id; switch (vendor_dev_id) { case PCIDevVenIDARC1880: case PCIDevVenIDARC1882: case PCIDevVenIDARC1213: case PCIDevVenIDARC1223: { acb->adapter_type=ACB_ADAPTER_TYPE_C; acb->adapter_bus_speed = ACB_BUS_SPEED_6G; max_coherent_size=ARCMSR_SRBS_POOL_SIZE; } break; case PCIDevVenIDARC1200: case PCIDevVenIDARC1201: { acb->adapter_type=ACB_ADAPTER_TYPE_B; acb->adapter_bus_speed = ACB_BUS_SPEED_3G; max_coherent_size=ARCMSR_SRBS_POOL_SIZE+(sizeof(struct HBB_MessageUnit)); } break; case PCIDevVenIDARC1110: case PCIDevVenIDARC1120: case PCIDevVenIDARC1130: case PCIDevVenIDARC1160: case PCIDevVenIDARC1170: case PCIDevVenIDARC1210: case PCIDevVenIDARC1220: case PCIDevVenIDARC1230: case PCIDevVenIDARC1231: case PCIDevVenIDARC1260: case PCIDevVenIDARC1261: case PCIDevVenIDARC1270: case PCIDevVenIDARC1280: case PCIDevVenIDARC1212: case PCIDevVenIDARC1222: case PCIDevVenIDARC1380: case PCIDevVenIDARC1381: case PCIDevVenIDARC1680: case PCIDevVenIDARC1681: { acb->adapter_type=ACB_ADAPTER_TYPE_A; acb->adapter_bus_speed = ACB_BUS_SPEED_3G; max_coherent_size=ARCMSR_SRBS_POOL_SIZE; } break; default: { printf("arcmsr%d:" " unknown RAID adapter type \n", device_get_unit(dev)); return ENOMEM; } } if(bus_dma_tag_create( /*PCI parent*/ bus_get_dma_tag(dev), /*alignemnt*/ 1, /*boundary*/ 0, /*lowaddr*/ BUS_SPACE_MAXADDR, /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT, /*nsegments*/ BUS_SPACE_UNRESTRICTED, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ NULL, /*lockarg*/ NULL, #endif &acb->parent_dmat) != 0) { printf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENOMEM; } /* Create a single tag describing a region large enough to hold all of the s/g lists we will need. 
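Each request may carry up to ARCMSR_MAX_SG_ENTRIES segments and up to ARCMSR_MAX_FREESRB_NUM requests can be mapped at once, which is why maxsize below is ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM.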
*/ if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, /*alignment*/ 1, /*boundary*/ 0, #ifdef PAE /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, #else /*lowaddr*/ BUS_SPACE_MAXADDR, #endif /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM, /*nsegments*/ ARCMSR_MAX_SG_ENTRIES, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ busdma_lock_mutex, /*lockarg*/ &acb->qbuffer_lock, #endif &acb->dm_segs_dmat) != 0) { bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENOMEM; } /* DMA tag for our srb structures.... Allocate the freesrb memory */ if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, /*alignment*/ 0x20, /*boundary*/ 0, /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ max_coherent_size, /*nsegments*/ 1, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ NULL, /*lockarg*/ NULL, #endif &acb->srb_dmat) != 0) { bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENXIO; } /* Allocation for our srbs */ if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) { bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev)); return ENXIO; } /* And permanently map them */ if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) { bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev)); return ENXIO; } pci_command=pci_read_config(dev, PCIR_COMMAND, 2); pci_command |= PCIM_CMD_BUSMASTEREN; pci_command |= PCIM_CMD_PERRESPEN; pci_command |= PCIM_CMD_MWRICEN; /* Enable Busmaster/Mem */ pci_command |= PCIM_CMD_MEMEN; pci_write_config(dev, PCIR_COMMAND, pci_command, 2); switch(acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { u_int32_t rid0=PCIR_BAR(0); vm_offset_t mem_base0; acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); return ENXIO; } mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); if(mem_base0==0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); return ENXIO; } acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]); acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]); acb->pmu=(struct MessageUnit_UNION *)mem_base0; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu; struct CommandControlBlock *freesrb; u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) }; vm_offset_t mem_base[]={0,0}; for(i=0; i<2; i++) { if(i==0) { acb->sys_res_arcmsr[i]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i], 0ul, ~0ul, sizeof(struct HBB_DOORBELL), 
RF_ACTIVE); } else { acb->sys_res_arcmsr[i]=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i], 0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE); } if(acb->sys_res_arcmsr[i] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i); return ENXIO; } mem_base[i]=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]); if(mem_base[i]==0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i); return ENXIO; } acb->btag[i]=rman_get_bustag(acb->sys_res_arcmsr[i]); acb->bhandle[i]=rman_get_bushandle(acb->sys_res_arcmsr[i]); } freesrb=(struct CommandControlBlock *)acb->uncacheptr; // acb->pmu=(struct MessageUnit_UNION *)&freesrb[ARCMSR_MAX_FREESRB_NUM]; acb->pmu=(struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE); phbbmu=(struct HBB_MessageUnit *)acb->pmu; phbbmu->hbb_doorbell=(struct HBB_DOORBELL *)mem_base[0]; phbbmu->hbb_rwbuffer=(struct HBB_RWBUFFER *)mem_base[1]; } break; case ACB_ADAPTER_TYPE_C: { u_int32_t rid0=PCIR_BAR(1); vm_offset_t mem_base0; acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); return ENXIO; } mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); if(mem_base0==0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); return ENXIO; } acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]); acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]); acb->pmu=(struct MessageUnit_UNION *)mem_base0; } break; } if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) { arcmsr_free_resource(acb); printf("arcmsr%d: map free srb failure!\n", device_get_unit(dev)); return ENXIO; } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ); acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; /* ******************************************************************** ** init raid volume state ******************************************************************** */ for(i=0;i<ARCMSR_MAX_TARGETID;i++) { for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) { acb->devstate[i][j]=ARECA_RAID_GONE; } } arcmsr_iop_init(acb); return(0); }
/* ************************************************************************ ************************************************************************ */ static int arcmsr_attach(device_t dev) { struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); u_int32_t unit=device_get_unit(dev); struct ccb_setasync csa; struct cam_devq *devq; /* Device Queue to use for this SIM */ struct resource *irqres; int rid; if(acb == NULL) { printf("arcmsr%d: cannot allocate softc\n", unit); return (ENOMEM); } ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock"); if(arcmsr_initialize(dev)) { printf("arcmsr%d: initialize failure!\n", unit); ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); return ENXIO; } /* After setting up the adapter, map our interrupt */ rid=0; irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE); if(irqres == NULL || #if __FreeBSD_version >= 700025
bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, NULL, arcmsr_intr_handler, acb, &acb->ih)) { #else bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih)) { #endif arcmsr_free_resource(acb); ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); printf("arcmsr%d: unable to register interrupt handler!\n", unit); return ENXIO; } acb->irqres=irqres; acb->pci_dev=dev; acb->pci_unit=unit; /* * Now let the CAM generic SCSI layer find the SCSI devices on * the bus * start queue to reset to the idle loop. * * Create device queue of SIM(s) * (MAX_START_JOB - 1) : * max_sim_transactions */ devq=cam_simq_alloc(ARCMSR_MAX_START_JOB); if(devq == NULL) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); printf("arcmsr%d: cam_simq_alloc failure!\n", unit); return ENXIO; } #if __FreeBSD_version >= 700025 acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->qbuffer_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq); #else acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq); #endif if(acb->psim == NULL) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); cam_simq_free(devq); ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); printf("arcmsr%d: cam_sim_alloc failure!\n", unit); return ENXIO; } ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); #if __FreeBSD_version >= 700044 if(xpt_bus_register(acb->psim, dev, 0) != CAM_SUCCESS) { #else if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) { #endif arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); cam_sim_free(acb->psim, /*free_devq*/TRUE); ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); printf("arcmsr%d: xpt_bus_register failure!\n", unit); return ENXIO; } if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); xpt_bus_deregister(cam_sim_path(acb->psim)); cam_sim_free(acb->psim, /* free_simq */ TRUE); ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); printf("arcmsr%d: xpt_create_path failure!\n", unit); return ENXIO; } /* **************************************************** */ xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5); csa.ccb_h.func_code=XPT_SASYNC_CB; csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE; csa.callback=arcmsr_async; csa.callback_arg=acb->psim; xpt_action((union ccb *)&csa); ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); /* Create the control device. 
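The node is created as arcmsr%d (with an arc%d alias on newer systems, per the version guards below) and gives management utilities ioctl access to the adapter through arcmsr_cdevsw.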
*/ acb->ioctl_dev=make_dev(&arcmsr_cdevsw, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit); #if __FreeBSD_version < 503000 acb->ioctl_dev->si_drv1=acb; #endif #if __FreeBSD_version > 500005 (void)make_dev_alias(acb->ioctl_dev, "arc%d", unit); #endif arcmsr_callout_init(&acb->devmap_callout); callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb); return (0); }
/* ************************************************************************ ************************************************************************ */ static int arcmsr_probe(device_t dev) { u_int32_t id; static char buf[256]; char x_type[]={"X-TYPE"}; char *type; int raid6 = 1; if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) { return (ENXIO); } switch(id=pci_get_devid(dev)) { case PCIDevVenIDARC1110: case PCIDevVenIDARC1200: case PCIDevVenIDARC1201: case PCIDevVenIDARC1210: raid6 = 0; /*FALLTHRU*/ case PCIDevVenIDARC1120: case PCIDevVenIDARC1130: case PCIDevVenIDARC1160: case PCIDevVenIDARC1170: case PCIDevVenIDARC1220: case PCIDevVenIDARC1230: case PCIDevVenIDARC1231: case PCIDevVenIDARC1260: case PCIDevVenIDARC1261: case PCIDevVenIDARC1270: case PCIDevVenIDARC1280: type = "SATA"; break; case PCIDevVenIDARC1212: case PCIDevVenIDARC1222: case PCIDevVenIDARC1380: case PCIDevVenIDARC1381: case PCIDevVenIDARC1680: case PCIDevVenIDARC1681: type = "SAS 3G"; break; case PCIDevVenIDARC1880: case PCIDevVenIDARC1882: case PCIDevVenIDARC1213: case PCIDevVenIDARC1223: type = "SAS 6G"; break; default: type = x_type; break; } if(type == x_type) return(ENXIO); sprintf(buf, "Areca %s Host Adapter RAID Controller %s\n", type, raid6 ? "(RAID6 capable)" : ""); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); }
/* ************************************************************************ ************************************************************************ */ static int arcmsr_shutdown(device_t dev) { u_int32_t i; u_int32_t intmask_org; struct CommandControlBlock *srb; struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); /* stop adapter background rebuild */ ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); /* disable all outbound interrupt */ intmask_org=arcmsr_disable_allintr(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); /* abort all outstanding command */ acb->acb_flags |= ACB_F_SCSISTOPADAPTER; acb->acb_flags &= ~ACB_F_IOP_INITED; if(acb->srboutstandingcount!=0) { /*clear and abort all outbound posted Q*/ arcmsr_done4abort_postqueue(acb); /* talk to iop 331 outstanding command aborted*/ arcmsr_abort_allcmd(acb); for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { srb=acb->psrb_pool[i]; if(srb->srb_state==ARCMSR_SRB_START) { srb->srb_state=ARCMSR_SRB_ABORTED; srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); } } } acb->srboutstandingcount=0; acb->workingsrb_doneindex=0; acb->workingsrb_startindex=0; #ifdef ARCMSR_DEBUG1 acb->pktRequestCount = 0; acb->pktReturnCount = 0; #endif ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); return (0); }
/* ************************************************************************ ************************************************************************ */ static int arcmsr_detach(device_t dev) { struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); int i; callout_stop(&acb->devmap_callout); bus_teardown_intr(dev, acb->irqres, acb->ih); arcmsr_shutdown(dev); arcmsr_free_resource(acb); for(i=0; (acb->sys_res_arcmsr[i]!=NULL) && (i<2); i++) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i),
acb->sys_res_arcmsr[i]); } bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); xpt_async(AC_LOST_DEVICE, acb->ppath, NULL); xpt_free_path(acb->ppath); xpt_bus_deregister(cam_sim_path(acb->psim)); cam_sim_free(acb->psim, TRUE); ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); return (0); } #ifdef ARCMSR_DEBUG1 static void arcmsr_dump_data(struct AdapterControlBlock *acb) { if((acb->pktRequestCount - acb->pktReturnCount) == 0) return; printf("Command Request Count =0x%x\n",acb->pktRequestCount); printf("Command Return Count =0x%x\n",acb->pktReturnCount); printf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount)); printf("Queued Command Count =0x%x\n",acb->srboutstandingcount); } #endif Index: projects/physbio/sys/dev/ciss/ciss.c =================================================================== --- projects/physbio/sys/dev/ciss/ciss.c (revision 243875) +++ projects/physbio/sys/dev/ciss/ciss.c (revision 243876) @@ -1,4665 +1,4654 @@ /*- * Copyright (c) 2001 Michael Smith * Copyright (c) 2004 Paul Saab * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Common Interface for SCSI-3 Support driver. * * CISS claims to provide a common interface between a generic SCSI * transport and an intelligent host adapter. * * This driver supports CISS as defined in the document "CISS Command * Interface for SCSI-3 Support Open Specification", Version 1.04, * Valence Number 1, dated 20001127, produced by Compaq Computer * Corporation. This document appears to be a hastily and somewhat * arbitrarlily cut-down version of a larger (and probably even more * chaotic and inconsistent) Compaq internal document. Various * details were also gleaned from Compaq's "cciss" driver for Linux. * * We provide a shim layer between the CISS interface and CAM, * offloading most of the queueing and being-a-disk chores onto CAM. * Entry to the driver is via the PCI bus attachment (ciss_probe, * ciss_attach, etc) and via the CAM interface (ciss_cam_action, * ciss_cam_poll). The Compaq CISS adapters are, however, poor SCSI * citizens and we have to fake up some responses to get reasonable * behaviour out of them. 
In addition, the CISS command set is by no * means adequate to support the functionality of a RAID controller, * and thus the supported Compaq adapters utilise portions of the * control protocol from earlier Compaq adapter families. * * Note that we only support the "simple" transport layer over PCI. * This interface (ab)uses the I2O register set (specifically the post * queues) to exchange commands with the adapter. Other interfaces * are available, but we aren't supposed to know about them, and it is * dubious whether they would provide major performance improvements * except under extreme load. * * Currently the only supported CISS adapters are the Compaq Smart * Array 5* series (5300, 5i, 532). Even with only three adapters, * Compaq still manage to have interface variations. * * * Thanks must go to Fred Harris and Darryl DeVinney at Compaq, as * well as Paul Saab at Yahoo! for their assistance in making this * driver happen. * * More thanks must go to John Cagle at HP for the countless hours * spent making this driver "work" with the MSA* series storage * enclosures. Without his help (and nagging), this driver could not * be used with these enclosures. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(CISS_MALLOC_CLASS, "ciss_data", "ciss internal data buffers"); /* pci interface */ static int ciss_lookup(device_t dev); static int ciss_probe(device_t dev); static int ciss_attach(device_t dev); static int ciss_detach(device_t dev); static int ciss_shutdown(device_t dev); /* (de)initialisation functions, control wrappers */ static int ciss_init_pci(struct ciss_softc *sc); static int ciss_setup_msix(struct ciss_softc *sc); static int ciss_init_perf(struct ciss_softc *sc); static int ciss_wait_adapter(struct ciss_softc *sc); static int ciss_flush_adapter(struct ciss_softc *sc); static int ciss_init_requests(struct ciss_softc *sc); static void ciss_command_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int ciss_identify_adapter(struct ciss_softc *sc); static int ciss_init_logical(struct ciss_softc *sc); static int ciss_init_physical(struct ciss_softc *sc); static int ciss_filter_physical(struct ciss_softc *sc, struct ciss_lun_report *cll); static int ciss_identify_logical(struct ciss_softc *sc, struct ciss_ldrive *ld); static int ciss_get_ldrive_status(struct ciss_softc *sc, struct ciss_ldrive *ld); static int ciss_update_config(struct ciss_softc *sc); static int ciss_accept_media(struct ciss_softc *sc, struct ciss_ldrive *ld); static void ciss_init_sysctl(struct ciss_softc *sc); static void ciss_soft_reset(struct ciss_softc *sc); static void ciss_free(struct ciss_softc *sc); static void ciss_spawn_notify_thread(struct ciss_softc *sc); static void ciss_kill_notify_thread(struct ciss_softc *sc); /* request submission/completion */ static int ciss_start(struct ciss_request *cr); static void ciss_done(struct ciss_softc *sc, cr_qhead_t *qh); static void ciss_perf_done(struct ciss_softc *sc, cr_qhead_t *qh); static void ciss_intr(void *arg); static void ciss_perf_intr(void *arg); static void ciss_perf_msi_intr(void *arg); static void ciss_complete(struct ciss_softc *sc, cr_qhead_t *qh); static int _ciss_report_request(struct ciss_request *cr, int *command_status, int *scsi_status, const char *func); static int ciss_synch_request(struct ciss_request *cr, int 
timeout); static int ciss_poll_request(struct ciss_request *cr, int timeout); static int ciss_wait_request(struct ciss_request *cr, int timeout); #if 0 static int ciss_abort_request(struct ciss_request *cr); #endif /* request queueing */ static int ciss_get_request(struct ciss_softc *sc, struct ciss_request **crp); static void ciss_preen_command(struct ciss_request *cr); static void ciss_release_request(struct ciss_request *cr); /* request helpers */ static int ciss_get_bmic_request(struct ciss_softc *sc, struct ciss_request **crp, int opcode, void **bufp, size_t bufsize); static int ciss_user_command(struct ciss_softc *sc, IOCTL_Command_struct *ioc); /* DMA map/unmap */ static int ciss_map_request(struct ciss_request *cr); static void ciss_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void ciss_unmap_request(struct ciss_request *cr); /* CAM interface */ static int ciss_cam_init(struct ciss_softc *sc); static void ciss_cam_rescan_target(struct ciss_softc *sc, int bus, int target); static void ciss_cam_action(struct cam_sim *sim, union ccb *ccb); static int ciss_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio); static int ciss_cam_emulate(struct ciss_softc *sc, struct ccb_scsiio *csio); static void ciss_cam_poll(struct cam_sim *sim); static void ciss_cam_complete(struct ciss_request *cr); static void ciss_cam_complete_fixup(struct ciss_softc *sc, struct ccb_scsiio *csio); static struct cam_periph *ciss_find_periph(struct ciss_softc *sc, int bus, int target); static int ciss_name_device(struct ciss_softc *sc, int bus, int target); /* periodic status monitoring */ static void ciss_periodic(void *arg); static void ciss_nop_complete(struct ciss_request *cr); static void ciss_disable_adapter(struct ciss_softc *sc); static void ciss_notify_event(struct ciss_softc *sc); static void ciss_notify_complete(struct ciss_request *cr); static int ciss_notify_abort(struct ciss_softc *sc); static int ciss_notify_abort_bmic(struct ciss_softc *sc); static void ciss_notify_hotplug(struct ciss_softc *sc, struct ciss_notify *cn); static void ciss_notify_logical(struct ciss_softc *sc, struct ciss_notify *cn); static void ciss_notify_physical(struct ciss_softc *sc, struct ciss_notify *cn); /* debugging output */ static void ciss_print_request(struct ciss_request *cr); static void ciss_print_ldrive(struct ciss_softc *sc, struct ciss_ldrive *ld); static const char *ciss_name_ldrive_status(int status); static int ciss_decode_ldrive_status(int status); static const char *ciss_name_ldrive_org(int org); static const char *ciss_name_command_status(int status); /* * PCI bus interface. */ static device_method_t ciss_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ciss_probe), DEVMETHOD(device_attach, ciss_attach), DEVMETHOD(device_detach, ciss_detach), DEVMETHOD(device_shutdown, ciss_shutdown), { 0, 0 } }; static driver_t ciss_pci_driver = { "ciss", ciss_methods, sizeof(struct ciss_softc) }; static devclass_t ciss_devclass; DRIVER_MODULE(ciss, pci, ciss_pci_driver, ciss_devclass, 0, 0); MODULE_DEPEND(ciss, cam, 1, 1, 1); MODULE_DEPEND(ciss, pci, 1, 1, 1); /* * Control device interface. 
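 * Userland management tools reach the adapter through this character device;
 * requests come in via ciss_ioctl(), and pass-through commands are handed to
 * ciss_user_command().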
*/ static d_open_t ciss_open; static d_close_t ciss_close; static d_ioctl_t ciss_ioctl; static struct cdevsw ciss_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = ciss_open, .d_close = ciss_close, .d_ioctl = ciss_ioctl, .d_name = "ciss", }; /* * This tunable can be set at boot time and controls whether physical devices * that are marked hidden by the firmware should be exposed anyways. */ static unsigned int ciss_expose_hidden_physical = 0; TUNABLE_INT("hw.ciss.expose_hidden_physical", &ciss_expose_hidden_physical); static unsigned int ciss_nop_message_heartbeat = 0; TUNABLE_INT("hw.ciss.nop_message_heartbeat", &ciss_nop_message_heartbeat); /* * This tunable can force a particular transport to be used: * <= 0 : use default * 1 : force simple * 2 : force performant */ static int ciss_force_transport = 0; TUNABLE_INT("hw.ciss.force_transport", &ciss_force_transport); /* * This tunable can force a particular interrupt delivery method to be used: * <= 0 : use default * 1 : force INTx * 2 : force MSIX */ static int ciss_force_interrupt = 0; TUNABLE_INT("hw.ciss.force_interrupt", &ciss_force_interrupt); /************************************************************************ * CISS adapters amazingly don't have a defined programming interface * value. (One could say some very despairing things about PCI and * people just not getting the general idea.) So we are forced to * stick with matching against subvendor/subdevice, and thus have to * be updated for every new CISS adapter that appears. */ #define CISS_BOARD_UNKNWON 0 #define CISS_BOARD_SA5 1 #define CISS_BOARD_SA5B 2 #define CISS_BOARD_NOMSI (1<<4) static struct { u_int16_t subvendor; u_int16_t subdevice; int flags; char *desc; } ciss_vendor_data[] = { { 0x0e11, 0x4070, CISS_BOARD_SA5|CISS_BOARD_NOMSI, "Compaq Smart Array 5300" }, { 0x0e11, 0x4080, CISS_BOARD_SA5B|CISS_BOARD_NOMSI, "Compaq Smart Array 5i" }, { 0x0e11, 0x4082, CISS_BOARD_SA5B|CISS_BOARD_NOMSI, "Compaq Smart Array 532" }, { 0x0e11, 0x4083, CISS_BOARD_SA5B|CISS_BOARD_NOMSI, "HP Smart Array 5312" }, { 0x0e11, 0x4091, CISS_BOARD_SA5, "HP Smart Array 6i" }, { 0x0e11, 0x409A, CISS_BOARD_SA5, "HP Smart Array 641" }, { 0x0e11, 0x409B, CISS_BOARD_SA5, "HP Smart Array 642" }, { 0x0e11, 0x409C, CISS_BOARD_SA5, "HP Smart Array 6400" }, { 0x0e11, 0x409D, CISS_BOARD_SA5, "HP Smart Array 6400 EM" }, { 0x103C, 0x3211, CISS_BOARD_SA5, "HP Smart Array E200i" }, { 0x103C, 0x3212, CISS_BOARD_SA5, "HP Smart Array E200" }, { 0x103C, 0x3213, CISS_BOARD_SA5, "HP Smart Array E200i" }, { 0x103C, 0x3214, CISS_BOARD_SA5, "HP Smart Array E200i" }, { 0x103C, 0x3215, CISS_BOARD_SA5, "HP Smart Array E200i" }, { 0x103C, 0x3220, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3222, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3223, CISS_BOARD_SA5, "HP Smart Array P800" }, { 0x103C, 0x3225, CISS_BOARD_SA5, "HP Smart Array P600" }, { 0x103C, 0x3230, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3231, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3232, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3233, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3234, CISS_BOARD_SA5, "HP Smart Array P400" }, { 0x103C, 0x3235, CISS_BOARD_SA5, "HP Smart Array P400i" }, { 0x103C, 0x3236, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3237, CISS_BOARD_SA5, "HP Smart Array E500" }, { 0x103C, 0x3238, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3239, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x323A, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x323B, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 
0x323C, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x323D, CISS_BOARD_SA5, "HP Smart Array P700m" }, { 0x103C, 0x3241, CISS_BOARD_SA5, "HP Smart Array P212" }, { 0x103C, 0x3243, CISS_BOARD_SA5, "HP Smart Array P410" }, { 0x103C, 0x3245, CISS_BOARD_SA5, "HP Smart Array P410i" }, { 0x103C, 0x3247, CISS_BOARD_SA5, "HP Smart Array P411" }, { 0x103C, 0x3249, CISS_BOARD_SA5, "HP Smart Array P812" }, { 0x103C, 0x324A, CISS_BOARD_SA5, "HP Smart Array P712m" }, { 0x103C, 0x324B, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3350, CISS_BOARD_SA5, "HP Smart Array P222" }, { 0x103C, 0x3351, CISS_BOARD_SA5, "HP Smart Array P420" }, { 0x103C, 0x3352, CISS_BOARD_SA5, "HP Smart Array P421" }, { 0x103C, 0x3353, CISS_BOARD_SA5, "HP Smart Array P822" }, { 0x103C, 0x3354, CISS_BOARD_SA5, "HP Smart Array P420i" }, { 0x103C, 0x3355, CISS_BOARD_SA5, "HP Smart Array P220i" }, { 0x103C, 0x3356, CISS_BOARD_SA5, "HP Smart Array P721m" }, { 0, 0, 0, NULL } }; /************************************************************************ * Find a match for the device in our list of known adapters. */ static int ciss_lookup(device_t dev) { int i; for (i = 0; ciss_vendor_data[i].desc != NULL; i++) if ((pci_get_subvendor(dev) == ciss_vendor_data[i].subvendor) && (pci_get_subdevice(dev) == ciss_vendor_data[i].subdevice)) { return(i); } return(-1); } /************************************************************************ * Match a known CISS adapter. */ static int ciss_probe(device_t dev) { int i; i = ciss_lookup(dev); if (i != -1) { device_set_desc(dev, ciss_vendor_data[i].desc); return(BUS_PROBE_DEFAULT); } return(ENOENT); } /************************************************************************ * Attach the driver to this adapter. */ static int ciss_attach(device_t dev) { struct ciss_softc *sc; int error; debug_called(1); #ifdef CISS_DEBUG /* print structure/union sizes */ debug_struct(ciss_command); debug_struct(ciss_header); debug_union(ciss_device_address); debug_struct(ciss_cdb); debug_struct(ciss_report_cdb); debug_struct(ciss_notify_cdb); debug_struct(ciss_notify); debug_struct(ciss_message_cdb); debug_struct(ciss_error_info_pointer); debug_struct(ciss_error_info); debug_struct(ciss_sg_entry); debug_struct(ciss_config_table); debug_struct(ciss_bmic_cdb); debug_struct(ciss_bmic_id_ldrive); debug_struct(ciss_bmic_id_lstatus); debug_struct(ciss_bmic_id_table); debug_struct(ciss_bmic_id_pdrive); debug_struct(ciss_bmic_blink_pdrive); debug_struct(ciss_bmic_flush_cache); debug_const(CISS_MAX_REQUESTS); debug_const(CISS_MAX_LOGICAL); debug_const(CISS_INTERRUPT_COALESCE_DELAY); debug_const(CISS_INTERRUPT_COALESCE_COUNT); debug_const(CISS_COMMAND_ALLOC_SIZE); debug_const(CISS_COMMAND_SG_LENGTH); debug_type(cciss_pci_info_struct); debug_type(cciss_coalint_struct); debug_type(cciss_coalint_struct); debug_type(NodeName_type); debug_type(NodeName_type); debug_type(Heartbeat_type); debug_type(BusTypes_type); debug_type(FirmwareVer_type); debug_type(DriverVer_type); debug_type(IOCTL_Command_struct); #endif sc = device_get_softc(dev); sc->ciss_dev = dev; mtx_init(&sc->ciss_mtx, "cissmtx", NULL, MTX_DEF); callout_init_mtx(&sc->ciss_periodic, &sc->ciss_mtx, 0); /* * Do PCI-specific init. */ if ((error = ciss_init_pci(sc)) != 0) goto out; /* * Initialise driver queues. */ ciss_initq_free(sc); ciss_initq_notify(sc); /* * Initalize device sysctls. */ ciss_init_sysctl(sc); /* * Initialise command/request pool. */ if ((error = ciss_init_requests(sc)) != 0) goto out; /* * Get adapter information. 
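 * ciss_identify_adapter() below issues a BMIC ID_CTLR command and keeps
 * the result in sc->ciss_id; under bootverbose it also dumps the more
 * interesting config-table fields.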
*/ if ((error = ciss_identify_adapter(sc)) != 0) goto out; /* * Find all the physical devices. */ if ((error = ciss_init_physical(sc)) != 0) goto out; /* * Build our private table of logical devices. */ if ((error = ciss_init_logical(sc)) != 0) goto out; /* * Enable interrupts so that the CAM scan can complete. */ CISS_TL_SIMPLE_ENABLE_INTERRUPTS(sc); /* * Initialise the CAM interface. */ if ((error = ciss_cam_init(sc)) != 0) goto out; /* * Start the heartbeat routine and event chain. */ ciss_periodic(sc); /* * Create the control device. */ sc->ciss_dev_t = make_dev(&ciss_cdevsw, device_get_unit(sc->ciss_dev), UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "ciss%d", device_get_unit(sc->ciss_dev)); sc->ciss_dev_t->si_drv1 = sc; /* * The adapter is running; synchronous commands can now sleep * waiting for an interrupt to signal completion. */ sc->ciss_flags |= CISS_FLAG_RUNNING; ciss_spawn_notify_thread(sc); error = 0; out: if (error != 0) { /* ciss_free() expects the mutex to be held */ mtx_lock(&sc->ciss_mtx); ciss_free(sc); } return(error); } /************************************************************************ * Detach the driver from this adapter. */ static int ciss_detach(device_t dev) { struct ciss_softc *sc = device_get_softc(dev); debug_called(1); mtx_lock(&sc->ciss_mtx); if (sc->ciss_flags & CISS_FLAG_CONTROL_OPEN) { mtx_unlock(&sc->ciss_mtx); return (EBUSY); } /* flush adapter cache */ ciss_flush_adapter(sc); /* release all resources. The mutex is released and freed here too. */ ciss_free(sc); return(0); } /************************************************************************ * Prepare adapter for system shutdown. */ static int ciss_shutdown(device_t dev) { struct ciss_softc *sc = device_get_softc(dev); debug_called(1); mtx_lock(&sc->ciss_mtx); /* flush adapter cache */ ciss_flush_adapter(sc); if (sc->ciss_soft_reset) ciss_soft_reset(sc); mtx_unlock(&sc->ciss_mtx); return(0); } static void ciss_init_sysctl(struct ciss_softc *sc) { SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->ciss_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ciss_dev)), OID_AUTO, "soft_reset", CTLFLAG_RW, &sc->ciss_soft_reset, 0, ""); } /************************************************************************ * Perform PCI-specific attachment actions. */ static int ciss_init_pci(struct ciss_softc *sc) { uintptr_t cbase, csize, cofs; uint32_t method, supported_methods; int error, sqmask, i; void *intr; debug_called(1); /* * Work out adapter type. */ i = ciss_lookup(sc->ciss_dev); if (i < 0) { ciss_printf(sc, "unknown adapter type\n"); return (ENXIO); } if (ciss_vendor_data[i].flags & CISS_BOARD_SA5) { sqmask = CISS_TL_SIMPLE_INTR_OPQ_SA5; } else if (ciss_vendor_data[i].flags & CISS_BOARD_SA5B) { sqmask = CISS_TL_SIMPLE_INTR_OPQ_SA5B; } else { /* * XXX Big hammer, masks/unmasks all possible interrupts. This should * work on all hardware variants. Need to add code to handle the * "controller crashed" interrupt bit that this unmasks. */ sqmask = ~0; } /* * Allocate register window first (we need this to find the config * struct). */ error = ENXIO; sc->ciss_regs_rid = CISS_TL_SIMPLE_BAR_REGS; if ((sc->ciss_regs_resource = bus_alloc_resource_any(sc->ciss_dev, SYS_RES_MEMORY, &sc->ciss_regs_rid, RF_ACTIVE)) == NULL) { ciss_printf(sc, "can't allocate register window\n"); return(ENXIO); } sc->ciss_regs_bhandle = rman_get_bushandle(sc->ciss_regs_resource); sc->ciss_regs_btag = rman_get_bustag(sc->ciss_regs_resource); /* * Find the BAR holding the config structure.
If it's not the one * we already mapped for registers, map it too. */ sc->ciss_cfg_rid = CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_CFG_BAR) & 0xffff; if (sc->ciss_cfg_rid != sc->ciss_regs_rid) { if ((sc->ciss_cfg_resource = bus_alloc_resource_any(sc->ciss_dev, SYS_RES_MEMORY, &sc->ciss_cfg_rid, RF_ACTIVE)) == NULL) { ciss_printf(sc, "can't allocate config window\n"); return(ENXIO); } cbase = (uintptr_t)rman_get_virtual(sc->ciss_cfg_resource); csize = rman_get_end(sc->ciss_cfg_resource) - rman_get_start(sc->ciss_cfg_resource) + 1; } else { cbase = (uintptr_t)rman_get_virtual(sc->ciss_regs_resource); csize = rman_get_end(sc->ciss_regs_resource) - rman_get_start(sc->ciss_regs_resource) + 1; } cofs = CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_CFG_OFF); /* * Use the base/size/offset values we just calculated to * sanity-check the config structure. If it's OK, point to it. */ if ((cofs + sizeof(struct ciss_config_table)) > csize) { ciss_printf(sc, "config table outside window\n"); return(ENXIO); } sc->ciss_cfg = (struct ciss_config_table *)(cbase + cofs); debug(1, "config struct at %p", sc->ciss_cfg); /* * Calculate the number of request structures/commands we are * going to provide for this adapter. */ sc->ciss_max_requests = min(CISS_MAX_REQUESTS, sc->ciss_cfg->max_outstanding_commands); /* * Validate the config structure. If we supported other transport * methods, we could select amongst them at this point in time. */ if (strncmp(sc->ciss_cfg->signature, "CISS", 4)) { ciss_printf(sc, "config signature mismatch (got '%c%c%c%c')\n", sc->ciss_cfg->signature[0], sc->ciss_cfg->signature[1], sc->ciss_cfg->signature[2], sc->ciss_cfg->signature[3]); return(ENXIO); } /* * Select the mode of operation, prefer Performant. */ if (!(sc->ciss_cfg->supported_methods & (CISS_TRANSPORT_METHOD_SIMPLE | CISS_TRANSPORT_METHOD_PERF))) { ciss_printf(sc, "No supported transport layers: 0x%x\n", sc->ciss_cfg->supported_methods); } switch (ciss_force_transport) { case 1: supported_methods = CISS_TRANSPORT_METHOD_SIMPLE; break; case 2: supported_methods = CISS_TRANSPORT_METHOD_PERF; break; default: supported_methods = sc->ciss_cfg->supported_methods; break; } setup: if ((supported_methods & CISS_TRANSPORT_METHOD_PERF) != 0) { method = CISS_TRANSPORT_METHOD_PERF; sc->ciss_perf = (struct ciss_perf_config *)(cbase + cofs + sc->ciss_cfg->transport_offset); if (ciss_init_perf(sc)) { supported_methods &= ~method; goto setup; } } else if (supported_methods & CISS_TRANSPORT_METHOD_SIMPLE) { method = CISS_TRANSPORT_METHOD_SIMPLE; } else { ciss_printf(sc, "No supported transport methods: 0x%x\n", sc->ciss_cfg->supported_methods); return(ENXIO); } /* * Tell it we're using the low 4GB of RAM. Set the default interrupt * coalescing options. */ sc->ciss_cfg->requested_method = method; sc->ciss_cfg->command_physlimit = 0; sc->ciss_cfg->interrupt_coalesce_delay = CISS_INTERRUPT_COALESCE_DELAY; sc->ciss_cfg->interrupt_coalesce_count = CISS_INTERRUPT_COALESCE_COUNT; #ifdef __i386__ sc->ciss_cfg->host_driver |= CISS_DRIVER_SCSI_PREFETCH; #endif if (ciss_update_config(sc)) { ciss_printf(sc, "adapter refuses to accept config update (IDBR 0x%x)\n", CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_IDBR)); return(ENXIO); } if ((sc->ciss_cfg->active_method & method) == 0) { supported_methods &= ~method; if (supported_methods == 0) { ciss_printf(sc, "adapter refuses to go into available transports " "mode (0x%x, 0x%x)\n", supported_methods, sc->ciss_cfg->active_method); return(ENXIO); } else goto setup; } /* * Wait for the adapter to come ready. 
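 * (At this point the transport method has been negotiated: we wrote
 * requested_method and rang the config-table doorbell in
 * ciss_update_config(), then checked that active_method took the value
 * we asked for, falling back to the other transport if it did not.)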
*/ if ((error = ciss_wait_adapter(sc)) != 0) return(error); /* Prepare to possibly use MSIX and/or PERFORMANT interrupts. Normal * interrupts have a rid of 0; this will be overridden if MSIX is used. */ sc->ciss_irq_rid[0] = 0; if (method == CISS_TRANSPORT_METHOD_PERF) { ciss_printf(sc, "PERFORMANT Transport\n"); if ((ciss_force_interrupt != 1) && (ciss_setup_msix(sc) == 0)) { intr = ciss_perf_msi_intr; } else { intr = ciss_perf_intr; } /* XXX The docs say that the 0x01 bit is only for SAS controllers. * Unfortunately, there is no good way to know if this is a SAS * controller. Hopefully enabling this bit universally will work OK. * It seems to work fine for SA6i controllers. */ sc->ciss_interrupt_mask = CISS_TL_PERF_INTR_OPQ | CISS_TL_PERF_INTR_MSI; } else { ciss_printf(sc, "SIMPLE Transport\n"); /* MSIX doesn't seem to work in SIMPLE mode, only enable it if forced */ if (ciss_force_interrupt == 2) /* If this fails, we automatically revert to INTx */ ciss_setup_msix(sc); sc->ciss_perf = NULL; intr = ciss_intr; sc->ciss_interrupt_mask = sqmask; } /* * Turn off interrupts before we go routing anything. */ CISS_TL_SIMPLE_DISABLE_INTERRUPTS(sc); /* * Allocate and set up our interrupt. */ if ((sc->ciss_irq_resource = bus_alloc_resource_any(sc->ciss_dev, SYS_RES_IRQ, &sc->ciss_irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) { ciss_printf(sc, "can't allocate interrupt\n"); return(ENXIO); } if (bus_setup_intr(sc->ciss_dev, sc->ciss_irq_resource, INTR_TYPE_CAM|INTR_MPSAFE, NULL, intr, sc, &sc->ciss_intr)) { ciss_printf(sc, "can't set up interrupt\n"); return(ENXIO); } /* * Allocate the parent bus DMA tag appropriate for our PCI * interface. * * Note that "simple" adapters can only address within a 32-bit * span. */ if (bus_dma_tag_create(bus_get_dma_tag(sc->ciss_dev),/* PCI parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ CISS_MAX_SG_ELEMENTS, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ciss_parent_dmat)) { ciss_printf(sc, "can't allocate parent DMA tag\n"); return(ENOMEM); } /* * Create DMA tag for mapping buffers into adapter-addressable * space. */ if (bus_dma_tag_create(sc->ciss_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, CISS_MAX_SG_ELEMENTS, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, &sc->ciss_mtx, /* lockfunc, lockarg */ &sc->ciss_buffer_dmat)) { ciss_printf(sc, "can't allocate buffer DMA tag\n"); return(ENOMEM); } return(0); } /************************************************************************ * Set up MSI/MSIX operation (Performant only) * Four interrupts are available, but we only use 1 right now. If MSI-X * isn't available, try using MSI instead. */ static int ciss_setup_msix(struct ciss_softc *sc) { int val, i; /* Weed out devices that don't actually support MSI */ i = ciss_lookup(sc->ciss_dev); if (ciss_vendor_data[i].flags & CISS_BOARD_NOMSI) return (EINVAL); /* * Only need to use the minimum number of MSI vectors, as the driver * doesn't support directed MSIX interrupts.
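 * Note that MSI/MSI-X resource IDs are 1-based on FreeBSD, which is why
 * ciss_irq_rid[] is filled with i + 1 below, while the INTx path leaves
 * ciss_irq_rid[0] at 0.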
*/ val = pci_msix_count(sc->ciss_dev); if (val < CISS_MSI_COUNT) { val = pci_msi_count(sc->ciss_dev); device_printf(sc->ciss_dev, "got %d MSI messages\n", val); if (val < CISS_MSI_COUNT) return (EINVAL); } val = MIN(val, CISS_MSI_COUNT); if (pci_alloc_msix(sc->ciss_dev, &val) != 0) { if (pci_alloc_msi(sc->ciss_dev, &val) != 0) return (EINVAL); } sc->ciss_msi = val; if (bootverbose) ciss_printf(sc, "Using %d MSIX interrupt%s\n", val, (val != 1) ? "s" : ""); for (i = 0; i < val; i++) sc->ciss_irq_rid[i] = i + 1; return (0); } /************************************************************************ * Set up the Performant structures. */ static int ciss_init_perf(struct ciss_softc *sc) { struct ciss_perf_config *pc = sc->ciss_perf; int reply_size; /* * Create the DMA tag for the reply queue. */ reply_size = sizeof(uint64_t) * sc->ciss_max_requests; if (bus_dma_tag_create(sc->ciss_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ reply_size, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ciss_reply_dmat)) { ciss_printf(sc, "can't allocate reply DMA tag\n"); return(ENOMEM); } /* * Allocate memory and make it available for DMA. */ if (bus_dmamem_alloc(sc->ciss_reply_dmat, (void **)&sc->ciss_reply, BUS_DMA_NOWAIT, &sc->ciss_reply_map)) { ciss_printf(sc, "can't allocate reply memory\n"); return(ENOMEM); } bus_dmamap_load(sc->ciss_reply_dmat, sc->ciss_reply_map, sc->ciss_reply, reply_size, ciss_command_map_helper, &sc->ciss_reply_phys, 0); bzero(sc->ciss_reply, reply_size); sc->ciss_cycle = 0x1; sc->ciss_rqidx = 0; /* * Preload the fetch table with common command sizes. This allows the * hardware to not waste bus cycles for typical i/o commands, but also not * tax the driver to be too exact in choosing sizes. The table is optimized * for page-aligned i/o's, but since most i/o comes from the various pagers, * it's a reasonable assumption to make. */ pc->fetch_count[CISS_SG_FETCH_NONE] = (sizeof(struct ciss_command) + 15) / 16; pc->fetch_count[CISS_SG_FETCH_1] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 1 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_2] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 2 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_4] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 4 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_8] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 8 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_16] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 16 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_32] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 32 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_MAX] = (CISS_COMMAND_ALLOC_SIZE + 15) / 16; pc->rq_size = sc->ciss_max_requests; /* XXX less than the card supports? */ pc->rq_count = 1; /* XXX Hardcode for a single queue */ pc->rq_bank_hi = 0; pc->rq_bank_lo = 0; pc->rq[0].rq_addr_hi = 0x0; pc->rq[0].rq_addr_lo = sc->ciss_reply_phys; return(0); } /************************************************************************ * Wait for the adapter to come ready. */ static int ciss_wait_adapter(struct ciss_softc *sc) { int i; debug_called(1); /* * Wait for the adapter to come ready.
*/ if (!(sc->ciss_cfg->active_method & CISS_TRANSPORT_METHOD_READY)) { ciss_printf(sc, "waiting for adapter to come ready...\n"); for (i = 0; !(sc->ciss_cfg->active_method & CISS_TRANSPORT_METHOD_READY); i++) { DELAY(1000000); /* one second */ if (i > 30) { ciss_printf(sc, "timed out waiting for adapter to come ready\n"); return(EIO); } } } return(0); } /************************************************************************ * Flush the adapter cache. */ static int ciss_flush_adapter(struct ciss_softc *sc) { struct ciss_request *cr; struct ciss_bmic_flush_cache *cbfc; int error, command_status; debug_called(1); cr = NULL; cbfc = NULL; /* * Build a BMIC request to flush the cache. We don't disable * it, as we may be going to do more I/O (eg. we are emulating * the Synchronise Cache command). */ if ((cbfc = malloc(sizeof(*cbfc), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO)) == NULL) { error = ENOMEM; goto out; } if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_FLUSH_CACHE, (void **)&cbfc, sizeof(*cbfc))) != 0) goto out; /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC FLUSH_CACHE command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: break; default: ciss_printf(sc, "error flushing cache (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } out: if (cbfc != NULL) free(cbfc, CISS_MALLOC_CLASS); if (cr != NULL) ciss_release_request(cr); return(error); } static void ciss_soft_reset(struct ciss_softc *sc) { struct ciss_request *cr = NULL; struct ciss_command *cc; int i, error = 0; for (i = 0; i < sc->ciss_max_logical_bus; i++) { /* only reset proxy controllers */ if (sc->ciss_controllers[i].physical.bus == 0) continue; if ((error = ciss_get_request(sc, &cr)) != 0) break; if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_SOFT_RESET, NULL, 0)) != 0) break; cc = cr->cr_cc; cc->header.address = sc->ciss_controllers[i]; if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) break; ciss_release_request(cr); } if (error) ciss_printf(sc, "error resetting controller (%d)\n", error); if (cr != NULL) ciss_release_request(cr); } /************************************************************************ * Allocate memory for the adapter command structures, initialise * the request structures. * * Note that the entire set of commands are allocated in a single * contiguous slab. */ static int ciss_init_requests(struct ciss_softc *sc) { struct ciss_request *cr; int i; debug_called(1); if (bootverbose) ciss_printf(sc, "using %d of %d available commands\n", sc->ciss_max_requests, sc->ciss_cfg->max_outstanding_commands); /* * Create the DMA tag for commands. */ if (bus_dma_tag_create(sc->ciss_parent_dmat, /* parent */ 32, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ CISS_COMMAND_ALLOC_SIZE * sc->ciss_max_requests, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ciss_command_dmat)) { ciss_printf(sc, "can't allocate command DMA tag\n"); return(ENOMEM); } /* * Allocate memory and make it available for DMA. 
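 * The slab allocated below is CISS_COMMAND_ALLOC_SIZE * ciss_max_requests
 * bytes and is carved into one command per request in the loop that
 * follows; note that the loop starts at 1, so request/tag 0 is never
 * placed on the free queue.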
*/ if (bus_dmamem_alloc(sc->ciss_command_dmat, (void **)&sc->ciss_command, BUS_DMA_NOWAIT, &sc->ciss_command_map)) { ciss_printf(sc, "can't allocate command memory\n"); return(ENOMEM); } bus_dmamap_load(sc->ciss_command_dmat, sc->ciss_command_map,sc->ciss_command, CISS_COMMAND_ALLOC_SIZE * sc->ciss_max_requests, ciss_command_map_helper, &sc->ciss_command_phys, 0); bzero(sc->ciss_command, CISS_COMMAND_ALLOC_SIZE * sc->ciss_max_requests); /* * Set up the request and command structures, push requests onto * the free queue. */ for (i = 1; i < sc->ciss_max_requests; i++) { cr = &sc->ciss_request[i]; cr->cr_sc = sc; cr->cr_tag = i; cr->cr_cc = (struct ciss_command *)((uintptr_t)sc->ciss_command + CISS_COMMAND_ALLOC_SIZE * i); cr->cr_ccphys = sc->ciss_command_phys + CISS_COMMAND_ALLOC_SIZE * i; bus_dmamap_create(sc->ciss_buffer_dmat, 0, &cr->cr_datamap); ciss_enqueue_free(cr); } return(0); } static void ciss_command_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { uint32_t *addr; addr = arg; *addr = segs[0].ds_addr; } /************************************************************************ * Identify the adapter, print some information about it. */ static int ciss_identify_adapter(struct ciss_softc *sc) { struct ciss_request *cr; int error, command_status; debug_called(1); cr = NULL; /* * Get a request, allocate storage for the adapter data. */ if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ID_CTLR, (void **)&sc->ciss_id, sizeof(*sc->ciss_id))) != 0) goto out; /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC ID_CTLR command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* buffer right size */ break; case CISS_CMD_STATUS_DATA_UNDERRUN: case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "data over/underrun reading adapter information\n"); default: ciss_printf(sc, "error reading adapter information (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } /* sanity-check reply */ if (!sc->ciss_id->big_map_supported) { ciss_printf(sc, "adapter does not support BIG_MAP\n"); error = ENXIO; goto out; } #if 0 /* XXX later revisions may not need this */ sc->ciss_flags |= CISS_FLAG_FAKE_SYNCH; #endif /* XXX only really required for old 5300 adapters? */ sc->ciss_flags |= CISS_FLAG_BMIC_ABORT; /* print information */ if (bootverbose) { #if 0 /* XXX proxy volumes??? */ ciss_printf(sc, " %d logical drive%s configured\n", sc->ciss_id->configured_logical_drives, (sc->ciss_id->configured_logical_drives == 1) ? 
"" : "s"); #endif ciss_printf(sc, " firmware %4.4s\n", sc->ciss_id->running_firmware_revision); ciss_printf(sc, " %d SCSI channels\n", sc->ciss_id->scsi_bus_count); ciss_printf(sc, " signature '%.4s'\n", sc->ciss_cfg->signature); ciss_printf(sc, " valence %d\n", sc->ciss_cfg->valence); ciss_printf(sc, " supported I/O methods 0x%b\n", sc->ciss_cfg->supported_methods, "\20\1READY\2simple\3performant\4MEMQ\n"); ciss_printf(sc, " active I/O method 0x%b\n", sc->ciss_cfg->active_method, "\20\2simple\3performant\4MEMQ\n"); ciss_printf(sc, " 4G page base 0x%08x\n", sc->ciss_cfg->command_physlimit); ciss_printf(sc, " interrupt coalesce delay %dus\n", sc->ciss_cfg->interrupt_coalesce_delay); ciss_printf(sc, " interrupt coalesce count %d\n", sc->ciss_cfg->interrupt_coalesce_count); ciss_printf(sc, " max outstanding commands %d\n", sc->ciss_cfg->max_outstanding_commands); ciss_printf(sc, " bus types 0x%b\n", sc->ciss_cfg->bus_types, "\20\1ultra2\2ultra3\10fibre1\11fibre2\n"); ciss_printf(sc, " server name '%.16s'\n", sc->ciss_cfg->server_name); ciss_printf(sc, " heartbeat 0x%x\n", sc->ciss_cfg->heartbeat); } out: if (error) { if (sc->ciss_id != NULL) { free(sc->ciss_id, CISS_MALLOC_CLASS); sc->ciss_id = NULL; } } if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Helper routine for generating a list of logical and physical luns. */ static struct ciss_lun_report * ciss_report_luns(struct ciss_softc *sc, int opcode, int nunits) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_report_cdb *crc; struct ciss_lun_report *cll; int command_status; int report_size; int error = 0; debug_called(1); cr = NULL; cll = NULL; /* * Get a request, allocate storage for the address list. */ if ((error = ciss_get_request(sc, &cr)) != 0) goto out; report_size = sizeof(*cll) + nunits * sizeof(union ciss_device_address); if ((cll = malloc(report_size, CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO)) == NULL) { ciss_printf(sc, "can't allocate memory for lun report\n"); error = ENOMEM; goto out; } /* * Build the Report Logical/Physical LUNs command. */ cc = cr->cr_cc; cr->cr_data = cll; cr->cr_length = report_size; cr->cr_flags = CISS_REQ_DATAIN; cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; cc->header.address.physical.bus = 0; cc->header.address.physical.target = 0; cc->cdb.cdb_length = sizeof(*crc); cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 30; /* XXX better suggestions? */ crc = (struct ciss_report_cdb *)&(cc->cdb.cdb[0]); bzero(crc, sizeof(*crc)); crc->opcode = opcode; crc->length = htonl(report_size); /* big-endian field */ cll->list_size = htonl(report_size - sizeof(*cll)); /* big-endian field */ /* * Submit the request and wait for it to complete. (timeout * here should be much greater than above) */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending %d LUN command (%d)\n", opcode, error); goto out; } /* * Check response. Note that data over/underrun is OK. 
*/ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* buffer right size */ case CISS_CMD_STATUS_DATA_UNDERRUN: /* buffer too large, not bad */ break; case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "WARNING: more units than driver limit (%d)\n", CISS_MAX_LOGICAL); break; default: ciss_printf(sc, "error detecting logical drive configuration (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } ciss_release_request(cr); cr = NULL; out: if (cr != NULL) ciss_release_request(cr); if (error && cll != NULL) { free(cll, CISS_MALLOC_CLASS); cll = NULL; } return(cll); } /************************************************************************ * Find logical drives on the adapter. */ static int ciss_init_logical(struct ciss_softc *sc) { struct ciss_lun_report *cll; int error = 0, i, j; int ndrives; debug_called(1); cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_LOGICAL_LUNS, CISS_MAX_LOGICAL); if (cll == NULL) { error = ENXIO; goto out; } /* sanity-check reply */ ndrives = (ntohl(cll->list_size) / sizeof(union ciss_device_address)); if ((ndrives < 0) || (ndrives > CISS_MAX_LOGICAL)) { ciss_printf(sc, "adapter claims to report absurd number of logical drives (%d > %d)\n", ndrives, CISS_MAX_LOGICAL); error = ENXIO; goto out; } /* * Save logical drive information. */ if (bootverbose) { ciss_printf(sc, "%d logical drive%s\n", ndrives, (ndrives > 1 || ndrives == 0) ? "s" : ""); } sc->ciss_logical = malloc(sc->ciss_max_logical_bus * sizeof(struct ciss_ldrive *), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_logical == NULL) { error = ENXIO; goto out; } for (i = 0; i <= sc->ciss_max_logical_bus; i++) { sc->ciss_logical[i] = malloc(CISS_MAX_LOGICAL * sizeof(struct ciss_ldrive), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_logical[i] == NULL) { error = ENXIO; goto out; } for (j = 0; j < CISS_MAX_LOGICAL; j++) sc->ciss_logical[i][j].cl_status = CISS_LD_NONEXISTENT; } for (i = 0; i < CISS_MAX_LOGICAL; i++) { if (i < ndrives) { struct ciss_ldrive *ld; int bus, target; bus = CISS_LUN_TO_BUS(cll->lun[i].logical.lun); target = CISS_LUN_TO_TARGET(cll->lun[i].logical.lun); ld = &sc->ciss_logical[bus][target]; ld->cl_address = cll->lun[i]; ld->cl_controller = &sc->ciss_controllers[bus]; if (ciss_identify_logical(sc, ld) != 0) continue; /* * If the drive has had media exchanged, we should bring it online. */ if (ld->cl_lstatus->media_exchanged) ciss_accept_media(sc, ld); } } out: if (cll != NULL) free(cll, CISS_MALLOC_CLASS); return(error); } static int ciss_init_physical(struct ciss_softc *sc) { struct ciss_lun_report *cll; int error = 0, i; int nphys; int bus, target; debug_called(1); bus = 0; target = 0; cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_PHYSICAL_LUNS, CISS_MAX_PHYSICAL); if (cll == NULL) { error = ENXIO; goto out; } nphys = (ntohl(cll->list_size) / sizeof(union ciss_device_address)); if (bootverbose) { ciss_printf(sc, "%d physical device%s\n", nphys, (nphys > 1 || nphys == 0) ? "s" : ""); } /* * Figure out the bus mapping. * Logical buses include both the local logical bus for local arrays and * proxy buses for remote arrays. Physical buses are numbered by the * controller and represent physical buses that hold physical devices. * We shift these bus numbers so that everything fits into a single flat * numbering space for CAM. Logical buses occupy the first 32 CAM bus * numbers, and the physical bus numbers are shifted to be above that. 
* This results in the various driver arrays being indexed as follows: * * ciss_controllers[] - indexed by logical bus * ciss_cam_sim[] - indexed by both logical and physical, with physical * being shifted by 32. * ciss_logical[][] - indexed by logical bus * ciss_physical[][] - indexed by physical bus * * XXX This is getting more and more hackish. CISS really doesn't play * well with a standard SCSI model; devices are addressed via magic * cookies, not via b/t/l addresses. Since there is no way to store * the cookie in the CAM device object, we have to keep these lookup * tables handy so that the devices can be found quickly at the cost * of wasting memory and having a convoluted lookup scheme. This * driver should probably be converted to block interface. */ /* * If the L2 and L3 SCSI addresses are 0, this signifies a proxy * controller. A proxy controller is another physical controller * behind the primary PCI controller. We need to know about this * so that BMIC commands can be properly targeted. There can be * proxy controllers attached to a single PCI controller, so * find the highest numbered one so the array can be properly * sized. */ sc->ciss_max_logical_bus = 1; for (i = 0; i < nphys; i++) { if (cll->lun[i].physical.extra_address == 0) { bus = cll->lun[i].physical.bus; sc->ciss_max_logical_bus = max(sc->ciss_max_logical_bus, bus) + 1; } else { bus = CISS_EXTRA_BUS2(cll->lun[i].physical.extra_address); sc->ciss_max_physical_bus = max(sc->ciss_max_physical_bus, bus); } } sc->ciss_controllers = malloc(sc->ciss_max_logical_bus * sizeof (union ciss_device_address), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_controllers == NULL) { ciss_printf(sc, "Could not allocate memory for controller map\n"); error = ENOMEM; goto out; } /* setup a map of controller addresses */ for (i = 0; i < nphys; i++) { if (cll->lun[i].physical.extra_address == 0) { sc->ciss_controllers[cll->lun[i].physical.bus] = cll->lun[i]; } } sc->ciss_physical = malloc(sc->ciss_max_physical_bus * sizeof(struct ciss_pdrive *), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_physical == NULL) { ciss_printf(sc, "Could not allocate memory for physical device map\n"); error = ENOMEM; goto out; } for (i = 0; i < sc->ciss_max_physical_bus; i++) { sc->ciss_physical[i] = malloc(sizeof(struct ciss_pdrive) * CISS_MAX_PHYSTGT, CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_physical[i] == NULL) { ciss_printf(sc, "Could not allocate memory for target map\n"); error = ENOMEM; goto out; } } ciss_filter_physical(sc, cll); out: if (cll != NULL) free(cll, CISS_MALLOC_CLASS); return(error); } static int ciss_filter_physical(struct ciss_softc *sc, struct ciss_lun_report *cll) { u_int32_t ea; int i, nphys; int bus, target; nphys = (ntohl(cll->list_size) / sizeof(union ciss_device_address)); for (i = 0; i < nphys; i++) { if (cll->lun[i].physical.extra_address == 0) continue; /* * Filter out devices that we don't want. Level 3 LUNs could * probably be supported, but the docs don't give enough of a * hint to know how. * * The mode field of the physical address is likely set to have * hard disks masked out. Honor it unless the user has overridden * us with the tunable. We also munge the inquiry data for these * disks so that they only show up as passthrough devices. Keeping * them visible in this fashion is useful for doing things like * flashing firmware. 
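 * As a concrete (illustrative) example of the decode below: a device
 * reported by the firmware on physical bus 1 ends up in ciss_physical[0],
 * since CISS_EXTRA_BUS2() yields the firmware's 1-based bus number and we
 * subtract one to index our 0-based array.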
*/ ea = cll->lun[i].physical.extra_address; if ((CISS_EXTRA_BUS3(ea) != 0) || (CISS_EXTRA_TARGET3(ea) != 0) || (CISS_EXTRA_MODE2(ea) == 0x3)) continue; if ((ciss_expose_hidden_physical == 0) && (cll->lun[i].physical.mode == CISS_HDR_ADDRESS_MODE_MASK_PERIPHERAL)) continue; /* * Note: CISS firmware numbers physical busses starting at '1', not * '0'. This numbering is internal to the firmware and is only * used as a hint here. */ bus = CISS_EXTRA_BUS2(ea) - 1; target = CISS_EXTRA_TARGET2(ea); sc->ciss_physical[bus][target].cp_address = cll->lun[i]; sc->ciss_physical[bus][target].cp_online = 1; } return (0); } static int ciss_inquiry_logical(struct ciss_softc *sc, struct ciss_ldrive *ld) { struct ciss_request *cr; struct ciss_command *cc; struct scsi_inquiry *inq; int error; int command_status; cr = NULL; bzero(&ld->cl_geometry, sizeof(ld->cl_geometry)); if ((error = ciss_get_request(sc, &cr)) != 0) goto out; cc = cr->cr_cc; cr->cr_data = &ld->cl_geometry; cr->cr_length = sizeof(ld->cl_geometry); cr->cr_flags = CISS_REQ_DATAIN; cc->header.address = ld->cl_address; cc->cdb.cdb_length = 6; cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 30; inq = (struct scsi_inquiry *)&(cc->cdb.cdb[0]); inq->opcode = INQUIRY; inq->byte2 = SI_EVPD; inq->page_code = CISS_VPD_LOGICAL_DRIVE_GEOMETRY; scsi_ulto2b(sizeof(ld->cl_geometry), inq->length); if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error getting geometry (%d)\n", error); goto out; } ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: case CISS_CMD_STATUS_DATA_UNDERRUN: break; case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "WARNING: Data overrun\n"); break; default: ciss_printf(sc, "Error detecting logical drive geometry (%s)\n", ciss_name_command_status(command_status)); break; } out: if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Identify a logical drive, initialise state related to it. */ static int ciss_identify_logical(struct ciss_softc *sc, struct ciss_ldrive *ld) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_bmic_cdb *cbc; int error, command_status; debug_called(1); cr = NULL; /* * Build a BMIC request to fetch the drive ID. */ if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ID_LDRIVE, (void **)&ld->cl_ldrive, sizeof(*ld->cl_ldrive))) != 0) goto out; cc = cr->cr_cc; cc->header.address = *ld->cl_controller; /* target controller */ cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]); cbc->log_drive = CISS_LUN_TO_TARGET(ld->cl_address.logical.lun); /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC LDRIVE command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* buffer right size */ break; case CISS_CMD_STATUS_DATA_UNDERRUN: case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "data over/underrun reading logical drive ID\n"); default: ciss_printf(sc, "error reading logical drive ID (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } ciss_release_request(cr); cr = NULL; /* * Build a CISS BMIC command to get the logical drive status. */ if ((error = ciss_get_ldrive_status(sc, ld)) != 0) goto out; /* * Get the logical drive geometry. 
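 * (This is done by ciss_inquiry_logical() above, which issues an EVPD
 * INQUIRY for the vendor page CISS_VPD_LOGICAL_DRIVE_GEOMETRY and fills
 * ld->cl_geometry.)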
*/ if ((error = ciss_inquiry_logical(sc, ld)) != 0) goto out; /* * Print the drive's basic characteristics. */ if (bootverbose) { ciss_printf(sc, "logical drive (b%dt%d): %s, %dMB ", CISS_LUN_TO_BUS(ld->cl_address.logical.lun), CISS_LUN_TO_TARGET(ld->cl_address.logical.lun), ciss_name_ldrive_org(ld->cl_ldrive->fault_tolerance), ((ld->cl_ldrive->blocks_available / (1024 * 1024)) * ld->cl_ldrive->block_size)); ciss_print_ldrive(sc, ld); } out: if (error != 0) { /* make the drive not-exist */ ld->cl_status = CISS_LD_NONEXISTENT; if (ld->cl_ldrive != NULL) { free(ld->cl_ldrive, CISS_MALLOC_CLASS); ld->cl_ldrive = NULL; } if (ld->cl_lstatus != NULL) { free(ld->cl_lstatus, CISS_MALLOC_CLASS); ld->cl_lstatus = NULL; } } if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Get status for a logical drive. * * XXX should we also do this in response to Test Unit Ready? */ static int ciss_get_ldrive_status(struct ciss_softc *sc, struct ciss_ldrive *ld) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_bmic_cdb *cbc; int error, command_status; /* * Build a CISS BMIC command to get the logical drive status. */ if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ID_LSTATUS, (void **)&ld->cl_lstatus, sizeof(*ld->cl_lstatus))) != 0) goto out; cc = cr->cr_cc; cc->header.address = *ld->cl_controller; /* target controller */ cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]); cbc->log_drive = CISS_LUN_TO_TARGET(ld->cl_address.logical.lun); /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC LSTATUS command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* buffer right size */ break; case CISS_CMD_STATUS_DATA_UNDERRUN: case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "data over/underrun reading logical drive status\n"); default: ciss_printf(sc, "error reading logical drive status (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } /* * Set the drive's summary status based on the returned status. * * XXX testing shows that a failed JBOD drive comes back at next * boot in "queued for expansion" mode. WTF? */ ld->cl_status = ciss_decode_ldrive_status(ld->cl_lstatus->status); out: if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Notify the adapter of a config update. */ static int ciss_update_config(struct ciss_softc *sc) { int i; debug_called(1); CISS_TL_SIMPLE_WRITE(sc, CISS_TL_SIMPLE_IDBR, CISS_TL_SIMPLE_IDBR_CFG_TABLE); for (i = 0; i < 1000; i++) { if (!(CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_IDBR) & CISS_TL_SIMPLE_IDBR_CFG_TABLE)) { return(0); } DELAY(1000); } return(1); } /************************************************************************ * Accept new media into a logical drive. * * XXX The drive has previously been offline; it would be good if we * could make sure it's not open right now. */ static int ciss_accept_media(struct ciss_softc *sc, struct ciss_ldrive *ld) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_bmic_cdb *cbc; int command_status; int error = 0, ldrive; ldrive = CISS_LUN_TO_TARGET(ld->cl_address.logical.lun); debug(0, "bringing logical drive %d back online"); /* * Build a CISS BMIC command to bring the drive back online. 
*/ if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ACCEPT_MEDIA, NULL, 0)) != 0) goto out; cc = cr->cr_cc; cc->header.address = *ld->cl_controller; /* target controller */ cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]); cbc->log_drive = ldrive; /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC ACCEPT MEDIA command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* all OK */ /* we should get a logical drive status changed event here */ break; default: ciss_printf(cr->cr_sc, "error accepting media into failed logical drive (%s)\n", ciss_name_command_status(command_status)); break; } out: if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Release adapter resources. */ static void ciss_free(struct ciss_softc *sc) { struct ciss_request *cr; int i, j; debug_called(1); /* we're going away */ sc->ciss_flags |= CISS_FLAG_ABORTING; /* terminate the periodic heartbeat routine */ callout_stop(&sc->ciss_periodic); /* cancel the Event Notify chain */ ciss_notify_abort(sc); ciss_kill_notify_thread(sc); /* disconnect from CAM */ if (sc->ciss_cam_sim) { for (i = 0; i < sc->ciss_max_logical_bus; i++) { if (sc->ciss_cam_sim[i]) { xpt_bus_deregister(cam_sim_path(sc->ciss_cam_sim[i])); cam_sim_free(sc->ciss_cam_sim[i], 0); } } for (i = CISS_PHYSICAL_BASE; i < sc->ciss_max_physical_bus + CISS_PHYSICAL_BASE; i++) { if (sc->ciss_cam_sim[i]) { xpt_bus_deregister(cam_sim_path(sc->ciss_cam_sim[i])); cam_sim_free(sc->ciss_cam_sim[i], 0); } } free(sc->ciss_cam_sim, CISS_MALLOC_CLASS); } if (sc->ciss_cam_devq) cam_simq_free(sc->ciss_cam_devq); /* remove the control device */ mtx_unlock(&sc->ciss_mtx); if (sc->ciss_dev_t != NULL) destroy_dev(sc->ciss_dev_t); /* Final cleanup of the callout. 
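 * callout_drain() may sleep waiting for a running ciss_periodic() to
 * finish, so it is done here after ciss_mtx has been dropped, and only
 * then is the mutex destroyed.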
*/ callout_drain(&sc->ciss_periodic); mtx_destroy(&sc->ciss_mtx); /* free the controller data */ if (sc->ciss_id != NULL) free(sc->ciss_id, CISS_MALLOC_CLASS); /* release I/O resources */ if (sc->ciss_regs_resource != NULL) bus_release_resource(sc->ciss_dev, SYS_RES_MEMORY, sc->ciss_regs_rid, sc->ciss_regs_resource); if (sc->ciss_cfg_resource != NULL) bus_release_resource(sc->ciss_dev, SYS_RES_MEMORY, sc->ciss_cfg_rid, sc->ciss_cfg_resource); if (sc->ciss_intr != NULL) bus_teardown_intr(sc->ciss_dev, sc->ciss_irq_resource, sc->ciss_intr); if (sc->ciss_irq_resource != NULL) bus_release_resource(sc->ciss_dev, SYS_RES_IRQ, sc->ciss_irq_rid[0], sc->ciss_irq_resource); if (sc->ciss_msi) pci_release_msi(sc->ciss_dev); while ((cr = ciss_dequeue_free(sc)) != NULL) bus_dmamap_destroy(sc->ciss_buffer_dmat, cr->cr_datamap); if (sc->ciss_buffer_dmat) bus_dma_tag_destroy(sc->ciss_buffer_dmat); /* destroy command memory and DMA tag */ if (sc->ciss_command != NULL) { bus_dmamap_unload(sc->ciss_command_dmat, sc->ciss_command_map); bus_dmamem_free(sc->ciss_command_dmat, sc->ciss_command, sc->ciss_command_map); } if (sc->ciss_command_dmat) bus_dma_tag_destroy(sc->ciss_command_dmat); if (sc->ciss_reply) { bus_dmamap_unload(sc->ciss_reply_dmat, sc->ciss_reply_map); bus_dmamem_free(sc->ciss_reply_dmat, sc->ciss_reply, sc->ciss_reply_map); } if (sc->ciss_reply_dmat) bus_dma_tag_destroy(sc->ciss_reply_dmat); /* destroy DMA tags */ if (sc->ciss_parent_dmat) bus_dma_tag_destroy(sc->ciss_parent_dmat); if (sc->ciss_logical) { for (i = 0; i <= sc->ciss_max_logical_bus; i++) { for (j = 0; j < CISS_MAX_LOGICAL; j++) { if (sc->ciss_logical[i][j].cl_ldrive) free(sc->ciss_logical[i][j].cl_ldrive, CISS_MALLOC_CLASS); if (sc->ciss_logical[i][j].cl_lstatus) free(sc->ciss_logical[i][j].cl_lstatus, CISS_MALLOC_CLASS); } free(sc->ciss_logical[i], CISS_MALLOC_CLASS); } free(sc->ciss_logical, CISS_MALLOC_CLASS); } if (sc->ciss_physical) { for (i = 0; i < sc->ciss_max_physical_bus; i++) free(sc->ciss_physical[i], CISS_MALLOC_CLASS); free(sc->ciss_physical, CISS_MALLOC_CLASS); } if (sc->ciss_controllers) free(sc->ciss_controllers, CISS_MALLOC_CLASS); } /************************************************************************ * Give a command to the adapter. * * Note that this uses the simple transport layer directly. If we * want to add support for other layers, we'll need a switch of some * sort. * * Note that the simple transport layer has no way of refusing a * command; we only have as many request structures as the adapter * supports commands, so we don't have to check (this presumes that * the adapter can handle commands as fast as we throw them at it). */ static int ciss_start(struct ciss_request *cr) { struct ciss_command *cc; /* XXX debugging only */ int error; cc = cr->cr_cc; debug(2, "post command %d tag %d ", cr->cr_tag, cc->header.host_tag); /* * Map the request's data. */ if ((error = ciss_map_request(cr))) return(error); #if 0 ciss_print_request(cr); #endif return(0); } /************************************************************************ * Fetch completed request(s) from the adapter, queue them for * completion handling. * * Note that this uses the simple transport layer directly. If we * want to add support for other layers, we'll need a switch of some * sort. * * Note that the simple transport mechanism does not require any * reentrancy protection; the OPQ read is atomic. 
If there is a * chance of a race with something else that might move the request * off the busy list, then we will have to lock against that * (eg. timeouts, etc.) */ static void ciss_done(struct ciss_softc *sc, cr_qhead_t *qh) { struct ciss_request *cr; struct ciss_command *cc; u_int32_t tag, index; debug_called(3); /* * Loop quickly taking requests from the adapter and moving them * to the completed queue. */ for (;;) { tag = CISS_TL_SIMPLE_FETCH_CMD(sc); if (tag == CISS_TL_SIMPLE_OPQ_EMPTY) break; index = tag >> 2; debug(2, "completed command %d%s", index, (tag & CISS_HDR_HOST_TAG_ERROR) ? " with error" : ""); if (index >= sc->ciss_max_requests) { ciss_printf(sc, "completed invalid request %d (0x%x)\n", index, tag); continue; } cr = &(sc->ciss_request[index]); cc = cr->cr_cc; cc->header.host_tag = tag; /* not updated by adapter */ ciss_enqueue_complete(cr, qh); } } static void ciss_perf_done(struct ciss_softc *sc, cr_qhead_t *qh) { struct ciss_request *cr; struct ciss_command *cc; u_int32_t tag, index; debug_called(3); /* * Loop quickly taking requests from the adapter and moving them * to the completed queue. */ for (;;) { tag = sc->ciss_reply[sc->ciss_rqidx]; if ((tag & CISS_CYCLE_MASK) != sc->ciss_cycle) break; index = tag >> 2; debug(2, "completed command %d%s\n", index, (tag & CISS_HDR_HOST_TAG_ERROR) ? " with error" : ""); if (index < sc->ciss_max_requests) { cr = &(sc->ciss_request[index]); cc = cr->cr_cc; cc->header.host_tag = tag; /* not updated by adapter */ ciss_enqueue_complete(cr, qh); } else { ciss_printf(sc, "completed invalid request %d (0x%x)\n", index, tag); } if (++sc->ciss_rqidx == sc->ciss_max_requests) { sc->ciss_rqidx = 0; sc->ciss_cycle ^= 1; } } } /************************************************************************ * Take an interrupt from the adapter. */ static void ciss_intr(void *arg) { cr_qhead_t qh; struct ciss_softc *sc = (struct ciss_softc *)arg; /* * The only interrupt we recognise indicates that there are * entries in the outbound post queue. */ STAILQ_INIT(&qh); ciss_done(sc, &qh); mtx_lock(&sc->ciss_mtx); ciss_complete(sc, &qh); mtx_unlock(&sc->ciss_mtx); } static void ciss_perf_intr(void *arg) { struct ciss_softc *sc = (struct ciss_softc *)arg; /* Clear the interrupt and flush the bridges. Docs say that the flush * needs to be done twice, which doesn't seem right. */ CISS_TL_PERF_CLEAR_INT(sc); CISS_TL_PERF_FLUSH_INT(sc); ciss_perf_msi_intr(sc); } static void ciss_perf_msi_intr(void *arg) { cr_qhead_t qh; struct ciss_softc *sc = (struct ciss_softc *)arg; STAILQ_INIT(&qh); ciss_perf_done(sc, &qh); mtx_lock(&sc->ciss_mtx); ciss_complete(sc, &qh); mtx_unlock(&sc->ciss_mtx); } /************************************************************************ * Process completed requests. * * Requests can be completed in three fashions: * * - by invoking a callback function (cr_complete is non-null) * - by waking up a sleeper (cr_flags has CISS_REQ_SLEEP set) * - by clearing the CISS_REQ_POLL flag in interrupt/timeout context */ static void ciss_complete(struct ciss_softc *sc, cr_qhead_t *qh) { struct ciss_request *cr; debug_called(2); /* * Loop taking requests off the completed queue and performing * completion processing on them. */ for (;;) { if ((cr = ciss_dequeue_complete(sc, qh)) == NULL) break; ciss_unmap_request(cr); if ((cr->cr_flags & CISS_REQ_BUSY) == 0) ciss_printf(sc, "WARNING: completing non-busy request\n"); cr->cr_flags &= ~CISS_REQ_BUSY; /* * If the request has a callback, invoke it. 
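 * Note that once the adapter is up, completion callbacks are invoked with
 * ciss_mtx held, since the interrupt handlers take the lock around
 * ciss_complete().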
*/ if (cr->cr_complete != NULL) { cr->cr_complete(cr); continue; } /* * If someone is sleeping on this request, wake them up. */ if (cr->cr_flags & CISS_REQ_SLEEP) { cr->cr_flags &= ~CISS_REQ_SLEEP; wakeup(cr); continue; } /* * If someone is polling this request for completion, signal. */ if (cr->cr_flags & CISS_REQ_POLL) { cr->cr_flags &= ~CISS_REQ_POLL; continue; } /* * Give up and throw the request back on the free queue. This * should never happen; resources will probably be lost. */ ciss_printf(sc, "WARNING: completed command with no submitter\n"); ciss_enqueue_free(cr); } } /************************************************************************ * Report on the completion status of a request, and pass back SCSI * and command status values. */ static int _ciss_report_request(struct ciss_request *cr, int *command_status, int *scsi_status, const char *func) { struct ciss_command *cc; struct ciss_error_info *ce; debug_called(2); cc = cr->cr_cc; ce = (struct ciss_error_info *)&(cc->sg[0]); /* * We don't consider data under/overrun an error for the Report * Logical/Physical LUNs commands. */ if ((cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR) && ((ce->command_status == CISS_CMD_STATUS_DATA_OVERRUN) || (ce->command_status == CISS_CMD_STATUS_DATA_UNDERRUN)) && ((cc->cdb.cdb[0] == CISS_OPCODE_REPORT_LOGICAL_LUNS) || (cc->cdb.cdb[0] == CISS_OPCODE_REPORT_PHYSICAL_LUNS) || (cc->cdb.cdb[0] == INQUIRY))) { cc->header.host_tag &= ~CISS_HDR_HOST_TAG_ERROR; debug(2, "ignoring irrelevant under/overrun error"); } /* * Check the command's error bit, if clear, there's no status and * everything is OK. */ if (!(cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR)) { if (scsi_status != NULL) *scsi_status = SCSI_STATUS_OK; if (command_status != NULL) *command_status = CISS_CMD_STATUS_SUCCESS; return(0); } else { if (command_status != NULL) *command_status = ce->command_status; if (scsi_status != NULL) { if (ce->command_status == CISS_CMD_STATUS_TARGET_STATUS) { *scsi_status = ce->scsi_status; } else { *scsi_status = -1; } } if (bootverbose) ciss_printf(cr->cr_sc, "command status 0x%x (%s) scsi status 0x%x\n", ce->command_status, ciss_name_command_status(ce->command_status), ce->scsi_status); if (ce->command_status == CISS_CMD_STATUS_INVALID_COMMAND) { ciss_printf(cr->cr_sc, "invalid command, offense size %d at %d, value 0x%x, function %s\n", ce->additional_error_info.invalid_command.offense_size, ce->additional_error_info.invalid_command.offense_offset, ce->additional_error_info.invalid_command.offense_value, func); } } #if 0 ciss_print_request(cr); #endif return(1); } /************************************************************************ * Issue a request and don't return until it's completed. * * Depending on adapter status, we may poll or sleep waiting for * completion. */ static int ciss_synch_request(struct ciss_request *cr, int timeout) { if (cr->cr_sc->ciss_flags & CISS_FLAG_RUNNING) { return(ciss_wait_request(cr, timeout)); } else { return(ciss_poll_request(cr, timeout)); } } /************************************************************************ * Issue a request and poll for completion. * * Timeout in milliseconds. 
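 *
 * Typical caller pattern for these synchronous paths (an illustrative
 * sketch only; see ciss_report_luns() above for a real instance):
 *
 *	struct ciss_request *cr = NULL;
 *	int error, command_status;
 *
 *	if ((error = ciss_get_request(sc, &cr)) != 0)
 *		return (error);
 *	... fill in cr->cr_data, cr->cr_length, cr->cr_flags and cr->cr_cc ...
 *	if ((error = ciss_synch_request(cr, 60 * 1000)) == 0)
 *		ciss_report_request(cr, &command_status, NULL);
 *	ciss_release_request(cr);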
*/ static int ciss_poll_request(struct ciss_request *cr, int timeout) { cr_qhead_t qh; struct ciss_softc *sc; int error; debug_called(2); STAILQ_INIT(&qh); sc = cr->cr_sc; cr->cr_flags |= CISS_REQ_POLL; if ((error = ciss_start(cr)) != 0) return(error); do { if (sc->ciss_perf) ciss_perf_done(sc, &qh); else ciss_done(sc, &qh); ciss_complete(sc, &qh); if (!(cr->cr_flags & CISS_REQ_POLL)) return(0); DELAY(1000); } while (timeout-- >= 0); return(EWOULDBLOCK); } /************************************************************************ * Issue a request and sleep waiting for completion. * * Timeout in milliseconds. Note that a spurious wakeup will reset * the timeout. */ static int ciss_wait_request(struct ciss_request *cr, int timeout) { int error; debug_called(2); cr->cr_flags |= CISS_REQ_SLEEP; if ((error = ciss_start(cr)) != 0) return(error); while ((cr->cr_flags & CISS_REQ_SLEEP) && (error != EWOULDBLOCK)) { error = msleep(cr, &cr->cr_sc->ciss_mtx, PRIBIO, "cissREQ", (timeout * hz) / 1000); } return(error); } #if 0 /************************************************************************ * Abort a request. Note that a potential exists here to race the * request being completed; the caller must deal with this. */ static int ciss_abort_request(struct ciss_request *ar) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_message_cdb *cmc; int error; debug_called(1); /* get a request */ if ((error = ciss_get_request(ar->cr_sc, &cr)) != 0) return(error); /* build the abort command */ cc = cr->cr_cc; cc->header.address.mode.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; /* addressing? */ cc->header.address.physical.target = 0; cc->header.address.physical.bus = 0; cc->cdb.cdb_length = sizeof(*cmc); cc->cdb.type = CISS_CDB_TYPE_MESSAGE; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_NONE; cc->cdb.timeout = 30; cmc = (struct ciss_message_cdb *)&(cc->cdb.cdb[0]); cmc->opcode = CISS_OPCODE_MESSAGE_ABORT; cmc->type = CISS_MESSAGE_ABORT_TASK; cmc->abort_tag = ar->cr_tag; /* endianness?? */ /* * Send the request and wait for a response. If we believe we * aborted the request OK, clear the flag that indicates it's * running. */ error = ciss_synch_request(cr, 35 * 1000); if (!error) error = ciss_report_request(cr, NULL, NULL); ciss_release_request(cr); return(error); } #endif /************************************************************************ * Fetch and initialise a request */ static int ciss_get_request(struct ciss_softc *sc, struct ciss_request **crp) { struct ciss_request *cr; debug_called(2); /* * Get a request and clean it up. */ if ((cr = ciss_dequeue_free(sc)) == NULL) return(ENOMEM); cr->cr_data = NULL; cr->cr_flags = 0; cr->cr_complete = NULL; cr->cr_private = NULL; cr->cr_sg_tag = CISS_SG_MAX; /* Backstop to prevent accidents */ ciss_preen_command(cr); *crp = cr; return(0); } static void ciss_preen_command(struct ciss_request *cr) { struct ciss_command *cc; u_int32_t cmdphys; /* * Clean up the command structure. * * Note that we set up the error_info structure here, since the * length can be overwritten by any command. 
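 * The request tag is stored shifted left by two because the adapter
 * appears to use the low bits of the returned tag for status (the error
 * flag checked via CISS_HDR_HOST_TAG_ERROR and, in performant mode, the
 * consumer cycle bit); ciss_done()/ciss_perf_done() shift it back down
 * with tag >> 2.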
*/ cc = cr->cr_cc; cc->header.sg_in_list = 0; /* kinda inefficient this way */ cc->header.sg_total = 0; cc->header.host_tag = cr->cr_tag << 2; cc->header.host_tag_zeroes = 0; cmdphys = cr->cr_ccphys; cc->error_info.error_info_address = cmdphys + sizeof(struct ciss_command); cc->error_info.error_info_length = CISS_COMMAND_ALLOC_SIZE - sizeof(struct ciss_command); } /************************************************************************ * Release a request to the free list. */ static void ciss_release_request(struct ciss_request *cr) { struct ciss_softc *sc; debug_called(2); sc = cr->cr_sc; /* release the request to the free queue */ ciss_requeue_free(cr); } /************************************************************************ * Allocate a request that will be used to send a BMIC command. Do some * of the common setup here to avoid duplicating it everywhere else. */ static int ciss_get_bmic_request(struct ciss_softc *sc, struct ciss_request **crp, int opcode, void **bufp, size_t bufsize) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_bmic_cdb *cbc; void *buf; int error; int dataout; debug_called(2); cr = NULL; buf = NULL; /* * Get a request. */ if ((error = ciss_get_request(sc, &cr)) != 0) goto out; /* * Allocate data storage if requested, determine the data direction. */ dataout = 0; if ((bufsize > 0) && (bufp != NULL)) { if (*bufp == NULL) { if ((buf = malloc(bufsize, CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO)) == NULL) { error = ENOMEM; goto out; } } else { buf = *bufp; dataout = 1; /* we are given a buffer, so we are writing */ } } /* * Build a CISS BMIC command to get the logical drive ID. */ cr->cr_data = buf; cr->cr_length = bufsize; if (!dataout) cr->cr_flags = CISS_REQ_DATAIN; cc = cr->cr_cc; cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; cc->header.address.physical.bus = 0; cc->header.address.physical.target = 0; cc->cdb.cdb_length = sizeof(*cbc); cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = dataout ? CISS_CDB_DIRECTION_WRITE : CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 0; cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]); bzero(cbc, sizeof(*cbc)); cbc->opcode = dataout ? CISS_ARRAY_CONTROLLER_WRITE : CISS_ARRAY_CONTROLLER_READ; cbc->bmic_opcode = opcode; cbc->size = htons((u_int16_t)bufsize); out: if (error) { if (cr != NULL) ciss_release_request(cr); } else { *crp = cr; if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL)) *bufp = buf; } return(error); } /************************************************************************ * Handle a command passed in from userspace. */ static int ciss_user_command(struct ciss_softc *sc, IOCTL_Command_struct *ioc) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_error_info *ce; int error = 0; debug_called(1); cr = NULL; /* * Get a request. */ while (ciss_get_request(sc, &cr) != 0) msleep(sc, &sc->ciss_mtx, PPAUSE, "cissREQ", hz); cc = cr->cr_cc; /* * Allocate an in-kernel databuffer if required, copy in user data. */ mtx_unlock(&sc->ciss_mtx); cr->cr_length = ioc->buf_size; if (ioc->buf_size > 0) { if ((cr->cr_data = malloc(ioc->buf_size, CISS_MALLOC_CLASS, M_NOWAIT)) == NULL) { error = ENOMEM; goto out_unlocked; } if ((error = copyin(ioc->buf, cr->cr_data, ioc->buf_size))) { debug(0, "copyin: bad data buffer %p/%d", ioc->buf, ioc->buf_size); goto out_unlocked; } } /* * Build the request based on the user command. 
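 *
 * The Linux-compatible IOCTL_Command_struct carries the target address in
 * LUN_info and the CDB in Request; both are copied verbatim into the CISS
 * command below.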
*/ bcopy(&ioc->LUN_info, &cc->header.address, sizeof(cc->header.address)); bcopy(&ioc->Request, &cc->cdb, sizeof(cc->cdb)); /* XXX anything else to populate here? */ mtx_lock(&sc->ciss_mtx); /* * Run the command. */ if ((error = ciss_synch_request(cr, 60 * 1000))) { debug(0, "request failed - %d", error); goto out; } /* * Check to see if the command succeeded. */ ce = (struct ciss_error_info *)&(cc->sg[0]); if ((cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR) == 0) bzero(ce, sizeof(*ce)); /* * Copy the results back to the user. */ bcopy(ce, &ioc->error_info, sizeof(*ce)); mtx_unlock(&sc->ciss_mtx); if ((ioc->buf_size > 0) && (error = copyout(cr->cr_data, ioc->buf, ioc->buf_size))) { debug(0, "copyout: bad data buffer %p/%d", ioc->buf, ioc->buf_size); goto out_unlocked; } /* done OK */ error = 0; out_unlocked: mtx_lock(&sc->ciss_mtx); out: if ((cr != NULL) && (cr->cr_data != NULL)) free(cr->cr_data, CISS_MALLOC_CLASS); if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Map a request into bus-visible space, initialise the scatter/gather * list. */ static int ciss_map_request(struct ciss_request *cr) { struct ciss_softc *sc; int error = 0; debug_called(2); sc = cr->cr_sc; /* check that mapping is necessary */ if (cr->cr_flags & CISS_REQ_MAPPED) return(0); cr->cr_flags |= CISS_REQ_MAPPED; bus_dmamap_sync(sc->ciss_command_dmat, sc->ciss_command_map, BUS_DMASYNC_PREWRITE); if (cr->cr_data != NULL) { if (cr->cr_flags & CISS_REQ_CCB) error = bus_dmamap_load_ccb(sc->ciss_buffer_dmat, cr->cr_datamap, cr->cr_data, ciss_request_map_helper, cr, 0); else error = bus_dmamap_load(sc->ciss_buffer_dmat, cr->cr_datamap, cr->cr_data, cr->cr_length, ciss_request_map_helper, cr, 0); if (error != 0) return (error); } else { /* * Post the command to the adapter. */ cr->cr_sg_tag = CISS_SG_NONE; cr->cr_flags |= CISS_REQ_BUSY; if (sc->ciss_perf) CISS_TL_PERF_POST_CMD(sc, cr); else CISS_TL_SIMPLE_POST_CMD(sc, cr->cr_ccphys); } return(0); } static void ciss_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct ciss_command *cc; struct ciss_request *cr; struct ciss_softc *sc; int i; debug_called(2); cr = (struct ciss_request *)arg; sc = cr->cr_sc; cc = cr->cr_cc; for (i = 0; i < nseg; i++) { cc->sg[i].address = segs[i].ds_addr; cc->sg[i].length = segs[i].ds_len; cc->sg[i].extension = 0; } /* we leave the s/g table entirely within the command */ cc->header.sg_in_list = nseg; cc->header.sg_total = nseg; if (cr->cr_flags & CISS_REQ_DATAIN) bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_PREREAD); if (cr->cr_flags & CISS_REQ_DATAOUT) bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_PREWRITE); if (nseg == 0) cr->cr_sg_tag = CISS_SG_NONE; else if (nseg == 1) cr->cr_sg_tag = CISS_SG_1; else if (nseg == 2) cr->cr_sg_tag = CISS_SG_2; else if (nseg <= 4) cr->cr_sg_tag = CISS_SG_4; else if (nseg <= 8) cr->cr_sg_tag = CISS_SG_8; else if (nseg <= 16) cr->cr_sg_tag = CISS_SG_16; else if (nseg <= 32) cr->cr_sg_tag = CISS_SG_32; else cr->cr_sg_tag = CISS_SG_MAX; /* * Post the command to the adapter. */ cr->cr_flags |= CISS_REQ_BUSY; if (sc->ciss_perf) CISS_TL_PERF_POST_CMD(sc, cr); else CISS_TL_SIMPLE_POST_CMD(sc, cr->cr_ccphys); } /************************************************************************ * Unmap a request from bus-visible space. 
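 *
 * This is the counterpart of ciss_map_request() above: the PREREAD/PREWRITE
 * syncs issued when the buffer was mapped are paired here with the matching
 * POSTREAD/POSTWRITE syncs before the data map is unloaded.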
*/ static void ciss_unmap_request(struct ciss_request *cr) { struct ciss_softc *sc; debug_called(2); sc = cr->cr_sc; /* check that unmapping is necessary */ if ((cr->cr_flags & CISS_REQ_MAPPED) == 0) return; bus_dmamap_sync(sc->ciss_command_dmat, sc->ciss_command_map, BUS_DMASYNC_POSTWRITE); if (cr->cr_data == NULL) goto out; if (cr->cr_flags & CISS_REQ_DATAIN) bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_POSTREAD); if (cr->cr_flags & CISS_REQ_DATAOUT) bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->ciss_buffer_dmat, cr->cr_datamap); out: cr->cr_flags &= ~CISS_REQ_MAPPED; } /************************************************************************ * Attach the driver to CAM. * * We put all the logical drives on a single SCSI bus. */ static int ciss_cam_init(struct ciss_softc *sc) { int i, maxbus; debug_called(1); /* * Allocate a devq. We can reuse this for the masked physical * devices if we decide to export these as well. */ if ((sc->ciss_cam_devq = cam_simq_alloc(sc->ciss_max_requests - 2)) == NULL) { ciss_printf(sc, "can't allocate CAM SIM queue\n"); return(ENOMEM); } /* * Create a SIM. * * This naturally wastes a bit of memory. The alternative is to allocate * and register each bus as it is found, and then track them on a linked * list. Unfortunately, the driver has a few places where it needs to * look up the SIM based solely on bus number, and it's unclear whether * a list traversal would work for these situations. */ maxbus = max(sc->ciss_max_logical_bus, sc->ciss_max_physical_bus + CISS_PHYSICAL_BASE); sc->ciss_cam_sim = malloc(maxbus * sizeof(struct cam_sim*), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_cam_sim == NULL) { ciss_printf(sc, "can't allocate memory for controller SIM\n"); return(ENOMEM); } for (i = 0; i < sc->ciss_max_logical_bus; i++) { if ((sc->ciss_cam_sim[i] = cam_sim_alloc(ciss_cam_action, ciss_cam_poll, "ciss", sc, device_get_unit(sc->ciss_dev), &sc->ciss_mtx, 2, sc->ciss_max_requests - 2, sc->ciss_cam_devq)) == NULL) { ciss_printf(sc, "can't allocate CAM SIM for controller %d\n", i); return(ENOMEM); } /* * Register bus with this SIM. 
*/ mtx_lock(&sc->ciss_mtx); if (i == 0 || sc->ciss_controllers[i].physical.bus != 0) { if (xpt_bus_register(sc->ciss_cam_sim[i], sc->ciss_dev, i) != 0) { ciss_printf(sc, "can't register SCSI bus %d\n", i); mtx_unlock(&sc->ciss_mtx); return (ENXIO); } } mtx_unlock(&sc->ciss_mtx); } for (i = CISS_PHYSICAL_BASE; i < sc->ciss_max_physical_bus + CISS_PHYSICAL_BASE; i++) { if ((sc->ciss_cam_sim[i] = cam_sim_alloc(ciss_cam_action, ciss_cam_poll, "ciss", sc, device_get_unit(sc->ciss_dev), &sc->ciss_mtx, 1, sc->ciss_max_requests - 2, sc->ciss_cam_devq)) == NULL) { ciss_printf(sc, "can't allocate CAM SIM for controller %d\n", i); return (ENOMEM); } mtx_lock(&sc->ciss_mtx); if (xpt_bus_register(sc->ciss_cam_sim[i], sc->ciss_dev, i) != 0) { ciss_printf(sc, "can't register SCSI bus %d\n", i); mtx_unlock(&sc->ciss_mtx); return (ENXIO); } mtx_unlock(&sc->ciss_mtx); } return(0); } /************************************************************************ * Initiate a rescan of the 'logical devices' SIM */ static void ciss_cam_rescan_target(struct ciss_softc *sc, int bus, int target) { union ccb *ccb; debug_called(1); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) { ciss_printf(sc, "rescan failed (can't allocate CCB)\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sc->ciss_cam_sim[bus]), target, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { ciss_printf(sc, "rescan failed (can't create path)\n"); xpt_free_ccb(ccb); return; } xpt_rescan(ccb); /* scan is now in progress */ } /************************************************************************ * Handle requests coming from CAM */ static void ciss_cam_action(struct cam_sim *sim, union ccb *ccb) { struct ciss_softc *sc; struct ccb_scsiio *csio; int bus, target; int physical; sc = cam_sim_softc(sim); bus = cam_sim_bus(sim); csio = (struct ccb_scsiio *)&ccb->csio; target = csio->ccb_h.target_id; physical = CISS_IS_PHYSICAL(bus); switch (ccb->ccb_h.func_code) { /* perform SCSI I/O */ case XPT_SCSI_IO: if (!ciss_cam_action_io(sim, csio)) return; break; /* perform geometry calculations */ case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg = &ccb->ccg; struct ciss_ldrive *ld; debug(1, "XPT_CALC_GEOMETRY %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun); ld = NULL; if (!physical) ld = &sc->ciss_logical[bus][target]; /* * Use the cached geometry settings unless the fault tolerance * is invalid. */ if (physical || ld->cl_geometry.fault_tolerance == 0xFF) { u_int32_t secs_per_cylinder; ccg->heads = 255; ccg->secs_per_track = 32; secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; } else { ccg->heads = ld->cl_geometry.heads; ccg->secs_per_track = ld->cl_geometry.sectors; ccg->cylinders = ntohs(ld->cl_geometry.cylinders); } ccb->ccb_h.status = CAM_REQ_CMP; break; } /* handle path attribute inquiry */ case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; debug(1, "XPT_PATH_INQ %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun); cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE; /* XXX is this correct? 
*/ cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->max_target = CISS_MAX_LOGICAL; cpi->max_lun = 0; /* 'logical drive' channel only */ cpi->initiator_id = CISS_MAX_LOGICAL; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "msmith@freebsd.org", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 132 * 1024; /* XXX what to set this to? */ cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->maxio = (CISS_MAX_SG_ELEMENTS - 1) * PAGE_SIZE; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; int bus, target; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; bus = cam_sim_bus(sim); target = cts->ccb_h.target_id; debug(1, "XPT_GET_TRAN_SETTINGS %d:%d", bus, target); /* disconnect always OK */ cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->valid = CTS_SPI_VALID_DISC; spi->flags = CTS_SPI_FLAGS_DISC_ENB; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; cts->ccb_h.status = CAM_REQ_CMP; break; } default: /* we can't do this */ debug(1, "unsupported func_code = 0x%x", ccb->ccb_h.func_code); ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } /************************************************************************ * Handle a CAM SCSI I/O request. */ static int ciss_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio) { struct ciss_softc *sc; int bus, target; struct ciss_request *cr; struct ciss_command *cc; int error; sc = cam_sim_softc(sim); bus = cam_sim_bus(sim); target = csio->ccb_h.target_id; debug(2, "XPT_SCSI_IO %d:%d:%d", bus, target, csio->ccb_h.target_lun); /* check that the CDB pointer is not to a physical address */ if ((csio->ccb_h.flags & CAM_CDB_POINTER) && (csio->ccb_h.flags & CAM_CDB_PHYS)) { debug(3, " CDB pointer is to physical address"); csio->ccb_h.status = CAM_REQ_CMP_ERR; } - /* if there is data transfer, it must be to/from a virtual address */ - if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if (csio->ccb_h.flags & CAM_DATA_PHYS) { /* we can't map it */ - debug(3, " data pointer is to physical address"); - csio->ccb_h.status = CAM_REQ_CMP_ERR; - } - if (csio->ccb_h.flags & CAM_SCATTER_VALID) { /* we want to do the s/g setup */ - debug(3, " data has premature s/g setup"); - csio->ccb_h.status = CAM_REQ_CMP_ERR; - } - } - /* abandon aborted ccbs or those that have failed validation */ if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { debug(3, "abandoning CCB due to abort/validation failure"); return(EINVAL); } /* handle emulation of some SCSI commands ourselves */ if (ciss_cam_emulate(sc, csio)) return(0); /* * Get a request to manage this command. If we can't, return the * ccb, freeze the queue and flag so that we unfreeze it when a * request completes. */ if ((error = ciss_get_request(sc, &cr)) != 0) { xpt_freeze_simq(sim, 1); sc->ciss_flags |= CISS_FLAG_BUSY; csio->ccb_h.status |= CAM_REQUEUE_REQ; return(error); } /* * Build the command. */ cc = cr->cr_cc; cr->cr_data = csio; cr->cr_length = csio->dxfer_len; cr->cr_complete = ciss_cam_complete; cr->cr_private = csio; /* * Target the right logical volume. 
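 *
 * For illustration only: logical volumes are addressed in logical (LUN)
 * mode and pass-through physical devices in peripheral (bus/target) mode;
 * the pre-built address recorded at discovery time is simply copied into
 * the command header below.  A hand-rolled logical address would look
 * roughly like this (a sketch, not necessarily the exact LUN encoding):
 *
 *	cc->header.address.logical.mode = CISS_HDR_ADDRESS_MODE_LOGICAL;
 *	cc->header.address.logical.lun  = target;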
*/ if (CISS_IS_PHYSICAL(bus)) cc->header.address = sc->ciss_physical[CISS_CAM_TO_PBUS(bus)][target].cp_address; else cc->header.address = sc->ciss_logical[bus][target].cl_address; cc->cdb.cdb_length = csio->cdb_len; cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; /* XXX ordered tags? */ if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { cr->cr_flags = CISS_REQ_DATAOUT; cc->cdb.direction = CISS_CDB_DIRECTION_WRITE; } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cr->cr_flags = CISS_REQ_DATAIN; cc->cdb.direction = CISS_CDB_DIRECTION_READ; } else { + cr->cr_data = NULL; cr->cr_flags = 0; cc->cdb.direction = CISS_CDB_DIRECTION_NONE; } cr->cr_flags |= CISS_REQ_CCB; cc->cdb.timeout = (csio->ccb_h.timeout / 1000) + 1; if (csio->ccb_h.flags & CAM_CDB_POINTER) { bcopy(csio->cdb_io.cdb_ptr, &cc->cdb.cdb[0], csio->cdb_len); } else { bcopy(csio->cdb_io.cdb_bytes, &cc->cdb.cdb[0], csio->cdb_len); } /* * Submit the request to the adapter. * * Note that this may fail if we're unable to map the request (and * if we ever learn a transport layer other than simple, may fail * if the adapter rejects the command). */ if ((error = ciss_start(cr)) != 0) { xpt_freeze_simq(sim, 1); csio->ccb_h.status |= CAM_RELEASE_SIMQ; if (error == EINPROGRESS) { error = 0; } else { csio->ccb_h.status |= CAM_REQUEUE_REQ; ciss_release_request(cr); } return(error); } return(0); } /************************************************************************ * Emulate SCSI commands the adapter doesn't handle as we might like. */ static int ciss_cam_emulate(struct ciss_softc *sc, struct ccb_scsiio *csio) { int bus, target; u_int8_t opcode; target = csio->ccb_h.target_id; bus = cam_sim_bus(xpt_path_sim(csio->ccb_h.path)); opcode = (csio->ccb_h.flags & CAM_CDB_POINTER) ? *(u_int8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes[0]; if (CISS_IS_PHYSICAL(bus)) { if (sc->ciss_physical[CISS_CAM_TO_PBUS(bus)][target].cp_online != 1) { csio->ccb_h.status |= CAM_SEL_TIMEOUT; xpt_done((union ccb *)csio); return(1); } else return(0); } /* * Handle requests for volumes that don't exist or are not online. * A selection timeout is slightly better than an illegal request. * Other errors might be better. */ if (sc->ciss_logical[bus][target].cl_status != CISS_LD_ONLINE) { csio->ccb_h.status |= CAM_SEL_TIMEOUT; xpt_done((union ccb *)csio); return(1); } /* if we have to fake Synchronise Cache */ if (sc->ciss_flags & CISS_FLAG_FAKE_SYNCH) { /* * If this is a Synchronise Cache command, typically issued when * a device is closed, flush the adapter and complete now. */ if (((csio->ccb_h.flags & CAM_CDB_POINTER) ? *(u_int8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE) { ciss_flush_adapter(sc); csio->ccb_h.status |= CAM_REQ_CMP; xpt_done((union ccb *)csio); return(1); } } return(0); } /************************************************************************ * Check for possibly-completed commands. 
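 *
 * CAM calls this poll handler when it cannot rely on interrupts, for
 * example during a crash dump or early boot polling, so it simply runs
 * the same completion processing the interrupt path would.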
*/ static void ciss_cam_poll(struct cam_sim *sim) { cr_qhead_t qh; struct ciss_softc *sc = cam_sim_softc(sim); debug_called(2); STAILQ_INIT(&qh); if (sc->ciss_perf) ciss_perf_done(sc, &qh); else ciss_done(sc, &qh); ciss_complete(sc, &qh); } /************************************************************************ * Handle completion of a command - pass results back through the CCB */ static void ciss_cam_complete(struct ciss_request *cr) { struct ciss_softc *sc; struct ciss_command *cc; struct ciss_error_info *ce; struct ccb_scsiio *csio; int scsi_status; int command_status; debug_called(2); sc = cr->cr_sc; cc = cr->cr_cc; ce = (struct ciss_error_info *)&(cc->sg[0]); csio = (struct ccb_scsiio *)cr->cr_private; /* * Extract status values from request. */ ciss_report_request(cr, &command_status, &scsi_status); csio->scsi_status = scsi_status; /* * Handle specific SCSI status values. */ switch(scsi_status) { /* no status due to adapter error */ case -1: debug(0, "adapter error"); csio->ccb_h.status |= CAM_REQ_CMP_ERR; break; /* no status due to command completed OK */ case SCSI_STATUS_OK: /* CISS_SCSI_STATUS_GOOD */ debug(2, "SCSI_STATUS_OK"); csio->ccb_h.status |= CAM_REQ_CMP; break; /* check condition, sense data included */ case SCSI_STATUS_CHECK_COND: /* CISS_SCSI_STATUS_CHECK_CONDITION */ debug(0, "SCSI_STATUS_CHECK_COND sense size %d resid %d\n", ce->sense_length, ce->residual_count); bzero(&csio->sense_data, SSD_FULL_SIZE); bcopy(&ce->sense_info[0], &csio->sense_data, ce->sense_length); if (csio->sense_len > ce->sense_length) csio->sense_resid = csio->sense_len - ce->sense_length; else csio->sense_resid = 0; csio->resid = ce->residual_count; csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; #ifdef CISS_DEBUG { struct scsi_sense_data *sns = (struct scsi_sense_data *)&ce->sense_info[0]; debug(0, "sense key %x", scsi_get_sense_key(sns, csio->sense_len - csio->sense_resid, /*show_errors*/ 1)); } #endif break; case SCSI_STATUS_BUSY: /* CISS_SCSI_STATUS_BUSY */ debug(0, "SCSI_STATUS_BUSY"); csio->ccb_h.status |= CAM_SCSI_BUSY; break; default: debug(0, "unknown status 0x%x", csio->scsi_status); csio->ccb_h.status |= CAM_REQ_CMP_ERR; break; } /* handle post-command fixup */ ciss_cam_complete_fixup(sc, csio); ciss_release_request(cr); if (sc->ciss_flags & CISS_FLAG_BUSY) { sc->ciss_flags &= ~CISS_FLAG_BUSY; if (csio->ccb_h.status & CAM_RELEASE_SIMQ) xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0); else csio->ccb_h.status |= CAM_RELEASE_SIMQ; } xpt_done((union ccb *)csio); } /******************************************************************************** * Fix up the result of some commands here. */ static void ciss_cam_complete_fixup(struct ciss_softc *sc, struct ccb_scsiio *csio) { struct scsi_inquiry_data *inq; struct ciss_ldrive *cl; uint8_t *cdb; int bus, target; cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ? (uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes; if (cdb[0] == INQUIRY && (cdb[1] & SI_EVPD) == 0 && (csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN && csio->dxfer_len >= SHORT_INQUIRY_LENGTH) { inq = (struct scsi_inquiry_data *)csio->data_ptr; target = csio->ccb_h.target_id; bus = cam_sim_bus(xpt_path_sim(csio->ccb_h.path)); /* * Don't let hard drives be seen by the DA driver. They will still be * attached by the PASS driver. 
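 *
 * Concretely, the fixup below keeps the peripheral qualifier bits (the
 * 0xe0 mask) and overwrites the device type with T_NODEVICE, so da(4)
 * skips the device while pass(4) still attaches to it.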
*/ if (CISS_IS_PHYSICAL(bus)) { if (SID_TYPE(inq) == T_DIRECT) inq->device = (inq->device & 0xe0) | T_NODEVICE; return; } cl = &sc->ciss_logical[bus][target]; padstr(inq->vendor, "COMPAQ", 8); padstr(inq->product, ciss_name_ldrive_org(cl->cl_ldrive->fault_tolerance), 8); padstr(inq->revision, ciss_name_ldrive_status(cl->cl_lstatus->status), 16); } } /******************************************************************************** * Find a peripheral attached at (target) */ static struct cam_periph * ciss_find_periph(struct ciss_softc *sc, int bus, int target) { struct cam_periph *periph; struct cam_path *path; int status; status = xpt_create_path(&path, NULL, cam_sim_path(sc->ciss_cam_sim[bus]), target, 0); if (status == CAM_REQ_CMP) { periph = cam_periph_find(path, NULL); xpt_free_path(path); } else { periph = NULL; } return(periph); } /******************************************************************************** * Name the device at (target) * * XXX is this strictly correct? */ static int ciss_name_device(struct ciss_softc *sc, int bus, int target) { struct cam_periph *periph; if (CISS_IS_PHYSICAL(bus)) return (0); if ((periph = ciss_find_periph(sc, bus, target)) != NULL) { sprintf(sc->ciss_logical[bus][target].cl_name, "%s%d", periph->periph_name, periph->unit_number); return(0); } sc->ciss_logical[bus][target].cl_name[0] = 0; return(ENOENT); } /************************************************************************ * Periodic status monitoring. */ static void ciss_periodic(void *arg) { struct ciss_softc *sc; struct ciss_request *cr = NULL; struct ciss_command *cc = NULL; int error = 0; debug_called(1); sc = (struct ciss_softc *)arg; /* * Check the adapter heartbeat. */ if (sc->ciss_cfg->heartbeat == sc->ciss_heartbeat) { sc->ciss_heart_attack++; debug(0, "adapter heart attack in progress 0x%x/%d", sc->ciss_heartbeat, sc->ciss_heart_attack); if (sc->ciss_heart_attack == 3) { ciss_printf(sc, "ADAPTER HEARTBEAT FAILED\n"); ciss_disable_adapter(sc); return; } } else { sc->ciss_heartbeat = sc->ciss_cfg->heartbeat; sc->ciss_heart_attack = 0; debug(3, "new heartbeat 0x%x", sc->ciss_heartbeat); } /* * Send the NOP message and wait for a response. */ if (ciss_nop_message_heartbeat != 0 && (error = ciss_get_request(sc, &cr)) == 0) { cc = cr->cr_cc; cr->cr_complete = ciss_nop_complete; cc->cdb.cdb_length = 1; cc->cdb.type = CISS_CDB_TYPE_MESSAGE; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_WRITE; cc->cdb.timeout = 0; cc->cdb.cdb[0] = CISS_OPCODE_MESSAGE_NOP; if ((error = ciss_start(cr)) != 0) { ciss_printf(sc, "SENDING NOP MESSAGE FAILED\n"); } } /* * If the notify event request has died for some reason, or has * not started yet, restart it. */ if (!(sc->ciss_flags & CISS_FLAG_NOTIFY_OK)) { debug(0, "(re)starting Event Notify chain"); ciss_notify_event(sc); } /* * Reschedule. */ callout_reset(&sc->ciss_periodic, CISS_HEARTBEAT_RATE * hz, ciss_periodic, sc); } static void ciss_nop_complete(struct ciss_request *cr) { struct ciss_softc *sc; static int first_time = 1; sc = cr->cr_sc; if (ciss_report_request(cr, NULL, NULL) != 0) { if (first_time == 1) { first_time = 0; ciss_printf(sc, "SENDING NOP MESSAGE FAILED (not logging anymore)\n"); } } ciss_release_request(cr); } /************************************************************************ * Disable the adapter. * * All outstanding requests are failed with a hardware error status. * This will cause failover in a multipath configuration. 
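 *
 * Sketch of the failure path taken below for each in-flight request: its
 * error information is overwritten with CISS_CMD_STATUS_HARDWARE_ERROR
 * and the request is pushed onto a local completion queue, so the usual
 * callback/wakeup handling still runs.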
*/ static void ciss_disable_adapter(struct ciss_softc *sc) { cr_qhead_t qh; struct ciss_request *cr; struct ciss_command *cc; struct ciss_error_info *ce; int i; CISS_TL_SIMPLE_DISABLE_INTERRUPTS(sc); pci_disable_busmaster(sc->ciss_dev); sc->ciss_flags &= ~CISS_FLAG_RUNNING; for (i = 1; i < sc->ciss_max_requests; i++) { cr = &sc->ciss_request[i]; if ((cr->cr_flags & CISS_REQ_BUSY) == 0) continue; cc = cr->cr_cc; ce = (struct ciss_error_info *)&(cc->sg[0]); ce->command_status = CISS_CMD_STATUS_HARDWARE_ERROR; ciss_enqueue_complete(cr, &qh); } for (;;) { if ((cr = ciss_dequeue_complete(sc, &qh)) == NULL) break; /* * If the request has a callback, invoke it. */ if (cr->cr_complete != NULL) { cr->cr_complete(cr); continue; } /* * If someone is sleeping on this request, wake them up. */ if (cr->cr_flags & CISS_REQ_SLEEP) { cr->cr_flags &= ~CISS_REQ_SLEEP; wakeup(cr); continue; } } } /************************************************************************ * Request a notification response from the adapter. * * If (cr) is NULL, this is the first request of the adapter, so * reset the adapter's message pointer and start with the oldest * message available. */ static void ciss_notify_event(struct ciss_softc *sc) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_notify_cdb *cnc; int error; debug_called(1); cr = sc->ciss_periodic_notify; /* get a request if we don't already have one */ if (cr == NULL) { if ((error = ciss_get_request(sc, &cr)) != 0) { debug(0, "can't get notify event request"); goto out; } sc->ciss_periodic_notify = cr; cr->cr_complete = ciss_notify_complete; debug(1, "acquired request %d", cr->cr_tag); } /* * Get a databuffer if we don't already have one, note that the * adapter command wants a larger buffer than the actual * structure. 
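 *
 * CISS_NOTIFY_DATA_SIZE is the size handed both to the request and to the
 * CDB (via htonl() below); it is presumed to cover struct ciss_notify plus
 * the additional space the firmware expects.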
*/ if (cr->cr_data == NULL) { if ((cr->cr_data = malloc(CISS_NOTIFY_DATA_SIZE, CISS_MALLOC_CLASS, M_NOWAIT)) == NULL) { debug(0, "can't get notify event request buffer"); error = ENOMEM; goto out; } cr->cr_length = CISS_NOTIFY_DATA_SIZE; } /* re-setup the request's command (since we never release it) XXX overkill*/ ciss_preen_command(cr); /* (re)build the notify event command */ cc = cr->cr_cc; cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; cc->header.address.physical.bus = 0; cc->header.address.physical.target = 0; cc->cdb.cdb_length = sizeof(*cnc); cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 0; /* no timeout, we hope */ cnc = (struct ciss_notify_cdb *)&(cc->cdb.cdb[0]); bzero(cr->cr_data, CISS_NOTIFY_DATA_SIZE); cnc->opcode = CISS_OPCODE_READ; cnc->command = CISS_COMMAND_NOTIFY_ON_EVENT; cnc->timeout = 0; /* no timeout, we hope */ cnc->synchronous = 0; cnc->ordered = 0; cnc->seek_to_oldest = 0; if ((sc->ciss_flags & CISS_FLAG_RUNNING) == 0) cnc->new_only = 1; else cnc->new_only = 0; cnc->length = htonl(CISS_NOTIFY_DATA_SIZE); /* submit the request */ error = ciss_start(cr); out: if (error) { if (cr != NULL) { if (cr->cr_data != NULL) free(cr->cr_data, CISS_MALLOC_CLASS); ciss_release_request(cr); } sc->ciss_periodic_notify = NULL; debug(0, "can't submit notify event request"); sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK; } else { debug(1, "notify event submitted"); sc->ciss_flags |= CISS_FLAG_NOTIFY_OK; } } static void ciss_notify_complete(struct ciss_request *cr) { struct ciss_command *cc; struct ciss_notify *cn; struct ciss_softc *sc; int scsi_status; int command_status; debug_called(1); cc = cr->cr_cc; cn = (struct ciss_notify *)cr->cr_data; sc = cr->cr_sc; /* * Report request results, decode status. */ ciss_report_request(cr, &command_status, &scsi_status); /* * Abort the chain on a fatal error. * * XXX which of these are actually errors? */ if ((command_status != CISS_CMD_STATUS_SUCCESS) && (command_status != CISS_CMD_STATUS_TARGET_STATUS) && (command_status != CISS_CMD_STATUS_TIMEOUT)) { /* XXX timeout? */ ciss_printf(sc, "fatal error in Notify Event request (%s)\n", ciss_name_command_status(command_status)); ciss_release_request(cr); sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK; return; } /* * If the adapter gave us a text message, print it. */ if (cn->message[0] != 0) ciss_printf(sc, "*** %.80s\n", cn->message); debug(0, "notify event class %d subclass %d detail %d", cn->class, cn->subclass, cn->detail); /* * If the response indicates that the notifier has been aborted, * release the notifier command. */ if ((cn->class == CISS_NOTIFY_NOTIFIER) && (cn->subclass == CISS_NOTIFY_NOTIFIER_STATUS) && (cn->detail == 1)) { debug(0, "notifier exiting"); sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK; ciss_release_request(cr); sc->ciss_periodic_notify = NULL; wakeup(&sc->ciss_periodic_notify); } else { /* Handle notify events in a kernel thread */ ciss_enqueue_notify(cr); sc->ciss_periodic_notify = NULL; wakeup(&sc->ciss_periodic_notify); wakeup(&sc->ciss_notify); } /* * Send a new notify event command, if we're not aborting. */ if (!(sc->ciss_flags & CISS_FLAG_ABORTING)) { ciss_notify_event(sc); } } /************************************************************************ * Abort the Notify Event chain. * * Note that we can't just abort the command in progress; we have to * explicitly issue an Abort Notify Event command in order for the * adapter to clean up correctly. 
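 *
 * Illustrative only, a typical shutdown/detach sequence would look roughly
 * like the following (the surrounding detach logic is not shown here):
 *
 *	mtx_lock(&sc->ciss_mtx);
 *	sc->ciss_flags |= CISS_FLAG_ABORTING;
 *	ciss_notify_abort(sc);
 *	mtx_unlock(&sc->ciss_mtx);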
* * If we are called with CISS_FLAG_ABORTING set in the adapter softc, * the chain will not restart itself. */ static int ciss_notify_abort(struct ciss_softc *sc) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_notify_cdb *cnc; int error, command_status, scsi_status; debug_called(1); cr = NULL; error = 0; /* verify that there's an outstanding command */ if (!(sc->ciss_flags & CISS_FLAG_NOTIFY_OK)) goto out; /* get a command to issue the abort with */ if ((error = ciss_get_request(sc, &cr))) goto out; /* get a buffer for the result */ if ((cr->cr_data = malloc(CISS_NOTIFY_DATA_SIZE, CISS_MALLOC_CLASS, M_NOWAIT)) == NULL) { debug(0, "can't get notify event request buffer"); error = ENOMEM; goto out; } cr->cr_length = CISS_NOTIFY_DATA_SIZE; /* build the CDB */ cc = cr->cr_cc; cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; cc->header.address.physical.bus = 0; cc->header.address.physical.target = 0; cc->cdb.cdb_length = sizeof(*cnc); cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 0; /* no timeout, we hope */ cnc = (struct ciss_notify_cdb *)&(cc->cdb.cdb[0]); bzero(cnc, sizeof(*cnc)); cnc->opcode = CISS_OPCODE_WRITE; cnc->command = CISS_COMMAND_ABORT_NOTIFY; cnc->length = htonl(CISS_NOTIFY_DATA_SIZE); ciss_print_request(cr); /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "Abort Notify Event command failed (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, &scsi_status); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: break; case CISS_CMD_STATUS_INVALID_COMMAND: /* * Some older adapters don't support the CISS version of this * command. Fall back to using the BMIC version. */ error = ciss_notify_abort_bmic(sc); if (error != 0) goto out; break; case CISS_CMD_STATUS_TARGET_STATUS: /* * This can happen if the adapter thinks there wasn't an outstanding * Notify Event command but we did. We clean up here. */ if (scsi_status == CISS_SCSI_STATUS_CHECK_CONDITION) { if (sc->ciss_periodic_notify != NULL) ciss_release_request(sc->ciss_periodic_notify); error = 0; goto out; } /* FALLTHROUGH */ default: ciss_printf(sc, "Abort Notify Event command failed (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } /* * Sleep waiting for the notifier command to complete. Note * that if it doesn't, we may end up in a bad situation, since * the adapter may deliver it later. Also note that the adapter * requires the Notify Event command to be cancelled in order to * maintain internal bookkeeping. */ while (sc->ciss_periodic_notify != NULL) { error = msleep(&sc->ciss_periodic_notify, &sc->ciss_mtx, PRIBIO, "cissNEA", hz * 5); if (error == EWOULDBLOCK) { ciss_printf(sc, "Notify Event command failed to abort, adapter may wedge.\n"); break; } } out: /* release the cancel request */ if (cr != NULL) { if (cr->cr_data != NULL) free(cr->cr_data, CISS_MALLOC_CLASS); ciss_release_request(cr); } if (error == 0) sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK; return(error); } /************************************************************************ * Abort the Notify Event chain using a BMIC command. 
*/ static int ciss_notify_abort_bmic(struct ciss_softc *sc) { struct ciss_request *cr; int error, command_status; debug_called(1); cr = NULL; error = 0; /* verify that there's an outstanding command */ if (!(sc->ciss_flags & CISS_FLAG_NOTIFY_OK)) goto out; /* * Build a BMIC command to cancel the Notify on Event command. * * Note that we are sending a CISS opcode here. Odd. */ if ((error = ciss_get_bmic_request(sc, &cr, CISS_COMMAND_ABORT_NOTIFY, NULL, 0)) != 0) goto out; /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC Cancel Notify on Event command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: break; default: ciss_printf(sc, "error cancelling Notify on Event (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } out: if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Handle rescanning all the logical volumes when a notify event * causes the drives to come online or offline. */ static void ciss_notify_rescan_logical(struct ciss_softc *sc) { struct ciss_lun_report *cll; struct ciss_ldrive *ld; int i, j, ndrives; /* * We must rescan all logical volumes to get the right logical * drive address. */ cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_LOGICAL_LUNS, CISS_MAX_LOGICAL); if (cll == NULL) return; ndrives = (ntohl(cll->list_size) / sizeof(union ciss_device_address)); /* * Delete any of the drives which were destroyed by the * firmware. */ for (i = 0; i < sc->ciss_max_logical_bus; i++) { for (j = 0; j < CISS_MAX_LOGICAL; j++) { ld = &sc->ciss_logical[i][j]; if (ld->cl_update == 0) continue; if (ld->cl_status != CISS_LD_ONLINE) { ciss_cam_rescan_target(sc, i, j); ld->cl_update = 0; if (ld->cl_ldrive) free(ld->cl_ldrive, CISS_MALLOC_CLASS); if (ld->cl_lstatus) free(ld->cl_lstatus, CISS_MALLOC_CLASS); ld->cl_ldrive = NULL; ld->cl_lstatus = NULL; } } } /* * Scan for new drives. */ for (i = 0; i < ndrives; i++) { int bus, target; bus = CISS_LUN_TO_BUS(cll->lun[i].logical.lun); target = CISS_LUN_TO_TARGET(cll->lun[i].logical.lun); ld = &sc->ciss_logical[bus][target]; if (ld->cl_update == 0) continue; ld->cl_update = 0; ld->cl_address = cll->lun[i]; ld->cl_controller = &sc->ciss_controllers[bus]; if (ciss_identify_logical(sc, ld) == 0) { ciss_cam_rescan_target(sc, bus, target); } } free(cll, CISS_MALLOC_CLASS); } /************************************************************************ * Handle a notify event relating to the status of a logical drive. * * XXX need to be able to defer some of these to properly handle * calling the "ID Physical drive" command, unless the 'extended' * drive IDs are always in BIG_MAP format. 
*/ static void ciss_notify_logical(struct ciss_softc *sc, struct ciss_notify *cn) { struct ciss_ldrive *ld; int ostatus, bus, target; debug_called(2); bus = cn->device.physical.bus; target = cn->data.logical_status.logical_drive; ld = &sc->ciss_logical[bus][target]; switch (cn->subclass) { case CISS_NOTIFY_LOGICAL_STATUS: switch (cn->detail) { case 0: ciss_name_device(sc, bus, target); ciss_printf(sc, "logical drive %d (%s) changed status %s->%s, spare status 0x%b\n", cn->data.logical_status.logical_drive, ld->cl_name, ciss_name_ldrive_status(cn->data.logical_status.previous_state), ciss_name_ldrive_status(cn->data.logical_status.new_state), cn->data.logical_status.spare_state, "\20\1configured\2rebuilding\3failed\4in use\5available\n"); /* * Update our idea of the drive's status. */ ostatus = ciss_decode_ldrive_status(cn->data.logical_status.previous_state); ld->cl_status = ciss_decode_ldrive_status(cn->data.logical_status.new_state); if (ld->cl_lstatus != NULL) ld->cl_lstatus->status = cn->data.logical_status.new_state; /* * Have CAM rescan the drive if its status has changed. */ if (ostatus != ld->cl_status) { ld->cl_update = 1; ciss_notify_rescan_logical(sc); } break; case 1: /* logical drive has recognised new media, needs Accept Media Exchange */ ciss_name_device(sc, bus, target); ciss_printf(sc, "logical drive %d (%s) media exchanged, ready to go online\n", cn->data.logical_status.logical_drive, ld->cl_name); ciss_accept_media(sc, ld); ld->cl_update = 1; ld->cl_status = ciss_decode_ldrive_status(cn->data.logical_status.new_state); ciss_notify_rescan_logical(sc); break; case 2: case 3: ciss_printf(sc, "rebuild of logical drive %d (%s) failed due to %s error\n", cn->data.rebuild_aborted.logical_drive, ld->cl_name, (cn->detail == 2) ? "read" : "write"); break; } break; case CISS_NOTIFY_LOGICAL_ERROR: if (cn->detail == 0) { ciss_printf(sc, "FATAL I/O ERROR on logical drive %d (%s), SCSI port %d ID %d\n", cn->data.io_error.logical_drive, ld->cl_name, cn->data.io_error.failure_bus, cn->data.io_error.failure_drive); /* XXX should we take the drive down at this point, or will we be told? */ } break; case CISS_NOTIFY_LOGICAL_SURFACE: if (cn->detail == 0) ciss_printf(sc, "logical drive %d (%s) completed consistency initialisation\n", cn->data.consistency_completed.logical_drive, ld->cl_name); break; } } /************************************************************************ * Handle a notify event relating to the status of a physical drive. */ static void ciss_notify_physical(struct ciss_softc *sc, struct ciss_notify *cn) { } /************************************************************************ * Handle a notify event relating to the status of a physical drive. */ static void ciss_notify_hotplug(struct ciss_softc *sc, struct ciss_notify *cn) { struct ciss_lun_report *cll = NULL; int bus, target; switch (cn->subclass) { case CISS_NOTIFY_HOTPLUG_PHYSICAL: case CISS_NOTIFY_HOTPLUG_NONDISK: bus = CISS_BIG_MAP_BUS(sc, cn->data.drive.big_physical_drive_number); target = CISS_BIG_MAP_TARGET(sc, cn->data.drive.big_physical_drive_number); if (cn->detail == 0) { /* * Mark the device offline so that it'll start producing selection * timeouts to the upper layer. 
*/ if ((bus >= 0) && (target >= 0)) sc->ciss_physical[bus][target].cp_online = 0; } else { /* * Rescan the physical lun list for new items */ cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_PHYSICAL_LUNS, CISS_MAX_PHYSICAL); if (cll == NULL) { ciss_printf(sc, "Warning, cannot get physical lun list\n"); break; } ciss_filter_physical(sc, cll); } break; default: ciss_printf(sc, "Unknown hotplug event %d\n", cn->subclass); return; } if (cll != NULL) free(cll, CISS_MALLOC_CLASS); } /************************************************************************ * Handle deferred processing of notify events. Notify events may need * sleep which is unsafe during an interrupt. */ static void ciss_notify_thread(void *arg) { struct ciss_softc *sc; struct ciss_request *cr; struct ciss_notify *cn; sc = (struct ciss_softc *)arg; #if __FreeBSD_version >= 500000 mtx_lock(&sc->ciss_mtx); #endif for (;;) { if (STAILQ_EMPTY(&sc->ciss_notify) != 0 && (sc->ciss_flags & CISS_FLAG_THREAD_SHUT) == 0) { msleep(&sc->ciss_notify, &sc->ciss_mtx, PUSER, "idle", 0); } if (sc->ciss_flags & CISS_FLAG_THREAD_SHUT) break; cr = ciss_dequeue_notify(sc); if (cr == NULL) panic("cr null"); cn = (struct ciss_notify *)cr->cr_data; switch (cn->class) { case CISS_NOTIFY_HOTPLUG: ciss_notify_hotplug(sc, cn); break; case CISS_NOTIFY_LOGICAL: ciss_notify_logical(sc, cn); break; case CISS_NOTIFY_PHYSICAL: ciss_notify_physical(sc, cn); break; } ciss_release_request(cr); } sc->ciss_notify_thread = NULL; wakeup(&sc->ciss_notify_thread); #if __FreeBSD_version >= 500000 mtx_unlock(&sc->ciss_mtx); #endif kproc_exit(0); } /************************************************************************ * Start the notification kernel thread. */ static void ciss_spawn_notify_thread(struct ciss_softc *sc) { #if __FreeBSD_version > 500005 if (kproc_create((void(*)(void *))ciss_notify_thread, sc, &sc->ciss_notify_thread, 0, 0, "ciss_notify%d", device_get_unit(sc->ciss_dev))) #else if (kproc_create((void(*)(void *))ciss_notify_thread, sc, &sc->ciss_notify_thread, "ciss_notify%d", device_get_unit(sc->ciss_dev))) #endif panic("Could not create notify thread\n"); } /************************************************************************ * Kill the notification kernel thread. */ static void ciss_kill_notify_thread(struct ciss_softc *sc) { if (sc->ciss_notify_thread == NULL) return; sc->ciss_flags |= CISS_FLAG_THREAD_SHUT; wakeup(&sc->ciss_notify); msleep(&sc->ciss_notify_thread, &sc->ciss_mtx, PUSER, "thtrm", 0); } /************************************************************************ * Print a request. */ static void ciss_print_request(struct ciss_request *cr) { struct ciss_softc *sc; struct ciss_command *cc; int i; sc = cr->cr_sc; cc = cr->cr_cc; ciss_printf(sc, "REQUEST @ %p\n", cr); ciss_printf(sc, " data %p/%d tag %d flags %b\n", cr->cr_data, cr->cr_length, cr->cr_tag, cr->cr_flags, "\20\1mapped\2sleep\3poll\4dataout\5datain\n"); ciss_printf(sc, " sg list/total %d/%d host tag 0x%x\n", cc->header.sg_in_list, cc->header.sg_total, cc->header.host_tag); switch(cc->header.address.mode.mode) { case CISS_HDR_ADDRESS_MODE_PERIPHERAL: case CISS_HDR_ADDRESS_MODE_MASK_PERIPHERAL: ciss_printf(sc, " physical bus %d target %d\n", cc->header.address.physical.bus, cc->header.address.physical.target); break; case CISS_HDR_ADDRESS_MODE_LOGICAL: ciss_printf(sc, " logical unit %d\n", cc->header.address.logical.lun); break; } ciss_printf(sc, " %s cdb length %d type %s attribute %s\n", (cc->cdb.direction == CISS_CDB_DIRECTION_NONE) ? 
"no-I/O" : (cc->cdb.direction == CISS_CDB_DIRECTION_READ) ? "READ" : (cc->cdb.direction == CISS_CDB_DIRECTION_WRITE) ? "WRITE" : "??", cc->cdb.cdb_length, (cc->cdb.type == CISS_CDB_TYPE_COMMAND) ? "command" : (cc->cdb.type == CISS_CDB_TYPE_MESSAGE) ? "message" : "??", (cc->cdb.attribute == CISS_CDB_ATTRIBUTE_UNTAGGED) ? "untagged" : (cc->cdb.attribute == CISS_CDB_ATTRIBUTE_SIMPLE) ? "simple" : (cc->cdb.attribute == CISS_CDB_ATTRIBUTE_HEAD_OF_QUEUE) ? "head-of-queue" : (cc->cdb.attribute == CISS_CDB_ATTRIBUTE_ORDERED) ? "ordered" : (cc->cdb.attribute == CISS_CDB_ATTRIBUTE_AUTO_CONTINGENT) ? "auto-contingent" : "??"); ciss_printf(sc, " %*D\n", cc->cdb.cdb_length, &cc->cdb.cdb[0], " "); if (cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR) { /* XXX print error info */ } else { /* since we don't use chained s/g, don't support it here */ for (i = 0; i < cc->header.sg_in_list; i++) { if ((i % 4) == 0) ciss_printf(sc, " "); printf("0x%08x/%d ", (u_int32_t)cc->sg[i].address, cc->sg[i].length); if ((((i + 1) % 4) == 0) || (i == (cc->header.sg_in_list - 1))) printf("\n"); } } } /************************************************************************ * Print information about the status of a logical drive. */ static void ciss_print_ldrive(struct ciss_softc *sc, struct ciss_ldrive *ld) { int bus, target, i; if (ld->cl_lstatus == NULL) { printf("does not exist\n"); return; } /* print drive status */ switch(ld->cl_lstatus->status) { case CISS_LSTATUS_OK: printf("online\n"); break; case CISS_LSTATUS_INTERIM_RECOVERY: printf("in interim recovery mode\n"); break; case CISS_LSTATUS_READY_RECOVERY: printf("ready to begin recovery\n"); break; case CISS_LSTATUS_RECOVERING: bus = CISS_BIG_MAP_BUS(sc, ld->cl_lstatus->drive_rebuilding); target = CISS_BIG_MAP_BUS(sc, ld->cl_lstatus->drive_rebuilding); printf("being recovered, working on physical drive %d.%d, %u blocks remaining\n", bus, target, ld->cl_lstatus->blocks_to_recover); break; case CISS_LSTATUS_EXPANDING: printf("being expanded, %u blocks remaining\n", ld->cl_lstatus->blocks_to_recover); break; case CISS_LSTATUS_QUEUED_FOR_EXPANSION: printf("queued for expansion\n"); break; case CISS_LSTATUS_FAILED: printf("queued for expansion\n"); break; case CISS_LSTATUS_WRONG_PDRIVE: printf("wrong physical drive inserted\n"); break; case CISS_LSTATUS_MISSING_PDRIVE: printf("missing a needed physical drive\n"); break; case CISS_LSTATUS_BECOMING_READY: printf("becoming ready\n"); break; } /* print failed physical drives */ for (i = 0; i < CISS_BIG_MAP_ENTRIES / 8; i++) { bus = CISS_BIG_MAP_BUS(sc, ld->cl_lstatus->drive_failure_map[i]); target = CISS_BIG_MAP_TARGET(sc, ld->cl_lstatus->drive_failure_map[i]); if (bus == -1) continue; ciss_printf(sc, "physical drive %d:%d (%x) failed\n", bus, target, ld->cl_lstatus->drive_failure_map[i]); } } #ifdef CISS_DEBUG /************************************************************************ * Print information about the controller/driver. */ static void ciss_print_adapter(struct ciss_softc *sc) { int i, j; ciss_printf(sc, "ADAPTER:\n"); for (i = 0; i < CISSQ_COUNT; i++) { ciss_printf(sc, "%s %d/%d\n", i == 0 ? "free" : i == 1 ? 
"busy" : "complete", sc->ciss_qstat[i].q_length, sc->ciss_qstat[i].q_max); } ciss_printf(sc, "max_requests %d\n", sc->ciss_max_requests); ciss_printf(sc, "flags %b\n", sc->ciss_flags, "\20\1notify_ok\2control_open\3aborting\4running\21fake_synch\22bmic_abort\n"); for (i = 0; i < sc->ciss_max_logical_bus; i++) { for (j = 0; j < CISS_MAX_LOGICAL; j++) { ciss_printf(sc, "LOGICAL DRIVE %d: ", i); ciss_print_ldrive(sc, &sc->ciss_logical[i][j]); } } /* XXX Should physical drives be printed out here? */ for (i = 1; i < sc->ciss_max_requests; i++) ciss_print_request(sc->ciss_request + i); } /* DDB hook */ static void ciss_print0(void) { struct ciss_softc *sc; sc = devclass_get_softc(devclass_find("ciss"), 0); if (sc == NULL) { printf("no ciss controllers\n"); } else { ciss_print_adapter(sc); } } #endif /************************************************************************ * Return a name for a logical drive status value. */ static const char * ciss_name_ldrive_status(int status) { switch (status) { case CISS_LSTATUS_OK: return("OK"); case CISS_LSTATUS_FAILED: return("failed"); case CISS_LSTATUS_NOT_CONFIGURED: return("not configured"); case CISS_LSTATUS_INTERIM_RECOVERY: return("interim recovery"); case CISS_LSTATUS_READY_RECOVERY: return("ready for recovery"); case CISS_LSTATUS_RECOVERING: return("recovering"); case CISS_LSTATUS_WRONG_PDRIVE: return("wrong physical drive inserted"); case CISS_LSTATUS_MISSING_PDRIVE: return("missing physical drive"); case CISS_LSTATUS_EXPANDING: return("expanding"); case CISS_LSTATUS_BECOMING_READY: return("becoming ready"); case CISS_LSTATUS_QUEUED_FOR_EXPANSION: return("queued for expansion"); } return("unknown status"); } /************************************************************************ * Return an online/offline/nonexistent value for a logical drive * status value. */ static int ciss_decode_ldrive_status(int status) { switch(status) { case CISS_LSTATUS_NOT_CONFIGURED: return(CISS_LD_NONEXISTENT); case CISS_LSTATUS_OK: case CISS_LSTATUS_INTERIM_RECOVERY: case CISS_LSTATUS_READY_RECOVERY: case CISS_LSTATUS_RECOVERING: case CISS_LSTATUS_EXPANDING: case CISS_LSTATUS_QUEUED_FOR_EXPANSION: return(CISS_LD_ONLINE); case CISS_LSTATUS_FAILED: case CISS_LSTATUS_WRONG_PDRIVE: case CISS_LSTATUS_MISSING_PDRIVE: case CISS_LSTATUS_BECOMING_READY: default: return(CISS_LD_OFFLINE); } } /************************************************************************ * Return a name for a logical drive's organisation. */ static const char * ciss_name_ldrive_org(int org) { switch(org) { case CISS_LDRIVE_RAID0: return("RAID 0"); case CISS_LDRIVE_RAID1: return("RAID 1(1+0)"); case CISS_LDRIVE_RAID4: return("RAID 4"); case CISS_LDRIVE_RAID5: return("RAID 5"); case CISS_LDRIVE_RAID51: return("RAID 5+1"); case CISS_LDRIVE_RAIDADG: return("RAID ADG"); } return("unkown"); } /************************************************************************ * Return a name for a command status value. 
*/ static const char * ciss_name_command_status(int status) { switch(status) { case CISS_CMD_STATUS_SUCCESS: return("success"); case CISS_CMD_STATUS_TARGET_STATUS: return("target status"); case CISS_CMD_STATUS_DATA_UNDERRUN: return("data underrun"); case CISS_CMD_STATUS_DATA_OVERRUN: return("data overrun"); case CISS_CMD_STATUS_INVALID_COMMAND: return("invalid command"); case CISS_CMD_STATUS_PROTOCOL_ERROR: return("protocol error"); case CISS_CMD_STATUS_HARDWARE_ERROR: return("hardware error"); case CISS_CMD_STATUS_CONNECTION_LOST: return("connection lost"); case CISS_CMD_STATUS_ABORTED: return("aborted"); case CISS_CMD_STATUS_ABORT_FAILED: return("abort failed"); case CISS_CMD_STATUS_UNSOLICITED_ABORT: return("unsolicited abort"); case CISS_CMD_STATUS_TIMEOUT: return("timeout"); case CISS_CMD_STATUS_UNABORTABLE: return("unabortable"); } return("unknown status"); } /************************************************************************ * Handle an open on the control device. */ static int ciss_open(struct cdev *dev, int flags, int fmt, struct thread *p) { struct ciss_softc *sc; debug_called(1); sc = (struct ciss_softc *)dev->si_drv1; /* we might want to veto if someone already has us open */ mtx_lock(&sc->ciss_mtx); sc->ciss_flags |= CISS_FLAG_CONTROL_OPEN; mtx_unlock(&sc->ciss_mtx); return(0); } /************************************************************************ * Handle the last close on the control device. */ static int ciss_close(struct cdev *dev, int flags, int fmt, struct thread *p) { struct ciss_softc *sc; debug_called(1); sc = (struct ciss_softc *)dev->si_drv1; mtx_lock(&sc->ciss_mtx); sc->ciss_flags &= ~CISS_FLAG_CONTROL_OPEN; mtx_unlock(&sc->ciss_mtx); return (0); } /******************************************************************************** * Handle adapter-specific control operations. * * Note that the API here is compatible with the Linux driver, in order to * simplify the porting of Compaq's userland tools. */ static int ciss_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *p) { struct ciss_softc *sc; IOCTL_Command_struct *ioc = (IOCTL_Command_struct *)addr; #ifdef __amd64__ IOCTL_Command_struct32 *ioc32 = (IOCTL_Command_struct32 *)addr; IOCTL_Command_struct ioc_swab; #endif int error; debug_called(1); sc = (struct ciss_softc *)dev->si_drv1; error = 0; mtx_lock(&sc->ciss_mtx); switch(cmd) { case CCISS_GETQSTATS: { union ciss_statrequest *cr = (union ciss_statrequest *)addr; switch (cr->cs_item) { case CISSQ_FREE: case CISSQ_NOTIFY: bcopy(&sc->ciss_qstat[cr->cs_item], &cr->cs_qstat, sizeof(struct ciss_qstat)); break; default: error = ENOIOCTL; break; } break; } case CCISS_GETPCIINFO: { cciss_pci_info_struct *pis = (cciss_pci_info_struct *)addr; pis->bus = pci_get_bus(sc->ciss_dev); pis->dev_fn = pci_get_slot(sc->ciss_dev); pis->board_id = (pci_get_subvendor(sc->ciss_dev) << 16) | pci_get_subdevice(sc->ciss_dev); break; } case CCISS_GETINTINFO: { cciss_coalint_struct *cis = (cciss_coalint_struct *)addr; cis->delay = sc->ciss_cfg->interrupt_coalesce_delay; cis->count = sc->ciss_cfg->interrupt_coalesce_count; break; } case CCISS_SETINTINFO: { cciss_coalint_struct *cis = (cciss_coalint_struct *)addr; if ((cis->delay == 0) && (cis->count == 0)) { error = EINVAL; break; } /* * XXX apparently this is only safe if the controller is idle, * we should suspend it before doing this. 
*/ sc->ciss_cfg->interrupt_coalesce_delay = cis->delay; sc->ciss_cfg->interrupt_coalesce_count = cis->count; if (ciss_update_config(sc)) error = EIO; /* XXX resume the controller here */ break; } case CCISS_GETNODENAME: bcopy(sc->ciss_cfg->server_name, (NodeName_type *)addr, sizeof(NodeName_type)); break; case CCISS_SETNODENAME: bcopy((NodeName_type *)addr, sc->ciss_cfg->server_name, sizeof(NodeName_type)); if (ciss_update_config(sc)) error = EIO; break; case CCISS_GETHEARTBEAT: *(Heartbeat_type *)addr = sc->ciss_cfg->heartbeat; break; case CCISS_GETBUSTYPES: *(BusTypes_type *)addr = sc->ciss_cfg->bus_types; break; case CCISS_GETFIRMVER: bcopy(sc->ciss_id->running_firmware_revision, (FirmwareVer_type *)addr, sizeof(FirmwareVer_type)); break; case CCISS_GETDRIVERVER: *(DriverVer_type *)addr = CISS_DRIVER_VERSION; break; case CCISS_REVALIDVOLS: /* * This is a bit ugly; to do it "right" we really need * to find any disks that have changed, kick CAM off them, * then rescan only these disks. It'd be nice if they * a) told us which disk(s) they were going to play with, * and b) which ones had arrived. 8( */ break; #ifdef __amd64__ case CCISS_PASSTHRU32: ioc_swab.LUN_info = ioc32->LUN_info; ioc_swab.Request = ioc32->Request; ioc_swab.error_info = ioc32->error_info; ioc_swab.buf_size = ioc32->buf_size; ioc_swab.buf = (u_int8_t *)(uintptr_t)ioc32->buf; ioc = &ioc_swab; /* FALLTHROUGH */ #endif case CCISS_PASSTHRU: error = ciss_user_command(sc, ioc); break; default: debug(0, "unknown ioctl 0x%lx", cmd); debug(1, "CCISS_GETPCIINFO: 0x%lx", CCISS_GETPCIINFO); debug(1, "CCISS_GETINTINFO: 0x%lx", CCISS_GETINTINFO); debug(1, "CCISS_SETINTINFO: 0x%lx", CCISS_SETINTINFO); debug(1, "CCISS_GETNODENAME: 0x%lx", CCISS_GETNODENAME); debug(1, "CCISS_SETNODENAME: 0x%lx", CCISS_SETNODENAME); debug(1, "CCISS_GETHEARTBEAT: 0x%lx", CCISS_GETHEARTBEAT); debug(1, "CCISS_GETBUSTYPES: 0x%lx", CCISS_GETBUSTYPES); debug(1, "CCISS_GETFIRMVER: 0x%lx", CCISS_GETFIRMVER); debug(1, "CCISS_GETDRIVERVER: 0x%lx", CCISS_GETDRIVERVER); debug(1, "CCISS_REVALIDVOLS: 0x%lx", CCISS_REVALIDVOLS); debug(1, "CCISS_PASSTHRU: 0x%lx", CCISS_PASSTHRU); error = ENOIOCTL; break; } mtx_unlock(&sc->ciss_mtx); return(error); } Index: projects/physbio/sys/dev/hpt27xx/osm_bsd.c =================================================================== --- projects/physbio/sys/dev/hpt27xx/osm_bsd.c (revision 243875) +++ projects/physbio/sys/dev/hpt27xx/osm_bsd.c (revision 243876) @@ -1,1400 +1,1374 @@ /*- * Copyright (c) 2011 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include static int hpt_probe(device_t dev) { PCI_ID pci_id; HIM *him; int i; PHBA hba; for (him = him_list; him; him = him->next) { for (i=0; him->get_supported_device_id(i, &pci_id); i++) { if (him->get_controller_count) him->get_controller_count(&pci_id,0,0); if ((pci_get_vendor(dev) == pci_id.vid) && (pci_get_device(dev) == pci_id.did)){ KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) )); device_set_desc(dev, him->name); hba = (PHBA)device_get_softc(dev); memset(hba, 0, sizeof(HBA)); hba->ext_type = EXT_TYPE_HBA; hba->ldm_adapter.him = him; return 0; } } } return (ENXIO); } static int hpt_attach(device_t dev) { PHBA hba = (PHBA)device_get_softc(dev); HIM *him = hba->ldm_adapter.him; PCI_ID pci_id; HPT_UINT size; PVBUS vbus; PVBUS_EXT vbus_ext; KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); #if __FreeBSD_version >=440000 pci_enable_busmaster(dev); #endif pci_id.vid = pci_get_vendor(dev); pci_id.did = pci_get_device(dev); pci_id.rev = pci_get_revid(dev); pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev); size = him->get_adapter_size(&pci_id); hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); if (!hba->ldm_adapter.him_handle) return ENXIO; hba->pcidev = dev; hba->pciaddr.tree = 0; hba->pciaddr.bus = pci_get_bus(dev); hba->pciaddr.device = pci_get_slot(dev); hba->pciaddr.function = pci_get_function(dev); if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return -1; } os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); if (!ldm_register_adapter(&hba->ldm_adapter)) { size = ldm_get_vbus_size(); vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK); if (!vbus_ext) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return -1; } memset(vbus_ext, 0, sizeof(VBUS_EXT)); vbus_ext->ext_type = EXT_TYPE_VBUS; ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); ldm_register_adapter(&hba->ldm_adapter); } ldm_for_each_vbus(vbus, vbus_ext) { if (hba->ldm_adapter.vbus==vbus) { hba->vbus_ext = vbus_ext; hba->next = vbus_ext->hba_list; vbus_ext->hba_list = hba; break; } } return 0; } /* * Maybe we'd better to use the bus_dmamem_alloc to alloc DMA memory, * but there are some problems currently (alignment, etc). 
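 *
 * Illustrative sketch only: the bus_dma route alluded to above would look
 * roughly like the following, where "dmat" is a hypothetical tag that
 * would have to encode the required alignment/boundary constraints.
 *
 *	void *vaddr;
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamem_alloc(dmat, &vaddr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
 *	    &map) != 0)
 *		return NULL;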
*/ static __inline void *__get_free_pages(int order) { /* don't use low memory - other devices may get starved */ return contigmalloc(PAGE_SIZE<hba_list; hba; hba = hba->next) hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle); ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0); for (f=vbus_ext->freelist_head; f; f=f->next) { KdPrint(("%s: %d*%d=%d bytes", f->tag, f->count, f->size, f->count*f->size)); for (i=0; icount; i++) { p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK); if (!p) return (ENXIO); *p = f->head; f->head = p; } } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size, j; HPT_ASSERT((f->size & (f->alignment-1))==0); for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; KdPrint(("%s: %d*%d=%d bytes, order %d", f->tag, f->count, f->size, f->count*f->size, order)); HPT_ASSERT(f->alignment<=PAGE_SIZE); for (i=0; icount;) { p = (void **)__get_free_pages(order); if (!p) return -1; for (j = size/f->size; j && icount; i++,j--) { *p = f->head; *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p); f->head = p; p = (void **)((unsigned long)p + f->size); } } } HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE); for (i=0; ivbus, p, (BUS_ADDRESS)vtophys(p)); } return 0; } static void hpt_free_mem(PVBUS_EXT vbus_ext) { struct freelist *f; void *p; int i; BUS_ADDRESS bus; for (f=vbus_ext->freelist_head; f; f=f->next) { #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif while ((p=freelist_get(f))) free(p, M_DEVBUF); } for (i=0; ivbus, &bus); HPT_ASSERT(p); free_pages(p, 0); } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size; #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; while ((p=freelist_get_dma(f, &bus))) { if (order) free_pages(p, order); else { /* can't free immediately since other blocks in this page may still be in the list */ if (((HPT_UPTR)p & (PAGE_SIZE-1))==0) dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus); } } } while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus))) free_pages(p, 0); } static int hpt_init_vbus(PVBUS_EXT vbus_ext) { PHBA hba; for (hba = vbus_ext->hba_list; hba; hba = hba->next) if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) { KdPrint(("fail to initialize %p", hba)); return -1; } ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter); return 0; } static void hpt_flush_done(PCOMMAND pCmd) { PVDEV vd = pCmd->target; if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) { vd = vd->u.array.transform->target; HPT_ASSERT(vd); pCmd->target = vd; pCmd->Result = RETURN_PENDING; vdev_queue_cmd(pCmd); return; } *(int *)pCmd->priv = 1; wakeup(pCmd); } /* * flush a vdev (without retry). 
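 * A CMD_TYPE_FLUSH command (with hard_flush set) is queued with
 * hpt_flush_done() as its completion routine; the caller sleeps on the
 * command and resets the vbus if the wait times out.  Returns 0 on success,
 * -1 if the command could not be allocated or did not complete with
 * RETURN_SUCCESS.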
*/ static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd) { PCOMMAND pCmd; int result = 0, done; HPT_UINT count; KdPrint(("flusing dev %p", vd)); hpt_lock_vbus(vbus_ext); if (mIsArray(vd->type) && vd->u.array.transform) count = MAX(vd->u.array.transform->source->cmds_per_request, vd->u.array.transform->target->cmds_per_request); else count = vd->cmds_per_request; pCmd = ldm_alloc_cmds(vd->vbus, count); if (!pCmd) { hpt_unlock_vbus(vbus_ext); return -1; } pCmd->type = CMD_TYPE_FLUSH; pCmd->flags.hard_flush = 1; pCmd->target = vd; pCmd->done = hpt_flush_done; done = 0; pCmd->priv = &done; ldm_queue_cmd(pCmd); if (!done) { while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) { ldm_reset_vbus(vd->vbus); } } KdPrint(("flush result %d", pCmd->Result)); if (pCmd->Result!=RETURN_SUCCESS) result = -1; ldm_free_cmds(pCmd); hpt_unlock_vbus(vbus_ext); return result; } static void hpt_stop_tasks(PVBUS_EXT vbus_ext); static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PHBA hba; int i; KdPrint(("hpt_shutdown_vbus")); /* stop all ctl tasks and disable the worker taskqueue */ hpt_stop_tasks(vbus_ext); vbus_ext->worker.ta_context = 0; /* flush devices */ for (i=0; ihba_list; hba; hba=hba->next) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); hpt_free_mem(vbus_ext); while ((hba=vbus_ext->hba_list)) { vbus_ext->hba_list = hba->next; free(hba->ldm_adapter.him_handle, M_DEVBUF); } free(vbus_ext, M_DEVBUF); KdPrint(("hpt_shutdown_vbus done")); } static void __hpt_do_tasks(PVBUS_EXT vbus_ext) { OSM_TASK *tasks; tasks = vbus_ext->tasks; vbus_ext->tasks = 0; while (tasks) { OSM_TASK *t = tasks; tasks = t->next; t->next = 0; t->func(vbus_ext->vbus, t->data); } } static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending) { if(vbus_ext){ hpt_lock_vbus(vbus_ext); __hpt_do_tasks(vbus_ext); hpt_unlock_vbus(vbus_ext); } } static void hpt_action(struct cam_sim *sim, union ccb *ccb); static void hpt_poll(struct cam_sim *sim); static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg); static void hpt_pci_intr(void *arg); static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext) { POS_CMDEXT p = vbus_ext->cmdext_list; if (p) vbus_ext->cmdext_list = p->next; return p; } static __inline void cmdext_put(POS_CMDEXT p) { p->next = p->vbus_ext->cmdext_list; p->vbus_ext->cmdext_list = p; } static void hpt_timeout(void *arg) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; KdPrint(("pCmd %p timeout", pCmd)); ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus); } static void os_cmddone(PCOMMAND pCmd) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result)); untimeout(hpt_timeout, pCmd, ccb->ccb_h.timeout_ch); switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(ext->vbus_ext->io_dmat, 
ext->dma_map); cmdext_put(ext); ldm_free_cmds(pCmd); xpt_done(ccb); } static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { - POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; - union ccb *ccb = ext->ccb; - bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; - int idx; - - if(logical) { - if (ccb->ccb_h.flags & CAM_DATA_PHYS) - panic("physical address unsupported"); - - if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { - if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) - panic("physical address unsupported"); - - for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { - os_set_sgptr(&pSg[idx], (HPT_U8 *)(HPT_UPTR)sgList[idx].ds_addr); - pSg[idx].size = sgList[idx].ds_len; - pSg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; - } - } - else { - os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); - pSg->size = ccb->csio.dxfer_len; - pSg->eot = 1; - } - return TRUE; - } - /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; PSG psg = pCmd->psg; int idx; HPT_ASSERT(pCmd->flags.physical_sg); - if (error || nsegs == 0) + if (error) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); for (idx = 0; idx < nsegs; idx++, psg++) { psg->addr.bus = segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } - psg[-1].eot = 1; + if (nsegs) + psg[-1].eot = 1; if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); } ext->ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); ldm_queue_cmd(pCmd); } static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PVDEV vd; PCOMMAND pCmd; POS_CMDEXT ext; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", ccb, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] )); /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= osm_max_targets || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } vd = ldm_find_target(vbus, ccb->ccb_h.target_id); if (!vd) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } switch (cdb[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: { PINQUIRYDATA inquiryData; memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; inquiryData->AdditionalLength = 31; inquiryData->CommandQueue = 1; memcpy(&inquiryData->VendorId, "HPT ", 8); memcpy(&inquiryData->ProductId, "DISK 0_0 ", 16); if (vd->target_id / 10) { inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0'; inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0'; } else inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0'; memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); ccb->ccb_h.status = CAM_REQ_CMP; } break; case READ_CAPACITY: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U32 cap; if (vd->capacity>0xfffffffful) cap = 0xfffffffful; else cap = vd->capacity - 1; rbuf[0] = (HPT_U8)(cap>>24); rbuf[1] = 
(HPT_U8)(cap>>16); rbuf[2] = (HPT_U8)(cap>>8); rbuf[3] = (HPT_U8)cap; rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2; rbuf[7] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case SERVICE_ACTION_IN: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U64 cap = vd->capacity - 1; rbuf[0] = (HPT_U8)(cap>>56); rbuf[1] = (HPT_U8)(cap>>48); rbuf[2] = (HPT_U8)(cap>>40); rbuf[3] = (HPT_U8)(cap>>32); rbuf[4] = (HPT_U8)(cap>>24); rbuf[5] = (HPT_U8)(cap>>16); rbuf[6] = (HPT_U8)(cap>>8); rbuf[7] = (HPT_U8)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2; rbuf[11] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case READ_6: case READ_10: case READ_16: case WRITE_6: case WRITE_10: case WRITE_16: case 0x13: case 0x2f: case 0x8f: /* VERIFY_16 */ { int error; pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); ccb->ccb_h.status = CAM_BUSY; break; } switch (cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; break; case READ_16: case WRITE_16: case 0x8f: /* VERIFY_16 */ { HPT_U64 block = ((HPT_U64)cdb[2]<<56) | ((HPT_U64)cdb[3]<<48) | ((HPT_U64)cdb[4]<<40) | ((HPT_U64)cdb[5]<<32) | ((HPT_U64)cdb[6]<<24) | ((HPT_U64)cdb[7]<<16) | ((HPT_U64)cdb[8]<<8) | ((HPT_U64)cdb[9]); pCmd->uCmd.Ide.Lba = block; pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); break; } default: pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); break; } switch (cdb[0]) { case READ_6: case READ_10: case READ_16: pCmd->flags.data_in = 1; break; case WRITE_6: case WRITE_10: case WRITE_16: pCmd->flags.data_out = 1; break; } pCmd->priv = ext = cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; pCmd->flags.physical_sg = 1; error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_action(struct cam_sim *sim, union ccb *ccb) { PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: hpt_lock_vbus(vbus_ext); hpt_scsi_io(vbus_ext, ccb); hpt_unlock_vbus(vbus_ext); return; case XPT_RESET_BUS: hpt_lock_vbus(vbus_ext); ldm_reset_vbus((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: ccb->ccg.heads = 255; ccb->ccg.secs_per_track = 63; ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = osm_max_targets; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = osm_max_targets; cpi->base_transfer_speed 
= 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); #if (__FreeBSD_version >= 800000) cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; #endif cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_pci_intr(void *arg) { PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; hpt_lock_vbus(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); } static void hpt_poll(struct cam_sim *sim) { hpt_pci_intr(cam_sim_softc(sim)); } static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { KdPrint(("hpt_async")); } static int hpt_shutdown(device_t dev) { KdPrint(("hpt_shutdown(dev=%p)", dev)); return 0; } static int hpt_detach(device_t dev) { /* we don't allow the driver to be unloaded. */ return EBUSY; } static void hpt_ioctl_done(struct _IOCTL_ARG *arg) { arg->ioctl_cmnd = 0; wakeup(arg); } static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) { ioctl_args->result = -1; ioctl_args->done = hpt_ioctl_done; ioctl_args->ioctl_cmnd = (void *)1; hpt_lock_vbus(vbus_ext); ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); while (ioctl_args->ioctl_cmnd) { if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; ldm_reset_vbus((PVBUS)vbus_ext->vbus); __hpt_do_tasks(vbus_ext); } /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ hpt_unlock_vbus(vbus_ext); } static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) { PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { __hpt_do_ioctl(vbus_ext, ioctl_args); if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) return; } } #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ IOCTL_ARG arg;\ arg.dwIoControlCode = code;\ arg.lpInBuffer = inbuf;\ arg.lpOutBuffer = outbuf;\ arg.nInBufferSize = insize;\ arg.nOutBufferSize = outsize;\ arg.lpBytesReturned = 0;\ hpt_do_ioctl(&arg);\ arg.result;\ }) #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) { int i; HPT_U32 count = nMaxCount-1; if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) return -1; nMaxCount = (int)pIds[0]; for (i=0; i=503000 .d_version = D_VERSION, #endif #if (__FreeBSD_version>=503000 && __FreeBSD_version<600034) .d_flags = D_NEEDGIANT, #endif #if __FreeBSD_version<600034 #if __FreeBSD_version>501000 .d_maj = MAJOR_AUTO, #else .d_maj = HPT_DEV_MAJOR, #endif #endif }; static struct intr_config_hook hpt_ich; /* * hpt_final_init will be called after all hpt_attach. 
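 * It runs from the intr_config_hook set up in hpt_init(): it allocates the
 * per-vbus memory pools, initializes the hardware, creates a CAM SIM, bus
 * and path for every vbus, hooks up the PCI interrupts, and finally creates
 * the driver's /dev control node.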
*/ static void hpt_final_init(void *dummy) { int i; PVBUS_EXT vbus_ext; PVBUS vbus; PHBA hba; /* Clear the config hook */ config_intrhook_disestablish(&hpt_ich); /* allocate memory */ i = 0; ldm_for_each_vbus(vbus, vbus_ext) { if (hpt_alloc_mem(vbus_ext)) { os_printk("out of memory"); return; } i++; } if (!i) { os_printk("no controller detected."); return; } /* initializing hardware */ ldm_for_each_vbus(vbus, vbus_ext) { /* make timer available here */ callout_handle_init(&vbus_ext->timer); if (hpt_init_vbus(vbus_ext)) { os_printk("fail to initialize hardware"); break; /* FIXME */ } } /* register CAM interface */ ldm_for_each_vbus(vbus, vbus_ext) { struct cam_devq *devq; struct ccb_setasync ccb; #if (__FreeBSD_version >= 500000) mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF); #endif if (bus_dma_tag_create(NULL,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ os_max_sg_descriptors, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ #if __FreeBSD_version>502000 busdma_lock_mutex, /* lockfunc */ &vbus_ext->lock, /* lockfuncarg */ #endif &vbus_ext->io_dmat /* tag */)) { return ; } for (i=0; ivbus_ext = vbus_ext; ext->next = vbus_ext->cmdext_list; vbus_ext->cmdext_list = ext; if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { os_printk("Can't create dma map(%d)", i); return ; } } if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { os_printk("cam_simq_alloc failed"); return ; } #if __FreeBSD_version > 700025 vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, 0, &Giant, os_max_queue_comm, /*tagged*/8, devq); #else vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, 0, os_max_queue_comm, /*tagged*/8, devq); #endif if (!vbus_ext->sim) { os_printk("cam_sim_alloc failed"); cam_simq_free(devq); return ; } #if __FreeBSD_version > 700044 if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { #else if (xpt_bus_register(vbus_ext->sim, 0) != CAM_SUCCESS) { #endif os_printk("xpt_bus_register failed"); cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); vbus_ext->sim = NULL; return ; } if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { os_printk("xpt_create_path failed"); xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); vbus_ext->sim = NULL; return ; } xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = AC_LOST_DEVICE; ccb.callback = hpt_async; ccb.callback_arg = vbus_ext; xpt_action((union ccb *)&ccb); for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; } if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM, #if __FreeBSD_version > 700025 NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) #else hpt_pci_intr, vbus_ext, &hba->irq_handle)) #endif { os_printk("can't set up interrupt"); return ; } hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); } vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); if (!vbus_ext->shutdown_eh) os_printk("Shutdown event registration 
failed"); } ldm_for_each_vbus(vbus, vbus_ext) { TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); if (vbus_ext->tasks) TASK_ENQUEUE(&vbus_ext->worker); } make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, driver_name); } #if defined(KLD_MODULE) && (__FreeBSD_version >= 503000) typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ }; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ }; static void override_kernel_driver(void) { driverlink_t dl, dlfirst; driver_t *tmpdriver; devclass_t dc = devclass_find("pci"); if (dc){ dlfirst = TAILQ_FIRST(&dc->drivers); for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { if(strcmp(dl->driver->name, driver_name) == 0) { tmpdriver=dl->driver; dl->driver=dlfirst->driver; dlfirst->driver=tmpdriver; break; } } } } #else #define override_kernel_driver() #endif static void hpt_init(void *dummy) { os_printk("%s %s", driver_name_long, driver_ver); override_kernel_driver(); init_config(); hpt_ich.ich_func = hpt_final_init; hpt_ich.ich_arg = NULL; if (config_intrhook_establish(&hpt_ich) != 0) { printf("%s: cannot establish configuration hook\n", driver_name_long); } } SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), { 0, 0 } }; static driver_t hpt_pci_driver = { driver_name, driver_methods, sizeof(HBA) }; static devclass_t hpt_devclass; #ifndef TARGETNAME #error "no TARGETNAME found" #endif /* use this to make TARGETNAME be expanded */ #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2) #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5) __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); __MODULE_VERSION(TARGETNAME, 1); __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1); #if __FreeBSD_version>503000 typedef struct cdev * ioctl_dev_t; #else typedef dev_t ioctl_dev_t; #endif #if __FreeBSD_version >= 500000 typedef struct thread * ioctl_thread_t; #else typedef struct proc * ioctl_thread_t; #endif static int hpt_open(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t td) { return 0; } static int hpt_close(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t td) { return 0; } static int hpt_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data, int fflag, ioctl_thread_t td) { PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data; IOCTL_ARG ioctl_args; HPT_U32 bytesReturned; switch (cmd){ case HPT_DO_IOCONTROL: { if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n", piop->dwIoControlCode, piop->lpInBuffer, piop->nInBufferSize, piop->lpOutBuffer, piop->nOutBufferSize)); memset(&ioctl_args, 0, sizeof(ioctl_args)); ioctl_args.dwIoControlCode = piop->dwIoControlCode; ioctl_args.nInBufferSize = piop->nInBufferSize; ioctl_args.nOutBufferSize = piop->nOutBufferSize; ioctl_args.lpBytesReturned = &bytesReturned; 
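/*
 * The user-supplied buffers are marshalled through kernel copies: lpInBuffer
 * is copyin()'d before the request is handed to hpt_do_ioctl(), and
 * lpOutBuffer plus the returned byte count are copyout()'d on success, so
 * the core ioctl path only ever sees kernel addresses.
 */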
if (ioctl_args.nInBufferSize) { ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpInBuffer) goto invalid; if (copyin((void*)piop->lpInBuffer, ioctl_args.lpInBuffer, piop->nInBufferSize)) goto invalid; } if (ioctl_args.nOutBufferSize) { ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpOutBuffer) goto invalid; } #if (__FreeBSD_version >= 500000) mtx_lock(&Giant); #endif hpt_do_ioctl(&ioctl_args); #if (__FreeBSD_version >= 500000) mtx_unlock(&Giant); #endif if (ioctl_args.result==HPT_IOCTL_RESULT_OK) { if (piop->nOutBufferSize) { if (copyout(ioctl_args.lpOutBuffer, (void*)piop->lpOutBuffer, piop->nOutBufferSize)) goto invalid; } if (piop->lpBytesReturned) { if (copyout(&bytesReturned, (void*)piop->lpBytesReturned, sizeof(HPT_U32))) goto invalid; } if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return 0; } invalid: if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return EFAULT; } return EFAULT; } case HPT_SCAN_BUS: { return hpt_rescan_bus(); } default: KdPrint(("invalid command!")); return EFAULT; } } static int hpt_rescan_bus(void) { struct cam_path *path; union ccb *ccb; PVBUS vbus; PVBUS_EXT vbus_ext; #if (__FreeBSD_version >= 500000) mtx_lock(&Giant); #endif ldm_for_each_vbus(vbus, vbus_ext) { if (xpt_create_path(&path, xpt_periph, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) return(EIO); if ((ccb = malloc(sizeof(union ccb), M_TEMP, M_WAITOK)) == NULL) return(ENOMEM); bzero(ccb, sizeof(union ccb)); xpt_setup_ccb(&ccb->ccb_h, path, 5); ccb->ccb_h.func_code = XPT_SCAN_BUS; ccb->ccb_h.cbfcnp = hpt_bus_scan_cb; ccb->crcn.flags = CAM_FLAG_NONE; xpt_action(ccb); } #if (__FreeBSD_version >= 500000) mtx_unlock(&Giant); #endif return(0); } static void hpt_bus_scan_cb(struct cam_periph *periph, union ccb *ccb) { if (ccb->ccb_h.status != CAM_REQ_CMP) KdPrint(("cam_scan_callback: failure status = %x",ccb->ccb_h.status)); else KdPrint(("Scan bus successfully!")); xpt_free_path(ccb->ccb_h.path); free(ccb, M_TEMP); return; } Index: projects/physbio/sys/dev/hptmv/entry.c =================================================================== --- projects/physbio/sys/dev/hptmv/entry.c (revision 243875) +++ projects/physbio/sys/dev/hptmv/entry.c (revision 243876) @@ -1,3103 +1,3080 @@ /* * Copyright (c) 2004-2005 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #if (__FreeBSD_version >= 500000) #include #include #endif #if (__FreeBSD_version >= 500000) #include #include #else #include #include #include #include #endif #ifndef __KERNEL__ #define __KERNEL__ #endif #include #include #include #include #ifdef DEBUG #ifdef DEBUG_LEVEL int hpt_dbg_level = DEBUG_LEVEL; #else int hpt_dbg_level = 0; #endif #endif #define MV_ERROR printf /* * CAM SIM entry points */ static int hpt_probe (device_t dev); static void launch_worker_thread(void); static int hpt_attach(device_t dev); static int hpt_detach(device_t dev); static int hpt_shutdown(device_t dev); static void hpt_poll(struct cam_sim *sim); static void hpt_intr(void *arg); static void hpt_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void hpt_action(struct cam_sim *sim, union ccb *ccb); static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), DEVMETHOD_END }; static driver_t hpt_pci_driver = { __str(PROC_DIR_NAME), driver_methods, sizeof(IAL_ADAPTER_T) }; static devclass_t hpt_devclass; #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) __DRIVER_MODULE(PROC_DIR_NAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); MODULE_DEPEND(PROC_DIR_NAME, cam, 1, 1, 1); #define ccb_ccb_ptr spriv_ptr0 #define ccb_adapter ccb_h.spriv_ptr1 static void SetInquiryData(PINQUIRYDATA inquiryData, PVDevice pVDev); static void HPTLIBAPI OsSendCommand (_VBUS_ARG union ccb * ccb); static void HPTLIBAPI fOsCommandDone(_VBUS_ARG PCommand pCmd); static void ccb_done(union ccb *ccb); static void hpt_queue_ccb(union ccb **ccb_Q, union ccb *ccb); static void hpt_free_ccb(union ccb **ccb_Q, union ccb *ccb); static void hptmv_free_edma_queues(IAL_ADAPTER_T *pAdapter); static void hptmv_free_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum); static void handleEdmaError(_VBUS_ARG PCommand pCmd); static int hptmv_init_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum); static int fResetActiveCommands(PVBus _vbus_p); static void fRegisterVdevice(IAL_ADAPTER_T *pAdapter); static int hptmv_allocate_edma_queues(IAL_ADAPTER_T *pAdapter); static void hptmv_handle_event_disconnect(void *data); static void hptmv_handle_event_connect(void *data); static int start_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum); static void init_vdev_params(IAL_ADAPTER_T *pAdapter, MV_U8 channel); static int hptmv_parse_identify_results(MV_SATA_CHANNEL *pMvSataChannel); static int HPTLIBAPI fOsBuildSgl(_VBUS_ARG PCommand pCmd, FPSCAT_GATH pSg, int logical); static MV_BOOLEAN CommandCompletionCB(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channelNum, MV_COMPLETION_TYPE comp_type, MV_VOID_PTR commandId, MV_U16 responseFlags, MV_U32 timeStamp, MV_STORAGE_DEVICE_REGISTERS *registerStruct); 
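/*
 * CommandCompletionCB() is the per-command EDMA completion callback, and
 * hptmv_event_notify() (below) handles cable connect/disconnect and adapter
 * error events reported by the Marvell SATA layer.
 */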
static MV_BOOLEAN hptmv_event_notify(MV_SATA_ADAPTER *pMvSataAdapter, MV_EVENT_TYPE eventType, MV_U32 param1, MV_U32 param2); #define ccb_ccb_ptr spriv_ptr0 #define ccb_adapter ccb_h.spriv_ptr1 IAL_ADAPTER_T *gIal_Adapter = 0; IAL_ADAPTER_T *pCurAdapter = 0; static MV_SATA_CHANNEL gMvSataChannels[MAX_VBUS][MV_SATA_CHANNELS_NUM]; typedef struct st_HPT_DPC { IAL_ADAPTER_T *pAdapter; void (*dpc)(IAL_ADAPTER_T *, void *, UCHAR); void *arg; UCHAR flags; } ST_HPT_DPC; #define MAX_DPC 16 UCHAR DPC_Request_Nums = 0; static ST_HPT_DPC DpcQueue[MAX_DPC]; static int DpcQueue_First=0; static int DpcQueue_Last = 0; char DRIVER_VERSION[] = "v1.16"; #if (__FreeBSD_version >= 500000) static struct mtx driver_lock; intrmask_t lock_driver() { intrmask_t spl = 0; mtx_lock(&driver_lock); return spl; } void unlock_driver(intrmask_t spl) { mtx_unlock(&driver_lock); } #else static int driver_locked = 0; intrmask_t lock_driver() { intrmask_t spl = splcam(); loop: while (driver_locked) tsleep(&driver_locked, PRIBIO, "hptlck", hz); atomic_add_int(&driver_locked, 1); if (driver_locked>1) { atomic_subtract_int(&driver_locked, 1); goto loop; } return spl; } void unlock_driver(intrmask_t spl) { atomic_subtract_int(&driver_locked, 1); if (driver_locked==0) { wakeup(&driver_locked); } splx(spl); } #endif /******************************************************************************* * Name: hptmv_free_channel * * Description: free allocated queues for the given channel * * Parameters: pMvSataAdapter - pointer to the RR18xx controler this * channel connected to. * channelNum - channel number. * ******************************************************************************/ static void hptmv_free_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum) { HPT_ASSERT(channelNum < MV_SATA_CHANNELS_NUM); pAdapter->mvSataAdapter.sataChannel[channelNum] = NULL; } static void failDevice(PVDevice pVDev) { PVBus _vbus_p = pVDev->pVBus; IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)_vbus_p->OsExt; pVDev->u.disk.df_on_line = 0; pVDev->vf_online = 0; if (pVDev->pfnDeviceFailed) CallWhenIdle(_VBUS_P (DPC_PROC)pVDev->pfnDeviceFailed, pVDev); fNotifyGUI(ET_DEVICE_REMOVED, pVDev); #ifndef FOR_DEMO if (pAdapter->ver_601==2 && !pAdapter->beeping) { pAdapter->beeping = 1; BeepOn(pAdapter->mvSataAdapter.adapterIoBaseAddress); set_fail_led(&pAdapter->mvSataAdapter, pVDev->u.disk.mv->channelNumber, 1); } #endif } int MvSataResetChannel(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channel); static void handleEdmaError(_VBUS_ARG PCommand pCmd) { PDevice pDevice = &pCmd->pVDevice->u.disk; MV_SATA_ADAPTER * pSataAdapter = pDevice->mv->mvSataAdapter; if (!pDevice->df_on_line) { KdPrint(("Device is offline")); pCmd->Result = RETURN_BAD_DEVICE; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } if (pCmd->RetryCount++>5) { hpt_printk(("too many retries on channel(%d)\n", pDevice->mv->channelNumber)); failed: failDevice(pCmd->pVDevice); pCmd->Result = RETURN_IDE_ERROR; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } /* reset the channel and retry the command */ if (MvSataResetChannel(pSataAdapter, pDevice->mv->channelNumber)) goto failed; fNotifyGUI(ET_DEVICE_ERROR, Map2pVDevice(pDevice)); hpt_printk(("Retry on channel(%d)\n", pDevice->mv->channelNumber)); fDeviceSendCommand(_VBUS_P pCmd); } /**************************************************************** * Name: hptmv_init_channel * * Description: allocate request and response queues for the EDMA of the * given channel and sets other fields. 
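 *              The request queue must be 1 KB aligned and the response
 *              queue 256 byte aligned; the routine fails if the precomputed
 *              DMA addresses do not meet those alignments.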
* * Parameters: * pAdapter - pointer to the emulated adapter data structure * channelNum - channel number. * Return: 0 on success, otherwise on failure ****************************************************************/ static int hptmv_init_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum) { MV_SATA_CHANNEL *pMvSataChannel; dma_addr_t req_dma_addr; dma_addr_t rsp_dma_addr; if (channelNum >= MV_SATA_CHANNELS_NUM) { MV_ERROR("RR18xx[%d]: Bad channelNum=%d", pAdapter->mvSataAdapter.adapterId, channelNum); return -1; } pMvSataChannel = &gMvSataChannels[pAdapter->mvSataAdapter.adapterId][channelNum]; pAdapter->mvSataAdapter.sataChannel[channelNum] = pMvSataChannel; pMvSataChannel->channelNumber = channelNum; pMvSataChannel->lba48Address = MV_FALSE; pMvSataChannel->maxReadTransfer = MV_FALSE; pMvSataChannel->requestQueue = (struct mvDmaRequestQueueEntry *) (pAdapter->requestsArrayBaseAlignedAddr + (channelNum * MV_EDMA_REQUEST_QUEUE_SIZE)); req_dma_addr = pAdapter->requestsArrayBaseDmaAlignedAddr + (channelNum * MV_EDMA_REQUEST_QUEUE_SIZE); KdPrint(("requestQueue addr is 0x%llX", (HPT_U64)(ULONG_PTR)req_dma_addr)); /* check the 1K alignment of the request queue*/ if (req_dma_addr & 0x3ff) { MV_ERROR("RR18xx[%d]: request queue allocated isn't 1 K aligned," " dma_addr=%llx channel=%d\n", pAdapter->mvSataAdapter.adapterId, (HPT_U64)(ULONG_PTR)req_dma_addr, channelNum); return -1; } pMvSataChannel->requestQueuePciLowAddress = req_dma_addr; pMvSataChannel->requestQueuePciHiAddress = 0; KdPrint(("RR18xx[%d,%d]: request queue allocated: 0x%p", pAdapter->mvSataAdapter.adapterId, channelNum, pMvSataChannel->requestQueue)); pMvSataChannel->responseQueue = (struct mvDmaResponseQueueEntry *) (pAdapter->responsesArrayBaseAlignedAddr + (channelNum * MV_EDMA_RESPONSE_QUEUE_SIZE)); rsp_dma_addr = pAdapter->responsesArrayBaseDmaAlignedAddr + (channelNum * MV_EDMA_RESPONSE_QUEUE_SIZE); /* check the 256 alignment of the response queue*/ if (rsp_dma_addr & 0xff) { MV_ERROR("RR18xx[%d,%d]: response queue allocated isn't 256 byte " "aligned, dma_addr=%llx\n", pAdapter->mvSataAdapter.adapterId, channelNum, (HPT_U64)(ULONG_PTR)rsp_dma_addr); return -1; } pMvSataChannel->responseQueuePciLowAddress = rsp_dma_addr; pMvSataChannel->responseQueuePciHiAddress = 0; KdPrint(("RR18xx[%d,%d]: response queue allocated: 0x%p", pAdapter->mvSataAdapter.adapterId, channelNum, pMvSataChannel->responseQueue)); pAdapter->mvChannel[channelNum].online = MV_TRUE; return 0; } /****************************************************************************** * Name: hptmv_parse_identify_results * * Description: this functions parses the identify command results, checks * that the connected deives can be accesed by RR18xx EDMA, * and updates the channel stucture accordingly. * * Parameters: pMvSataChannel, pointer to the channel data structure. * * Returns: =0 ->success, < 0 ->failure. * ******************************************************************************/ static int hptmv_parse_identify_results(MV_SATA_CHANNEL *pMvSataChannel) { MV_U16 *iden = pMvSataChannel->identifyDevice; /*LBA addressing*/ if (! (iden[IDEN_CAPACITY_1_OFFSET] & 0x200)) { KdPrint(("IAL Error in IDENTIFY info: LBA not supported\n")); return -1; } else { KdPrint(("%25s - %s\n", "Capabilities", "LBA supported")); } /*DMA support*/ if (! 
(iden[IDEN_CAPACITY_1_OFFSET] & 0x100)) { KdPrint(("IAL Error in IDENTIFY info: DMA not supported\n")); return -1; } else { KdPrint(("%25s - %s\n", "Capabilities", "DMA supported")); } /* PIO */ if ((iden[IDEN_VALID] & 2) == 0) { KdPrint(("IAL Error in IDENTIFY info: not able to find PIO mode\n")); return -1; } KdPrint(("%25s - 0x%02x\n", "PIO modes supported", iden[IDEN_PIO_MODE_SPPORTED] & 0xff)); /*UDMA*/ if ((iden[IDEN_VALID] & 4) == 0) { KdPrint(("IAL Error in IDENTIFY info: not able to find UDMA mode\n")); return -1; } /* 48 bit address */ if ((iden[IDEN_SUPPORTED_COMMANDS2] & 0x400)) { KdPrint(("%25s - %s\n", "LBA48 addressing", "supported")); pMvSataChannel->lba48Address = MV_TRUE; } else { KdPrint(("%25s - %s\n", "LBA48 addressing", "Not supported")); pMvSataChannel->lba48Address = MV_FALSE; } return 0; } static void init_vdev_params(IAL_ADAPTER_T *pAdapter, MV_U8 channel) { PVDevice pVDev = &pAdapter->VDevices[channel]; MV_SATA_CHANNEL *pMvSataChannel = pAdapter->mvSataAdapter.sataChannel[channel]; MV_U16_PTR IdentifyData = pMvSataChannel->identifyDevice; pMvSataChannel->outstandingCommands = 0; pVDev->u.disk.mv = pMvSataChannel; pVDev->u.disk.df_on_line = 1; pVDev->u.disk.pVBus = &pAdapter->VBus; pVDev->pVBus = &pAdapter->VBus; #ifdef SUPPORT_48BIT_LBA if (pMvSataChannel->lba48Address == MV_TRUE) pVDev->u.disk.dDeRealCapacity = ((IdentifyData[101]<<16) | IdentifyData[100]) - 1; else #endif if(IdentifyData[53] & 1) { pVDev->u.disk.dDeRealCapacity = (((IdentifyData[58]<<16 | IdentifyData[57]) < (IdentifyData[61]<<16 | IdentifyData[60])) ? (IdentifyData[61]<<16 | IdentifyData[60]) : (IdentifyData[58]<<16 | IdentifyData[57])) - 1; } else pVDev->u.disk.dDeRealCapacity = (IdentifyData[61]<<16 | IdentifyData[60]) - 1; pVDev->u.disk.bDeUsable_Mode = pVDev->u.disk.bDeModeSetting = pAdapter->mvChannel[channel].maxPioModeSupported - MV_ATA_TRANSFER_PIO_0; if (pAdapter->mvChannel[channel].maxUltraDmaModeSupported!=0xFF) { pVDev->u.disk.bDeUsable_Mode = pVDev->u.disk.bDeModeSetting = pAdapter->mvChannel[channel].maxUltraDmaModeSupported - MV_ATA_TRANSFER_UDMA_0 + 8; } } static void device_change(IAL_ADAPTER_T *pAdapter , MV_U8 channelIndex, int plugged) { PVDevice pVDev; MV_SATA_ADAPTER *pMvSataAdapter = &pAdapter->mvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel = pMvSataAdapter->sataChannel[channelIndex]; if (!pMvSataChannel) return; if (plugged) { pVDev = &(pAdapter->VDevices[channelIndex]); init_vdev_params(pAdapter, channelIndex); pVDev->VDeviceType = pVDev->u.disk.df_atapi? VD_ATAPI : pVDev->u.disk.df_removable_drive? 
VD_REMOVABLE : VD_SINGLE_DISK; pVDev->VDeviceCapacity = pVDev->u.disk.dDeRealCapacity-SAVE_FOR_RAID_INFO; pVDev->pfnSendCommand = pfnSendCommand[pVDev->VDeviceType]; pVDev->pfnDeviceFailed = pfnDeviceFailed[pVDev->VDeviceType]; pVDev->vf_online = 1; #ifdef SUPPORT_ARRAY if(pVDev->pParent) { int iMember; for(iMember = 0; iMember < pVDev->pParent->u.array.bArnMember; iMember++) if((PVDevice)pVDev->pParent->u.array.pMember[iMember] == pVDev) pVDev->pParent->u.array.pMember[iMember] = NULL; pVDev->pParent = NULL; } #endif fNotifyGUI(ET_DEVICE_PLUGGED,pVDev); fCheckBootable(pVDev); RegisterVDevice(pVDev); #ifndef FOR_DEMO if (pAdapter->beeping) { pAdapter->beeping = 0; BeepOff(pAdapter->mvSataAdapter.adapterIoBaseAddress); } #endif } else { pVDev = &(pAdapter->VDevices[channelIndex]); failDevice(pVDev); } } static int start_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum) { MV_SATA_ADAPTER *pMvSataAdapter = &pAdapter->mvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel = pMvSataAdapter->sataChannel[channelNum]; MV_CHANNEL *pChannelInfo = &(pAdapter->mvChannel[channelNum]); MV_U32 udmaMode,pioMode; KdPrint(("RR18xx [%d]: start channel (%d)", pMvSataAdapter->adapterId, channelNum)); /* Software reset channel */ if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform Software reset\n", pMvSataAdapter->adapterId, channelNum); return -1; } /* Hardware reset channel */ if (mvSataChannelHardReset(pMvSataAdapter, channelNum) == MV_FALSE) { /* If failed, try again - this is when trying to hardreset a channel */ /* when drive is just spinning up */ StallExec(5000000); /* wait 5 sec before trying again */ if (mvSataChannelHardReset(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform Hard reset\n", pMvSataAdapter->adapterId, channelNum); return -1; } } /* identify device*/ if (mvStorageDevATAIdentifyDevice(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform ATA Identify command\n" , pMvSataAdapter->adapterId, channelNum); return -1; } if (hptmv_parse_identify_results(pMvSataChannel)) { MV_ERROR("RR18xx [%d,%d]: Error in parsing ATA Identify message\n" , pMvSataAdapter->adapterId, channelNum); return -1; } /* mvStorageDevATASetFeatures */ /* Disable 8 bit PIO in case CFA enabled */ if (pMvSataChannel->identifyDevice[86] & 4) { KdPrint(("RR18xx [%d]: Disable 8 bit PIO (CFA enabled) \n", pMvSataAdapter->adapterId)); if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_DISABLE_8_BIT_PIO, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]: channel %d: mvStorageDevATASetFeatures" " failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } /* Write cache */ #ifdef ENABLE_WRITE_CACHE if (pMvSataChannel->identifyDevice[82] & 0x20) { if (!(pMvSataChannel->identifyDevice[85] & 0x20)) /* if not enabled by default */ { if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_ENABLE_WCACHE, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]: channel %d: mvStorageDevATASetFeatures failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]: channel %d, write cache enabled\n", pMvSataAdapter->adapterId, channelNum)); } else { KdPrint(("RR18xx [%d]: channel %d, write cache not supported\n", pMvSataAdapter->adapterId, channelNum)); } #else /* disable write cache */ { if (pMvSataChannel->identifyDevice[85] & 0x20) { KdPrint(("RR18xx [%d]: channel =%d, disable write cache\n", pMvSataAdapter->adapterId, channelNum)); 
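/* IDENTIFY word 85 bit 5 reports the write cache as enabled; turn it off. */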
if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_DISABLE_WCACHE, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]: channel %d: mvStorageDevATASetFeatures failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]: channel=%d, write cache disabled\n", pMvSataAdapter->adapterId, channelNum)); } #endif /* Set transfer mode */ KdPrint(("RR18xx [%d] Set transfer mode XFER_PIO_SLOW\n", pMvSataAdapter->adapterId)); if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, MV_ATA_TRANSFER_PIO_SLOW, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } if (pMvSataChannel->identifyDevice[IDEN_PIO_MODE_SPPORTED] & 1) { pioMode = MV_ATA_TRANSFER_PIO_4; } else if (pMvSataChannel->identifyDevice[IDEN_PIO_MODE_SPPORTED] & 2) { pioMode = MV_ATA_TRANSFER_PIO_3; } else { MV_ERROR("IAL Error in IDENTIFY info: PIO modes 3 and 4 not supported\n"); pioMode = MV_ATA_TRANSFER_PIO_SLOW; } KdPrint(("RR18xx [%d] Set transfer mode XFER_PIO_4\n", pMvSataAdapter->adapterId)); pAdapter->mvChannel[channelNum].maxPioModeSupported = pioMode; if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, pioMode, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } udmaMode = MV_ATA_TRANSFER_UDMA_0; if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 0x40) { udmaMode = MV_ATA_TRANSFER_UDMA_6; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 0x20) { udmaMode = MV_ATA_TRANSFER_UDMA_5; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 0x10) { udmaMode = MV_ATA_TRANSFER_UDMA_4; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 8) { udmaMode = MV_ATA_TRANSFER_UDMA_3; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 4) { udmaMode = MV_ATA_TRANSFER_UDMA_2; } KdPrint(("RR18xx [%d] Set transfer mode XFER_UDMA_%d\n", pMvSataAdapter->adapterId, udmaMode & 0xf)); pChannelInfo->maxUltraDmaModeSupported = udmaMode; /*if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, udmaMode, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; }*/ if (pChannelInfo->maxUltraDmaModeSupported == 0xFF) return TRUE; else do { if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, pChannelInfo->maxUltraDmaModeSupported, 0, 0, 0) == MV_FALSE) { if (pChannelInfo->maxUltraDmaModeSupported > MV_ATA_TRANSFER_UDMA_0) { if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channelNum) == MV_FALSE) { MV_REG_WRITE_BYTE(pMvSataAdapter->adapterIoBaseAddress, pMvSataChannel->eDmaRegsOffset + 0x11c, /* command reg */ MV_ATA_COMMAND_IDLE_IMMEDIATE); mvMicroSecondsDelay(10000); mvSataChannelHardReset(pMvSataAdapter, channelNum); if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channelNum) == MV_FALSE) return FALSE; } if (mvSataChannelHardReset(pMvSataAdapter, channelNum) == MV_FALSE) return FALSE; pChannelInfo->maxUltraDmaModeSupported--; continue; } else return FALSE; } break; }while (1); /* Read look ahead */ #ifdef ENABLE_READ_AHEAD if (pMvSataChannel->identifyDevice[82] & 0x40) { if (!(pMvSataChannel->identifyDevice[85] & 0x40)) /* if not enabled by default */ { if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_ENABLE_RLA, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] 
channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]: channel=%d, read look ahead enabled\n", pMvSataAdapter->adapterId, channelNum)); } else { KdPrint(("RR18xx [%d]: channel %d, Read Look Ahead not supported\n", pMvSataAdapter->adapterId, channelNum)); } #else { if (pMvSataChannel->identifyDevice[86] & 0x20) { KdPrint(("RR18xx [%d]:channel %d, disable read look ahead\n", pMvSataAdapter->adapterId, channelNum)); if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_DISABLE_RLA, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]:channel %d: ATA Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]:channel %d, read look ahead disabled\n", pMvSataAdapter->adapterId, channelNum)); } #endif { KdPrint(("RR18xx [%d]: channel %d config EDMA, Non Queued Mode\n", pMvSataAdapter->adapterId, channelNum)); if (mvSataConfigEdmaMode(pMvSataAdapter, channelNum, MV_EDMA_MODE_NOT_QUEUED, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d Error: mvSataConfigEdmaMode failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } /* Enable EDMA */ if (mvSataEnableChannelDma(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d] Failed to enable DMA, channel=%d\n", pMvSataAdapter->adapterId, channelNum); return -1; } MV_ERROR("RR18xx [%d,%d]: channel started successfully\n", pMvSataAdapter->adapterId, channelNum); #ifndef FOR_DEMO set_fail_led(pMvSataAdapter, channelNum, 0); #endif return 0; } static void hptmv_handle_event(void * data, int flag) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)data; MV_SATA_ADAPTER *pMvSataAdapter = &pAdapter->mvSataAdapter; MV_U8 channelIndex; /* mvOsSemTake(&pMvSataAdapter->semaphore); */ for (channelIndex = 0; channelIndex < MV_SATA_CHANNELS_NUM; channelIndex++) { switch(pAdapter->sataEvents[channelIndex]) { case SATA_EVENT_CHANNEL_CONNECTED: /* Handle only connects */ if (flag == 1) break; KdPrint(("RR18xx [%d,%d]: new device connected\n", pMvSataAdapter->adapterId, channelIndex)); hptmv_init_channel(pAdapter, channelIndex); if (mvSataConfigureChannel( pMvSataAdapter, channelIndex) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d] Failed to configure\n", pMvSataAdapter->adapterId, channelIndex); hptmv_free_channel(pAdapter, channelIndex); } else { /*mvSataChannelHardReset(pMvSataAdapter, channel);*/ if (start_channel( pAdapter, channelIndex)) { MV_ERROR("RR18xx [%d,%d]Failed to start channel\n", pMvSataAdapter->adapterId, channelIndex); hptmv_free_channel(pAdapter, channelIndex); } else { device_change(pAdapter, channelIndex, TRUE); } } pAdapter->sataEvents[channelIndex] = SATA_EVENT_NO_CHANGE; break; case SATA_EVENT_CHANNEL_DISCONNECTED: /* Handle only disconnects */ if (flag == 0) break; KdPrint(("RR18xx [%d,%d]: device disconnected\n", pMvSataAdapter->adapterId, channelIndex)); /* Flush pending commands */ if(pMvSataAdapter->sataChannel[channelIndex]) { _VBUS_INST(&pAdapter->VBus) mvSataFlushDmaQueue (pMvSataAdapter, channelIndex, MV_FLUSH_TYPE_CALLBACK); CheckPendingCall(_VBUS_P0); mvSataRemoveChannel(pMvSataAdapter,channelIndex); hptmv_free_channel(pAdapter, channelIndex); pMvSataAdapter->sataChannel[channelIndex] = NULL; KdPrint(("RR18xx [%d,%d]: channel removed\n", pMvSataAdapter->adapterId, channelIndex)); if (pAdapter->outstandingCommands==0 && DPC_Request_Nums==0) Check_Idle_Call(pAdapter); } else { KdPrint(("RR18xx [%d,%d]: channel already removed!!\n", pMvSataAdapter->adapterId, channelIndex)); } pAdapter->sataEvents[channelIndex] = 
SATA_EVENT_NO_CHANGE; break; case SATA_EVENT_NO_CHANGE: break; default: break; } } /* mvOsSemRelease(&pMvSataAdapter->semaphore); */ } #define EVENT_CONNECT 1 #define EVENT_DISCONNECT 0 static void hptmv_handle_event_connect(void *data) { hptmv_handle_event (data, 0); } static void hptmv_handle_event_disconnect(void *data) { hptmv_handle_event (data, 1); } static MV_BOOLEAN hptmv_event_notify(MV_SATA_ADAPTER *pMvSataAdapter, MV_EVENT_TYPE eventType, MV_U32 param1, MV_U32 param2) { IAL_ADAPTER_T *pAdapter = pMvSataAdapter->IALData; switch (eventType) { case MV_EVENT_TYPE_SATA_CABLE: { MV_U8 channel = param2; if (param1 == EVENT_CONNECT) { pAdapter->sataEvents[channel] = SATA_EVENT_CHANNEL_CONNECTED; KdPrint(("RR18xx [%d,%d]: device connected event received\n", pMvSataAdapter->adapterId, channel)); /* Delete previous timers (if multiple drives connected in the same time */ pAdapter->event_timer_connect = timeout(hptmv_handle_event_connect, pAdapter, 10*hz); } else if (param1 == EVENT_DISCONNECT) { pAdapter->sataEvents[channel] = SATA_EVENT_CHANNEL_DISCONNECTED; KdPrint(("RR18xx [%d,%d]: device disconnected event received \n", pMvSataAdapter->adapterId, channel)); device_change(pAdapter, channel, FALSE); /* Delete previous timers (if multiple drives disconnected in the same time */ /*pAdapter->event_timer_disconnect = timeout(hptmv_handle_event_disconnect, pAdapter, 10*hz); */ /*It is not necessary to wait, handle it directly*/ hptmv_handle_event_disconnect(pAdapter); } else { MV_ERROR("RR18xx: illigal value for param1(%d) at " "connect/disconect event, host=%d\n", param1, pMvSataAdapter->adapterId ); } } break; case MV_EVENT_TYPE_ADAPTER_ERROR: KdPrint(("RR18xx: DEVICE error event received, pci cause " "reg=%x, don't how to handle this\n", param1)); return MV_TRUE; default: MV_ERROR("RR18xx[%d]: unknown event type (%d)\n", pMvSataAdapter->adapterId, eventType); return MV_FALSE; } return MV_TRUE; } static int hptmv_allocate_edma_queues(IAL_ADAPTER_T *pAdapter) { pAdapter->requestsArrayBaseAddr = (MV_U8 *)contigmalloc(REQUESTS_ARRAY_SIZE, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0ul); if (pAdapter->requestsArrayBaseAddr == NULL) { MV_ERROR("RR18xx[%d]: Failed to allocate memory for EDMA request" " queues\n", pAdapter->mvSataAdapter.adapterId); return -1; } pAdapter->requestsArrayBaseDmaAddr = fOsPhysicalAddress(pAdapter->requestsArrayBaseAddr); pAdapter->requestsArrayBaseAlignedAddr = pAdapter->requestsArrayBaseAddr; pAdapter->requestsArrayBaseAlignedAddr += MV_EDMA_REQUEST_QUEUE_SIZE; pAdapter->requestsArrayBaseAlignedAddr = (MV_U8 *) (((ULONG_PTR)pAdapter->requestsArrayBaseAlignedAddr) & ~(ULONG_PTR)(MV_EDMA_REQUEST_QUEUE_SIZE - 1)); pAdapter->requestsArrayBaseDmaAlignedAddr = pAdapter->requestsArrayBaseDmaAddr; pAdapter->requestsArrayBaseDmaAlignedAddr += MV_EDMA_REQUEST_QUEUE_SIZE; pAdapter->requestsArrayBaseDmaAlignedAddr &= ~(ULONG_PTR)(MV_EDMA_REQUEST_QUEUE_SIZE - 1); if ((pAdapter->requestsArrayBaseDmaAlignedAddr - pAdapter->requestsArrayBaseDmaAddr) != (pAdapter->requestsArrayBaseAlignedAddr - pAdapter->requestsArrayBaseAddr)) { MV_ERROR("RR18xx[%d]: Error in Request Quueues Alignment\n", pAdapter->mvSataAdapter.adapterId); contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF); return -1; } /* response queues */ pAdapter->responsesArrayBaseAddr = (MV_U8 *)contigmalloc(RESPONSES_ARRAY_SIZE, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0ul); if (pAdapter->responsesArrayBaseAddr == NULL) { MV_ERROR("RR18xx[%d]: Failed to allocate memory for EDMA response" " 
queues\n", pAdapter->mvSataAdapter.adapterId); contigfree(pAdapter->requestsArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF); return -1; } pAdapter->responsesArrayBaseDmaAddr = fOsPhysicalAddress(pAdapter->responsesArrayBaseAddr); pAdapter->responsesArrayBaseAlignedAddr = pAdapter->responsesArrayBaseAddr; pAdapter->responsesArrayBaseAlignedAddr += MV_EDMA_RESPONSE_QUEUE_SIZE; pAdapter->responsesArrayBaseAlignedAddr = (MV_U8 *) (((ULONG_PTR)pAdapter->responsesArrayBaseAlignedAddr) & ~(ULONG_PTR)(MV_EDMA_RESPONSE_QUEUE_SIZE - 1)); pAdapter->responsesArrayBaseDmaAlignedAddr = pAdapter->responsesArrayBaseDmaAddr; pAdapter->responsesArrayBaseDmaAlignedAddr += MV_EDMA_RESPONSE_QUEUE_SIZE; pAdapter->responsesArrayBaseDmaAlignedAddr &= ~(ULONG_PTR)(MV_EDMA_RESPONSE_QUEUE_SIZE - 1); if ((pAdapter->responsesArrayBaseDmaAlignedAddr - pAdapter->responsesArrayBaseDmaAddr) != (pAdapter->responsesArrayBaseAlignedAddr - pAdapter->responsesArrayBaseAddr)) { MV_ERROR("RR18xx[%d]: Error in Response Quueues Alignment\n", pAdapter->mvSataAdapter.adapterId); contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF); contigfree(pAdapter->responsesArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF); return -1; } return 0; } static void hptmv_free_edma_queues(IAL_ADAPTER_T *pAdapter) { contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF); contigfree(pAdapter->responsesArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF); } static PVOID AllocatePRDTable(IAL_ADAPTER_T *pAdapter) { PVOID ret; if (pAdapter->pFreePRDLink) { KdPrint(("pAdapter->pFreePRDLink:%p\n",pAdapter->pFreePRDLink)); ret = pAdapter->pFreePRDLink; pAdapter->pFreePRDLink = *(void**)ret; return ret; } return NULL; } static void FreePRDTable(IAL_ADAPTER_T *pAdapter, PVOID PRDTable) { *(void**)PRDTable = pAdapter->pFreePRDLink; pAdapter->pFreePRDLink = PRDTable; } extern PVDevice fGetFirstChild(PVDevice pLogical); extern void fResetBootMark(PVDevice pLogical); static void fRegisterVdevice(IAL_ADAPTER_T *pAdapter) { PVDevice pPhysical, pLogical; PVBus pVBus; int i,j; for(i=0;iVDevices[i]); pLogical = pPhysical; while (pLogical->pParent) pLogical = pLogical->pParent; if (pLogical->vf_online==0) { pPhysical->vf_bootmark = pLogical->vf_bootmark = 0; continue; } if (pLogical->VDeviceType==VD_SPARE || pPhysical!=fGetFirstChild(pLogical)) continue; pVBus = &pAdapter->VBus; if(pVBus) { j=0; while(jpVDevice[j]) j++; if(jpVDevice[j] = pLogical; pLogical->pVBus = pVBus; if (j>0 && pLogical->vf_bootmark) { if (pVBus->pVDevice[0]->vf_bootmark) { fResetBootMark(pLogical); } else { do { pVBus->pVDevice[j] = pVBus->pVDevice[j-1]; } while (--j); pVBus->pVDevice[0] = pLogical; } } } } } } PVDevice GetSpareDisk(_VBUS_ARG PVDevice pArray) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)pArray->pVBus->OsExt; LBA_T capacity = LongDiv(pArray->VDeviceCapacity, pArray->u.array.bArnMember-1); LBA_T thiscap, maxcap = MAX_LBA_T; PVDevice pVDevice, pFind = NULL; int i; for(i=0;iVDevices[i]; if(!pVDevice) continue; thiscap = pArray->vf_format_v2? 
pVDevice->u.disk.dDeRealCapacity : pVDevice->VDeviceCapacity; /* find the smallest usable spare disk */ if (pVDevice->VDeviceType==VD_SPARE && pVDevice->u.disk.df_on_line && thiscap < maxcap && thiscap >= capacity) { maxcap = pVDevice->VDeviceCapacity; pFind = pVDevice; } } return pFind; } /****************************************************************** * IO ATA Command *******************************************************************/ int HPTLIBAPI fDeReadWrite(PDevice pDev, ULONG Lba, UCHAR Cmd, void *tmpBuffer) { return mvReadWrite(pDev->mv, Lba, Cmd, tmpBuffer); } void HPTLIBAPI fDeSelectMode(PDevice pDev, UCHAR NewMode) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; UCHAR mvMode; /* 508x don't use MW-DMA? */ if (NewMode>4 && NewMode<8) NewMode = 4; pDev->bDeModeSetting = NewMode; if (NewMode<=4) mvMode = MV_ATA_TRANSFER_PIO_0 + NewMode; else mvMode = MV_ATA_TRANSFER_UDMA_0 + (NewMode-8); /*To fix 88i8030 bug*/ if (mvMode > MV_ATA_TRANSFER_UDMA_0 && mvMode < MV_ATA_TRANSFER_UDMA_4) mvMode = MV_ATA_TRANSFER_UDMA_0; mvSataDisableChannelDma(pSataAdapter, channelIndex); /* Flush pending commands */ mvSataFlushDmaQueue (pSataAdapter, channelIndex, MV_FLUSH_TYPE_NONE); if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_TRANSFER, mvMode, 0, 0, 0) == MV_FALSE) { KdPrint(("channel %d: Set Features failed\n", channelIndex)); } /* Enable EDMA */ if (mvSataEnableChannelDma(pSataAdapter, channelIndex) == MV_FALSE) KdPrint(("Failed to enable DMA, channel=%d", channelIndex)); } int HPTLIBAPI fDeSetTCQ(PDevice pDev, int enable, int depth) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; IAL_ADAPTER_T *pAdapter = pSataAdapter->IALData; MV_CHANNEL *channelInfo = &(pAdapter->mvChannel[channelIndex]); int dmaActive = pSataChannel->queueCommandsEnabled; int ret = 0; if (dmaActive) { mvSataDisableChannelDma(pSataAdapter, channelIndex); mvSataFlushDmaQueue(pSataAdapter,channelIndex,MV_FLUSH_TYPE_CALLBACK); } if (enable) { if (pSataChannel->queuedDMA == MV_EDMA_MODE_NOT_QUEUED && (pSataChannel->identifyDevice[IDEN_SUPPORTED_COMMANDS2] & (0x2))) { UCHAR depth = ((pSataChannel->identifyDevice[IDEN_QUEUE_DEPTH]) & 0x1f) + 1; channelInfo->queueDepth = (depth==32)? 
31 : depth; mvSataConfigEdmaMode(pSataAdapter, channelIndex, MV_EDMA_MODE_QUEUED, depth); ret = 1; } } else { if (pSataChannel->queuedDMA != MV_EDMA_MODE_NOT_QUEUED) { channelInfo->queueDepth = 2; mvSataConfigEdmaMode(pSataAdapter, channelIndex, MV_EDMA_MODE_NOT_QUEUED, 0); ret = 1; } } if (dmaActive) mvSataEnableChannelDma(pSataAdapter,channelIndex); return ret; } int HPTLIBAPI fDeSetNCQ(PDevice pDev, int enable, int depth) { return 0; } int HPTLIBAPI fDeSetWriteCache(PDevice pDev, int enable) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; IAL_ADAPTER_T *pAdapter = pSataAdapter->IALData; MV_CHANNEL *channelInfo = &(pAdapter->mvChannel[channelIndex]); int dmaActive = pSataChannel->queueCommandsEnabled; int ret = 0; if (dmaActive) { mvSataDisableChannelDma(pSataAdapter, channelIndex); mvSataFlushDmaQueue(pSataAdapter,channelIndex,MV_FLUSH_TYPE_CALLBACK); } if ((pSataChannel->identifyDevice[82] & (0x20))) { if (enable) { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_ENABLE_WCACHE, 0, 0, 0, 0)) { channelInfo->writeCacheEnabled = MV_TRUE; ret = 1; } } else { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_DISABLE_WCACHE, 0, 0, 0, 0)) { channelInfo->writeCacheEnabled = MV_FALSE; ret = 1; } } } if (dmaActive) mvSataEnableChannelDma(pSataAdapter,channelIndex); return ret; } int HPTLIBAPI fDeSetReadAhead(PDevice pDev, int enable) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; IAL_ADAPTER_T *pAdapter = pSataAdapter->IALData; MV_CHANNEL *channelInfo = &(pAdapter->mvChannel[channelIndex]); int dmaActive = pSataChannel->queueCommandsEnabled; int ret = 0; if (dmaActive) { mvSataDisableChannelDma(pSataAdapter, channelIndex); mvSataFlushDmaQueue(pSataAdapter,channelIndex,MV_FLUSH_TYPE_CALLBACK); } if ((pSataChannel->identifyDevice[82] & (0x40))) { if (enable) { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_ENABLE_RLA, 0, 0, 0, 0)) { channelInfo->readAheadEnabled = MV_TRUE; ret = 1; } } else { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_DISABLE_RLA, 0, 0, 0, 0)) { channelInfo->readAheadEnabled = MV_FALSE; ret = 1; } } } if (dmaActive) mvSataEnableChannelDma(pSataAdapter,channelIndex); return ret; } #ifdef SUPPORT_ARRAY #define IdeRegisterVDevice fCheckArray #else void IdeRegisterVDevice(PDevice pDev) { PVDevice pVDev = Map2pVDevice(pDev); pVDev->VDeviceType = pDev->df_atapi? VD_ATAPI : pDev->df_removable_drive? 
VD_REMOVABLE : VD_SINGLE_DISK; pVDev->vf_online = 1; pVDev->VDeviceCapacity = pDev->dDeRealCapacity; pVDev->pfnSendCommand = pfnSendCommand[pVDev->VDeviceType]; pVDev->pfnDeviceFailed = pfnDeviceFailed[pVDev->VDeviceType]; } #endif static __inline PBUS_DMAMAP dmamap_get(struct IALAdapter * pAdapter) { PBUS_DMAMAP p = pAdapter->pbus_dmamap_list; if (p) pAdapter->pbus_dmamap_list = p-> next; return p; } static __inline void dmamap_put(PBUS_DMAMAP p) { p->next = p->pAdapter->pbus_dmamap_list; p->pAdapter->pbus_dmamap_list = p; } /*Since mtx not provide the initialize when declare, so we Final init here to initialize the global mtx*/ #if __FreeBSD_version >= 500000 #define override_kernel_driver() static void hpt_init(void *dummy) { override_kernel_driver(); mtx_init(&driver_lock, "hptsleeplock", NULL, MTX_DEF); } SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); #endif static int num_adapters = 0; static int init_adapter(IAL_ADAPTER_T *pAdapter) { PVBus _vbus_p = &pAdapter->VBus; MV_SATA_ADAPTER *pMvSataAdapter; int i, channel, rid; PVDevice pVDev; intrmask_t oldspl = lock_driver(); pAdapter->next = 0; if(gIal_Adapter == 0){ gIal_Adapter = pAdapter; pCurAdapter = gIal_Adapter; } else { pCurAdapter->next = pAdapter; pCurAdapter = pAdapter; } pAdapter->outstandingCommands = 0; pMvSataAdapter = &(pAdapter->mvSataAdapter); _vbus_p->OsExt = (void *)pAdapter; pMvSataAdapter->IALData = pAdapter; if (bus_dma_tag_create(bus_get_dma_tag(pAdapter->hpt_dev),/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (MAX_SG_DESCRIPTORS-1), /* maxsize */ MAX_SG_DESCRIPTORS, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ #if __FreeBSD_version>502000 busdma_lock_mutex, /* lockfunc */ &driver_lock, /* lockfuncarg */ #endif &pAdapter->io_dma_parent /* tag */)) { return ENXIO; } if (hptmv_allocate_edma_queues(pAdapter)) { MV_ERROR("RR18xx: Failed to allocate memory for EDMA queues\n"); unlock_driver(oldspl); return ENOMEM; } /* also map EPROM address */ rid = 0x10; if (!(pAdapter->mem_res = bus_alloc_resource(pAdapter->hpt_dev, SYS_RES_MEMORY, &rid, 0, ~0, MV_SATA_PCI_BAR0_SPACE_SIZE+0x40000, RF_ACTIVE)) || !(pMvSataAdapter->adapterIoBaseAddress = rman_get_virtual(pAdapter->mem_res))) { MV_ERROR("RR18xx: Failed to remap memory space\n"); hptmv_free_edma_queues(pAdapter); unlock_driver(oldspl); return ENXIO; } else { KdPrint(("RR18xx: io base address 0x%p\n", pMvSataAdapter->adapterIoBaseAddress)); } pMvSataAdapter->adapterId = num_adapters++; /* get the revision ID */ pMvSataAdapter->pciConfigRevisionId = pci_read_config(pAdapter->hpt_dev, PCIR_REVID, 1); pMvSataAdapter->pciConfigDeviceId = pci_get_device(pAdapter->hpt_dev); /* init RR18xx */ pMvSataAdapter->intCoalThre[0]= 1; pMvSataAdapter->intCoalThre[1]= 1; pMvSataAdapter->intTimeThre[0] = 1; pMvSataAdapter->intTimeThre[1] = 1; pMvSataAdapter->pciCommand = 0x0107E371; pMvSataAdapter->pciSerrMask = 0xd77fe6ul; pMvSataAdapter->pciInterruptMask = 0xd77fe6ul; pMvSataAdapter->mvSataEventNotify = hptmv_event_notify; if (mvSataInitAdapter(pMvSataAdapter) == MV_FALSE) { MV_ERROR("RR18xx[%d]: core failed to initialize the adapter\n", pMvSataAdapter->adapterId); unregister: bus_release_resource(pAdapter->hpt_dev, SYS_RES_MEMORY, rid, pAdapter->mem_res); hptmv_free_edma_queues(pAdapter); unlock_driver(oldspl); return ENXIO; } pAdapter->ver_601 = pMvSataAdapter->pcbVersion; #ifndef FOR_DEMO 
set_fail_leds(pMvSataAdapter, 0); #endif /* setup command blocks */ KdPrint(("Allocate command blocks\n")); _vbus_(pFreeCommands) = 0; pAdapter->pCommandBlocks = malloc(sizeof(struct _Command) * MAX_COMMAND_BLOCKS_FOR_EACH_VBUS, M_DEVBUF, M_NOWAIT); KdPrint(("pCommandBlocks:%p\n",pAdapter->pCommandBlocks)); if (!pAdapter->pCommandBlocks) { MV_ERROR("insufficient memory\n"); goto unregister; } for (i=0; ipCommandBlocks[i])); } /*Set up the bus_dmamap*/ pAdapter->pbus_dmamap = (PBUS_DMAMAP)malloc (sizeof(struct _BUS_DMAMAP) * MAX_QUEUE_COMM, M_DEVBUF, M_NOWAIT); if(!pAdapter->pbus_dmamap) { MV_ERROR("insufficient memory\n"); free(pAdapter->pCommandBlocks, M_DEVBUF); goto unregister; } memset((void *)pAdapter->pbus_dmamap, 0, sizeof(struct _BUS_DMAMAP) * MAX_QUEUE_COMM); pAdapter->pbus_dmamap_list = 0; for (i=0; i < MAX_QUEUE_COMM; i++) { PBUS_DMAMAP pmap = &(pAdapter->pbus_dmamap[i]); pmap->pAdapter = pAdapter; dmamap_put(pmap); if(bus_dmamap_create(pAdapter->io_dma_parent, 0, &pmap->dma_map)) { MV_ERROR("Can not allocate dma map\n"); free(pAdapter->pCommandBlocks, M_DEVBUF); free(pAdapter->pbus_dmamap, M_DEVBUF); goto unregister; } } /* setup PRD Tables */ KdPrint(("Allocate PRD Tables\n")); pAdapter->pFreePRDLink = 0; pAdapter->prdTableAddr = (PUCHAR)contigmalloc( (PRD_ENTRIES_SIZE*PRD_TABLES_FOR_VBUS + 32), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0ul); KdPrint(("prdTableAddr:%p\n",pAdapter->prdTableAddr)); if (!pAdapter->prdTableAddr) { MV_ERROR("insufficient PRD Tables\n"); goto unregister; } pAdapter->prdTableAlignedAddr = (PUCHAR)(((ULONG_PTR)pAdapter->prdTableAddr + 0x1f) & ~(ULONG_PTR)0x1fL); { PUCHAR PRDTable = pAdapter->prdTableAlignedAddr; for (i=0; ipFreePRDLink=%p\n",i,pAdapter->pFreePRDLink)); */ FreePRDTable(pAdapter, PRDTable); PRDTable += PRD_ENTRIES_SIZE; } } /* enable the adapter interrupts */ /* configure and start the connected channels*/ for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) { pAdapter->mvChannel[channel].online = MV_FALSE; if (mvSataIsStorageDeviceConnected(pMvSataAdapter, channel) == MV_TRUE) { KdPrint(("RR18xx[%d]: channel %d is connected\n", pMvSataAdapter->adapterId, channel)); if (hptmv_init_channel(pAdapter, channel) == 0) { if (mvSataConfigureChannel(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("RR18xx[%d]: Failed to configure channel" " %d\n",pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); } else { if (start_channel(pAdapter, channel)) { MV_ERROR("RR18xx[%d]: Failed to start channel," " channel=%d\n",pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); } pAdapter->mvChannel[channel].online = MV_TRUE; /* mvSataChannelSetEdmaLoopBackMode(pMvSataAdapter, channel, MV_TRUE);*/ } } } KdPrint(("pAdapter->mvChannel[channel].online:%x, channel:%d\n", pAdapter->mvChannel[channel].online, channel)); } #ifdef SUPPORT_ARRAY for(i = MAX_ARRAY_DEVICE - 1; i >= 0; i--) { pVDev = ArrayTables(i); mArFreeArrayTable(pVDev); } #endif KdPrint(("Initialize Devices\n")); for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) { MV_SATA_CHANNEL *pMvSataChannel = pMvSataAdapter->sataChannel[channel]; if (pMvSataChannel) { init_vdev_params(pAdapter, channel); IdeRegisterVDevice(&pAdapter->VDevices[channel].u.disk); } } #ifdef SUPPORT_ARRAY CheckArrayCritical(_VBUS_P0); #endif _vbus_p->nInstances = 1; fRegisterVdevice(pAdapter); for (channel=0;channelpVDevice[channel]; if (pVDev && pVDev->vf_online) fCheckBootable(pVDev); } #if defined(SUPPORT_ARRAY) && defined(_RAID5N_) init_raid5_memory(_VBUS_P0); 
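	/*
	 * The PRD tables allocated earlier in this function are carved out of
	 * one contiguous buffer and threaded onto a singly linked free list:
	 * FreePRDTable() stores the previous list head in the first
	 * pointer-sized bytes of a table, and AllocatePRDTable() pops that
	 * head again.  A minimal sketch of the same intrusive free-list idea
	 * (free_head, put_table and get_table are illustrative names only,
	 * not driver symbols):
	 *
	 *	static void *free_head;
	 *
	 *	static void put_table(void *t)
	 *	{
	 *		*(void **)t = free_head;
	 *		free_head = t;
	 *	}
	 *
	 *	static void *get_table(void)
	 *	{
	 *		void *t = free_head;
	 *		if (t != NULL)
	 *			free_head = *(void **)t;
	 *		return (t);
	 *	}
	 *
	 * A table only has to be large enough to hold a pointer while it sits
	 * on the free list, which PRD_ENTRIES_SIZE easily satisfies.
	 */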
_vbus_(r5).enable_write_back = 1; printf("RR18xx: RAID5 write-back %s\n", _vbus_(r5).enable_write_back? "enabled" : "disabled"); #endif mvSataUnmaskAdapterInterrupt(pMvSataAdapter); unlock_driver(oldspl); return 0; } int MvSataResetChannel(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channel) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)pMvSataAdapter->IALData; mvSataDisableChannelDma(pMvSataAdapter, channel); /* Flush pending commands */ mvSataFlushDmaQueue (pMvSataAdapter, channel, MV_FLUSH_TYPE_CALLBACK); /* Software reset channel */ if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform Software reset\n", pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); return -1; } /* Hardware reset channel */ if (mvSataChannelHardReset(pMvSataAdapter, channel)== MV_FALSE) { MV_ERROR("RR18xx [%d,%d] Failed to Hard reser the SATA channel\n", pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); return -1; } if (mvSataIsStorageDeviceConnected(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d] Failed to Connect Device\n", pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); return -1; }else { MV_ERROR("channel %d: perform recalibrate command", channel); if (!mvStorageDevATAExecuteNonUDMACommand(pMvSataAdapter, channel, MV_NON_UDMA_PROTOCOL_NON_DATA, MV_FALSE, NULL, /* pBuffer*/ 0, /* count */ 0, /*features*/ /* sectorCount */ 0, 0, /* lbaLow */ 0, /* lbaMid */ /* lbaHigh */ 0, 0, /* device */ /* command */ 0x10)) MV_ERROR("channel %d: recalibrate failed", channel); /* Set transfer mode */ if((mvStorageDevATASetFeatures(pMvSataAdapter, channel, MV_ATA_SET_FEATURES_TRANSFER, MV_ATA_TRANSFER_PIO_SLOW, 0, 0, 0) == MV_FALSE) || (mvStorageDevATASetFeatures(pMvSataAdapter, channel, MV_ATA_SET_FEATURES_TRANSFER, pAdapter->mvChannel[channel].maxPioModeSupported, 0, 0, 0) == MV_FALSE) || (mvStorageDevATASetFeatures(pMvSataAdapter, channel, MV_ATA_SET_FEATURES_TRANSFER, pAdapter->mvChannel[channel].maxUltraDmaModeSupported, 0, 0, 0) == MV_FALSE) ) { MV_ERROR("channel %d: Set Features failed", channel); hptmv_free_channel(pAdapter, channel); return -1; } /* Enable EDMA */ if (mvSataEnableChannelDma(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("Failed to enable DMA, channel=%d", channel); hptmv_free_channel(pAdapter, channel); return -1; } } return 0; } static int fResetActiveCommands(PVBus _vbus_p) { MV_SATA_ADAPTER *pMvSataAdapter = &((IAL_ADAPTER_T *)_vbus_p->OsExt)->mvSataAdapter; MV_U8 channel; for (channel=0;channel< MV_SATA_CHANNELS_NUM;channel++) { if (pMvSataAdapter->sataChannel[channel] && pMvSataAdapter->sataChannel[channel]->outstandingCommands) MvSataResetChannel(pMvSataAdapter,channel); } return 0; } void fCompleteAllCommandsSynchronously(PVBus _vbus_p) { UINT cont; ULONG ticks = 0; MV_U8 channel; MV_SATA_ADAPTER *pMvSataAdapter = &((IAL_ADAPTER_T *)_vbus_p->OsExt)->mvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel; do { check_cmds: cont = 0; CheckPendingCall(_VBUS_P0); #ifdef _RAID5N_ dataxfer_poll(); xor_poll(); #endif for (channel=0;channel< MV_SATA_CHANNELS_NUM;channel++) { pMvSataChannel = pMvSataAdapter->sataChannel[channel]; if (pMvSataChannel && pMvSataChannel->outstandingCommands) { while (pMvSataChannel->outstandingCommands) { if (!mvSataInterruptServiceRoutine(pMvSataAdapter)) { StallExec(1000); if (ticks++ > 3000) { MvSataResetChannel(pMvSataAdapter,channel); goto check_cmds; } } else ticks = 0; } cont = 1; } } } while (cont); } void 
fResetVBus(_VBUS_ARG0) { KdPrint(("fMvResetBus(%p)", _vbus_p)); /* some commands may already finished. */ CheckPendingCall(_VBUS_P0); fResetActiveCommands(_vbus_p); /* * the other pending commands may still be finished successfully. */ fCompleteAllCommandsSynchronously(_vbus_p); /* Now there should be no pending commands. No more action needed. */ CheckIdleCall(_VBUS_P0); KdPrint(("fMvResetBus() done")); } /*No rescan function*/ void fRescanAllDevice(_VBUS_ARG0) { } static MV_BOOLEAN CommandCompletionCB(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channelNum, MV_COMPLETION_TYPE comp_type, MV_VOID_PTR commandId, MV_U16 responseFlags, MV_U32 timeStamp, MV_STORAGE_DEVICE_REGISTERS *registerStruct) { PCommand pCmd = (PCommand) commandId; _VBUS_INST(pCmd->pVDevice->pVBus) if (pCmd->uScratch.sata_param.prdAddr) FreePRDTable(pMvSataAdapter->IALData,pCmd->uScratch.sata_param.prdAddr); switch (comp_type) { case MV_COMPLETION_TYPE_NORMAL: pCmd->Result = RETURN_SUCCESS; break; case MV_COMPLETION_TYPE_ABORT: pCmd->Result = RETURN_BUS_RESET; break; case MV_COMPLETION_TYPE_ERROR: MV_ERROR("IAL: COMPLETION ERROR, adapter %d, channel %d, flags=%x\n", pMvSataAdapter->adapterId, channelNum, responseFlags); if (responseFlags & 4) { MV_ERROR("ATA regs: error %x, sector count %x, LBA low %x, LBA mid %x," " LBA high %x, device %x, status %x\n", registerStruct->errorRegister, registerStruct->sectorCountRegister, registerStruct->lbaLowRegister, registerStruct->lbaMidRegister, registerStruct->lbaHighRegister, registerStruct->deviceRegister, registerStruct->statusRegister); } /*We can't do handleEdmaError directly here, because CommandCompletionCB is called by * mv's ISR, if we retry the command, than the internel data structure may be destroyed*/ pCmd->uScratch.sata_param.responseFlags = responseFlags; pCmd->uScratch.sata_param.bIdeStatus = registerStruct->statusRegister; pCmd->uScratch.sata_param.errorRegister = registerStruct->errorRegister; pCmd->pVDevice->u.disk.QueueLength--; CallAfterReturn(_VBUS_P (DPC_PROC)handleEdmaError,pCmd); return TRUE; default: MV_ERROR(" Unknown completion type (%d)\n", comp_type); return MV_FALSE; } if (pCmd->uCmd.Ide.Command == IDE_COMMAND_VERIFY && pCmd->uScratch.sata_param.cmd_priv > 1) { pCmd->uScratch.sata_param.cmd_priv --; return TRUE; } pCmd->pVDevice->u.disk.QueueLength--; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return TRUE; } void fDeviceSendCommand(_VBUS_ARG PCommand pCmd) { MV_SATA_EDMA_PRD_ENTRY *pPRDTable = 0; MV_SATA_ADAPTER *pMvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel; PVDevice pVDevice = pCmd->pVDevice; PDevice pDevice = &pVDevice->u.disk; LBA_T Lba = pCmd->uCmd.Ide.Lba; USHORT nSector = pCmd->uCmd.Ide.nSectors; MV_QUEUE_COMMAND_RESULT result; MV_QUEUE_COMMAND_INFO commandInfo; MV_UDMA_COMMAND_PARAMS *pUdmaParams = &commandInfo.commandParams.udmaCommand; MV_NONE_UDMA_COMMAND_PARAMS *pNoUdmaParams = &commandInfo.commandParams.NoneUdmaCommand; MV_BOOLEAN is48bit; MV_U8 channel; int i=0; DECLARE_BUFFER(FPSCAT_GATH, tmpSg); if (!pDevice->df_on_line) { MV_ERROR("Device is offline"); pCmd->Result = RETURN_BAD_DEVICE; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } pDevice->HeadPosition = pCmd->uCmd.Ide.Lba + pCmd->uCmd.Ide.nSectors; pMvSataChannel = pDevice->mv; pMvSataAdapter = pMvSataChannel->mvSataAdapter; channel = pMvSataChannel->channelNumber; /* old RAID0 has hidden lba. Remember to clear dDeHiddenLba when delete array! 
*/ Lba += pDevice->dDeHiddenLba; /* check LBA */ if (Lba+nSector-1 > pDevice->dDeRealCapacity) { pCmd->Result = RETURN_INVALID_REQUEST; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } /* * always use 48bit LBA if drive supports it. * Some Seagate drives report error if you use a 28-bit command * to access sector 0xfffffff. */ is48bit = pMvSataChannel->lba48Address; switch (pCmd->uCmd.Ide.Command) { case IDE_COMMAND_READ: case IDE_COMMAND_WRITE: if (pDevice->bDeModeSetting<8) goto pio; commandInfo.type = MV_QUEUED_COMMAND_TYPE_UDMA; pUdmaParams->isEXT = is48bit; pUdmaParams->numOfSectors = nSector; pUdmaParams->lowLBAAddress = Lba; pUdmaParams->highLBAAddress = 0; pUdmaParams->prdHighAddr = 0; pUdmaParams->callBack = CommandCompletionCB; pUdmaParams->commandId = (MV_VOID_PTR )pCmd; if(pCmd->uCmd.Ide.Command == IDE_COMMAND_READ) pUdmaParams->readWrite = MV_UDMA_TYPE_READ; else pUdmaParams->readWrite = MV_UDMA_TYPE_WRITE; if (pCmd->pSgTable && pCmd->cf_physical_sg) { FPSCAT_GATH sg1=tmpSg, sg2=pCmd->pSgTable; do { *sg1++=*sg2; } while ((sg2++->wSgFlag & SG_FLAG_EOT)==0); } else { if (!pCmd->pfnBuildSgl || !pCmd->pfnBuildSgl(_VBUS_P pCmd, tmpSg, 0)) { pio: mvSataDisableChannelDma(pMvSataAdapter, channel); mvSataFlushDmaQueue(pMvSataAdapter, channel, MV_FLUSH_TYPE_CALLBACK); if (pCmd->pSgTable && pCmd->cf_physical_sg==0) { FPSCAT_GATH sg1=tmpSg, sg2=pCmd->pSgTable; do { *sg1++=*sg2; } while ((sg2++->wSgFlag & SG_FLAG_EOT)==0); } else { if (!pCmd->pfnBuildSgl || !pCmd->pfnBuildSgl(_VBUS_P pCmd, tmpSg, 1)) { pCmd->Result = RETURN_NEED_LOGICAL_SG; goto finish_cmd; } } do { ULONG size = tmpSg->wSgSize? tmpSg->wSgSize : 0x10000; ULONG_PTR addr = tmpSg->dSgAddress; if (size & 0x1ff) { pCmd->Result = RETURN_INVALID_REQUEST; goto finish_cmd; } if (mvStorageDevATAExecuteNonUDMACommand(pMvSataAdapter, channel, (pCmd->cf_data_out)?MV_NON_UDMA_PROTOCOL_PIO_DATA_OUT:MV_NON_UDMA_PROTOCOL_PIO_DATA_IN, is48bit, (MV_U16_PTR)addr, size >> 1, /* count */ 0, /* features N/A */ (MV_U16)(size>>9), /*sector count*/ (MV_U16)( (is48bit? (MV_U16)((Lba >> 16) & 0xFF00) : 0 ) | (UCHAR)(Lba & 0xFF) ), /*lbalow*/ (MV_U16)((Lba >> 8) & 0xFF), /* lbaMid */ (MV_U16)((Lba >> 16) & 0xFF),/* lbaHigh */ (MV_U8)(0x40 | (is48bit ? 0 : (UCHAR)(Lba >> 24) & 0xFF )),/* device */ (MV_U8)(is48bit ? (pCmd->cf_data_in?IDE_COMMAND_READ_EXT:IDE_COMMAND_WRITE_EXT):pCmd->uCmd.Ide.Command) )==MV_FALSE) { pCmd->Result = RETURN_IDE_ERROR; goto finish_cmd; } Lba += size>>9; if(Lba & 0xF0000000) is48bit = MV_TRUE; } while ((tmpSg++->wSgFlag & SG_FLAG_EOT)==0); pCmd->Result = RETURN_SUCCESS; finish_cmd: mvSataEnableChannelDma(pMvSataAdapter,channel); CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } } pPRDTable = (MV_SATA_EDMA_PRD_ENTRY *) AllocatePRDTable(pMvSataAdapter->IALData); KdPrint(("pPRDTable:%p\n",pPRDTable)); if (!pPRDTable) { pCmd->Result = RETURN_DEVICE_BUSY; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); HPT_ASSERT(0); return; } do{ pPRDTable[i].highBaseAddr = (sizeof(tmpSg->dSgAddress)>4 ? 
(MV_U32)(tmpSg->dSgAddress>>32) : 0); pPRDTable[i].flags = (MV_U16)tmpSg->wSgFlag; pPRDTable[i].byteCount = (MV_U16)tmpSg->wSgSize; pPRDTable[i].lowBaseAddr = (MV_U32)tmpSg->dSgAddress; pPRDTable[i].reserved = 0; i++; }while((tmpSg++->wSgFlag & SG_FLAG_EOT)==0); pUdmaParams->prdLowAddr = (ULONG)fOsPhysicalAddress(pPRDTable); if ((pUdmaParams->numOfSectors == 256) && (pMvSataChannel->lba48Address == MV_FALSE)) { pUdmaParams->numOfSectors = 0; } pCmd->uScratch.sata_param.prdAddr = (PVOID)pPRDTable; result = mvSataQueueCommand(pMvSataAdapter, channel, &commandInfo); if (result != MV_QUEUE_COMMAND_RESULT_OK) { queue_failed: switch (result) { case MV_QUEUE_COMMAND_RESULT_BAD_LBA_ADDRESS: MV_ERROR("IAL Error: Edma Queue command failed. Bad LBA " "LBA[31:0](0x%08x)\n", pUdmaParams->lowLBAAddress); pCmd->Result = RETURN_IDE_ERROR; break; case MV_QUEUE_COMMAND_RESULT_QUEUED_MODE_DISABLED: MV_ERROR("IAL Error: Edma Queue command failed. EDMA" " disabled adapter %d channel %d\n", pMvSataAdapter->adapterId, channel); mvSataEnableChannelDma(pMvSataAdapter,channel); pCmd->Result = RETURN_IDE_ERROR; break; case MV_QUEUE_COMMAND_RESULT_FULL: MV_ERROR("IAL Error: Edma Queue command failed. Queue is" " Full adapter %d channel %d\n", pMvSataAdapter->adapterId, channel); pCmd->Result = RETURN_DEVICE_BUSY; break; case MV_QUEUE_COMMAND_RESULT_BAD_PARAMS: MV_ERROR("IAL Error: Edma Queue command failed. (Bad " "Params), pMvSataAdapter: %p, pSataChannel: %p.\n", pMvSataAdapter, pMvSataAdapter->sataChannel[channel]); pCmd->Result = RETURN_IDE_ERROR; break; default: MV_ERROR("IAL Error: Bad result value (%d) from queue" " command\n", result); pCmd->Result = RETURN_IDE_ERROR; } if(pPRDTable) FreePRDTable(pMvSataAdapter->IALData,pPRDTable); CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); } pDevice->QueueLength++; return; case IDE_COMMAND_VERIFY: commandInfo.type = MV_QUEUED_COMMAND_TYPE_NONE_UDMA; pNoUdmaParams->bufPtr = NULL; pNoUdmaParams->callBack = CommandCompletionCB; pNoUdmaParams->commandId = (MV_VOID_PTR)pCmd; pNoUdmaParams->count = 0; pNoUdmaParams->features = 0; pNoUdmaParams->protocolType = MV_NON_UDMA_PROTOCOL_NON_DATA; pCmd->uScratch.sata_param.cmd_priv = 1; if (pMvSataChannel->lba48Address == MV_TRUE){ pNoUdmaParams->command = MV_ATA_COMMAND_READ_VERIFY_SECTORS_EXT; pNoUdmaParams->isEXT = MV_TRUE; pNoUdmaParams->lbaHigh = (MV_U16)((Lba & 0xff0000) >> 16); pNoUdmaParams->lbaMid = (MV_U16)((Lba & 0xff00) >> 8); pNoUdmaParams->lbaLow = (MV_U16)(((Lba & 0xff000000) >> 16)| (Lba & 0xff)); pNoUdmaParams->sectorCount = nSector; pNoUdmaParams->device = 0x40; result = mvSataQueueCommand(pMvSataAdapter, channel, &commandInfo); if (result != MV_QUEUE_COMMAND_RESULT_OK){ goto queue_failed; } return; } else{ pNoUdmaParams->command = MV_ATA_COMMAND_READ_VERIFY_SECTORS; pNoUdmaParams->isEXT = MV_FALSE; pNoUdmaParams->lbaHigh = (MV_U16)((Lba & 0xff0000) >> 16); pNoUdmaParams->lbaMid = (MV_U16)((Lba & 0xff00) >> 8); pNoUdmaParams->lbaLow = (MV_U16)(Lba & 0xff); pNoUdmaParams->sectorCount = 0xff & nSector; pNoUdmaParams->device = (MV_U8)(0x40 | ((Lba & 0xf000000) >> 24)); pNoUdmaParams->callBack = CommandCompletionCB; result = mvSataQueueCommand(pMvSataAdapter, channel, &commandInfo); /*FIXME: how about the commands already queued? 
but marvel also forgets to consider this*/ if (result != MV_QUEUE_COMMAND_RESULT_OK){ goto queue_failed; } } break; default: pCmd->Result = RETURN_INVALID_REQUEST; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); break; } } /********************************************************** * * Probe the hostadapter. * **********************************************************/ static int hpt_probe(device_t dev) { if ((pci_get_vendor(dev) == MV_SATA_VENDOR_ID) && (pci_get_device(dev) == MV_SATA_DEVICE_ID_5081 #ifdef FOR_DEMO || pci_get_device(dev) == MV_SATA_DEVICE_ID_5080 #endif )) { KdPrintI((CONTROLLER_NAME " found\n")); device_set_desc(dev, CONTROLLER_NAME); return 0; } else return(ENXIO); } /*********************************************************** * * Auto configuration: attach and init a host adapter. * ***********************************************************/ static int hpt_attach(device_t dev) { IAL_ADAPTER_T * pAdapter = device_get_softc(dev); int rid; union ccb *ccb; struct cam_devq *devq; struct cam_sim *hpt_vsim; printf("%s Version %s \n", DRIVER_NAME, DRIVER_VERSION); if (!pAdapter) { pAdapter = (IAL_ADAPTER_T *)malloc(sizeof (IAL_ADAPTER_T), M_DEVBUF, M_NOWAIT); #if __FreeBSD_version > 410000 device_set_softc(dev, (void *)pAdapter); #else device_set_driver(dev, (driver_t *)pAdapter); #endif } if (!pAdapter) return (ENOMEM); bzero(pAdapter, sizeof(IAL_ADAPTER_T)); pAdapter->hpt_dev = dev; rid = init_adapter(pAdapter); if (rid) return rid; rid = 0; if ((pAdapter->hpt_irq = bus_alloc_resource(pAdapter->hpt_dev, SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { hpt_printk(("can't allocate interrupt\n")); return(ENXIO); } #if __FreeBSD_version <700000 if (bus_setup_intr(pAdapter->hpt_dev, pAdapter->hpt_irq, INTR_TYPE_CAM, hpt_intr, pAdapter, &pAdapter->hpt_intr)) #else if (bus_setup_intr(pAdapter->hpt_dev, pAdapter->hpt_irq, INTR_TYPE_CAM, NULL, hpt_intr, pAdapter, &pAdapter->hpt_intr)) #endif { hpt_printk(("can't set up interrupt\n")); free(pAdapter, M_DEVBUF); return(ENXIO); } if((ccb = (union ccb *)malloc(sizeof(*ccb), M_DEVBUF, M_WAITOK)) != (union ccb*)NULL) { bzero(ccb, sizeof(*ccb)); ccb->ccb_h.pinfo.priority = 1; ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX; } else { return ENOMEM; } /* * Create the device queue for our SIM(s). 
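 *
 * The code below follows the usual CAM bring-up order: allocate a device
 * queue, wrap it in a SIM bound to this driver's action and poll routines,
 * register the SIM as a bus, then build a wildcard path used to request
 * async (AC_LOST_DEVICE) callbacks.  A condensed sketch of that order, with
 * error handling omitted (MAX_IO, softc and unit are placeholders here,
 * not driver symbols):
 *
 *	struct cam_devq *devq = cam_simq_alloc(MAX_IO);
 *	struct cam_sim *sim = cam_sim_alloc(hpt_action, hpt_poll, "hpt",
 *	    softc, unit, &Giant, 1, MAX_IO, devq);
 *	xpt_bus_register(sim, dev, 0);
 *	xpt_create_path(&path, NULL, cam_sim_path(sim),
 *	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 *
 * The cam_sim_alloc() and xpt_bus_register() forms shown are the FreeBSD 7
 * and later signatures; the version checks below select between these and
 * the older variants.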
*/ if((devq = cam_simq_alloc(8/*MAX_QUEUE_COMM*/)) == NULL) { KdPrint(("ENXIO\n")); return ENOMEM; } /* * Construct our SIM entry */ #if __FreeBSD_version <700000 hpt_vsim = cam_sim_alloc(hpt_action, hpt_poll, __str(PROC_DIR_NAME), pAdapter, device_get_unit(pAdapter->hpt_dev), 1, 8, devq); #else hpt_vsim = cam_sim_alloc(hpt_action, hpt_poll, __str(PROC_DIR_NAME), pAdapter, device_get_unit(pAdapter->hpt_dev), &Giant, 1, 8, devq); #endif if (hpt_vsim == NULL) { cam_simq_free(devq); return ENOMEM; } #if __FreeBSD_version <700000 if (xpt_bus_register(hpt_vsim, 0) != CAM_SUCCESS) #else if (xpt_bus_register(hpt_vsim, dev, 0) != CAM_SUCCESS) #endif { cam_sim_free(hpt_vsim, /*free devq*/ TRUE); hpt_vsim = NULL; return ENXIO; } if(xpt_create_path(&pAdapter->path, /*periph */ NULL, cam_sim_path(hpt_vsim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(hpt_vsim)); cam_sim_free(hpt_vsim, /*free_devq*/TRUE); hpt_vsim = NULL; return ENXIO; } xpt_setup_ccb(&(ccb->ccb_h), pAdapter->path, /*priority*/5); ccb->ccb_h.func_code = XPT_SASYNC_CB; ccb->csa.event_enable = AC_LOST_DEVICE; ccb->csa.callback = hpt_async; ccb->csa.callback_arg = hpt_vsim; xpt_action((union ccb *)ccb); free(ccb, M_DEVBUF); if (device_get_unit(dev) == 0) { /* Start the work thread. XXX */ launch_worker_thread(); } return 0; } static int hpt_detach(device_t dev) { return (EBUSY); } /*************************************************************** * The poll function is used to simulate the interrupt when * the interrupt subsystem is not functioning. * ***************************************************************/ static void hpt_poll(struct cam_sim *sim) { hpt_intr((void *)cam_sim_softc(sim)); } /**************************************************************** * Name: hpt_intr * Description: Interrupt handler. 
****************************************************************/ static void hpt_intr(void *arg) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)arg; intrmask_t oldspl = lock_driver(); /* KdPrintI(("----- Entering Isr() -----\n")); */ if (mvSataInterruptServiceRoutine(&pAdapter->mvSataAdapter) == MV_TRUE) { _VBUS_INST(&pAdapter->VBus) CheckPendingCall(_VBUS_P0); } /* KdPrintI(("----- Leaving Isr() -----\n")); */ unlock_driver(oldspl); } /********************************************************** * Asynchronous Events *********************************************************/ #if (!defined(UNREFERENCED_PARAMETER)) #define UNREFERENCED_PARAMETER(x) (void)(x) #endif static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { /* debug XXXX */ panic("Here"); UNREFERENCED_PARAMETER(callback_arg); UNREFERENCED_PARAMETER(code); UNREFERENCED_PARAMETER(path); UNREFERENCED_PARAMETER(arg); } static void FlushAdapter(IAL_ADAPTER_T *pAdapter) { int i; hpt_printk(("flush all devices\n")); /* flush all devices */ for (i=0; iVBus.pVDevice[i]; if(pVDev) fFlushVDev(pVDev); } } static int hpt_shutdown(device_t dev) { IAL_ADAPTER_T *pAdapter; pAdapter = device_get_softc(dev); if (pAdapter == NULL) return (EINVAL); EVENTHANDLER_DEREGISTER(shutdown_final, pAdapter->eh); FlushAdapter(pAdapter); /* give the flush some time to happen, *otherwise "shutdown -p now" will make file system corrupted */ DELAY(1000 * 1000 * 5); return 0; } void Check_Idle_Call(IAL_ADAPTER_T *pAdapter) { _VBUS_INST(&pAdapter->VBus) if (mWaitingForIdle(_VBUS_P0)) { CheckIdleCall(_VBUS_P0); #ifdef SUPPORT_ARRAY { int i; PVDevice pArray; for(i = 0; i < MAX_ARRAY_PER_VBUS; i++){ if ((pArray=ArrayTables(i))->u.array.dArStamp==0) continue; else if (pArray->u.array.rf_auto_rebuild) { KdPrint(("auto rebuild.\n")); pArray->u.array.rf_auto_rebuild = 0; hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pArray, DUPLICATE); } } } #endif } /* launch the awaiting commands blocked by mWaitingForIdle */ while(pAdapter->pending_Q!= NULL) { _VBUS_INST(&pAdapter->VBus) union ccb *ccb = (union ccb *)pAdapter->pending_Q->ccb_h.ccb_ccb_ptr; hpt_free_ccb(&pAdapter->pending_Q, ccb); CallAfterReturn(_VBUS_P (DPC_PROC)OsSendCommand, ccb); } } static void ccb_done(union ccb *ccb) { PBUS_DMAMAP pmap = (PBUS_DMAMAP)ccb->ccb_adapter; IAL_ADAPTER_T * pAdapter = pmap->pAdapter; KdPrintI(("ccb_done: ccb %p status %x\n", ccb, ccb->ccb_h.status)); dmamap_put(pmap); xpt_done(ccb); pAdapter->outstandingCommands--; if (pAdapter->outstandingCommands == 0) { if(DPC_Request_Nums == 0) Check_Idle_Call(pAdapter); } } /**************************************************************** * Name: hpt_action * Description: Process a queued command from the CAM layer. * Parameters: sim - Pointer to SIM object * ccb - Pointer to SCSI command structure. 
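 *
 * A SIM action routine is essentially a dispatcher on ccb_h.func_code: it
 * either starts the requested I/O or fills in a result and completes the
 * CCB with xpt_done().  Stripped to a skeleton (start_io, reset_bus and
 * softc are placeholder names; the real routine below adds locking, idle
 * checks, geometry and path-inquiry handling):
 *
 *	switch (ccb->ccb_h.func_code) {
 *	case XPT_SCSI_IO:
 *		start_io(softc, ccb);
 *		return;
 *	case XPT_RESET_BUS:
 *		reset_bus(softc);
 *		ccb->ccb_h.status = CAM_REQ_CMP;
 *		break;
 *	default:
 *		ccb->ccb_h.status = CAM_REQ_INVALID;
 *		break;
 *	}
 *	xpt_done(ccb);
 *
 * In this driver the XPT_SCSI_IO leg corresponds to OsSendCommand() and the
 * reset leg to fResetVBus().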
****************************************************************/ void hpt_action(struct cam_sim *sim, union ccb *ccb) { intrmask_t oldspl; IAL_ADAPTER_T * pAdapter = (IAL_ADAPTER_T *) cam_sim_softc(sim); PBUS_DMAMAP pmap; _VBUS_INST(&pAdapter->VBus) CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("hpt_action\n")); KdPrint(("hpt_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code)); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ { /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return; } if (ccb->ccb_h.target_id >= MAX_VDEVICE_PER_VBUS || pAdapter->VBus.pVDevice[ccb->ccb_h.target_id]==0) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } oldspl = lock_driver(); if (pAdapter->outstandingCommands==0 && DPC_Request_Nums==0) Check_Idle_Call(pAdapter); pmap = dmamap_get(pAdapter); HPT_ASSERT(pmap); ccb->ccb_adapter = pmap; memset((void *)pmap->psg, 0, sizeof(pmap->psg)); if (mWaitingForIdle(_VBUS_P0)) hpt_queue_ccb(&pAdapter->pending_Q, ccb); else OsSendCommand(_VBUS_P ccb); unlock_driver(oldspl); /* KdPrint(("leave scsiio\n")); */ break; } case XPT_RESET_BUS: KdPrint(("reset bus\n")); oldspl = lock_driver(); fResetVBus(_VBUS_P0); unlock_driver(oldspl); xpt_done(ccb); break; case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ case XPT_EN_LUN: /* Enable LUN as a target */ case XPT_TARGET_IO: /* Execute target I/O request */ case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: /* XXX Implement */ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; case XPT_CALC_GEOMETRY: #if __FreeBSD_version >= 500000 cam_calc_geometry(&ccb->ccg, 1); #else { struct ccb_calc_geometry *ccg; u_int32_t size_mb; u_int32_t secs_per_cylinder; ccg = &ccb->ccg; size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); if (size_mb > 1024 ) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; } #endif xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? 
*/ cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; /* Not necessary to reset bus */ cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = MAX_VDEVICE_PER_VBUS; cpi->max_lun = 0; cpi->initiator_id = MAX_VDEVICE_PER_VBUS; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: KdPrint(("invalid cmd\n")); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } /* KdPrint(("leave hpt_action..............\n")); */ } /* shall be called at lock_driver() */ static void hpt_queue_ccb(union ccb **ccb_Q, union ccb *ccb) { if(*ccb_Q == NULL) ccb->ccb_h.ccb_ccb_ptr = ccb; else { ccb->ccb_h.ccb_ccb_ptr = (*ccb_Q)->ccb_h.ccb_ccb_ptr; (*ccb_Q)->ccb_h.ccb_ccb_ptr = (char *)ccb; } *ccb_Q = ccb; } /* shall be called at lock_driver() */ static void hpt_free_ccb(union ccb **ccb_Q, union ccb *ccb) { union ccb *TempCCB; TempCCB = *ccb_Q; if(ccb->ccb_h.ccb_ccb_ptr == ccb) /*it means SCpnt is the last one in CURRCMDs*/ *ccb_Q = NULL; else { while(TempCCB->ccb_h.ccb_ccb_ptr != (char *)ccb) TempCCB = (union ccb *)TempCCB->ccb_h.ccb_ccb_ptr; TempCCB->ccb_h.ccb_ccb_ptr = ccb->ccb_h.ccb_ccb_ptr; if(*ccb_Q == ccb) *ccb_Q = TempCCB; } } #ifdef SUPPORT_ARRAY /*************************************************************************** * Function: hpt_worker_thread * Description: Do background rebuilding. Execute in kernel thread context. * Returns: None ***************************************************************************/ static void hpt_worker_thread(void) { intrmask_t oldspl; for(;;) { while (DpcQueue_First!=DpcQueue_Last) { ST_HPT_DPC p; oldspl = lock_driver(); p = DpcQueue[DpcQueue_First]; DpcQueue_First++; DpcQueue_First %= MAX_DPC; DPC_Request_Nums++; unlock_driver(oldspl); p.dpc(p.pAdapter, p.arg, p.flags); oldspl = lock_driver(); DPC_Request_Nums--; /* since we may have prevented Check_Idle_Call, do it here */ if (DPC_Request_Nums==0) { if (p.pAdapter->outstandingCommands == 0) { _VBUS_INST(&p.pAdapter->VBus); Check_Idle_Call(p.pAdapter); CheckPendingCall(_VBUS_P0); } } unlock_driver(oldspl); /*Schedule out*/ #if (__FreeBSD_version < 500000) YIELD_THREAD; #else #if (__FreeBSD_version > 700033) pause("sched", 1); #else tsleep((caddr_t)hpt_worker_thread, PPAUSE, "sched", 1); #endif #endif if (SIGISMEMBER(curproc->p_siglist, SIGSTOP)) { /* abort rebuilding process. 
*/ IAL_ADAPTER_T *pAdapter; PVDevice pArray; PVBus _vbus_p; int i; pAdapter = gIal_Adapter; while(pAdapter != 0){ _vbus_p = &pAdapter->VBus; for (i=0;iu.array.dArStamp==0) continue; else if (pArray->u.array.rf_rebuilding || pArray->u.array.rf_verifying || pArray->u.array.rf_initializing) { pArray->u.array.rf_abort_rebuild = 1; } } pAdapter = pAdapter->next; } } } /*Remove this debug option*/ /* #ifdef DEBUG if (SIGISMEMBER(curproc->p_siglist, SIGSTOP)) #if (__FreeBSD_version > 700033) pause("hptrdy", 2*hz); #else tsleep((caddr_t)hpt_worker_thread, PPAUSE, "hptrdy", 2*hz); #endif #endif */ #if (__FreeBSD_version >= 800002) kproc_suspend_check(curproc); #elif (__FreeBSD_version >= 500043) kthread_suspend_check(curproc); #else kproc_suspend_loop(curproc); #endif #if (__FreeBSD_version > 700033) pause("hptrdy", 2*hz); /* wait for something to do */ #else tsleep((caddr_t)hpt_worker_thread, PPAUSE, "hptrdy", 2*hz); /* wait for something to do */ #endif } } static struct proc *hptdaemonproc; static struct kproc_desc hpt_kp = { "hpt_wt", hpt_worker_thread, &hptdaemonproc }; /*Start this thread in the hpt_attach, to prevent kernel from loading it without our controller.*/ static void launch_worker_thread(void) { IAL_ADAPTER_T *pAdapTemp; kproc_start(&hpt_kp); for (pAdapTemp = gIal_Adapter; pAdapTemp; pAdapTemp = pAdapTemp->next) { _VBUS_INST(&pAdapTemp->VBus) int i; PVDevice pVDev; for(i = 0; i < MAX_ARRAY_PER_VBUS; i++) if ((pVDev=ArrayTables(i))->u.array.dArStamp==0) continue; else{ if (pVDev->u.array.rf_need_rebuild && !pVDev->u.array.rf_rebuilding) hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapTemp, pVDev, (UCHAR)((pVDev->u.array.CriticalMembers || pVDev->VDeviceType == VD_RAID_1)? DUPLICATE : REBUILD_PARITY)); } } /* * hpt_worker_thread needs to be suspended after shutdown sync, when fs sync finished. */ #if (__FreeBSD_version < 500043) EVENTHANDLER_REGISTER(shutdown_post_sync, shutdown_kproc, hptdaemonproc, SHUTDOWN_PRI_FIRST); #else EVENTHANDLER_REGISTER(shutdown_post_sync, kproc_shutdown, hptdaemonproc, SHUTDOWN_PRI_FIRST); #endif } /* *SYSINIT(hptwt, SI_SUB_KTHREAD_IDLE, SI_ORDER_FIRST, launch_worker_thread, NULL); */ #endif /********************************************************************************/ int HPTLIBAPI fOsBuildSgl(_VBUS_ARG PCommand pCmd, FPSCAT_GATH pSg, int logical) { - union ccb *ccb = (union ccb *)pCmd->pOrgCommand; - bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; - int idx; - if(logical) { - if (ccb->ccb_h.flags & CAM_DATA_PHYS) - panic("physical address unsupported"); - - if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { - if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) - panic("physical address unsupported"); - - for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { - pSg[idx].dSgAddress = (ULONG_PTR)(UCHAR *)sgList[idx].ds_addr; - pSg[idx].wSgSize = sgList[idx].ds_len; - pSg[idx].wSgFlag = (idx==ccb->csio.sglist_cnt-1)? 
SG_FLAG_EOT : 0; - } - } - else { - pSg->dSgAddress = (ULONG_PTR)(UCHAR *)ccb->csio.data_ptr; - pSg->wSgSize = ccb->csio.dxfer_len; - pSg->wSgFlag = SG_FLAG_EOT; - } - return TRUE; - } - /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } /*******************************************************************************/ ULONG HPTLIBAPI GetStamp(void) { /* * the system variable, ticks, can't be used since it hasn't yet been active * when our driver starts (ticks==0, it's a invalid stamp value) */ ULONG stamp; do { stamp = random(); } while (stamp==0); return stamp; } static void SetInquiryData(PINQUIRYDATA inquiryData, PVDevice pVDev) { int i; IDENTIFY_DATA2 *pIdentify = (IDENTIFY_DATA2*)pVDev->u.disk.mv->identifyDevice; inquiryData->DeviceType = T_DIRECT; /*DIRECT_ACCESS_DEVICE*/ inquiryData->AdditionalLength = (UCHAR)(sizeof(INQUIRYDATA) - 5); #ifndef SERIAL_CMDS inquiryData->CommandQueue = 1; #endif switch(pVDev->VDeviceType) { case VD_SINGLE_DISK: case VD_ATAPI: case VD_REMOVABLE: /* Set the removable bit, if applicable. */ if ((pVDev->u.disk.df_removable_drive) || (pIdentify->GeneralConfiguration & 0x80)) inquiryData->RemovableMedia = 1; /* Fill in vendor identification fields. */ for (i = 0; i < 20; i += 2) { inquiryData->VendorId[i] = ((PUCHAR)pIdentify->ModelNumber)[i + 1]; inquiryData->VendorId[i+1] = ((PUCHAR)pIdentify->ModelNumber)[i]; } /* Initialize unused portion of product id. */ for (i = 0; i < 4; i++) inquiryData->ProductId[12+i] = ' '; /* firmware revision */ for (i = 0; i < 4; i += 2) { inquiryData->ProductRevisionLevel[i] = ((PUCHAR)pIdentify->FirmwareRevision)[i+1]; inquiryData->ProductRevisionLevel[i+1] = ((PUCHAR)pIdentify->FirmwareRevision)[i]; } break; default: memcpy(&inquiryData->VendorId, "RR18xx ", 8); #ifdef SUPPORT_ARRAY switch(pVDev->VDeviceType){ case VD_RAID_0: if ((pVDev->u.array.pMember[0] && mIsArray(pVDev->u.array.pMember[0])) || (pVDev->u.array.pMember[1] && mIsArray(pVDev->u.array.pMember[1]))) memcpy(&inquiryData->ProductId, "RAID 1/0 Array ", 16); else memcpy(&inquiryData->ProductId, "RAID 0 Array ", 16); break; case VD_RAID_1: if ((pVDev->u.array.pMember[0] && mIsArray(pVDev->u.array.pMember[0])) || (pVDev->u.array.pMember[1] && mIsArray(pVDev->u.array.pMember[1]))) memcpy(&inquiryData->ProductId, "RAID 0/1 Array ", 16); else memcpy(&inquiryData->ProductId, "RAID 1 Array ", 16); break; case VD_RAID_5: memcpy(&inquiryData->ProductId, "RAID 5 Array ", 16); break; case VD_JBOD: memcpy(&inquiryData->ProductId, "JBOD Array ", 16); break; } #endif memcpy(&inquiryData->ProductRevisionLevel, "3.00", 4); break; } } static void hpt_timeout(void *arg) { _VBUS_INST(&((PBUS_DMAMAP)((union ccb *)arg)->ccb_adapter)->pAdapter->VBus) intrmask_t oldspl = lock_driver(); fResetVBus(_VBUS_P0); unlock_driver(oldspl); } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCommand pCmd = (PCommand)arg; union ccb *ccb = pCmd->pOrgCommand; struct ccb_hdr *ccb_h = &ccb->ccb_h; PBUS_DMAMAP pmap = (PBUS_DMAMAP) ccb->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; PVDevice pVDev = pAdapter->VBus.pVDevice[ccb_h->target_id]; FPSCAT_GATH psg = pCmd->pSgTable; int idx; _VBUS_INST(pVDev->pVBus) HPT_ASSERT(pCmd->cf_physical_sg); - if (error || nsegs == 0) + if (error) panic("busdma error"); HPT_ASSERT(nsegs<= MAX_SG_DESCRIPTORS); for (idx = 0; idx < nsegs; idx++, psg++) { psg->dSgAddress = (ULONG_PTR)(UCHAR *)segs[idx].ds_addr; psg->wSgSize = segs[idx].ds_len; psg->wSgFlag = 
(idx == nsegs-1)? SG_FLAG_EOT: 0; /* KdPrint(("psg[%d]:add=%p,size=%x,flag=%x\n", idx, psg->dSgAddress,psg->wSgSize,psg->wSgFlag)); */ } -/* psg[-1].wSgFlag = SG_FLAG_EOT; */ + if (nsegs) { + /* psg[-1].wSgFlag = SG_FLAG_EOT; */ + } if (pCmd->cf_data_in) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->cf_data_out) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_PREWRITE); } ccb->ccb_h.timeout_ch = timeout(hpt_timeout, (caddr_t)ccb, 20*hz); pVDev->pfnSendCommand(_VBUS_P pCmd); CheckPendingCall(_VBUS_P0); } static void HPTLIBAPI OsSendCommand(_VBUS_ARG union ccb *ccb) { PBUS_DMAMAP pmap = (PBUS_DMAMAP)ccb->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; struct ccb_hdr *ccb_h = &ccb->ccb_h; struct ccb_scsiio *csio = &ccb->csio; PVDevice pVDev = pAdapter->VBus.pVDevice[ccb_h->target_id]; KdPrintI(("OsSendCommand: ccb %p cdb %x-%x-%x\n", ccb, *(ULONG *)&ccb->csio.cdb_io.cdb_bytes[0], *(ULONG *)&ccb->csio.cdb_io.cdb_bytes[4], *(ULONG *)&ccb->csio.cdb_io.cdb_bytes[8] )); pAdapter->outstandingCommands++; if (pVDev == NULL || pVDev->vf_online == 0) { ccb->ccb_h.status = CAM_REQ_INVALID; ccb_done(ccb); goto Command_Complished; } switch(ccb->csio.cdb_io.cdb_bytes[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: /* FALLTHROUGH */ ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: ZeroMemory(ccb->csio.data_ptr, ccb->csio.dxfer_len); SetInquiryData((PINQUIRYDATA)ccb->csio.data_ptr, pVDev); ccb_h->status = CAM_REQ_CMP; break; case READ_CAPACITY: { UCHAR *rbuf=csio->data_ptr; unsigned int cap; if (pVDev->VDeviceCapacity > 0xfffffffful) { cap = 0xfffffffful; } else { cap = pVDev->VDeviceCapacity - 1; } rbuf[0] = (UCHAR)(cap>>24); rbuf[1] = (UCHAR)(cap>>16); rbuf[2] = (UCHAR)(cap>>8); rbuf[3] = (UCHAR)cap; /* Claim 512 byte blocks (big-endian). 
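 * The READ CAPACITY(10) payload is eight bytes, all big-endian:
 *
 *	rbuf[0..3]  last LBA    (0x0FFFFFFF for a 0x10000000-sector volume)
 *	rbuf[4..7]  block size  (0x00000200 for 512-byte sectors, set below)
 *
 * Volumes of 2 TiB and larger cannot express their last LBA in 32 bits, so
 * the clamp to 0xffffffff above tells the initiator to retry with
 * READ CAPACITY(16) (the SERVICE ACTION IN case handled next), whose reply
 * carries an 8-byte last LBA followed by a 4-byte block length.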
*/ rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2; rbuf[7] = 0; ccb_h->status = CAM_REQ_CMP; break; } case 0x9e: /*SERVICE_ACTION_IN*/ { UCHAR *rbuf = csio->data_ptr; LBA_T cap = pVDev->VDeviceCapacity - 1; rbuf[0] = (UCHAR)(cap>>56); rbuf[1] = (UCHAR)(cap>>48); rbuf[2] = (UCHAR)(cap>>40); rbuf[3] = (UCHAR)(cap>>32); rbuf[4] = (UCHAR)(cap>>24); rbuf[5] = (UCHAR)(cap>>16); rbuf[6] = (UCHAR)(cap>>8); rbuf[7] = (UCHAR)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2; rbuf[11] = 0; ccb_h->status = CAM_REQ_CMP; break; } case READ_6: case WRITE_6: case READ_10: case WRITE_10: case 0x88: /* READ_16 */ case 0x8a: /* WRITE_16 */ case 0x13: case 0x2f: { UCHAR Cdb[16]; UCHAR CdbLength; _VBUS_INST(pVDev->pVBus) PCommand pCmd = AllocateCommand(_VBUS_P0); int error; HPT_ASSERT(pCmd); CdbLength = csio->cdb_len; if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { bcopy(csio->cdb_io.cdb_ptr, Cdb, CdbLength); } else { KdPrintE(("ERROR!!!\n")); ccb->ccb_h.status = CAM_REQ_INVALID; break; } } else { bcopy(csio->cdb_io.cdb_bytes, Cdb, CdbLength); } pCmd->pOrgCommand = ccb; pCmd->pVDevice = pVDev; pCmd->pfnCompletion = fOsCommandDone; pCmd->pfnBuildSgl = fOsBuildSgl; pCmd->pSgTable = pmap->psg; switch (Cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((ULONG)Cdb[1] << 16) | ((ULONG)Cdb[2] << 8) | (ULONG)Cdb[3]; pCmd->uCmd.Ide.nSectors = (USHORT) Cdb[4]; break; case 0x88: /* READ_16 */ case 0x8a: /* WRITE_16 */ pCmd->uCmd.Ide.Lba = (HPT_U64)Cdb[2] << 56 | (HPT_U64)Cdb[3] << 48 | (HPT_U64)Cdb[4] << 40 | (HPT_U64)Cdb[5] << 32 | (HPT_U64)Cdb[6] << 24 | (HPT_U64)Cdb[7] << 16 | (HPT_U64)Cdb[8] << 8 | (HPT_U64)Cdb[9]; pCmd->uCmd.Ide.nSectors = (USHORT)Cdb[12] << 8 | (USHORT)Cdb[13]; break; default: pCmd->uCmd.Ide.Lba = (ULONG)Cdb[5] | ((ULONG)Cdb[4] << 8) | ((ULONG)Cdb[3] << 16) | ((ULONG)Cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (USHORT) Cdb[8] | ((USHORT)Cdb[7]<<8); break; } switch (Cdb[0]) { case READ_6: case READ_10: case 0x88: /* READ_16 */ pCmd->uCmd.Ide.Command = IDE_COMMAND_READ; pCmd->cf_data_in = 1; break; case WRITE_6: case WRITE_10: case 0x8a: /* WRITE_16 */ pCmd->uCmd.Ide.Command = IDE_COMMAND_WRITE; pCmd->cf_data_out = 1; break; case 0x13: case 0x2f: pCmd->uCmd.Ide.Command = IDE_COMMAND_VERIFY; break; } /*///////////////////////// */ pCmd->cf_physical_sg = 1; error = bus_dmamap_load_ccb(pAdapter->io_dma_parent, pmap->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d\n", error)); if (error && error!=EINPROGRESS) { hpt_printk(("bus_dmamap_load error %d\n", error)); FreeCommand(_VBUS_P pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; dmamap_put(pmap); pAdapter->outstandingCommands--; xpt_done(ccb); } goto Command_Complished; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } ccb_done(ccb); Command_Complished: CheckPendingCall(_VBUS_P0); return; } static void HPTLIBAPI fOsCommandDone(_VBUS_ARG PCommand pCmd) { union ccb *ccb = pCmd->pOrgCommand; PBUS_DMAMAP pmap = (PBUS_DMAMAP)ccb->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; KdPrint(("fOsCommandDone(pcmd=%p, result=%d)\n", pCmd, pCmd->Result)); untimeout(hpt_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch); switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: ccb->ccb_h.status = 
CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->cf_data_in) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_POSTREAD); } else if (pCmd->cf_data_out) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(pAdapter->io_dma_parent, pmap->dma_map); FreeCommand(_VBUS_P pCmd); ccb_done(ccb); } int hpt_queue_dpc(HPT_DPC dpc, IAL_ADAPTER_T * pAdapter, void *arg, UCHAR flags) { int p; p = (DpcQueue_Last + 1) % MAX_DPC; if (p==DpcQueue_First) { KdPrint(("DPC Queue full!\n")); return -1; } DpcQueue[DpcQueue_Last].dpc = dpc; DpcQueue[DpcQueue_Last].pAdapter = pAdapter; DpcQueue[DpcQueue_Last].arg = arg; DpcQueue[DpcQueue_Last].flags = flags; DpcQueue_Last = p; return 0; } #ifdef _RAID5N_ /* * Allocate memory above 16M, otherwise we may eat all low memory for ISA devices. * How about the memory for 5081 request/response array and PRD table? */ void *os_alloc_page(_VBUS_ARG0) { return (void *)contigmalloc(0x1000, M_DEVBUF, M_NOWAIT, 0x1000000, 0xffffffff, PAGE_SIZE, 0ul); } void *os_alloc_dma_page(_VBUS_ARG0) { return (void *)contigmalloc(0x1000, M_DEVBUF, M_NOWAIT, 0x1000000, 0xffffffff, PAGE_SIZE, 0ul); } void os_free_page(_VBUS_ARG void *p) { contigfree(p, 0x1000, M_DEVBUF); } void os_free_dma_page(_VBUS_ARG void *p) { contigfree(p, 0x1000, M_DEVBUF); } void DoXor1(ULONG *p0, ULONG *p1, ULONG *p2, UINT nBytes) { UINT i; for (i = 0; i < nBytes / 4; i++) *p0++ = *p1++ ^ *p2++; } void DoXor2(ULONG *p0, ULONG *p2, UINT nBytes) { UINT i; for (i = 0; i < nBytes / 4; i++) *p0++ ^= *p2++; } #endif Index: projects/physbio/sys/dev/hptrr/hptrr_osm_bsd.c =================================================================== --- projects/physbio/sys/dev/hptrr/hptrr_osm_bsd.c (revision 243875) +++ projects/physbio/sys/dev/hptrr/hptrr_osm_bsd.c (revision 243876) @@ -1,1396 +1,1370 @@ /* * Copyright (c) HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include /* $Id: osm_bsd.c,v 1.27 2007/11/22 07:35:49 gmm Exp $ * * HighPoint RAID Driver for FreeBSD * Copyright (C) 2005 HighPoint Technologies, Inc. All Rights Reserved. 
*/ #include #include static int attach_generic = 0; TUNABLE_INT("hw.hptrr.attach_generic", &attach_generic); static int hpt_probe(device_t dev) { PCI_ID pci_id; HIM *him; int i; PHBA hba; /* Some of supported chips are used not only by HPT. */ if (pci_get_vendor(dev) != 0x1103 && !attach_generic) return (ENXIO); for (him = him_list; him; him = him->next) { for (i=0; him->get_supported_device_id(i, &pci_id); i++) { if ((pci_get_vendor(dev) == pci_id.vid) && (pci_get_device(dev) == pci_id.did)){ KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) )); device_set_desc(dev, him->name); hba = (PHBA)device_get_softc(dev); memset(hba, 0, sizeof(HBA)); hba->ext_type = EXT_TYPE_HBA; hba->ldm_adapter.him = him; return 0; } } } return (ENXIO); } static int hpt_attach(device_t dev) { PHBA hba = (PHBA)device_get_softc(dev); HIM *him = hba->ldm_adapter.him; PCI_ID pci_id; HPT_UINT size; PVBUS vbus; PVBUS_EXT vbus_ext; KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); #if __FreeBSD_version >=440000 pci_enable_busmaster(dev); #endif pci_id.vid = pci_get_vendor(dev); pci_id.did = pci_get_device(dev); pci_id.rev = pci_get_revid(dev); size = him->get_adapter_size(&pci_id); hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); if (!hba->ldm_adapter.him_handle) return ENXIO; hba->pcidev = dev; hba->pciaddr.tree = 0; hba->pciaddr.bus = pci_get_bus(dev); hba->pciaddr.device = pci_get_slot(dev); hba->pciaddr.function = pci_get_function(dev); if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return -1; } os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); if (!ldm_register_adapter(&hba->ldm_adapter)) { size = ldm_get_vbus_size(); vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK); if (!vbus_ext) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return -1; } memset(vbus_ext, 0, sizeof(VBUS_EXT)); vbus_ext->ext_type = EXT_TYPE_VBUS; ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); ldm_register_adapter(&hba->ldm_adapter); } ldm_for_each_vbus(vbus, vbus_ext) { if (hba->ldm_adapter.vbus==vbus) { hba->vbus_ext = vbus_ext; hba->next = vbus_ext->hba_list; vbus_ext->hba_list = hba; break; } } return 0; } /* * Maybe we'd better to use the bus_dmamem_alloc to alloc DMA memory, * but there are some problems currently (alignment, etc). 
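 *
 * Instead, hpt_alloc_mem() below rounds each DMA freelist element up to a
 * power-of-two number of pages and grabs them with contigmalloc().  The
 * order computation is simply "smallest power-of-two multiple of PAGE_SIZE
 * that covers the element size" (sz here is illustrative, the code uses
 * f->size):
 *
 *	int order = 0;
 *	size_t chunk = PAGE_SIZE;
 *	while (chunk < sz) {
 *		order++;
 *		chunk <<= 1;
 *	}
 *
 * so a 9 KiB element on a 4 KiB-page machine gets order 2 (a 16 KiB chunk).
 * One or more elements are then carved out of each chunk and pushed onto
 * the freelist together with their vtophys() bus addresses.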
*/ static __inline void *__get_free_pages(int order) { /* don't use low memory - other devices may get starved */ return contigmalloc(PAGE_SIZE<hba_list; hba; hba = hba->next) hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle); ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0); for (f=vbus_ext->freelist_head; f; f=f->next) { KdPrint(("%s: %d*%d=%d bytes", f->tag, f->count, f->size, f->count*f->size)); for (i=0; icount; i++) { p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK); if (!p) return (ENXIO); *p = f->head; f->head = p; } } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size, j; HPT_ASSERT((f->size & (f->alignment-1))==0); for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; KdPrint(("%s: %d*%d=%d bytes, order %d", f->tag, f->count, f->size, f->count*f->size, order)); HPT_ASSERT(f->alignment<=PAGE_SIZE); for (i=0; icount;) { p = (void **)__get_free_pages(order); if (!p) return -1; for (j = size/f->size; j && icount; i++,j--) { *p = f->head; *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p); f->head = p; p = (void **)((unsigned long)p + f->size); } } } HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE); for (i=0; ivbus, p, (BUS_ADDRESS)vtophys(p)); } return 0; } static void hpt_free_mem(PVBUS_EXT vbus_ext) { struct freelist *f; void *p; int i; BUS_ADDRESS bus; for (f=vbus_ext->freelist_head; f; f=f->next) { #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif while ((p=freelist_get(f))) free(p, M_DEVBUF); } for (i=0; ivbus, &bus); HPT_ASSERT(p); free_pages(p, 0); } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size; #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; while ((p=freelist_get_dma(f, &bus))) { if (order) free_pages(p, order); else { /* can't free immediately since other blocks in this page may still be in the list */ if (((HPT_UPTR)p & (PAGE_SIZE-1))==0) dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus); } } } while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus))) free_pages(p, 0); } static int hpt_init_vbus(PVBUS_EXT vbus_ext) { PHBA hba; for (hba = vbus_ext->hba_list; hba; hba = hba->next) if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) { KdPrint(("fail to initialize %p", hba)); return -1; } ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter); return 0; } static void hpt_flush_done(PCOMMAND pCmd) { PVDEV vd = pCmd->target; if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) { vd = vd->u.array.transform->target; HPT_ASSERT(vd); pCmd->target = vd; pCmd->Result = RETURN_PENDING; vdev_queue_cmd(pCmd); return; } *(int *)pCmd->priv = 1; wakeup(pCmd); } /* * flush a vdev (without retry). 
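 *
 * The pattern below is "queue a command, then sleep on it until the done
 * callback flips a flag".  Condensed, with locking and the array-transform
 * special case omitted (names match the code that follows):
 *
 *	int done = 0;
 *	pCmd = ldm_alloc_cmds(vd->vbus, vd->cmds_per_request);
 *	pCmd->type = CMD_TYPE_FLUSH;
 *	pCmd->flags.hard_flush = 1;
 *	pCmd->target = vd;
 *	pCmd->done = hpt_flush_done;
 *	pCmd->priv = &done;
 *	ldm_queue_cmd(pCmd);
 *	if (!done)
 *		while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls",
 *		    HPT_OSM_TIMEOUT))
 *			ldm_reset_vbus(vd->vbus);
 *
 * hpt_flush_done() re-queues the flush against the transform target while
 * an array transform is in progress, and only once it reaches the final
 * target does it set *priv and wake the sleeper.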
*/ static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd) { PCOMMAND pCmd; int result = 0, done; HPT_UINT count; KdPrint(("flusing dev %p", vd)); hpt_lock_vbus(vbus_ext); if (mIsArray(vd->type) && vd->u.array.transform) count = MAX(vd->u.array.transform->source->cmds_per_request, vd->u.array.transform->target->cmds_per_request); else count = vd->cmds_per_request; pCmd = ldm_alloc_cmds(vd->vbus, count); if (!pCmd) { hpt_unlock_vbus(vbus_ext); return -1; } pCmd->type = CMD_TYPE_FLUSH; pCmd->flags.hard_flush = 1; pCmd->target = vd; pCmd->done = hpt_flush_done; done = 0; pCmd->priv = &done; ldm_queue_cmd(pCmd); if (!done) { while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) { ldm_reset_vbus(vd->vbus); } } KdPrint(("flush result %d", pCmd->Result)); if (pCmd->Result!=RETURN_SUCCESS) result = -1; ldm_free_cmds(pCmd); hpt_unlock_vbus(vbus_ext); return result; } static void hpt_stop_tasks(PVBUS_EXT vbus_ext); static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PHBA hba; int i; KdPrint(("hpt_shutdown_vbus")); /* stop all ctl tasks and disable the worker taskqueue */ hpt_stop_tasks(vbus_ext); vbus_ext->worker.ta_context = 0; /* flush devices */ for (i=0; ihba_list; hba; hba=hba->next) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); hpt_free_mem(vbus_ext); while ((hba=vbus_ext->hba_list)) { vbus_ext->hba_list = hba->next; free(hba->ldm_adapter.him_handle, M_DEVBUF); } free(vbus_ext, M_DEVBUF); KdPrint(("hpt_shutdown_vbus done")); } static void __hpt_do_tasks(PVBUS_EXT vbus_ext) { OSM_TASK *tasks; tasks = vbus_ext->tasks; vbus_ext->tasks = 0; while (tasks) { OSM_TASK *t = tasks; tasks = t->next; t->next = 0; t->func(vbus_ext->vbus, t->data); } } static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending) { if(vbus_ext){ hpt_lock_vbus(vbus_ext); __hpt_do_tasks(vbus_ext); hpt_unlock_vbus(vbus_ext); } } static void hpt_action(struct cam_sim *sim, union ccb *ccb); static void hpt_poll(struct cam_sim *sim); static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg); static void hpt_pci_intr(void *arg); static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext) { POS_CMDEXT p = vbus_ext->cmdext_list; if (p) vbus_ext->cmdext_list = p->next; return p; } static __inline void cmdext_put(POS_CMDEXT p) { p->next = p->vbus_ext->cmdext_list; p->vbus_ext->cmdext_list = p; } static void hpt_timeout(void *arg) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; KdPrint(("pCmd %p timeout", pCmd)); ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus); } static void os_cmddone(PCOMMAND pCmd) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result)); untimeout(hpt_timeout, pCmd, ccb->ccb_h.timeout_ch); switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(ext->vbus_ext->io_dmat, 
ext->dma_map); cmdext_put(ext); ldm_free_cmds(pCmd); xpt_done(ccb); } static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { - POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; - union ccb *ccb = ext->ccb; - bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; - int idx; - if(logical) { - if (ccb->ccb_h.flags & CAM_DATA_PHYS) - panic("physical address unsupported"); - - if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { - if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) - panic("physical address unsupported"); - - for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { - os_set_sgptr(&pSg[idx], (HPT_U8 *)(HPT_UPTR)sgList[idx].ds_addr); - pSg[idx].size = sgList[idx].ds_len; - pSg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; - } - } - else { - os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); - pSg->size = ccb->csio.dxfer_len; - pSg->eot = 1; - } - return TRUE; - } - /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; PSG psg = pCmd->psg; int idx; HPT_ASSERT(pCmd->flags.physical_sg); - if (error || nsegs == 0) + if (error) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); for (idx = 0; idx < nsegs; idx++, psg++) { psg->addr.bus = segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } - psg[-1].eot = 1; + if (nsegs) + psg[-1].eot = 1; if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); } ext->ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); ldm_queue_cmd(pCmd); } static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PVDEV vd; PCOMMAND pCmd; POS_CMDEXT ext; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", ccb, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] )); /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= osm_max_targets || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } vd = ldm_find_target(vbus, ccb->ccb_h.target_id); if (!vd) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } switch (cdb[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: { PINQUIRYDATA inquiryData; memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; inquiryData->AdditionalLength = 31; inquiryData->CommandQueue = 1; memcpy(&inquiryData->VendorId, "HPT ", 8); memcpy(&inquiryData->ProductId, "DISK 0_0 ", 16); if (vd->target_id / 10) { inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0'; inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0'; } else inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0'; memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); ccb->ccb_h.status = CAM_REQ_CMP; } break; case READ_CAPACITY: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U32 cap; if (vd->capacity>0xfffffffful) cap = 0xfffffffful; else cap = vd->capacity - 1; rbuf[0] = (HPT_U8)(cap>>24); rbuf[1] = 
(HPT_U8)(cap>>16); rbuf[2] = (HPT_U8)(cap>>8); rbuf[3] = (HPT_U8)cap; rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2; rbuf[7] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case SERVICE_ACTION_IN: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U64 cap = vd->capacity - 1; rbuf[0] = (HPT_U8)(cap>>56); rbuf[1] = (HPT_U8)(cap>>48); rbuf[2] = (HPT_U8)(cap>>40); rbuf[3] = (HPT_U8)(cap>>32); rbuf[4] = (HPT_U8)(cap>>24); rbuf[5] = (HPT_U8)(cap>>16); rbuf[6] = (HPT_U8)(cap>>8); rbuf[7] = (HPT_U8)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2; rbuf[11] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case READ_6: case READ_10: case READ_16: case WRITE_6: case WRITE_10: case WRITE_16: case 0x13: case 0x2f: { int error; pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); ccb->ccb_h.status = CAM_BUSY; break; } switch (cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; break; case READ_16: case WRITE_16: { HPT_U64 block = ((HPT_U64)cdb[2]<<56) | ((HPT_U64)cdb[3]<<48) | ((HPT_U64)cdb[4]<<40) | ((HPT_U64)cdb[5]<<32) | ((HPT_U64)cdb[6]<<24) | ((HPT_U64)cdb[7]<<16) | ((HPT_U64)cdb[8]<<8) | ((HPT_U64)cdb[9]); pCmd->uCmd.Ide.Lba = block; pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); break; } default: pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); break; } switch (cdb[0]) { case READ_6: case READ_10: case READ_16: pCmd->flags.data_in = 1; break; case WRITE_6: case WRITE_10: case WRITE_16: pCmd->flags.data_out = 1; break; } pCmd->priv = ext = cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; - pCmd->psg = ext->psg; pCmd->flags.physical_sg = 1; error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_action(struct cam_sim *sim, union ccb *ccb) { PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: hpt_lock_vbus(vbus_ext); hpt_scsi_io(vbus_ext, ccb); hpt_unlock_vbus(vbus_ext); return; case XPT_RESET_BUS: hpt_lock_vbus(vbus_ext); ldm_reset_vbus((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: #if __FreeBSD_version >= 500000 cam_calc_geometry(&ccb->ccg, 1); #else ccb->ccg.heads = 255; ccb->ccg.secs_per_track = 63; ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track); ccb->ccb_h.status = CAM_REQ_CMP; #endif break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = osm_max_targets; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = 
osm_max_targets; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_pci_intr(void *arg) { PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; hpt_lock_vbus(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); } static void hpt_poll(struct cam_sim *sim) { hpt_pci_intr(cam_sim_softc(sim)); } static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { KdPrint(("hpt_async")); } static int hpt_shutdown(device_t dev) { KdPrint(("hpt_shutdown(dev=%p)", dev)); return 0; } static int hpt_detach(device_t dev) { /* we don't allow the driver to be unloaded. */ return EBUSY; } static void hpt_ioctl_done(struct _IOCTL_ARG *arg) { arg->ioctl_cmnd = 0; wakeup(arg); } static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) { ioctl_args->result = -1; ioctl_args->done = hpt_ioctl_done; ioctl_args->ioctl_cmnd = (void *)1; hpt_lock_vbus(vbus_ext); ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); while (ioctl_args->ioctl_cmnd) { if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; ldm_reset_vbus((PVBUS)vbus_ext->vbus); __hpt_do_tasks(vbus_ext); } /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ hpt_unlock_vbus(vbus_ext); } static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) { PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { __hpt_do_ioctl(vbus_ext, ioctl_args); if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) return; } } #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ IOCTL_ARG arg;\ arg.dwIoControlCode = code;\ arg.lpInBuffer = inbuf;\ arg.lpOutBuffer = outbuf;\ arg.nInBufferSize = insize;\ arg.nOutBufferSize = outsize;\ arg.lpBytesReturned = 0;\ hpt_do_ioctl(&arg);\ arg.result;\ }) #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) { int i; HPT_U32 count = nMaxCount-1; if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) return -1; nMaxCount = (int)pIds[0]; for (i=0; i=503000 .d_version = D_VERSION, #endif #if (__FreeBSD_version>=503000 && __FreeBSD_version<600034) .d_flags = D_NEEDGIANT, #endif #if __FreeBSD_version<600034 #if __FreeBSD_version>501000 .d_maj = MAJOR_AUTO, #else .d_maj = HPT_DEV_MAJOR, #endif #endif }; static struct intr_config_hook hpt_ich; /* * hpt_final_init will be called after all hpt_attach. 
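As the comment notes, hpt_final_init() is run from the intr_config_hook declared just above, so it executes only after every hpt_attach() has completed. A small sketch of that deferral pattern, with hypothetical example_* names:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

static struct intr_config_hook example_ich;

static void
example_deferred_init(void *arg)
{
	/* Runs once all device_attach() routines have completed. */
	config_intrhook_disestablish(&example_ich);
	printf("example: deferred initialization for %p\n", arg);
}

static void
example_register_hook(void *softc)
{
	example_ich.ich_func = example_deferred_init;
	example_ich.ich_arg = softc;
	if (config_intrhook_establish(&example_ich) != 0)
		printf("example: cannot establish config hook\n");
}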
*/ static void hpt_final_init(void *dummy) { int i; PVBUS_EXT vbus_ext; PVBUS vbus; PHBA hba; /* Clear the config hook */ config_intrhook_disestablish(&hpt_ich); /* allocate memory */ i = 0; ldm_for_each_vbus(vbus, vbus_ext) { if (hpt_alloc_mem(vbus_ext)) { os_printk("out of memory"); return; } i++; } if (!i) { if (bootverbose) os_printk("no controller detected."); return; } /* initializing hardware */ ldm_for_each_vbus(vbus, vbus_ext) { /* make timer available here */ callout_handle_init(&vbus_ext->timer); if (hpt_init_vbus(vbus_ext)) { os_printk("fail to initialize hardware"); break; /* FIXME */ } } /* register CAM interface */ ldm_for_each_vbus(vbus, vbus_ext) { struct cam_devq *devq; struct ccb_setasync ccb; #if (__FreeBSD_version >= 500000) mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF); #endif if (bus_dma_tag_create(NULL,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ os_max_sg_descriptors, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ #if __FreeBSD_version>502000 busdma_lock_mutex, /* lockfunc */ &vbus_ext->lock, /* lockfuncarg */ #endif &vbus_ext->io_dmat /* tag */)) { return ; } for (i=0; ivbus_ext = vbus_ext; ext->next = vbus_ext->cmdext_list; vbus_ext->cmdext_list = ext; if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { os_printk("Can't create dma map(%d)", i); return ; } } if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { os_printk("cam_simq_alloc failed"); return ; } #if __FreeBSD_version > 700025 vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, 0, &Giant, os_max_queue_comm, /*tagged*/8, devq); #else vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, 0, os_max_queue_comm, /*tagged*/8, devq); #endif if (!vbus_ext->sim) { os_printk("cam_sim_alloc failed"); cam_simq_free(devq); return ; } #if __FreeBSD_version > 700044 if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { #else if (xpt_bus_register(vbus_ext->sim, 0) != CAM_SUCCESS) { #endif os_printk("xpt_bus_register failed"); cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); vbus_ext->sim = NULL; return ; } if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { os_printk("xpt_create_path failed"); xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); vbus_ext->sim = NULL; return ; } xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = AC_LOST_DEVICE; ccb.callback = hpt_async; ccb.callback_arg = vbus_ext; xpt_action((union ccb *)&ccb); for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; } if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM, #if __FreeBSD_version > 700025 NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) #else hpt_pci_intr, vbus_ext, &hba->irq_handle)) #endif { os_printk("can't set up interrupt"); return ; } hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); } vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); if (!vbus_ext->shutdown_eh) os_printk("Shutdown event 
registration failed"); } ldm_for_each_vbus(vbus, vbus_ext) { TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); if (vbus_ext->tasks) TASK_ENQUEUE(&vbus_ext->worker); } make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "%s", driver_name); } #if defined(KLD_MODULE) && (__FreeBSD_version >= 503000) typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ }; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ }; static void override_kernel_driver(void) { driverlink_t dl, dlfirst; driver_t *tmpdriver; devclass_t dc = devclass_find("pci"); if (dc){ dlfirst = TAILQ_FIRST(&dc->drivers); for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { if(strcmp(dl->driver->name, driver_name) == 0) { tmpdriver=dl->driver; dl->driver=dlfirst->driver; dlfirst->driver=tmpdriver; break; } } } } #else #define override_kernel_driver() #endif static void hpt_init(void *dummy) { if (bootverbose) os_printk("%s %s", driver_name_long, driver_ver); override_kernel_driver(); init_config(); hpt_ich.ich_func = hpt_final_init; hpt_ich.ich_arg = NULL; if (config_intrhook_establish(&hpt_ich) != 0) { printf("%s: cannot establish configuration hook\n", driver_name_long); } } SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), DEVMETHOD_END }; static driver_t hpt_pci_driver = { driver_name, driver_methods, sizeof(HBA) }; static devclass_t hpt_devclass; #ifndef TARGETNAME #error "no TARGETNAME found" #endif /* use this to make TARGETNAME be expanded */ #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2) #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5) __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); __MODULE_VERSION(TARGETNAME, 1); __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1); #if __FreeBSD_version>503000 typedef struct cdev * ioctl_dev_t; #else typedef dev_t ioctl_dev_t; #endif #if __FreeBSD_version >= 500000 typedef struct thread * ioctl_thread_t; #else typedef struct proc * ioctl_thread_t; #endif static int hpt_open(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t td) { return 0; } static int hpt_close(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t td) { return 0; } static int hpt_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data, int fflag, ioctl_thread_t td) { PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data; IOCTL_ARG ioctl_args; HPT_U32 bytesReturned; switch (cmd){ case HPT_DO_IOCONTROL: { if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n", piop->dwIoControlCode, piop->lpInBuffer, piop->nInBufferSize, piop->lpOutBuffer, piop->nOutBufferSize)); memset(&ioctl_args, 0, sizeof(ioctl_args)); ioctl_args.dwIoControlCode = piop->dwIoControlCode; ioctl_args.nInBufferSize = piop->nInBufferSize; ioctl_args.nOutBufferSize = piop->nOutBufferSize; 
ioctl_args.lpBytesReturned = &bytesReturned; if (ioctl_args.nInBufferSize) { ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpInBuffer) goto invalid; if (copyin((void*)piop->lpInBuffer, ioctl_args.lpInBuffer, piop->nInBufferSize)) goto invalid; } if (ioctl_args.nOutBufferSize) { ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpOutBuffer) goto invalid; } #if (__FreeBSD_version >= 500000) mtx_lock(&Giant); #endif hpt_do_ioctl(&ioctl_args); #if (__FreeBSD_version >= 500000) mtx_unlock(&Giant); #endif if (ioctl_args.result==HPT_IOCTL_RESULT_OK) { if (piop->nOutBufferSize) { if (copyout(ioctl_args.lpOutBuffer, (void*)piop->lpOutBuffer, piop->nOutBufferSize)) goto invalid; } if (piop->lpBytesReturned) { if (copyout(&bytesReturned, (void*)piop->lpBytesReturned, sizeof(HPT_U32))) goto invalid; } if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return 0; } invalid: if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return EFAULT; } return EFAULT; } case HPT_SCAN_BUS: { return hpt_rescan_bus(); } default: KdPrint(("invalid command!")); return EFAULT; } } static int hpt_rescan_bus(void) { union ccb *ccb; PVBUS vbus; PVBUS_EXT vbus_ext; #if (__FreeBSD_version >= 500000) mtx_lock(&Giant); #endif ldm_for_each_vbus(vbus, vbus_ext) { if ((ccb = xpt_alloc_ccb()) == NULL) return(ENOMEM); if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); } #if (__FreeBSD_version >= 500000) mtx_unlock(&Giant); #endif return(0); } Index: projects/physbio/sys/dev/iir/iir.c =================================================================== --- projects/physbio/sys/dev/iir/iir.c (revision 243875) +++ projects/physbio/sys/dev/iir/iir.c (revision 243876) @@ -1,1963 +1,1930 @@ /*- * Copyright (c) 2000-04 ICP vortex GmbH * Copyright (c) 2002-04 Intel Corporation * Copyright (c) 2003-04 Adaptec Inc. * All Rights Reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * iir.c: SCSI dependant code for the Intel Integrated RAID Controller driver * * Written by: Achim Leubner * Fixes/Additions: Boji Tony Kannanthanam * * credits: Niklas Hallqvist; OpenBSD driver for the ICP Controllers. * Mike Smith; Some driver source code. * FreeBSD.ORG; Great O/S to work on and for. * * $Id: iir.c 1.5 2004/03/30 10:17:53 achim Exp $" */ #include __FBSDID("$FreeBSD$"); #define _IIR_C_ /* #include "opt_iir.h" */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_GDTBUF, "iirbuf", "iir driver buffer"); struct gdt_softc *gdt_wait_gdt; int gdt_wait_index; #ifdef GDT_DEBUG int gdt_debug = GDT_DEBUG; #ifdef __SERIAL__ #define MAX_SERBUF 160 static void ser_init(void); static void ser_puts(char *str); static void ser_putc(int c); static char strbuf[MAX_SERBUF+1]; #ifdef __COM2__ #define COM_BASE 0x2f8 #else #define COM_BASE 0x3f8 #endif static void ser_init() { unsigned port=COM_BASE; outb(port+3, 0x80); outb(port+1, 0); /* 19200 Baud, if 9600: outb(12,port) */ outb(port, 6); outb(port+3, 3); outb(port+1, 0); } static void ser_puts(char *str) { char *ptr; ser_init(); for (ptr=str;*ptr;++ptr) ser_putc((int)(*ptr)); } static void ser_putc(int c) { unsigned port=COM_BASE; while ((inb(port+5) & 0x20)==0); outb(port, c); if (c==0x0a) { while ((inb(port+5) & 0x20)==0); outb(port, 0x0d); } } int ser_printf(const char *fmt, ...) { va_list args; int i; va_start(args,fmt); i = vsprintf(strbuf,fmt,args); ser_puts(strbuf); va_end(args); return i; } #endif #endif /* The linked list of softc structures */ struct gdt_softc_list gdt_softcs = TAILQ_HEAD_INITIALIZER(gdt_softcs); /* controller cnt. 
*/ int gdt_cnt = 0; /* event buffer */ static gdt_evt_str ebuffer[GDT_MAX_EVENTS]; static int elastidx, eoldidx; /* statistics */ gdt_statist_t gdt_stat; /* Definitions for our use of the SIM private CCB area */ #define ccb_sim_ptr spriv_ptr0 #define ccb_priority spriv_field1 static void iir_action(struct cam_sim *sim, union ccb *ccb); static void iir_poll(struct cam_sim *sim); static void iir_shutdown(void *arg, int howto); static void iir_timeout(void *arg); static void gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs); static int gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb, u_int8_t service, u_int16_t opcode, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *ccb, int timeout); static struct gdt_ccb *gdt_get_ccb(struct gdt_softc *gdt); static int gdt_sync_event(struct gdt_softc *gdt, int service, u_int8_t index, struct gdt_ccb *gccb); static int gdt_async_event(struct gdt_softc *gdt, int service); static struct gdt_ccb *gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock); static struct gdt_ccb *gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock); static struct gdt_ccb *gdt_ioctl_cmd(struct gdt_softc *gdt, gdt_ucmd_t *ucmd, int *lock); static void gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb); static void gdtmapmem(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error); static void gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error); int iir_init(struct gdt_softc *gdt) { u_int16_t cdev_cnt; int i, id, drv_cyls, drv_hds, drv_secs; struct gdt_ccb *gccb; GDT_DPRINTF(GDT_D_DEBUG, ("iir_init()\n")); gdt->sc_state = GDT_POLLING; gdt_clear_events(); bzero(&gdt_stat, sizeof(gdt_statist_t)); SLIST_INIT(&gdt->sc_free_gccb); SLIST_INIT(&gdt->sc_pending_gccb); TAILQ_INIT(&gdt->sc_ccb_queue); TAILQ_INIT(&gdt->sc_ucmd_queue); TAILQ_INSERT_TAIL(&gdt_softcs, gdt, links); /* DMA tag for mapping buffers into device visible space. */ if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/MAXBSIZE, /*nsegments*/GDT_MAXSG, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/BUS_DMA_ALLOCNOW, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &gdt->sc_buffer_dmat) != 0) { printf("iir%d: bus_dma_tag_create(...,gdt->sc_buffer_dmat) failed\n", gdt->sc_hanum); return (1); } gdt->sc_init_level++; /* DMA tag for our ccb structures */ if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, GDT_MAXCMDS * GDT_SCRATCH_SZ, /* maxsize */ /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &gdt->sc_gcscratch_dmat) != 0) { printf("iir%d: bus_dma_tag_create(...,gdt->sc_gcscratch_dmat) failed\n", gdt->sc_hanum); return (1); } gdt->sc_init_level++; /* Allocation for our ccb scratch area */ if (bus_dmamem_alloc(gdt->sc_gcscratch_dmat, (void **)&gdt->sc_gcscratch, BUS_DMA_NOWAIT, &gdt->sc_gcscratch_dmamap) != 0) { printf("iir%d: bus_dmamem_alloc(...,&gdt->sc_gccbs,...) failed\n", gdt->sc_hanum); return (1); } gdt->sc_init_level++; /* And permanently map them */ bus_dmamap_load(gdt->sc_gcscratch_dmat, gdt->sc_gcscratch_dmamap, gdt->sc_gcscratch, GDT_MAXCMDS * GDT_SCRATCH_SZ, gdtmapmem, &gdt->sc_gcscratch_busbase, /*flags*/0); gdt->sc_init_level++; /* Clear them out. 
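Just above, the ccb scratch area is loaded once with bus_dmamap_load() and its bus address is captured through the load callback (gdtmapmem() simply records the single segment's ds_addr). A minimal sketch of that "load once, remember the bus address" idiom, with hypothetical example_* names:

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

static void
example_mapmem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp = arg;

	if (error == 0 && nseg == 1)
		*busaddrp = segs[0].ds_addr;
}

static int
example_map_scratch(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr,
    bus_size_t len, bus_addr_t *busaddrp)
{
	*busaddrp = 0;
	/* BUS_DMA_NOWAIT keeps the load from being deferred. */
	return (bus_dmamap_load(tag, map, vaddr, len, example_mapmem_cb,
	    busaddrp, BUS_DMA_NOWAIT));
}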
*/ bzero(gdt->sc_gcscratch, GDT_MAXCMDS * GDT_SCRATCH_SZ); /* Initialize the ccbs */ gdt->sc_gccbs = malloc(sizeof(struct gdt_ccb) * GDT_MAXCMDS, M_GDTBUF, M_NOWAIT | M_ZERO); if (gdt->sc_gccbs == NULL) { printf("iir%d: no memory for gccbs.\n", gdt->sc_hanum); return (1); } for (i = GDT_MAXCMDS-1; i >= 0; i--) { gccb = &gdt->sc_gccbs[i]; gccb->gc_cmd_index = i + 2; gccb->gc_flags = GDT_GCF_UNUSED; gccb->gc_map_flag = FALSE; if (bus_dmamap_create(gdt->sc_buffer_dmat, /*flags*/0, &gccb->gc_dmamap) != 0) return(1); gccb->gc_map_flag = TRUE; gccb->gc_scratch = &gdt->sc_gcscratch[GDT_SCRATCH_SZ * i]; gccb->gc_scratch_busbase = gdt->sc_gcscratch_busbase + GDT_SCRATCH_SZ * i; SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle); } gdt->sc_init_level++; /* create the control device */ gdt->sc_dev = gdt_make_dev(gdt->sc_hanum); /* allocate ccb for gdt_internal_cmd() */ gccb = gdt_get_ccb(gdt); if (gccb == NULL) { printf("iir%d: No free command index found\n", gdt->sc_hanum); return (1); } bzero(gccb->gc_cmd, GDT_CMD_SZ); if (!gdt_internal_cmd(gdt, gccb, GDT_SCREENSERVICE, GDT_INIT, 0, 0, 0)) { printf("iir%d: Screen service initialization error %d\n", gdt->sc_hanum, gdt->sc_status); gdt_free_ccb(gdt, gccb); return (1); } gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_UNFREEZE_IO, 0, 0, 0); if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT, GDT_LINUX_OS, 0, 0)) { printf("iir%d: Cache service initialization error %d\n", gdt->sc_hanum, gdt->sc_status); gdt_free_ccb(gdt, gccb); return (1); } cdev_cnt = (u_int16_t)gdt->sc_info; gdt->sc_fw_vers = gdt->sc_service; /* Detect number of buses */ gdt_enc32(gccb->gc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST); gccb->gc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS; gccb->gc_scratch[GDT_IOC_FIRST_CHAN] = 0; gccb->gc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1; gdt_enc32(gccb->gc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ); if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL, GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL, GDT_IOC_HDR_SZ + GDT_MAXBUS * GDT_RAWIOC_SZ)) { gdt->sc_bus_cnt = gccb->gc_scratch[GDT_IOC_CHAN_COUNT]; for (i = 0; i < gdt->sc_bus_cnt; i++) { id = gccb->gc_scratch[GDT_IOC_HDR_SZ + i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID]; gdt->sc_bus_id[i] = id < GDT_MAXID_FC ? id : 0xff; } } else { /* New method failed, use fallback. */ for (i = 0; i < GDT_MAXBUS; i++) { gdt_enc32(gccb->gc_scratch + GDT_GETCH_CHANNEL_NO, i); if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL, GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN, GDT_IO_CHANNEL | GDT_INVALID_CHANNEL, GDT_GETCH_SZ)) { if (i == 0) { printf("iir%d: Cannot get channel count, " "error %d\n", gdt->sc_hanum, gdt->sc_status); gdt_free_ccb(gdt, gccb); return (1); } break; } gdt->sc_bus_id[i] = (gccb->gc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID_FC) ? 
gccb->gc_scratch[GDT_GETCH_SIOP_ID] : 0xff; } gdt->sc_bus_cnt = i; } /* add one "virtual" channel for the host drives */ gdt->sc_virt_bus = gdt->sc_bus_cnt; gdt->sc_bus_cnt++; if (!gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_INIT, 0, 0, 0)) { printf("iir%d: Raw service initialization error %d\n", gdt->sc_hanum, gdt->sc_status); gdt_free_ccb(gdt, gccb); return (1); } /* Set/get features raw service (scatter/gather) */ gdt->sc_raw_feat = 0; if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_SET_FEAT, GDT_SCATTER_GATHER, 0, 0)) { if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) { gdt->sc_raw_feat = gdt->sc_info; if (!(gdt->sc_info & GDT_SCATTER_GATHER)) { panic("iir%d: Scatter/Gather Raw Service " "required but not supported!\n", gdt->sc_hanum); gdt_free_ccb(gdt, gccb); return (1); } } } /* Set/get features cache service (scatter/gather) */ gdt->sc_cache_feat = 0; if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_SET_FEAT, 0, GDT_SCATTER_GATHER, 0)) { if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) { gdt->sc_cache_feat = gdt->sc_info; if (!(gdt->sc_info & GDT_SCATTER_GATHER)) { panic("iir%d: Scatter/Gather Cache Service " "required but not supported!\n", gdt->sc_hanum); gdt_free_ccb(gdt, gccb); return (1); } } } /* OEM */ gdt_enc32(gccb->gc_scratch + GDT_OEM_VERSION, 0x01); gdt_enc32(gccb->gc_scratch + GDT_OEM_BUFSIZE, sizeof(gdt_oem_record_t)); if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL, GDT_OEM_STR_RECORD, GDT_INVALID_CHANNEL, sizeof(gdt_oem_str_record_t))) { strncpy(gdt->oem_name, ((gdt_oem_str_record_t *) gccb->gc_scratch)->text.scsi_host_drive_inquiry_vendor_id, 7); gdt->oem_name[7]='\0'; } else { /* Old method, based on PCI ID */ if (gdt->sc_vendor == INTEL_VENDOR_ID) strcpy(gdt->oem_name,"Intel "); else strcpy(gdt->oem_name,"ICP "); } /* Scan for cache devices */ for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++) { if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INFO, i, 0, 0)) { gdt->sc_hdr[i].hd_present = 1; gdt->sc_hdr[i].hd_size = gdt->sc_info; /* * Evaluate mapping (sectors per head, heads per cyl) */ gdt->sc_hdr[i].hd_size &= ~GDT_SECS32; if (gdt->sc_info2 == 0) gdt_eval_mapping(gdt->sc_hdr[i].hd_size, &drv_cyls, &drv_hds, &drv_secs); else { drv_hds = gdt->sc_info2 & 0xff; drv_secs = (gdt->sc_info2 >> 8) & 0xff; drv_cyls = gdt->sc_hdr[i].hd_size / drv_hds / drv_secs; } gdt->sc_hdr[i].hd_heads = drv_hds; gdt->sc_hdr[i].hd_secs = drv_secs; /* Round the size */ gdt->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs; if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_DEVTYPE, i, 0, 0)) gdt->sc_hdr[i].hd_devtype = gdt->sc_info; } } GDT_DPRINTF(GDT_D_INIT, ("dpmem %x %d-bus %d cache device%s\n", gdt->sc_dpmembase, gdt->sc_bus_cnt, cdev_cnt, cdev_cnt == 1 ? 
"" : "s")); gdt_free_ccb(gdt, gccb); gdt_cnt++; return (0); } void iir_free(struct gdt_softc *gdt) { int i; GDT_DPRINTF(GDT_D_INIT, ("iir_free()\n")); switch (gdt->sc_init_level) { default: gdt_destroy_dev(gdt->sc_dev); case 5: for (i = GDT_MAXCMDS-1; i >= 0; i--) if (gdt->sc_gccbs[i].gc_map_flag) bus_dmamap_destroy(gdt->sc_buffer_dmat, gdt->sc_gccbs[i].gc_dmamap); bus_dmamap_unload(gdt->sc_gcscratch_dmat, gdt->sc_gcscratch_dmamap); free(gdt->sc_gccbs, M_GDTBUF); case 4: bus_dmamem_free(gdt->sc_gcscratch_dmat, gdt->sc_gcscratch, gdt->sc_gcscratch_dmamap); case 3: bus_dma_tag_destroy(gdt->sc_gcscratch_dmat); case 2: bus_dma_tag_destroy(gdt->sc_buffer_dmat); case 1: bus_dma_tag_destroy(gdt->sc_parent_dmat); case 0: break; } TAILQ_REMOVE(&gdt_softcs, gdt, links); } void iir_attach(struct gdt_softc *gdt) { struct cam_devq *devq; int i; GDT_DPRINTF(GDT_D_INIT, ("iir_attach()\n")); /* * Create the device queue for our SIM. * XXX Throttle this down since the card has problems under load. */ devq = cam_simq_alloc(32); if (devq == NULL) return; for (i = 0; i < gdt->sc_bus_cnt; i++) { /* * Construct our SIM entry */ gdt->sims[i] = cam_sim_alloc(iir_action, iir_poll, "iir", gdt, gdt->sc_hanum, &Giant, /*untagged*/1, /*tagged*/GDT_MAXCMDS, devq); if (xpt_bus_register(gdt->sims[i], gdt->sc_devnode, i) != CAM_SUCCESS) { cam_sim_free(gdt->sims[i], /*free_devq*/i == 0); break; } if (xpt_create_path(&gdt->paths[i], /*periph*/NULL, cam_sim_path(gdt->sims[i]), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(gdt->sims[i])); cam_sim_free(gdt->sims[i], /*free_devq*/i == 0); break; } } if (i > 0) EVENTHANDLER_REGISTER(shutdown_final, iir_shutdown, gdt, SHUTDOWN_PRI_DEFAULT); /* iir_watchdog(gdt); */ gdt->sc_state = GDT_NORMAL; } static void gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs) { *cyls = size / GDT_HEADS / GDT_SECS; if (*cyls < GDT_MAXCYLS) { *heads = GDT_HEADS; *secs = GDT_SECS; } else { /* Too high for 64 * 32 */ *cyls = size / GDT_MEDHEADS / GDT_MEDSECS; if (*cyls < GDT_MAXCYLS) { *heads = GDT_MEDHEADS; *secs = GDT_MEDSECS; } else { /* Too high for 127 * 63 */ *cyls = size / GDT_BIGHEADS / GDT_BIGSECS; *heads = GDT_BIGHEADS; *secs = GDT_BIGSECS; } } } static int gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *gccb, int timeout) { int rv = 0; GDT_DPRINTF(GDT_D_INIT, ("gdt_wait(%p, %p, %d)\n", gdt, gccb, timeout)); gdt->sc_state |= GDT_POLL_WAIT; do { iir_intr(gdt); if (gdt == gdt_wait_gdt && gccb->gc_cmd_index == gdt_wait_index) { rv = 1; break; } DELAY(1); } while (--timeout); gdt->sc_state &= ~GDT_POLL_WAIT; while (gdt->sc_test_busy(gdt)) DELAY(1); /* XXX correct? 
*/ return (rv); } static int gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb, u_int8_t service, u_int16_t opcode, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { int retries; GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d)\n", gdt, service, opcode, arg1, arg2, arg3)); bzero(gccb->gc_cmd, GDT_CMD_SZ); for (retries = GDT_RETRIES; ; ) { gccb->gc_service = service; gccb->gc_flags = GDT_GCF_INTERNAL; gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX, gccb->gc_cmd_index); gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, opcode); switch (service) { case GDT_CACHESERVICE: if (opcode == GDT_IOCTL) { gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_SUBFUNC, arg1); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_CHANNEL, arg2); gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM, gccb->gc_scratch_busbase); } else { gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO, (u_int16_t)arg1); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO, arg2); } break; case GDT_SCSIRAWSERVICE: gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION, arg1); gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] = (u_int8_t)arg2; gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] = (u_int8_t)arg3; gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] = (u_int8_t)(arg3 >> 8); } gdt->sc_set_sema0(gdt); gccb->gc_cmd_len = GDT_CMD_SZ; gdt->sc_cmd_off = 0; gdt->sc_cmd_cnt = 0; gdt->sc_copy_cmd(gdt, gccb); gdt->sc_release_event(gdt); DELAY(20); if (!gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT)) return (0); if (gdt->sc_status != GDT_S_BSY || --retries == 0) break; DELAY(1); } return (gdt->sc_status == GDT_S_OK); } static struct gdt_ccb * gdt_get_ccb(struct gdt_softc *gdt) { struct gdt_ccb *gccb; int lock; GDT_DPRINTF(GDT_D_QUEUE, ("gdt_get_ccb(%p)\n", gdt)); lock = splcam(); gccb = SLIST_FIRST(&gdt->sc_free_gccb); if (gccb != NULL) { SLIST_REMOVE_HEAD(&gdt->sc_free_gccb, sle); SLIST_INSERT_HEAD(&gdt->sc_pending_gccb, gccb, sle); ++gdt_stat.cmd_index_act; if (gdt_stat.cmd_index_act > gdt_stat.cmd_index_max) gdt_stat.cmd_index_max = gdt_stat.cmd_index_act; } splx(lock); return (gccb); } void gdt_free_ccb(struct gdt_softc *gdt, struct gdt_ccb *gccb) { int lock; GDT_DPRINTF(GDT_D_QUEUE, ("gdt_free_ccb(%p, %p)\n", gdt, gccb)); lock = splcam(); gccb->gc_flags = GDT_GCF_UNUSED; SLIST_REMOVE(&gdt->sc_pending_gccb, gccb, gdt_ccb, sle); SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle); --gdt_stat.cmd_index_act; splx(lock); if (gdt->sc_state & GDT_SHUTDOWN) wakeup(gccb); } void gdt_next(struct gdt_softc *gdt) { int lock; union ccb *ccb; gdt_ucmd_t *ucmd; struct cam_sim *sim; int bus, target, lun; int next_cmd; struct ccb_scsiio *csio; struct ccb_hdr *ccbh; struct gdt_ccb *gccb = NULL; u_int8_t cmd; GDT_DPRINTF(GDT_D_QUEUE, ("gdt_next(%p)\n", gdt)); lock = splcam(); if (gdt->sc_test_busy(gdt)) { if (!(gdt->sc_state & GDT_POLLING)) { splx(lock); return; } while (gdt->sc_test_busy(gdt)) DELAY(1); } gdt->sc_cmd_cnt = gdt->sc_cmd_off = 0; next_cmd = TRUE; for (;;) { /* I/Os in queue? controller ready? */ if (!TAILQ_FIRST(&gdt->sc_ucmd_queue) && !TAILQ_FIRST(&gdt->sc_ccb_queue)) break; /* 1.: I/Os without ccb (IOCTLs) */ ucmd = TAILQ_FIRST(&gdt->sc_ucmd_queue); if (ucmd != NULL) { TAILQ_REMOVE(&gdt->sc_ucmd_queue, ucmd, links); if ((gccb = gdt_ioctl_cmd(gdt, ucmd, &lock)) == NULL) { TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links); break; } break; /* wenn mehrere Kdos. 
zulassen: if (!gdt_polling) continue; */ } /* 2.: I/Os with ccb */ ccb = (union ccb *)TAILQ_FIRST(&gdt->sc_ccb_queue); /* ist dann immer != NULL, da oben getestet */ sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr; bus = cam_sim_bus(sim); target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; TAILQ_REMOVE(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe); --gdt_stat.req_queue_act; /* ccb->ccb_h.func_code is XPT_SCSI_IO */ GDT_DPRINTF(GDT_D_QUEUE, ("XPT_SCSI_IO flags 0x%x)\n", ccb->ccb_h.flags)); csio = &ccb->csio; ccbh = &ccb->ccb_h; cmd = csio->cdb_io.cdb_bytes[0]; /* Max CDB length is 12 bytes */ if (csio->cdb_len > 12) { ccbh->status = CAM_REQ_INVALID; --gdt_stat.io_count_act; xpt_done(ccb); } else if (bus != gdt->sc_virt_bus) { /* raw service command */ if ((gccb = gdt_raw_cmd(gdt, ccb, &lock)) == NULL) { TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe); ++gdt_stat.req_queue_act; if (gdt_stat.req_queue_act > gdt_stat.req_queue_max) gdt_stat.req_queue_max = gdt_stat.req_queue_act; next_cmd = FALSE; } } else if (target >= GDT_MAX_HDRIVES || !gdt->sc_hdr[target].hd_present || lun != 0) { ccbh->status = CAM_DEV_NOT_THERE; --gdt_stat.io_count_act; xpt_done(ccb); } else { /* cache service command */ if (cmd == READ_6 || cmd == WRITE_6 || cmd == READ_10 || cmd == WRITE_10) { if ((gccb = gdt_cache_cmd(gdt, ccb, &lock)) == NULL) { TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe); ++gdt_stat.req_queue_act; if (gdt_stat.req_queue_act > gdt_stat.req_queue_max) gdt_stat.req_queue_max = gdt_stat.req_queue_act; next_cmd = FALSE; } } else { splx(lock); gdt_internal_cache_cmd(gdt, ccb); lock = splcam(); } } if ((gdt->sc_state & GDT_POLLING) || !next_cmd) break; } if (gdt->sc_cmd_cnt > 0) gdt->sc_release_event(gdt); splx(lock); if ((gdt->sc_state & GDT_POLLING) && gdt->sc_cmd_cnt > 0) { gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT); } } static struct gdt_ccb * gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock) { struct gdt_ccb *gccb; struct cam_sim *sim; int error; GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_cmd(%p, %p)\n", gdt, ccb)); if (roundup(GDT_CMD_UNION + GDT_RAW_SZ, sizeof(u_int32_t)) + gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET > gdt->sc_ic_all_size) { GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_raw_cmd(): DPMEM overflow\n", gdt->sc_hanum)); return (NULL); } gccb = gdt_get_ccb(gdt); if (gccb == NULL) { GDT_DPRINTF(GDT_D_INVALID, ("iir%d: No free command index found\n", gdt->sc_hanum)); return (gccb); } bzero(gccb->gc_cmd, GDT_CMD_SZ); sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr; gccb->gc_ccb = ccb; gccb->gc_service = GDT_SCSIRAWSERVICE; gccb->gc_flags = GDT_GCF_SCSI; if (gdt->sc_cmd_cnt == 0) gdt->sc_set_sema0(gdt); splx(*lock); gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX, gccb->gc_cmd_index); gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_WRITE); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION, (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ? 
GDT_DATA_IN : GDT_DATA_OUT); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN, ccb->csio.dxfer_len); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN, ccb->csio.cdb_len); bcopy(ccb->csio.cdb_io.cdb_bytes, gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CMD, ccb->csio.cdb_len); gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] = ccb->ccb_h.target_id; gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] = ccb->ccb_h.target_lun; gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] = cam_sim_bus(sim); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN, sizeof(struct scsi_sense_data)); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA, gccb->gc_scratch_busbase); error = bus_dmamap_load_ccb(gdt->sc_buffer_dmat, gccb->gc_dmamap, ccb, gdtexecuteccb, gccb, /*flags*/0); if (error == EINPROGRESS) { xpt_freeze_simq(sim, 1); gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } *lock = splcam(); return (gccb); } static struct gdt_ccb * gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock) { struct gdt_ccb *gccb; struct cam_sim *sim; u_int8_t *cmdp; u_int16_t opcode; u_int32_t blockno, blockcnt; + int error; GDT_DPRINTF(GDT_D_CMD, ("gdt_cache_cmd(%p, %p)\n", gdt, ccb)); if (roundup(GDT_CMD_UNION + GDT_CACHE_SZ, sizeof(u_int32_t)) + gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET > gdt->sc_ic_all_size) { GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_cache_cmd(): DPMEM overflow\n", gdt->sc_hanum)); return (NULL); } gccb = gdt_get_ccb(gdt); if (gccb == NULL) { GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n", gdt->sc_hanum)); return (gccb); } bzero(gccb->gc_cmd, GDT_CMD_SZ); sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr; gccb->gc_ccb = ccb; gccb->gc_service = GDT_CACHESERVICE; gccb->gc_flags = GDT_GCF_SCSI; if (gdt->sc_cmd_cnt == 0) gdt->sc_set_sema0(gdt); splx(*lock); gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX, gccb->gc_cmd_index); cmdp = ccb->csio.cdb_io.cdb_bytes; opcode = (*cmdp == WRITE_6 || *cmdp == WRITE_10) ? GDT_WRITE : GDT_READ; if ((gdt->sc_state & GDT_SHUTDOWN) && opcode == GDT_WRITE) opcode = GDT_WRITE_THR; gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, opcode); gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO, ccb->ccb_h.target_id); if (ccb->csio.cdb_len == 6) { struct scsi_rw_6 *rw = (struct scsi_rw_6 *)cmdp; blockno = scsi_3btoul(rw->addr) & ((SRW_TOPADDR<<16) | 0xffff); blockcnt = rw->length ? rw->length : 0x100; } else { struct scsi_rw_10 *rw = (struct scsi_rw_10 *)cmdp; blockno = scsi_4btoul(rw->addr); blockcnt = scsi_2btoul(rw->length); } gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO, blockno); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT, blockcnt); - /* - * If we have any data to send with this command, - * map it into bus space. - */ - /* Only use S/G if there is a transfer */ - if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { - if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { - int s; - int error; - - /* vorher unlock von splcam() ??? 
*/ - s = splsoftvm(); - error = - bus_dmamap_load(gdt->sc_buffer_dmat, + error = bus_dmamap_load_ccb(gdt->sc_buffer_dmat, gccb->gc_dmamap, - ccb->csio.data_ptr, - ccb->csio.dxfer_len, + ccb, gdtexecuteccb, gccb, /*flags*/0); - if (error == EINPROGRESS) { - xpt_freeze_simq(sim, 1); - gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ; - } - splx(s); - } else { - panic("iir: CAM_DATA_PHYS not supported"); - } - } else { - struct bus_dma_segment *segs; - - if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) - panic("iir%d: iir_action - Physical " - "segment pointers unsupported", gdt->sc_hanum); - - if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0) - panic("iir%d: iir_action - Virtual " - "segment addresses unsupported", gdt->sc_hanum); - - /* Just use the segments provided */ - segs = (struct bus_dma_segment *)ccb->csio.data_ptr; - gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0); + if (error == EINPROGRESS) { + xpt_freeze_simq(sim, 1); + gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } - *lock = splcam(); return (gccb); } static struct gdt_ccb * gdt_ioctl_cmd(struct gdt_softc *gdt, gdt_ucmd_t *ucmd, int *lock) { struct gdt_ccb *gccb; u_int32_t cnt; GDT_DPRINTF(GDT_D_DEBUG, ("gdt_ioctl_cmd(%p, %p)\n", gdt, ucmd)); gccb = gdt_get_ccb(gdt); if (gccb == NULL) { GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n", gdt->sc_hanum)); return (gccb); } bzero(gccb->gc_cmd, GDT_CMD_SZ); gccb->gc_ucmd = ucmd; gccb->gc_service = ucmd->service; gccb->gc_flags = GDT_GCF_IOCTL; /* check DPMEM space, copy data buffer from user space */ if (ucmd->service == GDT_CACHESERVICE) { if (ucmd->OpCode == GDT_IOCTL) { gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_IOCTL_SZ, sizeof(u_int32_t)); cnt = ucmd->u.ioctl.param_size; if (cnt > GDT_SCRATCH_SZ) { printf("iir%d: Scratch buffer too small (%d/%d)\n", gdt->sc_hanum, GDT_SCRATCH_SZ, cnt); gdt_free_ccb(gdt, gccb); return (NULL); } } else { gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST + GDT_SG_SZ, sizeof(u_int32_t)); cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE; if (cnt > GDT_SCRATCH_SZ) { printf("iir%d: Scratch buffer too small (%d/%d)\n", gdt->sc_hanum, GDT_SCRATCH_SZ, cnt); gdt_free_ccb(gdt, gccb); return (NULL); } } } else { gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST + GDT_SG_SZ, sizeof(u_int32_t)); cnt = ucmd->u.raw.sdlen; if (cnt + ucmd->u.raw.sense_len > GDT_SCRATCH_SZ) { printf("iir%d: Scratch buffer too small (%d/%d)\n", gdt->sc_hanum, GDT_SCRATCH_SZ, cnt + ucmd->u.raw.sense_len); gdt_free_ccb(gdt, gccb); return (NULL); } } if (cnt != 0) bcopy(ucmd->data, gccb->gc_scratch, cnt); if (gdt->sc_cmd_off + gccb->gc_cmd_len + GDT_DPMEM_COMMAND_OFFSET > gdt->sc_ic_all_size) { GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_ioctl_cmd(): DPMEM overflow\n", gdt->sc_hanum)); gdt_free_ccb(gdt, gccb); return (NULL); } if (gdt->sc_cmd_cnt == 0) gdt->sc_set_sema0(gdt); splx(*lock); /* fill cmd structure */ gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX, gccb->gc_cmd_index); gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, ucmd->OpCode); if (ucmd->service == GDT_CACHESERVICE) { if (ucmd->OpCode == GDT_IOCTL) { /* IOCTL */ gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_PARAM_SIZE, ucmd->u.ioctl.param_size); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_SUBFUNC, ucmd->u.ioctl.subfunc); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_CHANNEL, ucmd->u.ioctl.channel); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM, gccb->gc_scratch_busbase); } else { /* cache service command */ gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO, 
ucmd->u.cache.DeviceNo); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO, ucmd->u.cache.BlockNo); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT, ucmd->u.cache.BlockCnt); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR, 0xffffffffUL); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ, 1); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST + GDT_SG_PTR, gccb->gc_scratch_busbase); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST + GDT_SG_LEN, ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE); } } else { /* raw service command */ gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION, ucmd->u.raw.direction); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA, 0xffffffffUL); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN, ucmd->u.raw.sdlen); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN, ucmd->u.raw.clen); bcopy(ucmd->u.raw.cmd, gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CMD, 12); gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] = ucmd->u.raw.target; gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] = ucmd->u.raw.lun; gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] = ucmd->u.raw.bus; gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN, ucmd->u.raw.sense_len); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA, gccb->gc_scratch_busbase + ucmd->u.raw.sdlen); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ, 1); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST + GDT_SG_PTR, gccb->gc_scratch_busbase); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST + GDT_SG_LEN, ucmd->u.raw.sdlen); } *lock = splcam(); gdt_stat.sg_count_act = 1; gdt->sc_copy_cmd(gdt, gccb); return (gccb); } static void gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb) { int t; t = ccb->ccb_h.target_id; GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd(%p, %p, 0x%x, %d)\n", gdt, ccb, ccb->csio.cdb_io.cdb_bytes[0], t)); switch (ccb->csio.cdb_io.cdb_bytes[0]) { case TEST_UNIT_READY: case START_STOP: break; case REQUEST_SENSE: GDT_DPRINTF(GDT_D_MISC, ("REQUEST_SENSE\n")); break; case INQUIRY: { struct scsi_inquiry_data inq; size_t copylen = MIN(sizeof(inq), ccb->csio.dxfer_len); bzero(&inq, sizeof(inq)); inq.device = (gdt->sc_hdr[t].hd_devtype & 4) ? T_CDROM : T_DIRECT; inq.dev_qual2 = (gdt->sc_hdr[t].hd_devtype & 1) ? 0x80 : 0; inq.version = SCSI_REV_2; inq.response_format = 2; inq.additional_length = 32; inq.flags = SID_CmdQue | SID_Sync; strncpy(inq.vendor, gdt->oem_name, sizeof(inq.vendor)); snprintf(inq.product, sizeof(inq.product), "Host Drive #%02d", t); strncpy(inq.revision, " ", sizeof(inq.revision)); bcopy(&inq, ccb->csio.data_ptr, copylen ); if( ccb->csio.dxfer_len > copylen ) bzero( ccb->csio.data_ptr+copylen, ccb->csio.dxfer_len - copylen ); break; } case MODE_SENSE_6: { struct mpd_data { struct scsi_mode_hdr_6 hd; struct scsi_mode_block_descr bd; struct scsi_control_page cp; } mpd; size_t copylen = MIN(sizeof(mpd), ccb->csio.dxfer_len); u_int8_t page; /*mpd = (struct mpd_data *)ccb->csio.data_ptr;*/ bzero(&mpd, sizeof(mpd)); mpd.hd.datalen = sizeof(struct scsi_mode_hdr_6) + sizeof(struct scsi_mode_block_descr); mpd.hd.dev_specific = (gdt->sc_hdr[t].hd_devtype & 2) ? 
0x80 : 0; mpd.hd.block_descr_len = sizeof(struct scsi_mode_block_descr); mpd.bd.block_len[0] = (GDT_SECTOR_SIZE & 0x00ff0000) >> 16; mpd.bd.block_len[1] = (GDT_SECTOR_SIZE & 0x0000ff00) >> 8; mpd.bd.block_len[2] = (GDT_SECTOR_SIZE & 0x000000ff); bcopy(&mpd, ccb->csio.data_ptr, copylen ); if( ccb->csio.dxfer_len > copylen ) bzero( ccb->csio.data_ptr+copylen, ccb->csio.dxfer_len - copylen ); page=((struct scsi_mode_sense_6 *)ccb->csio.cdb_io.cdb_bytes)->page; switch (page) { default: GDT_DPRINTF(GDT_D_MISC, ("MODE_SENSE_6: page 0x%x\n", page)); break; } break; } case READ_CAPACITY: { struct scsi_read_capacity_data rcd; size_t copylen = MIN(sizeof(rcd), ccb->csio.dxfer_len); /*rcd = (struct scsi_read_capacity_data *)ccb->csio.data_ptr;*/ bzero(&rcd, sizeof(rcd)); scsi_ulto4b(gdt->sc_hdr[t].hd_size - 1, rcd.addr); scsi_ulto4b(GDT_SECTOR_SIZE, rcd.length); bcopy(&rcd, ccb->csio.data_ptr, copylen ); if( ccb->csio.dxfer_len > copylen ) bzero( ccb->csio.data_ptr+copylen, ccb->csio.dxfer_len - copylen ); break; } default: GDT_DPRINTF(GDT_D_MISC, ("gdt_internal_cache_cmd(%d) unknown\n", ccb->csio.cdb_io.cdb_bytes[0])); break; } ccb->ccb_h.status |= CAM_REQ_CMP; --gdt_stat.io_count_act; xpt_done(ccb); } static void gdtmapmem(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { bus_addr_t *busaddrp; busaddrp = (bus_addr_t *)arg; *busaddrp = dm_segs->ds_addr; } static void gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct gdt_ccb *gccb; union ccb *ccb; struct gdt_softc *gdt; int i, lock; lock = splcam(); gccb = (struct gdt_ccb *)arg; ccb = gccb->gc_ccb; gdt = cam_sim_softc((struct cam_sim *)ccb->ccb_h.ccb_sim_ptr); GDT_DPRINTF(GDT_D_CMD, ("gdtexecuteccb(%p, %p, %p, %d, %d)\n", gdt, gccb, dm_segs, nseg, error)); gdt_stat.sg_count_act = nseg; if (nseg > gdt_stat.sg_count_max) gdt_stat.sg_count_max = nseg; /* Copy the segments into our SG list */ if (gccb->gc_service == GDT_CACHESERVICE) { for (i = 0; i < nseg; ++i) { gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST + i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST + i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len); dm_segs++; } gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ, nseg); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR, 0xffffffffUL); gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST + nseg * GDT_SG_SZ, sizeof(u_int32_t)); } else { for (i = 0; i < nseg; ++i) { gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST + i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST + i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len); dm_segs++; } gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ, nseg); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA, 0xffffffffUL); gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST + nseg * GDT_SG_SZ, sizeof(u_int32_t)); } if (nseg != 0) { bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap, (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); } /* We must NOT abort the command here if CAM_REQ_INPROG is not set, * because command semaphore is already set! 
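Both drivers in this change converge on the pattern visible in hpt_io_dmamap_callback() and gdtexecuteccb(): hand the whole CCB to bus_dmamap_load_ccb() and build the controller's S/G list from whatever segments busdma returns, instead of decoding CAM_SCATTER_VALID/CAM_DATA_PHYS by hand. A hedged, self-contained sketch of that shape; the example_* types are hypothetical and the real drivers use their own S/G formats:

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>

struct example_sg {
	uint64_t	addr;
	uint32_t	len;
};

struct example_req {
	struct example_sg sg[32];	/* controller-specific S/G layout */
	int		  nsg;
	int		  error;
	union ccb	 *ccb;
};

static void
example_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct example_req *req = arg;
	int i;

	req->error = error;
	if (error != 0)
		return;
	for (i = 0; i < nseg && i < 32; i++) {
		req->sg[i].addr = segs[i].ds_addr;
		req->sg[i].len = segs[i].ds_len;
	}
	req->nsg = nseg;
	/* ...sync the map and hand the request to the controller here... */
}

static int
example_start_io(bus_dma_tag_t tag, bus_dmamap_t map, struct example_req *req)
{
	/* busdma inspects ccb_h.flags and maps the data however the CCB
	 * describes it, so the driver no longer special-cases the data
	 * representation itself. */
	return (bus_dmamap_load_ccb(tag, map, req->ccb, example_load_cb,
	    req, BUS_DMA_NOWAIT));
}

The payoff is visible in the removed hunks above: the per-driver virtual/physical/S/G branches disappear and only the segment-to-hardware translation remains in the callback.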
*/ ccb->ccb_h.status |= CAM_SIM_QUEUED; /* timeout handling */ ccb->ccb_h.timeout_ch = timeout(iir_timeout, (caddr_t)gccb, (ccb->ccb_h.timeout * hz) / 1000); gdt->sc_copy_cmd(gdt, gccb); splx(lock); } static void iir_action( struct cam_sim *sim, union ccb *ccb ) { struct gdt_softc *gdt; int lock, bus, target, lun; gdt = (struct gdt_softc *)cam_sim_softc( sim ); ccb->ccb_h.ccb_sim_ptr = sim; bus = cam_sim_bus(sim); target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; GDT_DPRINTF(GDT_D_CMD, ("iir_action(%p) func 0x%x cmd 0x%x bus %d target %d lun %d\n", gdt, ccb->ccb_h.func_code, ccb->csio.cdb_io.cdb_bytes[0], bus, target, lun)); ++gdt_stat.io_count_act; if (gdt_stat.io_count_act > gdt_stat.io_count_max) gdt_stat.io_count_max = gdt_stat.io_count_act; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: lock = splcam(); TAILQ_INSERT_TAIL(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe); ++gdt_stat.req_queue_act; if (gdt_stat.req_queue_act > gdt_stat.req_queue_max) gdt_stat.req_queue_max = gdt_stat.req_queue_act; splx(lock); gdt_next(gdt); break; case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; --gdt_stat.io_count_act; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; --gdt_stat.io_count_act; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; if (cts->type == CTS_TYPE_USER_SETTINGS) { spi->flags = CTS_SPI_FLAGS_DISC_ENB; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; spi->sync_period = 25; /* 10MHz */ if (spi->sync_period != 0) spi->sync_offset = 15; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; } else { ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; } --gdt_stat.io_count_act; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; u_int32_t secs_per_cylinder; ccg = &ccb->ccg; ccg->heads = gdt->sc_hdr[target].hd_heads; ccg->secs_per_track = gdt->sc_hdr[target].hd_secs; secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; --gdt_stat.io_count_act; xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_CMP; --gdt_stat.io_count_act; xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; --gdt_stat.io_count_act; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; cpi->hba_inquiry |= PI_WIDE_16; cpi->target_sprt = 1; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; if (bus == gdt->sc_virt_bus) cpi->max_target = GDT_MAX_HDRIVES - 1; else if (gdt->sc_class & GDT_FC) cpi->max_target = GDT_MAXID_FC - 1; else cpi->max_target = GDT_MAXID - 1; cpi->max_lun = 7; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = bus; cpi->initiator_id = (bus == gdt->sc_virt_bus ? 
127 : gdt->sc_bus_id[bus]); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); if (gdt->sc_vendor == INTEL_VENDOR_ID) strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN); else strncpy(cpi->hba_vid, "ICP vortex ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; --gdt_stat.io_count_act; xpt_done(ccb); break; } default: GDT_DPRINTF(GDT_D_INVALID, ("gdt_next(%p) cmd 0x%x invalid\n", gdt, ccb->ccb_h.func_code)); ccb->ccb_h.status = CAM_REQ_INVALID; --gdt_stat.io_count_act; xpt_done(ccb); break; } } static void iir_poll( struct cam_sim *sim ) { struct gdt_softc *gdt; gdt = (struct gdt_softc *)cam_sim_softc( sim ); GDT_DPRINTF(GDT_D_CMD, ("iir_poll sim %p gdt %p\n", sim, gdt)); iir_intr(gdt); } static void iir_timeout(void *arg) { GDT_DPRINTF(GDT_D_TIMEOUT, ("iir_timeout(%p)\n", gccb)); } static void iir_shutdown( void *arg, int howto ) { struct gdt_softc *gdt; struct gdt_ccb *gccb; gdt_ucmd_t *ucmd; int lock, i; gdt = (struct gdt_softc *)arg; GDT_DPRINTF(GDT_D_CMD, ("iir_shutdown(%p, %d)\n", gdt, howto)); printf("iir%d: Flushing all Host Drives. Please wait ... ", gdt->sc_hanum); /* allocate ucmd buffer */ ucmd = malloc(sizeof(gdt_ucmd_t), M_GDTBUF, M_NOWAIT); if (ucmd == NULL) { printf("iir%d: iir_shutdown(): Cannot allocate resource\n", gdt->sc_hanum); return; } bzero(ucmd, sizeof(gdt_ucmd_t)); /* wait for pending IOs */ lock = splcam(); gdt->sc_state = GDT_SHUTDOWN; splx(lock); if ((gccb = SLIST_FIRST(&gdt->sc_pending_gccb)) != NULL) (void) tsleep((void *)gccb, PCATCH | PRIBIO, "iirshw", 100 * hz); /* flush */ for (i = 0; i < GDT_MAX_HDRIVES; ++i) { if (gdt->sc_hdr[i].hd_present) { ucmd->service = GDT_CACHESERVICE; ucmd->OpCode = GDT_FLUSH; ucmd->u.cache.DeviceNo = i; lock = splcam(); TAILQ_INSERT_TAIL(&gdt->sc_ucmd_queue, ucmd, links); ucmd->complete_flag = FALSE; splx(lock); gdt_next(gdt); if (!ucmd->complete_flag) (void) tsleep((void *)ucmd, PCATCH|PRIBIO, "iirshw", 10*hz); } } free(ucmd, M_DEVBUF); printf("Done.\n"); } void iir_intr(void *arg) { struct gdt_softc *gdt = arg; struct gdt_intr_ctx ctx; int lock = 0; struct gdt_ccb *gccb; gdt_ucmd_t *ucmd; u_int32_t cnt; GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p)\n", gdt)); /* If polling and we were not called from gdt_wait, just return */ if ((gdt->sc_state & GDT_POLLING) && !(gdt->sc_state & GDT_POLL_WAIT)) return; if (!(gdt->sc_state & GDT_POLLING)) lock = splcam(); gdt_wait_index = 0; ctx.istatus = gdt->sc_get_status(gdt); if (ctx.istatus == 0x00) { if (!(gdt->sc_state & GDT_POLLING)) splx(lock); gdt->sc_status = GDT_S_NO_STATUS; return; } gdt->sc_intr(gdt, &ctx); gdt->sc_status = ctx.cmd_status; gdt->sc_service = ctx.service; gdt->sc_info = ctx.info; gdt->sc_info2 = ctx.info2; if (gdt->sc_state & GDT_POLL_WAIT) { gdt_wait_gdt = gdt; gdt_wait_index = ctx.istatus; } if (ctx.istatus == GDT_ASYNCINDEX) { gdt_async_event(gdt, ctx.service); if (!(gdt->sc_state & GDT_POLLING)) splx(lock); return; } if (ctx.istatus == GDT_SPEZINDEX) { GDT_DPRINTF(GDT_D_INVALID, ("iir%d: Service unknown or not initialized!\n", gdt->sc_hanum)); gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver); gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum; gdt_store_event(GDT_ES_DRIVER, 4, &gdt->sc_dvr); if (!(gdt->sc_state & GDT_POLLING)) splx(lock); return; } gccb = &gdt->sc_gccbs[ctx.istatus - 2]; ctx.service = gccb->gc_service; switch (gccb->gc_flags) { case GDT_GCF_UNUSED: 
GDT_DPRINTF(GDT_D_INVALID, ("iir%d: Index (%d) to unused command!\n", gdt->sc_hanum, ctx.istatus)); gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver); gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum; gdt->sc_dvr.eu.driver.index = ctx.istatus; gdt_store_event(GDT_ES_DRIVER, 1, &gdt->sc_dvr); gdt_free_ccb(gdt, gccb); /* fallthrough */ case GDT_GCF_INTERNAL: if (!(gdt->sc_state & GDT_POLLING)) splx(lock); break; case GDT_GCF_IOCTL: ucmd = gccb->gc_ucmd; if (gdt->sc_status == GDT_S_BSY) { GDT_DPRINTF(GDT_D_DEBUG, ("iir_intr(%p) ioctl: gccb %p busy\n", gdt, gccb)); TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links); if (!(gdt->sc_state & GDT_POLLING)) splx(lock); } else { ucmd->status = gdt->sc_status; ucmd->info = gdt->sc_info; ucmd->complete_flag = TRUE; if (ucmd->service == GDT_CACHESERVICE) { if (ucmd->OpCode == GDT_IOCTL) { cnt = ucmd->u.ioctl.param_size; if (cnt != 0) bcopy(gccb->gc_scratch, ucmd->data, cnt); } else { cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE; if (cnt != 0) bcopy(gccb->gc_scratch, ucmd->data, cnt); } } else { cnt = ucmd->u.raw.sdlen; if (cnt != 0) bcopy(gccb->gc_scratch, ucmd->data, cnt); if (ucmd->u.raw.sense_len != 0) bcopy(gccb->gc_scratch, ucmd->data, cnt); } gdt_free_ccb(gdt, gccb); if (!(gdt->sc_state & GDT_POLLING)) splx(lock); /* wakeup */ wakeup(ucmd); } gdt_next(gdt); break; default: gdt_free_ccb(gdt, gccb); gdt_sync_event(gdt, ctx.service, ctx.istatus, gccb); if (!(gdt->sc_state & GDT_POLLING)) splx(lock); gdt_next(gdt); break; } } int gdt_async_event(struct gdt_softc *gdt, int service) { struct gdt_ccb *gccb; GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d)\n", gdt, service)); if (service == GDT_SCREENSERVICE) { if (gdt->sc_status == GDT_MSG_REQUEST) { while (gdt->sc_test_busy(gdt)) DELAY(1); gccb = gdt_get_ccb(gdt); if (gccb == NULL) { printf("iir%d: No free command index found\n", gdt->sc_hanum); return (1); } bzero(gccb->gc_cmd, GDT_CMD_SZ); gccb->gc_service = service; gccb->gc_flags = GDT_GCF_SCREEN; gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX, gccb->gc_cmd_index); gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_READ); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE, GDT_MSG_INV_HANDLE); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR, gccb->gc_scratch_busbase); gdt->sc_set_sema0(gdt); gdt->sc_cmd_off = 0; gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ, sizeof(u_int32_t)); gdt->sc_cmd_cnt = 0; gdt->sc_copy_cmd(gdt, gccb); printf("iir%d: [PCI %d/%d] ", gdt->sc_hanum,gdt->sc_bus,gdt->sc_slot); gdt->sc_release_event(gdt); } } else { if ((gdt->sc_fw_vers & 0xff) >= 0x1a) { gdt->sc_dvr.size = 0; gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum; gdt->sc_dvr.eu.async.status = gdt->sc_status; /* severity and event_string already set! 
*/ } else { gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.async); gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum; gdt->sc_dvr.eu.async.service = service; gdt->sc_dvr.eu.async.status = gdt->sc_status; gdt->sc_dvr.eu.async.info = gdt->sc_info; *(u_int32_t *)gdt->sc_dvr.eu.async.scsi_coord = gdt->sc_info2; } gdt_store_event(GDT_ES_ASYNC, service, &gdt->sc_dvr); printf("iir%d: %s\n", gdt->sc_hanum, gdt->sc_dvr.event_string); } return (0); } int gdt_sync_event(struct gdt_softc *gdt, int service, u_int8_t index, struct gdt_ccb *gccb) { union ccb *ccb; GDT_DPRINTF(GDT_D_INTR, ("gdt_sync_event(%p, %d, %d, %p)\n", gdt,service,index,gccb)); ccb = gccb->gc_ccb; if (service == GDT_SCREENSERVICE) { u_int32_t msg_len; msg_len = gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_LEN); if (msg_len) if (!(gccb->gc_scratch[GDT_SCR_MSG_ANSWER] && gccb->gc_scratch[GDT_SCR_MSG_EXT])) { gccb->gc_scratch[GDT_SCR_MSG_TEXT + msg_len] = '\0'; printf("%s",&gccb->gc_scratch[GDT_SCR_MSG_TEXT]); } if (gccb->gc_scratch[GDT_SCR_MSG_EXT] && !gccb->gc_scratch[GDT_SCR_MSG_ANSWER]) { while (gdt->sc_test_busy(gdt)) DELAY(1); bzero(gccb->gc_cmd, GDT_CMD_SZ); gccb = gdt_get_ccb(gdt); if (gccb == NULL) { printf("iir%d: No free command index found\n", gdt->sc_hanum); return (1); } gccb->gc_service = service; gccb->gc_flags = GDT_GCF_SCREEN; gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX, gccb->gc_cmd_index); gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_READ); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE, gccb->gc_scratch[GDT_SCR_MSG_HANDLE]); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR, gccb->gc_scratch_busbase); gdt->sc_set_sema0(gdt); gdt->sc_cmd_off = 0; gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ, sizeof(u_int32_t)); gdt->sc_cmd_cnt = 0; gdt->sc_copy_cmd(gdt, gccb); gdt->sc_release_event(gdt); return (0); } if (gccb->gc_scratch[GDT_SCR_MSG_ANSWER] && gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN)) { /* default answers (getchar() not possible) */ if (gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) == 1) { gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN, 0); gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 1); gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 0; } else { gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN, gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) - 2); gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 2); gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 1; gccb->gc_scratch[GDT_SCR_MSG_TEXT + 1] = 0; } gccb->gc_scratch[GDT_SCR_MSG_EXT] = 0; gccb->gc_scratch[GDT_SCR_MSG_ANSWER] = 0; while (gdt->sc_test_busy(gdt)) DELAY(1); bzero(gccb->gc_cmd, GDT_CMD_SZ); gccb = gdt_get_ccb(gdt); if (gccb == NULL) { printf("iir%d: No free command index found\n", gdt->sc_hanum); return (1); } gccb->gc_service = service; gccb->gc_flags = GDT_GCF_SCREEN; gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX, gccb->gc_cmd_index); gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_WRITE); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE, gccb->gc_scratch[GDT_SCR_MSG_HANDLE]); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR, gccb->gc_scratch_busbase); gdt->sc_set_sema0(gdt); gdt->sc_cmd_off = 0; gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ, sizeof(u_int32_t)); gdt->sc_cmd_cnt = 0; gdt->sc_copy_cmd(gdt, gccb); gdt->sc_release_event(gdt); return (0); } printf("\n"); return (0); } else { untimeout(iir_timeout, gccb, ccb->ccb_h.timeout_ch); if (gdt->sc_status == GDT_S_BSY) { GDT_DPRINTF(GDT_D_DEBUG, ("gdt_sync_event(%p) gccb %p busy\n", gdt, gccb)); TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe); 
++gdt_stat.req_queue_act; if (gdt_stat.req_queue_act > gdt_stat.req_queue_max) gdt_stat.req_queue_max = gdt_stat.req_queue_act; return (2); } bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap, (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(gdt->sc_buffer_dmat, gccb->gc_dmamap); ccb->csio.resid = 0; if (gdt->sc_status == GDT_S_OK) { ccb->ccb_h.status |= CAM_REQ_CMP; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; } else { /* error */ if (gccb->gc_service == GDT_CACHESERVICE) { struct scsi_sense_data *sense; ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; bzero(&ccb->csio.sense_data, ccb->csio.sense_len); sense = &ccb->csio.sense_data; scsi_set_sense_data(sense, /*sense_format*/ SSD_TYPE_NONE, /*current_error*/ 1, /*sense_key*/ SSD_KEY_NOT_READY, /*asc*/ 0x4, /*ascq*/ 0x01, SSD_ELEM_NONE); gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.sync); gdt->sc_dvr.eu.sync.ionode = gdt->sc_hanum; gdt->sc_dvr.eu.sync.service = service; gdt->sc_dvr.eu.sync.status = gdt->sc_status; gdt->sc_dvr.eu.sync.info = gdt->sc_info; gdt->sc_dvr.eu.sync.hostdrive = ccb->ccb_h.target_id; if (gdt->sc_status >= 0x8000) gdt_store_event(GDT_ES_SYNC, 0, &gdt->sc_dvr); else gdt_store_event(GDT_ES_SYNC, service, &gdt->sc_dvr); } else { /* raw service */ if (gdt->sc_status != GDT_S_RAW_SCSI || gdt->sc_info >= 0x100) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; } else { ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR|CAM_AUTOSNS_VALID; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->csio.scsi_status = gdt->sc_info; bcopy(gccb->gc_scratch, &ccb->csio.sense_data, ccb->csio.sense_len); } } } --gdt_stat.io_count_act; xpt_done(ccb); } return (0); } /* Controller event handling functions */ gdt_evt_str *gdt_store_event(u_int16_t source, u_int16_t idx, gdt_evt_data *evt) { gdt_evt_str *e; struct timeval tv; GDT_DPRINTF(GDT_D_MISC, ("gdt_store_event(%d, %d)\n", source, idx)); if (source == 0) /* no source -> no event */ return 0; if (ebuffer[elastidx].event_source == source && ebuffer[elastidx].event_idx == idx && ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 && !memcmp((char *)&ebuffer[elastidx].event_data.eu, (char *)&evt->eu, evt->size)) || (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 && !strcmp((char *)&ebuffer[elastidx].event_data.event_string, (char *)&evt->event_string)))) { e = &ebuffer[elastidx]; getmicrotime(&tv); e->last_stamp = tv.tv_sec; ++e->same_count; } else { if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */ ++elastidx; if (elastidx == GDT_MAX_EVENTS) elastidx = 0; if (elastidx == eoldidx) { /* reached mark ? 
*/ ++eoldidx; if (eoldidx == GDT_MAX_EVENTS) eoldidx = 0; } } e = &ebuffer[elastidx]; e->event_source = source; e->event_idx = idx; getmicrotime(&tv); e->first_stamp = e->last_stamp = tv.tv_sec; e->same_count = 1; e->event_data = *evt; e->application = 0; } return e; } int gdt_read_event(int handle, gdt_evt_str *estr) { gdt_evt_str *e; int eindex, lock; GDT_DPRINTF(GDT_D_MISC, ("gdt_read_event(%d)\n", handle)); lock = splcam(); if (handle == -1) eindex = eoldidx; else eindex = handle; estr->event_source = 0; if (eindex >= GDT_MAX_EVENTS) { splx(lock); return eindex; } e = &ebuffer[eindex]; if (e->event_source != 0) { if (eindex != elastidx) { if (++eindex == GDT_MAX_EVENTS) eindex = 0; } else { eindex = -1; } memcpy(estr, e, sizeof(gdt_evt_str)); } splx(lock); return eindex; } void gdt_readapp_event(u_int8_t application, gdt_evt_str *estr) { gdt_evt_str *e; int found = FALSE; int eindex, lock; GDT_DPRINTF(GDT_D_MISC, ("gdt_readapp_event(%d)\n", application)); lock = splcam(); eindex = eoldidx; for (;;) { e = &ebuffer[eindex]; if (e->event_source == 0) break; if ((e->application & application) == 0) { e->application |= application; found = TRUE; break; } if (eindex == elastidx) break; if (++eindex == GDT_MAX_EVENTS) eindex = 0; } if (found) memcpy(estr, e, sizeof(gdt_evt_str)); else estr->event_source = 0; splx(lock); } void gdt_clear_events() { GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events\n")); eoldidx = elastidx = 0; ebuffer[0].event_source = 0; } Index: projects/physbio/sys/dev/isci/isci_io_request.c =================================================================== --- projects/physbio/sys/dev/isci/isci_io_request.c (revision 243875) +++ projects/physbio/sys/dev/isci/isci_io_request.c (revision 243876) @@ -1,973 +1,969 @@ /*- * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include /** * @brief This user callback will inform the user that an IO request has * completed. * * @param[in] controller This parameter specifies the controller on * which the IO request is completing. 
* @param[in] remote_device This parameter specifies the remote device on * which this request is completing. * @param[in] io_request This parameter specifies the IO request that has * completed. * @param[in] completion_status This parameter specifies the results of * the IO request operation. SCI_IO_SUCCESS indicates * successful completion. * * @return none */ void scif_cb_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller, SCI_REMOTE_DEVICE_HANDLE_T remote_device, SCI_IO_REQUEST_HANDLE_T io_request, SCI_IO_STATUS completion_status) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)sci_object_get_association(io_request); scif_controller_complete_io(scif_controller, remote_device, io_request); isci_io_request_complete(scif_controller, remote_device, isci_request, completion_status); } void isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller, SCI_REMOTE_DEVICE_HANDLE_T remote_device, struct ISCI_IO_REQUEST *isci_request, SCI_IO_STATUS completion_status) { struct ISCI_CONTROLLER *isci_controller; struct ISCI_REMOTE_DEVICE *isci_remote_device; union ccb *ccb; BOOL complete_ccb; complete_ccb = TRUE; isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(scif_controller); isci_remote_device = (struct ISCI_REMOTE_DEVICE *) sci_object_get_association(remote_device); ccb = isci_request->ccb; ccb->ccb_h.status &= ~CAM_STATUS_MASK; switch (completion_status) { case SCI_IO_SUCCESS: case SCI_IO_SUCCESS_COMPLETE_BEFORE_START: #if __FreeBSD_version >= 900026 if (ccb->ccb_h.func_code == XPT_SMP_IO) { void *smp_response = scif_io_request_get_response_iu_address( isci_request->sci_object); memcpy(ccb->smpio.smp_response, smp_response, ccb->smpio.smp_response_len); } #endif ccb->ccb_h.status |= CAM_REQ_CMP; break; case SCI_IO_SUCCESS_IO_DONE_EARLY: ccb->ccb_h.status |= CAM_REQ_CMP; ccb->csio.resid = ccb->csio.dxfer_len - scif_io_request_get_number_of_bytes_transferred( isci_request->sci_object); break; case SCI_IO_FAILURE_RESPONSE_VALID: { SCI_SSP_RESPONSE_IU_T * response_buffer; uint32_t sense_length; int error_code, sense_key, asc, ascq; struct ccb_scsiio *csio = &ccb->csio; response_buffer = (SCI_SSP_RESPONSE_IU_T *) scif_io_request_get_response_iu_address( isci_request->sci_object); sense_length = sci_ssp_get_sense_data_length( response_buffer->sense_data_length); sense_length = MIN(csio->sense_len, sense_length); memcpy(&csio->sense_data, response_buffer->data, sense_length); csio->sense_resid = csio->sense_len - sense_length; csio->scsi_status = response_buffer->status; ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; scsi_extract_sense( &csio->sense_data, &error_code, &sense_key, &asc, &ascq ); isci_log_message(1, "ISCI", "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n", ccb->ccb_h.path_id, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0], csio->scsi_status, sense_key, asc, ascq); break; } case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: isci_remote_device_reset(isci_remote_device, NULL); /* drop through */ case SCI_IO_FAILURE_TERMINATED: ccb->ccb_h.status |= CAM_REQ_TERMIO; isci_log_message(1, "ISCI", "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n", ccb->ccb_h.path_id, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]); break; case SCI_IO_FAILURE_INVALID_STATE: case SCI_IO_FAILURE_INSUFFICIENT_RESOURCES: complete_ccb = FALSE; break; case SCI_IO_FAILURE_INVALID_REMOTE_DEVICE: ccb->ccb_h.status |= CAM_DEV_NOT_THERE; break; case 
SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE: { struct ccb_relsim ccb_relsim; struct cam_path *path; xpt_create_path(&path, NULL, cam_sim_path(isci_controller->sim), isci_remote_device->index, 0); xpt_setup_ccb(&ccb_relsim.ccb_h, path, 5); ccb_relsim.ccb_h.func_code = XPT_REL_SIMQ; ccb_relsim.ccb_h.flags = CAM_DEV_QFREEZE; ccb_relsim.release_flags = RELSIM_ADJUST_OPENINGS; ccb_relsim.openings = scif_remote_device_get_max_queue_depth(remote_device); xpt_action((union ccb *)&ccb_relsim); xpt_free_path(path); complete_ccb = FALSE; } break; case SCI_IO_FAILURE: case SCI_IO_FAILURE_REQUIRES_SCSI_ABORT: case SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL: case SCI_IO_FAILURE_PROTOCOL_VIOLATION: case SCI_IO_FAILURE_INVALID_PARAMETER_VALUE: case SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR: default: isci_log_message(1, "ISCI", "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n", ccb->ccb_h.path_id, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0], completion_status); ccb->ccb_h.status |= CAM_REQ_CMP_ERR; break; } callout_stop(&isci_request->parent.timer); bus_dmamap_sync(isci_request->parent.dma_tag, isci_request->parent.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(isci_request->parent.dma_tag, isci_request->parent.dma_map); isci_request->ccb = NULL; sci_pool_put(isci_controller->request_pool, (struct ISCI_REQUEST *)isci_request); if (complete_ccb) { if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { /* ccb will be completed with some type of non-success * status. So temporarily freeze the queue until the * upper layers can act on the status. The * CAM_DEV_QFRZN flag will then release the queue * after the status is acted upon. */ ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); } if (ccb->ccb_h.status & CAM_SIM_QUEUED) { KASSERT(ccb == isci_remote_device->queued_ccb_in_progress, ("multiple internally queued ccbs in flight")); TAILQ_REMOVE(&isci_remote_device->queued_ccbs, &ccb->ccb_h, sim_links.tqe); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; /* * This CCB that was in the queue was completed, so * set the in_progress pointer to NULL denoting that * we can retry another CCB from the queue. We only * allow one CCB at a time from the queue to be * in progress so that we can effectively maintain * ordering. */ isci_remote_device->queued_ccb_in_progress = NULL; } if (isci_remote_device->frozen_lun_mask != 0) { isci_remote_device_release_device_queue(isci_remote_device); } xpt_done(ccb); if (isci_controller->is_frozen == TRUE) { isci_controller->is_frozen = FALSE; xpt_release_simq(isci_controller->sim, TRUE); } } else { isci_remote_device_freeze_lun_queue(isci_remote_device, ccb->ccb_h.target_lun); if (ccb->ccb_h.status & CAM_SIM_QUEUED) { KASSERT(ccb == isci_remote_device->queued_ccb_in_progress, ("multiple internally queued ccbs in flight")); /* * Do nothing, CCB is already on the device's queue. * We leave it on the queue, to be retried again * next time a CCB on this device completes, or we * get a ready notification for this device. 
*/ isci_log_message(1, "ISCI", "already queued %p %x\n", ccb, ccb->csio.cdb_io.cdb_bytes[0]); isci_remote_device->queued_ccb_in_progress = NULL; } else { isci_log_message(1, "ISCI", "queue %p %x\n", ccb, ccb->csio.cdb_io.cdb_bytes[0]); ccb->ccb_h.status |= CAM_SIM_QUEUED; TAILQ_INSERT_TAIL(&isci_remote_device->queued_ccbs, &ccb->ccb_h, sim_links.tqe); } } } /** * @brief This callback method asks the user to provide the physical * address for the supplied virtual address when building an * io request object. * * @param[in] controller This parameter is the core controller object * handle. * @param[in] io_request This parameter is the io request object handle * for which the physical address is being requested. * @param[in] virtual_address This paramter is the virtual address which * is to be returned as a physical address. * @param[out] physical_address The physical address for the supplied virtual * address. * * @return None. */ void scic_cb_io_request_get_physical_address(SCI_CONTROLLER_HANDLE_T controller, SCI_IO_REQUEST_HANDLE_T io_request, void *virtual_address, SCI_PHYSICAL_ADDRESS *physical_address) { SCI_IO_REQUEST_HANDLE_T scif_request = sci_object_get_association(io_request); struct ISCI_REQUEST *isci_request = sci_object_get_association(scif_request); if(isci_request != NULL) { /* isci_request is not NULL, meaning this is a request initiated * by CAM or the isci layer (i.e. device reset for I/O * timeout). Therefore we can calculate the physical address * based on the address we stored in the struct ISCI_REQUEST * object. */ *physical_address = isci_request->physical_address + (uintptr_t)virtual_address - (uintptr_t)isci_request; } else { /* isci_request is NULL, meaning this is a request generated * internally by SCIL (i.e. for SMP requests or NCQ error * recovery). Therefore we calculate the physical address * based on the controller's uncached controller memory buffer, * since we know that this is what SCIL uses for internal * framework requests. */ SCI_CONTROLLER_HANDLE_T scif_controller = (SCI_CONTROLLER_HANDLE_T) sci_object_get_association(controller); struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller); U64 virt_addr_offset = (uintptr_t)virtual_address - (U64)isci_controller->uncached_controller_memory.virtual_address; *physical_address = isci_controller->uncached_controller_memory.physical_address + virt_addr_offset; } } /** * @brief This callback method asks the user to provide the address for * the command descriptor block (CDB) associated with this IO request. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * * @return This method returns the virtual address of the CDB. */ void * scif_cb_io_request_get_cdb_address(void * scif_user_io_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)scif_user_io_request; return (isci_request->ccb->csio.cdb_io.cdb_bytes); } /** * @brief This callback method asks the user to provide the length of * the command descriptor block (CDB) associated with this IO request. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * * @return This method returns the length of the CDB. 
*/ uint32_t scif_cb_io_request_get_cdb_length(void * scif_user_io_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)scif_user_io_request; return (isci_request->ccb->csio.cdb_len); } /** * @brief This callback method asks the user to provide the Logical Unit (LUN) * associated with this IO request. * * @note The contents of the value returned from this callback are defined * by the protocol standard (e.g. T10 SAS specification). Please * refer to the transport command information unit description * in the associated standard. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * * @return This method returns the LUN associated with this request. */ uint32_t scif_cb_io_request_get_lun(void * scif_user_io_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)scif_user_io_request; return (isci_request->ccb->ccb_h.target_lun); } /** * @brief This callback method asks the user to provide the task attribute * associated with this IO request. * * @note The contents of the value returned from this callback are defined * by the protocol standard (e.g. T10 SAS specification). Please * refer to the transport command information unit description * in the associated standard. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * * @return This method returns the task attribute associated with this * IO request. */ uint32_t scif_cb_io_request_get_task_attribute(void * scif_user_io_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)scif_user_io_request; uint32_t task_attribute; if((isci_request->ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) switch(isci_request->ccb->csio.tag_action) { case MSG_HEAD_OF_Q_TAG: task_attribute = SCI_SAS_HEAD_OF_QUEUE_ATTRIBUTE; break; case MSG_ORDERED_Q_TAG: task_attribute = SCI_SAS_ORDERED_ATTRIBUTE; break; case MSG_ACA_TASK: task_attribute = SCI_SAS_ACA_ATTRIBUTE; break; default: task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE; break; } else task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE; return (task_attribute); } /** * @brief This callback method asks the user to provide the command priority * associated with this IO request. * * @note The contents of the value returned from this callback are defined * by the protocol standard (e.g. T10 SAS specification). Please * refer to the transport command information unit description * in the associated standard. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * * @return This method returns the command priority associated with this * IO request. */ uint32_t scif_cb_io_request_get_command_priority(void * scif_user_io_request) { return (0); } /** * @brief This method simply returns the virtual address associated * with the scsi_io and byte_offset supplied parameters. * * @note This callback is not utilized in the fast path. The expectation * is that this method is utilized for items such as SCSI to ATA * translation for commands like INQUIRY, READ CAPACITY, etc. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. 
* @param[in] byte_offset This parameter specifies the offset into the data * buffers pointed to by the SGL. The byte offset starts at 0 * and continues until the last byte pointed to be the last SGL * element. * * @return A virtual address pointer to the location specified by the * parameters. */ uint8_t * scif_cb_io_request_get_virtual_address_from_sgl(void * scif_user_io_request, uint32_t byte_offset) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)scif_user_io_request; return (isci_request->ccb->csio.data_ptr + byte_offset); } /** * @brief This callback method asks the user to provide the number of * bytes to be transfered as part of this request. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * * @return This method returns the number of payload data bytes to be * transfered for this IO request. */ uint32_t scif_cb_io_request_get_transfer_length(void * scif_user_io_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)scif_user_io_request; return (isci_request->ccb->csio.dxfer_len); } /** * @brief This callback method asks the user to provide the data direction * for this request. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * * @return This method returns the value of SCI_IO_REQUEST_DATA_OUT, * SCI_IO_REQUEST_DATA_IN, or SCI_IO_REQUEST_NO_DATA. */ SCI_IO_REQUEST_DATA_DIRECTION scif_cb_io_request_get_data_direction(void * scif_user_io_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)scif_user_io_request; switch (isci_request->ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: return (SCI_IO_REQUEST_DATA_IN); case CAM_DIR_OUT: return (SCI_IO_REQUEST_DATA_OUT); default: return (SCI_IO_REQUEST_NO_DATA); } } /** * @brief This callback method asks the user to provide the address * to where the next Scatter-Gather Element is located. * * Details regarding usage: * - Regarding the first SGE: the user should initialize an index, * or a pointer, prior to construction of the request that will * reference the very first scatter-gather element. This is * important since this method is called for every scatter-gather * element, including the first element. * - Regarding the last SGE: the user should return NULL from this * method when this method is called and the SGL has exhausted * all elements. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * @param[in] current_sge_address This parameter specifies the address for * the current SGE (i.e. the one that has just processed). * @param[out] next_sge An address specifying the location for the next scatter * gather element to be processed. * * @return None. 
*/ void scif_cb_io_request_get_next_sge(void * scif_user_io_request, void * current_sge_address, void ** next_sge) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)scif_user_io_request; if (isci_request->current_sge_index == isci_request->num_segments) *next_sge = NULL; else { bus_dma_segment_t *sge = &isci_request->sge[isci_request->current_sge_index]; isci_request->current_sge_index++; *next_sge = sge; } } /** * @brief This callback method asks the user to provide the contents of the * "address" field in the Scatter-Gather Element. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * @param[in] sge_address This parameter specifies the address for the * SGE from which to retrieve the address field. * * @return A physical address specifying the contents of the SGE's address * field. */ SCI_PHYSICAL_ADDRESS scif_cb_sge_get_address_field(void *scif_user_io_request, void *sge_address) { bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address; return ((SCI_PHYSICAL_ADDRESS)sge->ds_addr); } /** * @brief This callback method asks the user to provide the contents of the * "length" field in the Scatter-Gather Element. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * @param[in] sge_address This parameter specifies the address for the * SGE from which to retrieve the address field. * * @return This method returns the length field specified inside the SGE * referenced by the sge_address parameter. */ uint32_t scif_cb_sge_get_length_field(void *scif_user_io_request, void *sge_address) { bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address; return ((uint32_t)sge->ds_len); } void isci_request_construct(struct ISCI_REQUEST *request, SCI_CONTROLLER_HANDLE_T scif_controller_handle, bus_dma_tag_t io_buffer_dma_tag, bus_addr_t physical_address) { request->controller_handle = scif_controller_handle; request->dma_tag = io_buffer_dma_tag; request->physical_address = physical_address; bus_dmamap_create(request->dma_tag, 0, &request->dma_map); callout_init(&request->timer, CALLOUT_MPSAFE); } static void isci_io_request_construct(void *arg, bus_dma_segment_t *seg, int nseg, int error) { union ccb *ccb; struct ISCI_IO_REQUEST *io_request = (struct ISCI_IO_REQUEST *)arg; SCI_REMOTE_DEVICE_HANDLE_T *device = io_request->parent.remote_device_handle; SCI_STATUS status; io_request->num_segments = nseg; io_request->sge = seg; ccb = io_request->ccb; /* XXX More cleanup is needed here */ if ((nseg == 0) || (error != 0)) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } status = scif_io_request_construct( io_request->parent.controller_handle, io_request->parent.remote_device_handle, SCI_CONTROLLER_INVALID_IO_TAG, (void *)io_request, (void *)((char*)io_request + sizeof(struct ISCI_IO_REQUEST)), &io_request->sci_object); if (status != SCI_SUCCESS) { isci_io_request_complete(io_request->parent.controller_handle, device, io_request, (SCI_IO_STATUS)status); return; } sci_object_set_association(io_request->sci_object, io_request); bus_dmamap_sync(io_request->parent.dma_tag, io_request->parent.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); status = (SCI_STATUS)scif_controller_start_io( io_request->parent.controller_handle, device, io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG); if (status != SCI_SUCCESS) { 
isci_io_request_complete(io_request->parent.controller_handle, device, io_request, (SCI_IO_STATUS)status); return; } if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) callout_reset(&io_request->parent.timer, ccb->ccb_h.timeout, isci_io_request_timeout, io_request); } void isci_io_request_execute_scsi_io(union ccb *ccb, struct ISCI_CONTROLLER *controller) { target_id_t target_id = ccb->ccb_h.target_id; struct ISCI_REQUEST *request; struct ISCI_IO_REQUEST *io_request; struct ISCI_REMOTE_DEVICE *device = controller->remote_device[target_id]; int error; if (device == NULL) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_DEV_NOT_THERE; xpt_done(ccb); return; } if (sci_pool_empty(controller->request_pool)) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQUEUE_REQ; xpt_freeze_simq(controller->sim, 1); controller->is_frozen = TRUE; xpt_done(ccb); return; } ASSERT(device->is_resetting == FALSE); sci_pool_get(controller->request_pool, request); io_request = (struct ISCI_IO_REQUEST *)request; io_request->ccb = ccb; io_request->current_sge_index = 0; io_request->parent.remote_device_handle = device->sci_object; - if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) != 0) - panic("Unexpected CAM_SCATTER_VALID flag! flags = 0x%x\n", - ccb->ccb_h.flags); - - if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) - panic("Unexpected CAM_DATA_PHYS flag! flags = 0x%x\n", + if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) + panic("Unexpected cam data format! flags = 0x%x\n", ccb->ccb_h.flags); error = bus_dmamap_load_ccb(io_request->parent.dma_tag, io_request->parent.dma_map, ccb, isci_io_request_construct, io_request, 0x0); /* A resource shortage from BUSDMA will be automatically * continued at a later point, pushing the CCB processing * forward, which will in turn unfreeze the simq. */ if (error == EINPROGRESS) { xpt_freeze_simq(controller->sim, 1); ccb->ccb_h.flags |= CAM_RELEASE_SIMQ; } } void isci_io_request_timeout(void *arg) { struct ISCI_IO_REQUEST *request = (struct ISCI_IO_REQUEST *)arg; struct ISCI_REMOTE_DEVICE *remote_device = (struct ISCI_REMOTE_DEVICE *) sci_object_get_association(request->parent.remote_device_handle); struct ISCI_CONTROLLER *controller = remote_device->domain->controller; mtx_lock(&controller->lock); isci_remote_device_reset(remote_device, NULL); mtx_unlock(&controller->lock); } #if __FreeBSD_version >= 900026 /** * @brief This callback method gets the size of and pointer to the buffer * (if any) containing the request buffer for an SMP request. * * @param[in] core_request This parameter specifies the SCI core's request * object associated with the SMP request. * @param[out] smp_request_buffer This parameter returns a pointer to the * payload portion of the SMP request - i.e. everything after * the SMP request header. * * @return Size of the request buffer in bytes. This does *not* include * the size of the SMP request header. */ static uint32_t smp_io_request_cb_get_request_buffer(SCI_IO_REQUEST_HANDLE_T core_request, uint8_t ** smp_request_buffer) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *) sci_object_get_association(sci_object_get_association(core_request)); *smp_request_buffer = isci_request->ccb->smpio.smp_request + sizeof(SMP_REQUEST_HEADER_T); return (isci_request->ccb->smpio.smp_request_len - sizeof(SMP_REQUEST_HEADER_T)); } /** * @brief This callback method gets the SMP function for an SMP request. 
* * @param[in] core_request This parameter specifies the SCI core's request * object associated with the SMP request. * * @return SMP function for the SMP request. */ static uint8_t smp_io_request_cb_get_function(SCI_IO_REQUEST_HANDLE_T core_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *) sci_object_get_association(sci_object_get_association(core_request)); SMP_REQUEST_HEADER_T *header = (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request; return (header->function); } /** * @brief This callback method gets the SMP frame type for an SMP request. * * @param[in] core_request This parameter specifies the SCI core's request * object associated with the SMP request. * * @return SMP frame type for the SMP request. */ static uint8_t smp_io_request_cb_get_frame_type(SCI_IO_REQUEST_HANDLE_T core_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *) sci_object_get_association(sci_object_get_association(core_request)); SMP_REQUEST_HEADER_T *header = (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request; return (header->smp_frame_type); } /** * @brief This callback method gets the allocated response length for an SMP request. * * @param[in] core_request This parameter specifies the SCI core's request * object associated with the SMP request. * * @return Allocated response length for the SMP request. */ static uint8_t smp_io_request_cb_get_allocated_response_length( SCI_IO_REQUEST_HANDLE_T core_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *) sci_object_get_association(sci_object_get_association(core_request)); SMP_REQUEST_HEADER_T *header = (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request; return (header->allocated_response_length); } static SCI_STATUS isci_smp_request_construct(struct ISCI_IO_REQUEST *request) { SCI_STATUS status; SCIC_SMP_PASSTHRU_REQUEST_CALLBACKS_T callbacks; status = scif_request_construct(request->parent.controller_handle, request->parent.remote_device_handle, SCI_CONTROLLER_INVALID_IO_TAG, (void *)request, (void *)((char*)request + sizeof(struct ISCI_IO_REQUEST)), &request->sci_object); if (status == SCI_SUCCESS) { callbacks.scic_cb_smp_passthru_get_request = &smp_io_request_cb_get_request_buffer; callbacks.scic_cb_smp_passthru_get_function = &smp_io_request_cb_get_function; callbacks.scic_cb_smp_passthru_get_frame_type = &smp_io_request_cb_get_frame_type; callbacks.scic_cb_smp_passthru_get_allocated_response_length = &smp_io_request_cb_get_allocated_response_length; /* create the smp passthrough part of the io request */ status = scic_io_request_construct_smp_pass_through( scif_io_request_get_scic_handle(request->sci_object), &callbacks); } return (status); } void isci_io_request_execute_smp_io(union ccb *ccb, struct ISCI_CONTROLLER *controller) { SCI_STATUS status; target_id_t target_id = ccb->ccb_h.target_id; struct ISCI_REQUEST *request; struct ISCI_IO_REQUEST *io_request; SCI_REMOTE_DEVICE_HANDLE_T smp_device_handle; struct ISCI_REMOTE_DEVICE *end_device = controller->remote_device[target_id]; /* SMP commands are sent to an end device, because SMP devices are not * exposed to the kernel. It is our responsibility to use this method * to get the SMP device that contains the specified end device. If * the device is direct-attached, the handle will come back NULL, and * we'll just fail the SMP_IO with DEV_NOT_THERE. 
*/ scif_remote_device_get_containing_device(end_device->sci_object, &smp_device_handle); if (smp_device_handle == NULL) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_DEV_NOT_THERE; xpt_done(ccb); return; } if (sci_pool_empty(controller->request_pool)) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQUEUE_REQ; xpt_freeze_simq(controller->sim, 1); controller->is_frozen = TRUE; xpt_done(ccb); return; } ASSERT(device->is_resetting == FALSE); sci_pool_get(controller->request_pool, request); io_request = (struct ISCI_IO_REQUEST *)request; io_request->ccb = ccb; io_request->parent.remote_device_handle = smp_device_handle; status = isci_smp_request_construct(io_request); if (status != SCI_SUCCESS) { isci_io_request_complete(controller->scif_controller_handle, smp_device_handle, io_request, (SCI_IO_STATUS)status); return; } sci_object_set_association(io_request->sci_object, io_request); status = (SCI_STATUS) scif_controller_start_io( controller->scif_controller_handle, smp_device_handle, io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG); if (status != SCI_SUCCESS) { isci_io_request_complete(controller->scif_controller_handle, smp_device_handle, io_request, (SCI_IO_STATUS)status); return; } if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) callout_reset(&io_request->parent.timer, ccb->ccb_h.timeout, isci_io_request_timeout, request); } #endif Index: projects/physbio/sys/dev/mly/mly.c =================================================================== --- projects/physbio/sys/dev/mly/mly.c (revision 243875) +++ projects/physbio/sys/dev/mly/mly.c (revision 243876) @@ -1,2999 +1,2987 @@ /*- * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int mly_probe(device_t dev); static int mly_attach(device_t dev); static int mly_pci_attach(struct mly_softc *sc); static int mly_detach(device_t dev); static int mly_shutdown(device_t dev); static void mly_intr(void *arg); static int mly_sg_map(struct mly_softc *sc); static void mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int mly_mmbox_map(struct mly_softc *sc); static void mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void mly_free(struct mly_softc *sc); static int mly_get_controllerinfo(struct mly_softc *sc); static void mly_scan_devices(struct mly_softc *sc); static void mly_rescan_btl(struct mly_softc *sc, int bus, int target); static void mly_complete_rescan(struct mly_command *mc); static int mly_get_eventstatus(struct mly_softc *sc); static int mly_enable_mmbox(struct mly_softc *sc); static int mly_flush(struct mly_softc *sc); static int mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, size_t datasize, u_int8_t *status, void *sense_buffer, size_t *sense_length); static void mly_check_event(struct mly_softc *sc); static void mly_fetch_event(struct mly_softc *sc); static void mly_complete_event(struct mly_command *mc); static void mly_process_event(struct mly_softc *sc, struct mly_event *me); static void mly_periodic(void *data); static int mly_immediate_command(struct mly_command *mc); static int mly_start(struct mly_command *mc); static void mly_done(struct mly_softc *sc); static void mly_complete(void *context, int pending); static int mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp); static void mly_release_command(struct mly_command *mc); static void mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int mly_alloc_commands(struct mly_softc *sc); static void mly_release_commands(struct mly_softc *sc); static void mly_map_command(struct mly_command *mc); static void mly_unmap_command(struct mly_command *mc); static int mly_cam_attach(struct mly_softc *sc); static void mly_cam_detach(struct mly_softc *sc); static void mly_cam_rescan_btl(struct mly_softc *sc, int bus, int target); static void mly_cam_action(struct cam_sim *sim, union ccb *ccb); static int mly_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio); static void mly_cam_poll(struct cam_sim *sim); static void mly_cam_complete(struct mly_command *mc); static struct cam_periph *mly_find_periph(struct mly_softc *sc, int bus, int target); static int mly_name_device(struct mly_softc *sc, int bus, int target); static int mly_fwhandshake(struct mly_softc *sc); static void mly_describe_controller(struct mly_softc *sc); #ifdef MLY_DEBUG static void mly_printstate(struct mly_softc *sc); static void mly_print_command(struct mly_command *mc); static void mly_print_packet(struct mly_command *mc); static void mly_panic(struct mly_softc *sc, char *reason); static int mly_timeout(struct mly_softc *sc); #endif void mly_print_controller(int controller); static d_open_t mly_user_open; static d_close_t mly_user_close; static d_ioctl_t mly_user_ioctl; static int mly_user_command(struct mly_softc *sc, struct mly_user_command *uc); static int mly_user_health(struct mly_softc *sc, struct mly_user_health *uh); #define MLY_CMD_TIMEOUT 20 
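/*
 * [Illustrative sketch, not part of this changeset.]  The isci hunk earlier
 * in this diff drops the old CAM_SCATTER_VALID/CAM_DATA_PHYS flag tests in
 * favour of a single check of the CCB data-format field (CAM_DATA_MASK) and
 * then lets bus_dmamap_load_ccb() map whatever buffer the CCB carries.  The
 * fragment below shows that pattern in isolation for a hypothetical "xdrv"
 * SIM: the xdrv_* names and structure are assumptions made for this sketch;
 * everything else mirrors calls that already appear in this diff
 * (bus_dmamap_load_ccb, xpt_freeze_simq, CAM_RELEASE_SIMQ, and the
 * EINPROGRESS continuation used by isci_io_request_execute_scsi_io).
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>

struct xdrv_request {
	bus_dma_tag_t	 xr_dma_tag;	/* per-controller buffer DMA tag */
	bus_dmamap_t	 xr_dma_map;	/* per-request DMA map */
	union ccb	*xr_ccb;	/* CCB currently being mapped */
};

/* busdma callback: runs once the buffer has been broken into segments. */
static void
xdrv_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct xdrv_request *xr = arg;
	union ccb *ccb = xr->xr_ccb;

	if (error != 0 || nseg == 0) {
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}
	/* Build the controller-specific S/G list from segs[0..nseg-1], then: */
	bus_dmamap_sync(xr->xr_dma_tag, xr->xr_dma_map,
	    (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
}

static void
xdrv_start_io(struct cam_sim *sim, struct xdrv_request *xr, union ccb *ccb)
{
	int error;

	/* This hypothetical driver only accepts plain virtual-address buffers. */
	if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
		panic("unexpected CAM data format, flags = 0x%x",
		    ccb->ccb_h.flags);

	xr->xr_ccb = ccb;
	error = bus_dmamap_load_ccb(xr->xr_dma_tag, xr->xr_dma_map, ccb,
	    xdrv_dma_cb, xr, /*flags*/0);
	if (error == EINPROGRESS) {
		/*
		 * busdma is short on mapping resources and will invoke
		 * xdrv_dma_cb later; freeze the simq until then and let
		 * CAM_RELEASE_SIMQ thaw it when this CCB completes.
		 */
		xpt_freeze_simq(sim, 1);
		ccb->ccb_h.flags |= CAM_RELEASE_SIMQ;
	}
}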
static device_method_t mly_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mly_probe), DEVMETHOD(device_attach, mly_attach), DEVMETHOD(device_detach, mly_detach), DEVMETHOD(device_shutdown, mly_shutdown), { 0, 0 } }; static driver_t mly_pci_driver = { "mly", mly_methods, sizeof(struct mly_softc) }; static devclass_t mly_devclass; DRIVER_MODULE(mly, pci, mly_pci_driver, mly_devclass, 0, 0); MODULE_DEPEND(mly, pci, 1, 1, 1); MODULE_DEPEND(mly, cam, 1, 1, 1); static struct cdevsw mly_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = mly_user_open, .d_close = mly_user_close, .d_ioctl = mly_user_ioctl, .d_name = "mly", }; /******************************************************************************** ******************************************************************************** Device Interface ******************************************************************************** ********************************************************************************/ static struct mly_ident { u_int16_t vendor; u_int16_t device; u_int16_t subvendor; u_int16_t subdevice; int hwif; char *desc; } mly_identifiers[] = { {0x1069, 0xba56, 0x1069, 0x0040, MLY_HWIF_STRONGARM, "Mylex eXtremeRAID 2000"}, {0x1069, 0xba56, 0x1069, 0x0030, MLY_HWIF_STRONGARM, "Mylex eXtremeRAID 3000"}, {0x1069, 0x0050, 0x1069, 0x0050, MLY_HWIF_I960RX, "Mylex AcceleRAID 352"}, {0x1069, 0x0050, 0x1069, 0x0052, MLY_HWIF_I960RX, "Mylex AcceleRAID 170"}, {0x1069, 0x0050, 0x1069, 0x0054, MLY_HWIF_I960RX, "Mylex AcceleRAID 160"}, {0, 0, 0, 0, 0, 0} }; /******************************************************************************** * Compare the provided PCI device with the list we support. */ static int mly_probe(device_t dev) { struct mly_ident *m; debug_called(1); for (m = mly_identifiers; m->vendor != 0; m++) { if ((m->vendor == pci_get_vendor(dev)) && (m->device == pci_get_device(dev)) && ((m->subvendor == 0) || ((m->subvendor == pci_get_subvendor(dev)) && (m->subdevice == pci_get_subdevice(dev))))) { device_set_desc(dev, m->desc); return(BUS_PROBE_DEFAULT); /* allow room to be overridden */ } } return(ENXIO); } /******************************************************************************** * Initialise the controller and softc */ static int mly_attach(device_t dev) { struct mly_softc *sc = device_get_softc(dev); int error; debug_called(1); sc->mly_dev = dev; #ifdef MLY_DEBUG if (device_get_unit(sc->mly_dev) == 0) mly_softc0 = sc; #endif /* * Do PCI-specific initialisation. */ if ((error = mly_pci_attach(sc)) != 0) goto out; /* * Initialise per-controller queues. */ mly_initq_free(sc); mly_initq_busy(sc); mly_initq_complete(sc); /* * Initialise command-completion task. */ TASK_INIT(&sc->mly_task_complete, 0, mly_complete, sc); /* disable interrupts before we start talking to the controller */ MLY_MASK_INTERRUPTS(sc); /* * Wait for the controller to come ready, handshake with the firmware if required. * This is typically only necessary on platforms where the controller BIOS does not * run. */ if ((error = mly_fwhandshake(sc))) goto out; /* * Allocate initial command buffers. */ if ((error = mly_alloc_commands(sc))) goto out; /* * Obtain controller feature information */ if ((error = mly_get_controllerinfo(sc))) goto out; /* * Reallocate command buffers now we know how many we want. */ mly_release_commands(sc); if ((error = mly_alloc_commands(sc))) goto out; /* * Get the current event counter for health purposes, populate the initial * health status buffer. 
*/ if ((error = mly_get_eventstatus(sc))) goto out; /* * Enable memory-mailbox mode. */ if ((error = mly_enable_mmbox(sc))) goto out; /* * Attach to CAM. */ if ((error = mly_cam_attach(sc))) goto out; /* * Print a little information about the controller */ mly_describe_controller(sc); /* * Mark all attached devices for rescan. */ mly_scan_devices(sc); /* * Instigate the first status poll immediately. Rescan completions won't * happen until interrupts are enabled, which should still be before * the SCSI subsystem gets to us, courtesy of the "SCSI settling delay". */ mly_periodic((void *)sc); /* * Create the control device. */ sc->mly_dev_t = make_dev(&mly_cdevsw, 0, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "mly%d", device_get_unit(sc->mly_dev)); sc->mly_dev_t->si_drv1 = sc; /* enable interrupts now */ MLY_UNMASK_INTERRUPTS(sc); #ifdef MLY_DEBUG timeout((timeout_t *)mly_timeout, sc, MLY_CMD_TIMEOUT * hz); #endif out: if (error != 0) mly_free(sc); return(error); } /******************************************************************************** * Perform PCI-specific initialisation. */ static int mly_pci_attach(struct mly_softc *sc) { int i, error; u_int32_t command; debug_called(1); /* assume failure is 'not configured' */ error = ENXIO; /* * Verify that the adapter is correctly set up in PCI space. * * XXX we shouldn't do this; the PCI code should. */ command = pci_read_config(sc->mly_dev, PCIR_COMMAND, 2); command |= PCIM_CMD_BUSMASTEREN; pci_write_config(sc->mly_dev, PCIR_COMMAND, command, 2); command = pci_read_config(sc->mly_dev, PCIR_COMMAND, 2); if (!(command & PCIM_CMD_BUSMASTEREN)) { mly_printf(sc, "can't enable busmaster feature\n"); goto fail; } if ((command & PCIM_CMD_MEMEN) == 0) { mly_printf(sc, "memory window not available\n"); goto fail; } /* * Allocate the PCI register window. */ sc->mly_regs_rid = PCIR_BAR(0); /* first base address register */ if ((sc->mly_regs_resource = bus_alloc_resource_any(sc->mly_dev, SYS_RES_MEMORY, &sc->mly_regs_rid, RF_ACTIVE)) == NULL) { mly_printf(sc, "can't allocate register window\n"); goto fail; } sc->mly_btag = rman_get_bustag(sc->mly_regs_resource); sc->mly_bhandle = rman_get_bushandle(sc->mly_regs_resource); /* * Allocate and connect our interrupt. */ sc->mly_irq_rid = 0; if ((sc->mly_irq = bus_alloc_resource_any(sc->mly_dev, SYS_RES_IRQ, &sc->mly_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { mly_printf(sc, "can't allocate interrupt\n"); goto fail; } if (bus_setup_intr(sc->mly_dev, sc->mly_irq, INTR_TYPE_CAM | INTR_ENTROPY, NULL, mly_intr, sc, &sc->mly_intr)) { mly_printf(sc, "can't set up interrupt\n"); goto fail; } /* assume failure is 'out of memory' */ error = ENOMEM; /* * Allocate the parent bus DMA tag appropriate for our PCI interface. * * Note that all of these controllers are 64-bit capable. */ if (bus_dma_tag_create(bus_get_dma_tag(sc->mly_dev),/* PCI parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, MLY_MAX_SGENTRIES, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &sc->mly_parent_dmat)) { mly_printf(sc, "can't allocate parent DMA tag\n"); goto fail; } /* * Create DMA tag for mapping buffers into controller-addressable space. 
*/ if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, MLY_MAX_SGENTRIES, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ busdma_lock_mutex, /* lockfunc */ &Giant, /* lockarg */ &sc->mly_buffer_dmat)) { mly_printf(sc, "can't allocate buffer DMA tag\n"); goto fail; } /* * Initialise the DMA tag for command packets. */ if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sizeof(union mly_command_packet) * MLY_MAX_COMMANDS, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mly_packet_dmat)) { mly_printf(sc, "can't allocate command packet DMA tag\n"); goto fail; } /* * Detect the hardware interface version */ for (i = 0; mly_identifiers[i].vendor != 0; i++) { if ((mly_identifiers[i].vendor == pci_get_vendor(sc->mly_dev)) && (mly_identifiers[i].device == pci_get_device(sc->mly_dev))) { sc->mly_hwif = mly_identifiers[i].hwif; switch(sc->mly_hwif) { case MLY_HWIF_I960RX: debug(1, "set hardware up for i960RX"); sc->mly_doorbell_true = 0x00; sc->mly_command_mailbox = MLY_I960RX_COMMAND_MAILBOX; sc->mly_status_mailbox = MLY_I960RX_STATUS_MAILBOX; sc->mly_idbr = MLY_I960RX_IDBR; sc->mly_odbr = MLY_I960RX_ODBR; sc->mly_error_status = MLY_I960RX_ERROR_STATUS; sc->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS; sc->mly_interrupt_mask = MLY_I960RX_INTERRUPT_MASK; break; case MLY_HWIF_STRONGARM: debug(1, "set hardware up for StrongARM"); sc->mly_doorbell_true = 0xff; /* doorbell 'true' is 0 */ sc->mly_command_mailbox = MLY_STRONGARM_COMMAND_MAILBOX; sc->mly_status_mailbox = MLY_STRONGARM_STATUS_MAILBOX; sc->mly_idbr = MLY_STRONGARM_IDBR; sc->mly_odbr = MLY_STRONGARM_ODBR; sc->mly_error_status = MLY_STRONGARM_ERROR_STATUS; sc->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS; sc->mly_interrupt_mask = MLY_STRONGARM_INTERRUPT_MASK; break; } break; } } /* * Create the scatter/gather mappings. */ if ((error = mly_sg_map(sc))) goto fail; /* * Allocate and map the memory mailbox */ if ((error = mly_mmbox_map(sc))) goto fail; error = 0; fail: return(error); } /******************************************************************************** * Shut the controller down and detach all our resources. */ static int mly_detach(device_t dev) { int error; if ((error = mly_shutdown(dev)) != 0) return(error); mly_free(device_get_softc(dev)); return(0); } /******************************************************************************** * Bring the controller to a state where it can be safely left alone. * * Note that it should not be necessary to wait for any outstanding commands, * as they should be completed prior to calling here. * * XXX this applies for I/O, but not status polls; we should beware of * the case where a status command is running while we detach. */ static int mly_shutdown(device_t dev) { struct mly_softc *sc = device_get_softc(dev); debug_called(1); if (sc->mly_state & MLY_STATE_OPEN) return(EBUSY); /* kill the periodic event */ untimeout(mly_periodic, sc, sc->mly_periodic); /* flush controller */ mly_printf(sc, "flushing cache..."); printf("%s\n", mly_flush(sc) ? 
"failed" : "done"); MLY_MASK_INTERRUPTS(sc); return(0); } /******************************************************************************* * Take an interrupt, or be poked by other code to look for interrupt-worthy * status. */ static void mly_intr(void *arg) { struct mly_softc *sc = (struct mly_softc *)arg; debug_called(2); mly_done(sc); }; /******************************************************************************** ******************************************************************************** Bus-dependant Resource Management ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Allocate memory for the scatter/gather tables */ static int mly_sg_map(struct mly_softc *sc) { size_t segsize; debug_called(1); /* * Create a single tag describing a region large enough to hold all of * the s/g lists we will need. */ segsize = sizeof(struct mly_sg_entry) * MLY_MAX_COMMANDS *MLY_MAX_SGENTRIES; if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */ 1, 0, /* alignment,boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ segsize, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mly_sg_dmat)) { mly_printf(sc, "can't allocate scatter/gather DMA tag\n"); return(ENOMEM); } /* * Allocate enough s/g maps for all commands and permanently map them into * controller-visible space. * * XXX this assumes we can get enough space for all the s/g maps in one * contiguous slab. */ if (bus_dmamem_alloc(sc->mly_sg_dmat, (void **)&sc->mly_sg_table, BUS_DMA_NOWAIT, &sc->mly_sg_dmamap)) { mly_printf(sc, "can't allocate s/g table\n"); return(ENOMEM); } if (bus_dmamap_load(sc->mly_sg_dmat, sc->mly_sg_dmamap, sc->mly_sg_table, segsize, mly_sg_map_helper, sc, BUS_DMA_NOWAIT) != 0) return (ENOMEM); return(0); } /******************************************************************************** * Save the physical address of the base of the s/g table. */ static void mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct mly_softc *sc = (struct mly_softc *)arg; debug_called(1); /* save base of s/g table's address in bus space */ sc->mly_sg_busaddr = segs->ds_addr; } /******************************************************************************** * Allocate memory for the memory-mailbox interface */ static int mly_mmbox_map(struct mly_softc *sc) { /* * Create a DMA tag for a single contiguous region large enough for the * memory mailbox structure. 
*/ if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */ 1, 0, /* alignment,boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sizeof(struct mly_mmbox), 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mly_mmbox_dmat)) { mly_printf(sc, "can't allocate memory mailbox DMA tag\n"); return(ENOMEM); } /* * Allocate the buffer */ if (bus_dmamem_alloc(sc->mly_mmbox_dmat, (void **)&sc->mly_mmbox, BUS_DMA_NOWAIT, &sc->mly_mmbox_dmamap)) { mly_printf(sc, "can't allocate memory mailbox\n"); return(ENOMEM); } if (bus_dmamap_load(sc->mly_mmbox_dmat, sc->mly_mmbox_dmamap, sc->mly_mmbox, sizeof(struct mly_mmbox), mly_mmbox_map_helper, sc, BUS_DMA_NOWAIT) != 0) return (ENOMEM); bzero(sc->mly_mmbox, sizeof(*sc->mly_mmbox)); return(0); } /******************************************************************************** * Save the physical address of the memory mailbox */ static void mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct mly_softc *sc = (struct mly_softc *)arg; debug_called(1); sc->mly_mmbox_busaddr = segs->ds_addr; } /******************************************************************************** * Free all of the resources associated with (sc) * * Should not be called if the controller is active. */ static void mly_free(struct mly_softc *sc) { debug_called(1); /* Remove the management device */ destroy_dev(sc->mly_dev_t); /* detach from CAM */ mly_cam_detach(sc); /* release command memory */ mly_release_commands(sc); /* throw away the controllerinfo structure */ if (sc->mly_controllerinfo != NULL) free(sc->mly_controllerinfo, M_DEVBUF); /* throw away the controllerparam structure */ if (sc->mly_controllerparam != NULL) free(sc->mly_controllerparam, M_DEVBUF); /* destroy data-transfer DMA tag */ if (sc->mly_buffer_dmat) bus_dma_tag_destroy(sc->mly_buffer_dmat); /* free and destroy DMA memory and tag for s/g lists */ if (sc->mly_sg_table) { bus_dmamap_unload(sc->mly_sg_dmat, sc->mly_sg_dmamap); bus_dmamem_free(sc->mly_sg_dmat, sc->mly_sg_table, sc->mly_sg_dmamap); } if (sc->mly_sg_dmat) bus_dma_tag_destroy(sc->mly_sg_dmat); /* free and destroy DMA memory and tag for memory mailbox */ if (sc->mly_mmbox) { bus_dmamap_unload(sc->mly_mmbox_dmat, sc->mly_mmbox_dmamap); bus_dmamem_free(sc->mly_mmbox_dmat, sc->mly_mmbox, sc->mly_mmbox_dmamap); } if (sc->mly_mmbox_dmat) bus_dma_tag_destroy(sc->mly_mmbox_dmat); /* disconnect the interrupt handler */ if (sc->mly_intr) bus_teardown_intr(sc->mly_dev, sc->mly_irq, sc->mly_intr); if (sc->mly_irq != NULL) bus_release_resource(sc->mly_dev, SYS_RES_IRQ, sc->mly_irq_rid, sc->mly_irq); /* destroy the parent DMA tag */ if (sc->mly_parent_dmat) bus_dma_tag_destroy(sc->mly_parent_dmat); /* release the register window mapping */ if (sc->mly_regs_resource != NULL) bus_release_resource(sc->mly_dev, SYS_RES_MEMORY, sc->mly_regs_rid, sc->mly_regs_resource); } /******************************************************************************** ******************************************************************************** Command Wrappers ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Fill in the mly_controllerinfo and mly_controllerparam fields in the softc. 
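 * Both structures are fetched through mly_ioctl(); because the data pointers
 * passed in start out NULL, mly_ioctl() allocates the return buffers, and
 * they are kept in the softc until mly_free() releases them.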
*/ static int mly_get_controllerinfo(struct mly_softc *sc) { struct mly_command_ioctl mci; u_int8_t status; int error; debug_called(1); if (sc->mly_controllerinfo != NULL) free(sc->mly_controllerinfo, M_DEVBUF); /* build the getcontrollerinfo ioctl and send it */ bzero(&mci, sizeof(mci)); sc->mly_controllerinfo = NULL; mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO; if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerinfo, sizeof(*sc->mly_controllerinfo), &status, NULL, NULL))) return(error); if (status != 0) return(EIO); if (sc->mly_controllerparam != NULL) free(sc->mly_controllerparam, M_DEVBUF); /* build the getcontrollerparameter ioctl and send it */ bzero(&mci, sizeof(mci)); sc->mly_controllerparam = NULL; mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER; if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerparam, sizeof(*sc->mly_controllerparam), &status, NULL, NULL))) return(error); if (status != 0) return(EIO); return(0); } /******************************************************************************** * Schedule all possible devices for a rescan. * */ static void mly_scan_devices(struct mly_softc *sc) { int bus, target; debug_called(1); /* * Clear any previous BTL information. */ bzero(&sc->mly_btl, sizeof(sc->mly_btl)); /* * Mark all devices as requiring a rescan, and let the next * periodic scan collect them. */ for (bus = 0; bus < sc->mly_cam_channels; bus++) if (MLY_BUS_IS_VALID(sc, bus)) for (target = 0; target < MLY_MAX_TARGETS; target++) sc->mly_btl[bus][target].mb_flags = MLY_BTL_RESCAN; } /******************************************************************************** * Rescan a device, possibly as a consequence of getting an event which suggests * that it may have changed. * * If we suffer resource starvation, we can abandon the rescan as we'll be * retried. */ static void mly_rescan_btl(struct mly_softc *sc, int bus, int target) { struct mly_command *mc; struct mly_command_ioctl *mci; debug_called(1); /* check that this bus is valid */ if (!MLY_BUS_IS_VALID(sc, bus)) return; /* get a command */ if (mly_alloc_command(sc, &mc)) return; /* set up the data buffer */ if ((mc->mc_data = malloc(sizeof(union mly_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { mly_release_command(mc); return; } mc->mc_flags |= MLY_CMD_DATAIN; mc->mc_complete = mly_complete_rescan; /* * Build the ioctl. */ mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl; mci->opcode = MDACMD_IOCTL; mci->addr.phys.controller = 0; mci->timeout.value = 30; mci->timeout.scale = MLY_TIMEOUT_SECONDS; if (MLY_BUS_IS_VIRTUAL(sc, bus)) { mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getlogdevinfovalid); mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID; mci->addr.log.logdev = MLY_LOGDEV_ID(sc, bus, target); debug(1, "logical device %d", mci->addr.log.logdev); } else { mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getphysdevinfovalid); mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID; mci->addr.phys.lun = 0; mci->addr.phys.target = target; mci->addr.phys.channel = bus; debug(1, "physical device %d:%d", mci->addr.phys.channel, mci->addr.phys.target); } /* * Dispatch the command. If we successfully send the command, clear the rescan * bit. 
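 * A failed submission is not treated as an error here: the MLY_BTL_RESCAN
 * flag is left set, so mly_periodic() will retry this bus:target on a later
 * pass.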
*/ if (mly_start(mc) != 0) { mly_release_command(mc); } else { sc->mly_btl[bus][target].mb_flags &= ~MLY_BTL_RESCAN; /* success */ } } /******************************************************************************** * Handle the completion of a rescan operation */ static void mly_complete_rescan(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; struct mly_ioctl_getlogdevinfovalid *ldi; struct mly_ioctl_getphysdevinfovalid *pdi; struct mly_command_ioctl *mci; struct mly_btl btl, *btlp; int bus, target, rescan; debug_called(1); /* * Recover the bus and target from the command. We need these even in * the case where we don't have a useful response. */ mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl; if (mci->sub_ioctl == MDACIOCTL_GETLOGDEVINFOVALID) { bus = MLY_LOGDEV_BUS(sc, mci->addr.log.logdev); target = MLY_LOGDEV_TARGET(sc, mci->addr.log.logdev); } else { bus = mci->addr.phys.channel; target = mci->addr.phys.target; } /* XXX validate bus/target? */ /* the default result is 'no device' */ bzero(&btl, sizeof(btl)); /* if the rescan completed OK, we have possibly-new BTL data */ if (mc->mc_status == 0) { if (mc->mc_length == sizeof(*ldi)) { ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data; if ((MLY_LOGDEV_BUS(sc, ldi->logical_device_number) != bus) || (MLY_LOGDEV_TARGET(sc, ldi->logical_device_number) != target)) { mly_printf(sc, "WARNING: BTL rescan for %d:%d returned data for %d:%d instead\n", bus, target, MLY_LOGDEV_BUS(sc, ldi->logical_device_number), MLY_LOGDEV_TARGET(sc, ldi->logical_device_number)); /* XXX what can we do about this? */ } btl.mb_flags = MLY_BTL_LOGICAL; btl.mb_type = ldi->raid_level; btl.mb_state = ldi->state; debug(1, "BTL rescan for %d returns %s, %s", ldi->logical_device_number, mly_describe_code(mly_table_device_type, ldi->raid_level), mly_describe_code(mly_table_device_state, ldi->state)); } else if (mc->mc_length == sizeof(*pdi)) { pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data; if ((pdi->channel != bus) || (pdi->target != target)) { mly_printf(sc, "WARNING: BTL rescan for %d:%d returned data for %d:%d instead\n", bus, target, pdi->channel, pdi->target); /* XXX what can we do about this? */ } btl.mb_flags = MLY_BTL_PHYSICAL; btl.mb_type = MLY_DEVICE_TYPE_PHYSICAL; btl.mb_state = pdi->state; btl.mb_speed = pdi->speed; btl.mb_width = pdi->width; if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED) sc->mly_btl[bus][target].mb_flags |= MLY_BTL_PROTECTED; debug(1, "BTL rescan for %d:%d returns %s", bus, target, mly_describe_code(mly_table_device_state, pdi->state)); } else { mly_printf(sc, "BTL rescan result invalid\n"); } } free(mc->mc_data, M_DEVBUF); mly_release_command(mc); /* * Decide whether we need to rescan the device. */ rescan = 0; /* device type changes (usually between 'nothing' and 'something') */ btlp = &sc->mly_btl[bus][target]; if (btl.mb_flags != btlp->mb_flags) { debug(1, "flags changed, rescanning"); rescan = 1; } /* XXX other reasons? */ /* * Update BTL information. */ *btlp = btl; /* * Perform CAM rescan if required. */ if (rescan) mly_cam_rescan_btl(sc, bus, target); } /******************************************************************************** * Get the current health status and set the 'next event' counter to suit. 
*/ static int mly_get_eventstatus(struct mly_softc *sc) { struct mly_command_ioctl mci; struct mly_health_status *mh; u_int8_t status; int error; /* build the gethealthstatus ioctl and send it */ bzero(&mci, sizeof(mci)); mh = NULL; mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS; if ((error = mly_ioctl(sc, &mci, (void **)&mh, sizeof(*mh), &status, NULL, NULL))) return(error); if (status != 0) return(EIO); /* get the event counter */ sc->mly_event_change = mh->change_counter; sc->mly_event_waiting = mh->next_event; sc->mly_event_counter = mh->next_event; /* save the health status into the memory mailbox */ bcopy(mh, &sc->mly_mmbox->mmm_health.status, sizeof(*mh)); debug(1, "initial change counter %d, event counter %d", mh->change_counter, mh->next_event); free(mh, M_DEVBUF); return(0); } /******************************************************************************** * Enable the memory mailbox mode. */ static int mly_enable_mmbox(struct mly_softc *sc) { struct mly_command_ioctl mci; u_int8_t *sp, status; int error; debug_called(1); /* build the ioctl and send it */ bzero(&mci, sizeof(mci)); mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX; /* set buffer addresses */ mci.param.setmemorymailbox.command_mailbox_physaddr = sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command); mci.param.setmemorymailbox.status_mailbox_physaddr = sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status); mci.param.setmemorymailbox.health_buffer_physaddr = sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health); /* set buffer sizes - abuse of data_size field is revolting */ sp = (u_int8_t *)&mci.data_size; sp[0] = ((sizeof(union mly_command_packet) * MLY_MMBOX_COMMANDS) / 1024); sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) / 1024; mci.param.setmemorymailbox.health_buffer_size = sizeof(union mly_health_region) / 1024; debug(1, "memory mailbox at %p (0x%llx/%d 0x%llx/%d 0x%llx/%d", sc->mly_mmbox, mci.param.setmemorymailbox.command_mailbox_physaddr, sp[0], mci.param.setmemorymailbox.status_mailbox_physaddr, sp[1], mci.param.setmemorymailbox.health_buffer_physaddr, mci.param.setmemorymailbox.health_buffer_size); if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL))) return(error); if (status != 0) return(EIO); sc->mly_state |= MLY_STATE_MMBOX_ACTIVE; debug(1, "memory mailbox active"); return(0); } /******************************************************************************** * Flush all pending I/O from the controller. */ static int mly_flush(struct mly_softc *sc) { struct mly_command_ioctl mci; u_int8_t status; int error; debug_called(1); /* build the ioctl */ bzero(&mci, sizeof(mci)); mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA; mci.param.deviceoperation.operation_device = MLY_OPDEVICE_PHYSICAL_CONTROLLER; /* pass it off to the controller */ if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL))) return(error); return((status == 0) ? 0 : EIO); } /******************************************************************************** * Perform an ioctl command. * * If (data) is not NULL, the command requires data transfer. If (*data) is NULL * the command requires data transfer from the controller, and we will allocate * a buffer for it. If (*data) is not NULL, the command requires data transfer * to the controller. * * XXX passing in the whole ioctl structure is ugly. Better ideas? * * XXX we don't even try to handle the case where datasize > 4k. We should. 
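 *
 * As a usage sketch (this mirrors the mly_get_eventstatus() caller above;
 * error handling trimmed), the "driver-allocated return buffer" form is:
 *
 *	struct mly_command_ioctl mci;
 *	struct mly_health_status *mh = NULL;
 *	u_int8_t status;
 *
 *	bzero(&mci, sizeof(mci));
 *	mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;
 *	error = mly_ioctl(sc, &mci, (void **)&mh, sizeof(*mh), &status, NULL, NULL);
 *	...
 *	free(mh, M_DEVBUF);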
*/ static int mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, size_t datasize, u_int8_t *status, void *sense_buffer, size_t *sense_length) { struct mly_command *mc; struct mly_command_ioctl *mci; int error; debug_called(1); mc = NULL; if (mly_alloc_command(sc, &mc)) { error = ENOMEM; goto out; } /* copy the ioctl structure, but save some important fields and then fixup */ mci = &mc->mc_packet->ioctl; ioctl->sense_buffer_address = mci->sense_buffer_address; ioctl->maximum_sense_size = mci->maximum_sense_size; *mci = *ioctl; mci->opcode = MDACMD_IOCTL; mci->timeout.value = 30; mci->timeout.scale = MLY_TIMEOUT_SECONDS; /* handle the data buffer */ if (data != NULL) { if (*data == NULL) { /* allocate data buffer */ if ((mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT)) == NULL) { error = ENOMEM; goto out; } mc->mc_flags |= MLY_CMD_DATAIN; } else { mc->mc_data = *data; mc->mc_flags |= MLY_CMD_DATAOUT; } mc->mc_length = datasize; mc->mc_packet->generic.data_size = datasize; } /* run the command */ if ((error = mly_immediate_command(mc))) goto out; /* clean up and return any data */ *status = mc->mc_status; if ((mc->mc_sense > 0) && (sense_buffer != NULL)) { bcopy(mc->mc_packet, sense_buffer, mc->mc_sense); *sense_length = mc->mc_sense; goto out; } /* should we return a data pointer? */ if ((data != NULL) && (*data == NULL)) *data = mc->mc_data; /* command completed OK */ error = 0; out: if (mc != NULL) { /* do we need to free a data buffer we allocated? */ if (error && (mc->mc_data != NULL) && (*data == NULL)) free(mc->mc_data, M_DEVBUF); mly_release_command(mc); } return(error); } /******************************************************************************** * Check for event(s) outstanding in the controller. */ static void mly_check_event(struct mly_softc *sc) { /* * The controller may have updated the health status information, * so check for it here. Note that the counters are all in host memory, * so this check is very cheap. Also note that we depend on checking on * completion */ if (sc->mly_mmbox->mmm_health.status.change_counter != sc->mly_event_change) { sc->mly_event_change = sc->mly_mmbox->mmm_health.status.change_counter; debug(1, "event change %d, event status update, %d -> %d", sc->mly_event_change, sc->mly_event_waiting, sc->mly_mmbox->mmm_health.status.next_event); sc->mly_event_waiting = sc->mly_mmbox->mmm_health.status.next_event; /* wake up anyone that might be interested in this */ wakeup(&sc->mly_event_change); } if (sc->mly_event_counter != sc->mly_event_waiting) mly_fetch_event(sc); } /******************************************************************************** * Fetch one event from the controller. * * If we fail due to resource starvation, we'll be retried the next time a * command completes. */ static void mly_fetch_event(struct mly_softc *sc) { struct mly_command *mc; struct mly_command_ioctl *mci; int s; u_int32_t event; debug_called(1); /* get a command */ if (mly_alloc_command(sc, &mc)) return; /* set up the data buffer */ if ((mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { mly_release_command(mc); return; } mc->mc_length = sizeof(struct mly_event); mc->mc_flags |= MLY_CMD_DATAIN; mc->mc_complete = mly_complete_event; /* * Get an event number to fetch. It's possible that we've raced with another * context for the last event, in which case there will be no more events. 
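 * The splcam() section below makes the check against mly_event_waiting and
 * the increment of mly_event_counter atomic with respect to completion
 * processing, so each event number is claimed by exactly one request.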
*/ s = splcam(); if (sc->mly_event_counter == sc->mly_event_waiting) { mly_release_command(mc); splx(s); return; } event = sc->mly_event_counter++; splx(s); /* * Build the ioctl. * * At this point we are committed to sending this request, as it * will be the only one constructed for this particular event number. */ mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl; mci->opcode = MDACMD_IOCTL; mci->data_size = sizeof(struct mly_event); mci->addr.phys.lun = (event >> 16) & 0xff; mci->addr.phys.target = (event >> 24) & 0xff; mci->addr.phys.channel = 0; mci->addr.phys.controller = 0; mci->timeout.value = 30; mci->timeout.scale = MLY_TIMEOUT_SECONDS; mci->sub_ioctl = MDACIOCTL_GETEVENT; mci->param.getevent.sequence_number_low = event & 0xffff; debug(1, "fetch event %u", event); /* * Submit the command. * * Note that failure of mly_start() will result in this event never being * fetched. */ if (mly_start(mc) != 0) { mly_printf(sc, "couldn't fetch event %u\n", event); mly_release_command(mc); } } /******************************************************************************** * Handle the completion of an event poll. */ static void mly_complete_event(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; struct mly_event *me = (struct mly_event *)mc->mc_data; debug_called(1); /* * If the event was successfully fetched, process it. */ if (mc->mc_status == SCSI_STATUS_OK) { mly_process_event(sc, me); free(me, M_DEVBUF); } mly_release_command(mc); /* * Check for another event. */ mly_check_event(sc); } /******************************************************************************** * Process a controller event. */ static void mly_process_event(struct mly_softc *sc, struct mly_event *me) { struct scsi_sense_data_fixed *ssd; char *fp, *tp; int bus, target, event, class, action; ssd = (struct scsi_sense_data_fixed *)&me->sense[0]; /* * Errors can be reported using vendor-unique sense data. In this case, the * event code will be 0x1c (Request sense data present), the sense key will * be 0x09 (vendor specific), the MSB of the ASC will be set, and the * actual event code will be a 16-bit value comprised of the ASCQ (low byte) * and low seven bits of the ASC (low seven bits of the high byte). */ if ((me->code == 0x1c) && ((ssd->flags & SSD_KEY) == SSD_KEY_Vendor_Specific) && (ssd->add_sense_code & 0x80)) { event = ((int)(ssd->add_sense_code & ~0x80) << 8) + ssd->add_sense_code_qual; } else { event = me->code; } /* look up event, get codes */ fp = mly_describe_code(mly_table_event, event); debug(1, "Event %d code 0x%x", me->sequence_number, me->code); /* quiet event? */ class = fp[0]; if (isupper(class) && bootverbose) class = tolower(class); /* get action code, text string */ action = fp[1]; tp = &fp[2]; /* * Print some information about the event. * * This code uses a table derived from the corresponding portion of the Linux * driver, and thus the parser is very similar. 
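 * Each table entry is a string of the form "<class><action><text>": the class
 * character selects a case below (uppercase classes are only reported when
 * bootverbose is set), an action of 'r' requests a rescan of the affected
 * device, and the remainder of the string is the message text.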
*/ switch(class) { case 'p': /* error on physical device */ mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp); if (action == 'r') sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN; break; case 'l': /* error on logical unit */ case 'm': /* message about logical unit */ bus = MLY_LOGDEV_BUS(sc, me->lun); target = MLY_LOGDEV_TARGET(sc, me->lun); mly_name_device(sc, bus, target); mly_printf(sc, "logical device %d (%s) %s\n", me->lun, sc->mly_btl[bus][target].mb_name, tp); if (action == 'r') sc->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN; break; case 's': /* report of sense data */ if (((ssd->flags & SSD_KEY) == SSD_KEY_NO_SENSE) || (((ssd->flags & SSD_KEY) == SSD_KEY_NOT_READY) && (ssd->add_sense_code == 0x04) && ((ssd->add_sense_code_qual == 0x01) || (ssd->add_sense_code_qual == 0x02)))) break; /* ignore NO_SENSE or NOT_READY in one case */ mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp); mly_printf(sc, " sense key %d asc %02x ascq %02x\n", ssd->flags & SSD_KEY, ssd->add_sense_code, ssd->add_sense_code_qual); mly_printf(sc, " info %4D csi %4D\n", ssd->info, "", ssd->cmd_spec_info, ""); if (action == 'r') sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN; break; case 'e': mly_printf(sc, tp, me->target, me->lun); printf("\n"); break; case 'c': mly_printf(sc, "controller %s\n", tp); break; case '?': mly_printf(sc, "%s - %d\n", tp, me->code); break; default: /* probably a 'noisy' event being ignored */ break; } } /******************************************************************************** * Perform periodic activities. */ static void mly_periodic(void *data) { struct mly_softc *sc = (struct mly_softc *)data; int bus, target; debug_called(2); /* * Scan devices. */ for (bus = 0; bus < sc->mly_cam_channels; bus++) { if (MLY_BUS_IS_VALID(sc, bus)) { for (target = 0; target < MLY_MAX_TARGETS; target++) { /* ignore the controller in this scan */ if (target == sc->mly_controllerparam->initiator_id) continue; /* perform device rescan? */ if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_RESCAN) mly_rescan_btl(sc, bus, target); } } } /* check for controller events */ mly_check_event(sc); /* reschedule ourselves */ sc->mly_periodic = timeout(mly_periodic, sc, MLY_PERIODIC_INTERVAL * hz); } /******************************************************************************** ******************************************************************************** Command Processing ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Run a command and wait for it to complete. * */ static int mly_immediate_command(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; int error, s; debug_called(1); /* spinning at splcam is ugly, but we're only used during controller init */ s = splcam(); if ((error = mly_start(mc))) { splx(s); return(error); } if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) { /* sleep on the command */ while(!(mc->mc_flags & MLY_CMD_COMPLETE)) { tsleep(mc, PRIBIO, "mlywait", 0); } } else { /* spin and collect status while we do */ while(!(mc->mc_flags & MLY_CMD_COMPLETE)) { mly_done(mc->mc_sc); } } splx(s); return(0); } /******************************************************************************** * Deliver a command to the controller. 
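 *
 * Commands go out either through the hardware mailbox (before memory-mailbox
 * mode is enabled) or by copying the packet into the next free slot of the
 * memory-mailbox command ring; in either case EBUSY is returned if the
 * controller has not yet consumed the previous submission.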
* * XXX it would be good to just queue commands that we can't submit immediately * and send them later, but we probably want a wrapper for that so that * we don't hang on a failed submission for an immediate command. */ static int mly_start(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; union mly_command_packet *pkt; int s; debug_called(2); /* * Set the command up for delivery to the controller. */ mly_map_command(mc); mc->mc_packet->generic.command_id = mc->mc_slot; #ifdef MLY_DEBUG mc->mc_timestamp = time_second; #endif s = splcam(); /* * Do we have to use the hardware mailbox? */ if (!(sc->mly_state & MLY_STATE_MMBOX_ACTIVE)) { /* * Check to see if the controller is ready for us. */ if (MLY_IDBR_TRUE(sc, MLY_HM_CMDSENT)) { splx(s); return(EBUSY); } mc->mc_flags |= MLY_CMD_BUSY; /* * It's ready, send the command. */ MLY_SET_MBOX(sc, sc->mly_command_mailbox, &mc->mc_packetphys); MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_CMDSENT); } else { /* use memory-mailbox mode */ pkt = &sc->mly_mmbox->mmm_command[sc->mly_mmbox_command_index]; /* check to see if the next index is free yet */ if (pkt->mmbox.flag != 0) { splx(s); return(EBUSY); } mc->mc_flags |= MLY_CMD_BUSY; /* copy in new command */ bcopy(mc->mc_packet->mmbox.data, pkt->mmbox.data, sizeof(pkt->mmbox.data)); /* barrier to ensure completion of previous write before we write the flag */ bus_space_barrier(sc->mly_btag, sc->mly_bhandle, 0, 0, BUS_SPACE_BARRIER_WRITE); /* copy flag last */ pkt->mmbox.flag = mc->mc_packet->mmbox.flag; /* barrier to ensure completion of previous write before we notify the controller */ bus_space_barrier(sc->mly_btag, sc->mly_bhandle, 0, 0, BUS_SPACE_BARRIER_WRITE); /* signal controller, update index */ MLY_SET_REG(sc, sc->mly_idbr, MLY_AM_CMDSENT); sc->mly_mmbox_command_index = (sc->mly_mmbox_command_index + 1) % MLY_MMBOX_COMMANDS; } mly_enqueue_busy(mc); splx(s); return(0); } /******************************************************************************** * Pick up command status from the controller, schedule a completion event */ static void mly_done(struct mly_softc *sc) { struct mly_command *mc; union mly_status_packet *sp; u_int16_t slot; int s, worked; s = splcam(); worked = 0; /* pick up hardware-mailbox commands */ if (MLY_ODBR_TRUE(sc, MLY_HM_STSREADY)) { slot = MLY_GET_REG2(sc, sc->mly_status_mailbox); if (slot < MLY_SLOT_MAX) { mc = &sc->mly_command[slot - MLY_SLOT_START]; mc->mc_status = MLY_GET_REG(sc, sc->mly_status_mailbox + 2); mc->mc_sense = MLY_GET_REG(sc, sc->mly_status_mailbox + 3); mc->mc_resid = MLY_GET_REG4(sc, sc->mly_status_mailbox + 4); mly_remove_busy(mc); mc->mc_flags &= ~MLY_CMD_BUSY; mly_enqueue_complete(mc); worked = 1; } else { /* slot 0xffff may mean "extremely bogus command" */ mly_printf(sc, "got HM completion for illegal slot %u\n", slot); } /* unconditionally acknowledge status */ MLY_SET_REG(sc, sc->mly_odbr, MLY_HM_STSREADY); MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK); } /* pick up memory-mailbox commands */ if (MLY_ODBR_TRUE(sc, MLY_AM_STSREADY)) { for (;;) { sp = &sc->mly_mmbox->mmm_status[sc->mly_mmbox_status_index]; /* check for more status */ if (sp->mmbox.flag == 0) break; /* get slot number */ slot = sp->status.command_id; if (slot < MLY_SLOT_MAX) { mc = &sc->mly_command[slot - MLY_SLOT_START]; mc->mc_status = sp->status.status; mc->mc_sense = sp->status.sense_length; mc->mc_resid = sp->status.residue; mly_remove_busy(mc); mc->mc_flags &= ~MLY_CMD_BUSY; mly_enqueue_complete(mc); worked = 1; } else { /* slot 0xffff may mean "extremely bogus command" */ 
mly_printf(sc, "got AM completion for illegal slot %u at %d\n", slot, sc->mly_mmbox_status_index); } /* clear and move to next index */ sp->mmbox.flag = 0; sc->mly_mmbox_status_index = (sc->mly_mmbox_status_index + 1) % MLY_MMBOX_STATUS; } /* acknowledge that we have collected status value(s) */ MLY_SET_REG(sc, sc->mly_odbr, MLY_AM_STSREADY); } splx(s); if (worked) { if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) taskqueue_enqueue(taskqueue_swi_giant, &sc->mly_task_complete); else mly_complete(sc, 0); } } /******************************************************************************** * Process completed commands */ static void mly_complete(void *context, int pending) { struct mly_softc *sc = (struct mly_softc *)context; struct mly_command *mc; void (* mc_complete)(struct mly_command *mc); debug_called(2); /* * Spin pulling commands off the completed queue and processing them. */ while ((mc = mly_dequeue_complete(sc)) != NULL) { /* * Free controller resources, mark command complete. * * Note that as soon as we mark the command complete, it may be freed * out from under us, so we need to save the mc_complete field in * order to later avoid dereferencing mc. (We would not expect to * have a polling/sleeping consumer with mc_complete != NULL). */ mly_unmap_command(mc); mc_complete = mc->mc_complete; mc->mc_flags |= MLY_CMD_COMPLETE; /* * Call completion handler or wake up sleeping consumer. */ if (mc_complete != NULL) { mc_complete(mc); } else { wakeup(mc); } } /* * XXX if we are deferring commands due to controller-busy status, we should * retry submitting them here. */ } /******************************************************************************** ******************************************************************************** Command Buffer Management ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Allocate a command. */ static int mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp) { struct mly_command *mc; debug_called(3); if ((mc = mly_dequeue_free(sc)) == NULL) return(ENOMEM); *mcp = mc; return(0); } /******************************************************************************** * Release a command back to the freelist. */ static void mly_release_command(struct mly_command *mc) { debug_called(3); /* * Fill in parts of the command that may cause confusion if * a consumer doesn't when we are later allocated. */ mc->mc_data = NULL; mc->mc_flags = 0; mc->mc_complete = NULL; mc->mc_private = NULL; /* * By default, we set up to overwrite the command packet with * sense information. */ mc->mc_packet->generic.sense_buffer_address = mc->mc_packetphys; mc->mc_packet->generic.maximum_sense_size = sizeof(union mly_command_packet); mly_enqueue_free(mc); } /******************************************************************************** * Map helper for command allocation. */ static void mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct mly_softc *sc = (struct mly_softc *)arg; debug_called(1); sc->mly_packetphys = segs[0].ds_addr; } /******************************************************************************** * Allocate and initialise command and packet structures. * * If the controller supports fewer than MLY_MAX_COMMANDS commands, limit our * allocation to that number. 
If we don't yet know how many commands the * controller supports, allocate a very small set (suitable for initialisation * purposes only). */ static int mly_alloc_commands(struct mly_softc *sc) { struct mly_command *mc; int i, ncmd; if (sc->mly_controllerinfo == NULL) { ncmd = 4; } else { ncmd = min(MLY_MAX_COMMANDS, sc->mly_controllerinfo->maximum_parallel_commands); } /* * Allocate enough space for all the command packets in one chunk and * map them permanently into controller-visible space. */ if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&sc->mly_packet, BUS_DMA_NOWAIT, &sc->mly_packetmap)) { return(ENOMEM); } if (bus_dmamap_load(sc->mly_packet_dmat, sc->mly_packetmap, sc->mly_packet, ncmd * sizeof(union mly_command_packet), mly_alloc_commands_map, sc, BUS_DMA_NOWAIT) != 0) return (ENOMEM); for (i = 0; i < ncmd; i++) { mc = &sc->mly_command[i]; bzero(mc, sizeof(*mc)); mc->mc_sc = sc; mc->mc_slot = MLY_SLOT_START + i; mc->mc_packet = sc->mly_packet + i; mc->mc_packetphys = sc->mly_packetphys + (i * sizeof(union mly_command_packet)); if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap)) mly_release_command(mc); } return(0); } /******************************************************************************** * Free all the storage held by commands. * * Must be called with all commands on the free list. */ static void mly_release_commands(struct mly_softc *sc) { struct mly_command *mc; /* throw away command buffer DMA maps */ while (mly_alloc_command(sc, &mc) == 0) bus_dmamap_destroy(sc->mly_buffer_dmat, mc->mc_datamap); /* release the packet storage */ if (sc->mly_packet != NULL) { bus_dmamap_unload(sc->mly_packet_dmat, sc->mly_packetmap); bus_dmamem_free(sc->mly_packet_dmat, sc->mly_packet, sc->mly_packetmap); sc->mly_packet = NULL; } } /******************************************************************************** * Command-mapping helper function - populate this command's s/g table * with the s/g entries for its data. */ static void mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct mly_command *mc = (struct mly_command *)arg; struct mly_softc *sc = mc->mc_sc; struct mly_command_generic *gen = &(mc->mc_packet->generic); struct mly_sg_entry *sg; int i, tabofs; debug_called(2); /* can we use the transfer structure directly? */ if (nseg <= 2) { sg = &gen->transfer.direct.sg[0]; gen->command_control.extended_sg_table = 0; } else { tabofs = ((mc->mc_slot - MLY_SLOT_START) * MLY_MAX_SGENTRIES); sg = sc->mly_sg_table + tabofs; gen->transfer.indirect.entries[0] = nseg; gen->transfer.indirect.table_physaddr[0] = sc->mly_sg_busaddr + (tabofs * sizeof(struct mly_sg_entry)); gen->command_control.extended_sg_table = 1; } /* copy the s/g table */ for (i = 0; i < nseg; i++) { sg[i].physaddr = segs[i].ds_addr; sg[i].length = segs[i].ds_len; } } #if 0 /******************************************************************************** * Command-mapping helper function - save the cdb's physical address. * * We don't support 'large' SCSI commands at this time, so this is unused. */ static void mly_map_command_cdb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct mly_command *mc = (struct mly_command *)arg; debug_called(2); /* XXX can we safely assume that a CDB will never cross a page boundary? 
*/ if ((segs[0].ds_addr % PAGE_SIZE) > ((segs[0].ds_addr + mc->mc_packet->scsi_large.cdb_length) % PAGE_SIZE)) panic("cdb crosses page boundary"); /* fix up fields in the command packet */ mc->mc_packet->scsi_large.cdb_physaddr = segs[0].ds_addr; } #endif /******************************************************************************** * Map a command into controller-visible space */ static void mly_map_command(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; debug_called(2); /* don't map more than once */ if (mc->mc_flags & MLY_CMD_MAPPED) return; /* does the command have a data buffer? */ if (mc->mc_data != NULL) { if (mc->mc_flags & MLY_CMD_CCB) bus_dmamap_load_ccb(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data, mly_map_command_sg, mc, 0); else bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data, mc->mc_length, mly_map_command_sg, mc, 0); if (mc->mc_flags & MLY_CMD_DATAIN) bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD); if (mc->mc_flags & MLY_CMD_DATAOUT) bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREWRITE); } mc->mc_flags |= MLY_CMD_MAPPED; } /******************************************************************************** * Unmap a command from controller-visible space */ static void mly_unmap_command(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; debug_called(2); if (!(mc->mc_flags & MLY_CMD_MAPPED)) return; /* does the command have a data buffer? */ if (mc->mc_data != NULL) { if (mc->mc_flags & MLY_CMD_DATAIN) bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTREAD); if (mc->mc_flags & MLY_CMD_DATAOUT) bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mly_buffer_dmat, mc->mc_datamap); } mc->mc_flags &= ~MLY_CMD_MAPPED; } /******************************************************************************** ******************************************************************************** CAM interface ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Attach the physical and virtual SCSI busses to CAM. * * Physical bus numbering starts from 0, virtual bus numbering from one greater * than the highest physical bus. Physical busses are only registered if * the kernel environment variable "hw.mly.register_physical_channels" is set. * * When we refer to a "bus", we are referring to the bus number registered with * the SIM, whereas a "channel" is a channel number given to the adapter. In order * to keep things simple, we map these 1:1, so "bus" and "channel" may be used * interchangeably. */ static int mly_cam_attach(struct mly_softc *sc) { struct cam_devq *devq; int chn, i; debug_called(1); /* * Allocate a devq for all our channels combined. */ if ((devq = cam_simq_alloc(sc->mly_controllerinfo->maximum_parallel_commands)) == NULL) { mly_printf(sc, "can't allocate CAM SIM queue\n"); return(ENOMEM); } /* * If physical channel registration has been requested, register these first. * Note that we enable tagged command queueing for physical channels.
*/ if (testenv("hw.mly.register_physical_channels")) { chn = 0; for (i = 0; i < sc->mly_controllerinfo->physical_channels_present; i++, chn++) { if ((sc->mly_cam_sim[chn] = cam_sim_alloc(mly_cam_action, mly_cam_poll, "mly", sc, device_get_unit(sc->mly_dev), &Giant, sc->mly_controllerinfo->maximum_parallel_commands, 1, devq)) == NULL) { return(ENOMEM); } if (xpt_bus_register(sc->mly_cam_sim[chn], sc->mly_dev, chn)) { mly_printf(sc, "CAM XPT physical channel registration failed\n"); return(ENXIO); } debug(1, "registered physical channel %d", chn); } } /* * Register our virtual channels, with bus numbers matching channel numbers. */ chn = sc->mly_controllerinfo->physical_channels_present; for (i = 0; i < sc->mly_controllerinfo->virtual_channels_present; i++, chn++) { if ((sc->mly_cam_sim[chn] = cam_sim_alloc(mly_cam_action, mly_cam_poll, "mly", sc, device_get_unit(sc->mly_dev), &Giant, sc->mly_controllerinfo->maximum_parallel_commands, 0, devq)) == NULL) { return(ENOMEM); } if (xpt_bus_register(sc->mly_cam_sim[chn], sc->mly_dev, chn)) { mly_printf(sc, "CAM XPT virtual channel registration failed\n"); return(ENXIO); } debug(1, "registered virtual channel %d", chn); } /* * This is the total number of channels that (might have been) registered with * CAM. Some may not have been; check the mly_cam_sim array to be certain. */ sc->mly_cam_channels = sc->mly_controllerinfo->physical_channels_present + sc->mly_controllerinfo->virtual_channels_present; return(0); } /******************************************************************************** * Detach from CAM */ static void mly_cam_detach(struct mly_softc *sc) { int i; debug_called(1); for (i = 0; i < sc->mly_cam_channels; i++) { if (sc->mly_cam_sim[i] != NULL) { xpt_bus_deregister(cam_sim_path(sc->mly_cam_sim[i])); cam_sim_free(sc->mly_cam_sim[i], 0); } } if (sc->mly_cam_devq != NULL) cam_simq_free(sc->mly_cam_devq); } /************************************************************************ * Rescan a device.
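 * The rescan is asynchronous: we build a path for the requested bus and
 * target, hand the CCB to xpt_rescan() and return; the scan itself runs in
 * CAM's context.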
*/ static void mly_cam_rescan_btl(struct mly_softc *sc, int bus, int target) { union ccb *ccb; debug_called(1); if ((ccb = xpt_alloc_ccb()) == NULL) { mly_printf(sc, "rescan failed (can't allocate CCB)\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sc->mly_cam_sim[bus]), target, 0) != CAM_REQ_CMP) { mly_printf(sc, "rescan failed (can't create path)\n"); xpt_free_ccb(ccb); return; } debug(1, "rescan target %d:%d", bus, target); xpt_rescan(ccb); } /******************************************************************************** * Handle an action requested by CAM */ static void mly_cam_action(struct cam_sim *sim, union ccb *ccb) { struct mly_softc *sc = cam_sim_softc(sim); debug_called(2); switch (ccb->ccb_h.func_code) { /* perform SCSI I/O */ case XPT_SCSI_IO: if (!mly_cam_action_io(sim, (struct ccb_scsiio *)&ccb->csio)) return; break; /* perform geometry calculations */ case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg = &ccb->ccg; u_int32_t secs_per_cylinder; debug(2, "XPT_CALC_GEOMETRY %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun); if (sc->mly_controllerparam->bios_geometry == MLY_BIOSGEOM_8G) { ccg->heads = 255; ccg->secs_per_track = 63; } else { /* MLY_BIOSGEOM_2G */ ccg->heads = 128; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; break; } /* handle path attribute inquiry */ case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; debug(2, "XPT_PATH_INQ %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun); cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE; /* XXX extra flags for physical channels? */ cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->max_target = MLY_MAX_TARGETS - 1; cpi->max_lun = MLY_MAX_LUNS - 1; cpi->initiator_id = sc->mly_controllerparam->initiator_id; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "FreeBSD", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 132 * 1024; /* XXX what to set this to? */ cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; int bus, target; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; scsi->flags = 0; scsi->valid = 0; spi->flags = 0; spi->valid = 0; bus = cam_sim_bus(sim); target = cts->ccb_h.target_id; debug(2, "XPT_GET_TRAN_SETTINGS %d:%d", bus, target); /* logical device? */ if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_LOGICAL) { /* nothing special for these */ /* physical device? 
*/ } else if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_PHYSICAL) { /* allow CAM to try tagged transactions */ scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; scsi->valid |= CTS_SCSI_VALID_TQ; /* convert speed (MHz) to usec */ if (sc->mly_btl[bus][target].mb_speed == 0) { spi->sync_period = 1000000 / 5; } else { spi->sync_period = 1000000 / sc->mly_btl[bus][target].mb_speed; } /* convert bus width to CAM internal encoding */ switch (sc->mly_btl[bus][target].mb_width) { case 32: spi->bus_width = MSG_EXT_WDTR_BUS_32_BIT; break; case 16: spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; case 8: default: spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; } spi->valid |= CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_BUS_WIDTH; /* not a device, bail out */ } else { cts->ccb_h.status = CAM_REQ_CMP_ERR; break; } /* disconnect always OK */ spi->flags |= CTS_SPI_FLAGS_DISC_ENB; spi->valid |= CTS_SPI_VALID_DISC; cts->ccb_h.status = CAM_REQ_CMP; break; } default: /* we can't do this */ debug(2, "unsupported func_code = 0x%x", ccb->ccb_h.func_code); ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } /******************************************************************************** * Handle an I/O operation requested by CAM */ static int mly_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio) { struct mly_softc *sc = cam_sim_softc(sim); struct mly_command *mc; struct mly_command_scsi_small *ss; int bus, target; int error; int s; bus = cam_sim_bus(sim); target = csio->ccb_h.target_id; debug(2, "XPT_SCSI_IO %d:%d:%d", bus, target, csio->ccb_h.target_lun); /* validate bus number */ if (!MLY_BUS_IS_VALID(sc, bus)) { debug(0, " invalid bus %d", bus); csio->ccb_h.status = CAM_REQ_CMP_ERR; } /* check for I/O attempt to a protected device */ if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_PROTECTED) { debug(2, " device protected"); csio->ccb_h.status = CAM_REQ_CMP_ERR; } /* check for I/O attempt to nonexistent device */ if (!(sc->mly_btl[bus][target].mb_flags & (MLY_BTL_LOGICAL | MLY_BTL_PHYSICAL))) { debug(2, " device %d:%d does not exist", bus, target); csio->ccb_h.status = CAM_REQ_CMP_ERR; } /* XXX increase if/when we support large SCSI commands */ if (csio->cdb_len > MLY_CMD_SCSI_SMALL_CDB) { debug(0, " command too large (%d > %d)", csio->cdb_len, MLY_CMD_SCSI_SMALL_CDB); csio->ccb_h.status = CAM_REQ_CMP_ERR; } /* check that the CDB pointer is not to a physical address */ if ((csio->ccb_h.flags & CAM_CDB_POINTER) && (csio->ccb_h.flags & CAM_CDB_PHYS)) { debug(0, " CDB pointer is to physical address"); csio->ccb_h.status = CAM_REQ_CMP_ERR; } - /* if there is data transfer, it must be to/from a virtual address */ - if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if (csio->ccb_h.flags & CAM_DATA_PHYS) { /* we can't map it */ - debug(0, " data pointer is to physical address"); - csio->ccb_h.status = CAM_REQ_CMP_ERR; - } - if (csio->ccb_h.flags & CAM_SCATTER_VALID) { /* we want to do the s/g setup */ - debug(0, " data has premature s/g setup"); - csio->ccb_h.status = CAM_REQ_CMP_ERR; - } - } - /* abandon aborted ccbs or those that have failed validation */ if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { debug(2, "abandoning CCB due to abort/validation failure"); return(EINVAL); } /* * Get a command, or push the ccb back to CAM and freeze the queue.
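 * Each freeze bumps mly_qfrzn_cnt; mly_cam_complete() drops the count and
 * sets CAM_RELEASE_SIMQ on a completing CCB to thaw the queue again.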
*/ if ((error = mly_alloc_command(sc, &mc))) { s = splcam(); xpt_freeze_simq(sim, 1); csio->ccb_h.status |= CAM_REQUEUE_REQ; sc->mly_qfrzn_cnt++; splx(s); return(error); } /* build the command */ mc->mc_data = csio; mc->mc_length = csio->dxfer_len; mc->mc_complete = mly_cam_complete; mc->mc_private = csio; mc->mc_flags |= MLY_CMD_CCB; /* XXX This code doesn't set the data direction in mc_flags. */ /* save the bus number in the ccb for later recovery XXX should be a better way */ csio->ccb_h.sim_priv.entries[0].field = bus; /* build the packet for the controller */ ss = &mc->mc_packet->scsi_small; ss->opcode = MDACMD_SCSI; if (csio->ccb_h.flags & CAM_DIS_DISCONNECT) ss->command_control.disable_disconnect = 1; if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) ss->command_control.data_direction = MLY_CCB_WRITE; ss->data_size = csio->dxfer_len; ss->addr.phys.lun = csio->ccb_h.target_lun; ss->addr.phys.target = csio->ccb_h.target_id; ss->addr.phys.channel = bus; if (csio->ccb_h.timeout < (60 * 1000)) { ss->timeout.value = csio->ccb_h.timeout / 1000; ss->timeout.scale = MLY_TIMEOUT_SECONDS; } else if (csio->ccb_h.timeout < (60 * 60 * 1000)) { ss->timeout.value = csio->ccb_h.timeout / (60 * 1000); ss->timeout.scale = MLY_TIMEOUT_MINUTES; } else { ss->timeout.value = csio->ccb_h.timeout / (60 * 60 * 1000); /* overflow? */ ss->timeout.scale = MLY_TIMEOUT_HOURS; } ss->maximum_sense_size = csio->sense_len; ss->cdb_length = csio->cdb_len; if (csio->ccb_h.flags & CAM_CDB_POINTER) { bcopy(csio->cdb_io.cdb_ptr, ss->cdb, csio->cdb_len); } else { bcopy(csio->cdb_io.cdb_bytes, ss->cdb, csio->cdb_len); } /* give the command to the controller */ if ((error = mly_start(mc))) { s = splcam(); xpt_freeze_simq(sim, 1); csio->ccb_h.status |= CAM_REQUEUE_REQ; sc->mly_qfrzn_cnt++; splx(s); return(error); } return(0); } /******************************************************************************** * Check for possibly-completed commands. */ static void mly_cam_poll(struct cam_sim *sim) { struct mly_softc *sc = cam_sim_softc(sim); debug_called(2); mly_done(sc); } /******************************************************************************** * Handle completion of a command - pass results back through the CCB */ static void mly_cam_complete(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; struct ccb_scsiio *csio = (struct ccb_scsiio *)mc->mc_private; struct scsi_inquiry_data *inq = (struct scsi_inquiry_data *)csio->data_ptr; struct mly_btl *btl; u_int8_t cmd; int bus, target; int s; debug_called(2); csio->scsi_status = mc->mc_status; switch(mc->mc_status) { case SCSI_STATUS_OK: /* * In order to report logical device type and status, we overwrite * the result of the INQUIRY command to logical devices. */ bus = csio->ccb_h.sim_priv.entries[0].field; target = csio->ccb_h.target_id; /* XXX validate bus/target? 
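 * Only INQUIRY responses on logical (RAID) buses are rewritten: the vendor,
 * product and revision fields are padded out with the device type and state
 * strings so that peripheral drivers and userland tools can see them.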
*/ if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_LOGICAL) { if (csio->ccb_h.flags & CAM_CDB_POINTER) { cmd = *csio->cdb_io.cdb_ptr; } else { cmd = csio->cdb_io.cdb_bytes[0]; } if (cmd == INQUIRY) { btl = &sc->mly_btl[bus][target]; padstr(inq->vendor, mly_describe_code(mly_table_device_type, btl->mb_type), 8); padstr(inq->product, mly_describe_code(mly_table_device_state, btl->mb_state), 16); padstr(inq->revision, "", 4); } } debug(2, "SCSI_STATUS_OK"); csio->ccb_h.status = CAM_REQ_CMP; break; case SCSI_STATUS_CHECK_COND: debug(1, "SCSI_STATUS_CHECK_COND sense %d resid %d", mc->mc_sense, mc->mc_resid); csio->ccb_h.status = CAM_SCSI_STATUS_ERROR; bzero(&csio->sense_data, SSD_FULL_SIZE); bcopy(mc->mc_packet, &csio->sense_data, mc->mc_sense); csio->sense_len = mc->mc_sense; csio->ccb_h.status |= CAM_AUTOSNS_VALID; csio->resid = mc->mc_resid; /* XXX this is a signed value... */ break; case SCSI_STATUS_BUSY: debug(1, "SCSI_STATUS_BUSY"); csio->ccb_h.status = CAM_SCSI_BUSY; break; default: debug(1, "unknown status 0x%x", csio->scsi_status); csio->ccb_h.status = CAM_REQ_CMP_ERR; break; } s = splcam(); if (sc->mly_qfrzn_cnt) { csio->ccb_h.status |= CAM_RELEASE_SIMQ; sc->mly_qfrzn_cnt--; } splx(s); xpt_done((union ccb *)csio); mly_release_command(mc); } /******************************************************************************** * Find a peripheral attached at (bus),(target) */ static struct cam_periph * mly_find_periph(struct mly_softc *sc, int bus, int target) { struct cam_periph *periph; struct cam_path *path; int status; status = xpt_create_path(&path, NULL, cam_sim_path(sc->mly_cam_sim[bus]), target, 0); if (status == CAM_REQ_CMP) { periph = cam_periph_find(path, NULL); xpt_free_path(path); } else { periph = NULL; } return(periph); } /******************************************************************************** * Name the device at (bus),(target) */ static int mly_name_device(struct mly_softc *sc, int bus, int target) { struct cam_periph *periph; if ((periph = mly_find_periph(sc, bus, target)) != NULL) { sprintf(sc->mly_btl[bus][target].mb_name, "%s%d", periph->periph_name, periph->unit_number); return(0); } sc->mly_btl[bus][target].mb_name[0] = 0; return(ENOENT); } /******************************************************************************** ******************************************************************************** Hardware Control ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Handshake with the firmware while the card is being initialised. */ static int mly_fwhandshake(struct mly_softc *sc) { u_int8_t error, param0, param1; int spinup = 0; debug_called(1); /* set HM_STSACK and let the firmware initialise */ MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK); DELAY(1000); /* too short?
*/ /* if HM_STSACK is still true, the controller is initialising */ if (!MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) return(0); mly_printf(sc, "controller initialisation started\n"); /* spin waiting for initialisation to finish, or for a message to be delivered */ while (MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) { /* check for a message */ if (MLY_ERROR_VALID(sc)) { error = MLY_GET_REG(sc, sc->mly_error_status) & ~MLY_MSG_EMPTY; param0 = MLY_GET_REG(sc, sc->mly_command_mailbox); param1 = MLY_GET_REG(sc, sc->mly_command_mailbox + 1); switch(error) { case MLY_MSG_SPINUP: if (!spinup) { mly_printf(sc, "drive spinup in progress\n"); spinup = 1; /* only print this once (should print drive being spun?) */ } break; case MLY_MSG_RACE_RECOVERY_FAIL: mly_printf(sc, "mirror race recovery failed, one or more drives offline\n"); break; case MLY_MSG_RACE_IN_PROGRESS: mly_printf(sc, "mirror race recovery in progress\n"); break; case MLY_MSG_RACE_ON_CRITICAL: mly_printf(sc, "mirror race recovery on a critical drive\n"); break; case MLY_MSG_PARITY_ERROR: mly_printf(sc, "FATAL MEMORY PARITY ERROR\n"); return(ENXIO); default: mly_printf(sc, "unknown initialisation code 0x%x\n", error); } } } return(0); } /******************************************************************************** ******************************************************************************** Debugging and Diagnostics ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Print some information about the controller. */ static void mly_describe_controller(struct mly_softc *sc) { struct mly_ioctl_getcontrollerinfo *mi = sc->mly_controllerinfo; mly_printf(sc, "%16s, %d channel%s, firmware %d.%02d-%d-%02d (%02d%02d%02d%02d), %dMB RAM\n", mi->controller_name, mi->physical_channels_present, (mi->physical_channels_present) > 1 ? "s" : "", mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build, /* XXX turn encoding? */ mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day, mi->memory_size); if (bootverbose) { mly_printf(sc, "%s %s (%x), %dMHz %d-bit %.16s\n", mly_describe_code(mly_table_oemname, mi->oem_information), mly_describe_code(mly_table_controllertype, mi->controller_type), mi->controller_type, mi->interface_speed, mi->interface_width, mi->interface_name); mly_printf(sc, "%dMB %dMHz %d-bit %s%s%s, cache %dMB\n", mi->memory_size, mi->memory_speed, mi->memory_width, mly_describe_code(mly_table_memorytype, mi->memory_type), mi->memory_parity ? "+parity": "",mi->memory_ecc ? "+ECC": "", mi->cache_size); mly_printf(sc, "CPU: %s @ %dMHz\n", mly_describe_code(mly_table_cputype, mi->cpu[0].type), mi->cpu[0].speed); if (mi->l2cache_size != 0) mly_printf(sc, "%dKB L2 cache\n", mi->l2cache_size); if (mi->exmemory_size != 0) mly_printf(sc, "%dMB %dMHz %d-bit private %s%s%s\n", mi->exmemory_size, mi->exmemory_speed, mi->exmemory_width, mly_describe_code(mly_table_memorytype, mi->exmemory_type), mi->exmemory_parity ? "+parity": "",mi->exmemory_ecc ? "+ECC": ""); mly_printf(sc, "battery backup %s\n", mi->bbu_present ? 
"present" : "not installed"); mly_printf(sc, "maximum data transfer %d blocks, maximum sg entries/command %d\n", mi->maximum_block_count, mi->maximum_sg_entries); mly_printf(sc, "logical devices present/critical/offline %d/%d/%d\n", mi->logical_devices_present, mi->logical_devices_critical, mi->logical_devices_offline); mly_printf(sc, "physical devices present %d\n", mi->physical_devices_present); mly_printf(sc, "physical disks present/offline %d/%d\n", mi->physical_disks_present, mi->physical_disks_offline); mly_printf(sc, "%d physical channel%s, %d virtual channel%s of %d possible\n", mi->physical_channels_present, mi->physical_channels_present == 1 ? "" : "s", mi->virtual_channels_present, mi->virtual_channels_present == 1 ? "" : "s", mi->virtual_channels_possible); mly_printf(sc, "%d parallel commands supported\n", mi->maximum_parallel_commands); mly_printf(sc, "%dMB flash ROM, %d of %d maximum cycles\n", mi->flash_size, mi->flash_age, mi->flash_maximum_age); } } #ifdef MLY_DEBUG /******************************************************************************** * Print some controller state */ static void mly_printstate(struct mly_softc *sc) { mly_printf(sc, "IDBR %02x ODBR %02x ERROR %02x (%x %x %x)\n", MLY_GET_REG(sc, sc->mly_idbr), MLY_GET_REG(sc, sc->mly_odbr), MLY_GET_REG(sc, sc->mly_error_status), sc->mly_idbr, sc->mly_odbr, sc->mly_error_status); mly_printf(sc, "IMASK %02x ISTATUS %02x\n", MLY_GET_REG(sc, sc->mly_interrupt_mask), MLY_GET_REG(sc, sc->mly_interrupt_status)); mly_printf(sc, "COMMAND %02x %02x %02x %02x %02x %02x %02x %02x\n", MLY_GET_REG(sc, sc->mly_command_mailbox), MLY_GET_REG(sc, sc->mly_command_mailbox + 1), MLY_GET_REG(sc, sc->mly_command_mailbox + 2), MLY_GET_REG(sc, sc->mly_command_mailbox + 3), MLY_GET_REG(sc, sc->mly_command_mailbox + 4), MLY_GET_REG(sc, sc->mly_command_mailbox + 5), MLY_GET_REG(sc, sc->mly_command_mailbox + 6), MLY_GET_REG(sc, sc->mly_command_mailbox + 7)); mly_printf(sc, "STATUS %02x %02x %02x %02x %02x %02x %02x %02x\n", MLY_GET_REG(sc, sc->mly_status_mailbox), MLY_GET_REG(sc, sc->mly_status_mailbox + 1), MLY_GET_REG(sc, sc->mly_status_mailbox + 2), MLY_GET_REG(sc, sc->mly_status_mailbox + 3), MLY_GET_REG(sc, sc->mly_status_mailbox + 4), MLY_GET_REG(sc, sc->mly_status_mailbox + 5), MLY_GET_REG(sc, sc->mly_status_mailbox + 6), MLY_GET_REG(sc, sc->mly_status_mailbox + 7)); mly_printf(sc, " %04x %08x\n", MLY_GET_REG2(sc, sc->mly_status_mailbox), MLY_GET_REG4(sc, sc->mly_status_mailbox + 4)); } struct mly_softc *mly_softc0 = NULL; void mly_printstate0(void) { if (mly_softc0 != NULL) mly_printstate(mly_softc0); } /******************************************************************************** * Print a command */ static void mly_print_command(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; mly_printf(sc, "COMMAND @ %p\n", mc); mly_printf(sc, " slot %d\n", mc->mc_slot); mly_printf(sc, " status 0x%x\n", mc->mc_status); mly_printf(sc, " sense len %d\n", mc->mc_sense); mly_printf(sc, " resid %d\n", mc->mc_resid); mly_printf(sc, " packet %p/0x%llx\n", mc->mc_packet, mc->mc_packetphys); if (mc->mc_packet != NULL) mly_print_packet(mc); mly_printf(sc, " data %p/%d\n", mc->mc_data, mc->mc_length); mly_printf(sc, " flags %b\n", mc->mc_flags, "\20\1busy\2complete\3slotted\4mapped\5datain\6dataout\n"); mly_printf(sc, " complete %p\n", mc->mc_complete); mly_printf(sc, " private %p\n", mc->mc_private); } /******************************************************************************** * Print a command packet */ static void 
mly_print_packet(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; struct mly_command_generic *ge = (struct mly_command_generic *)mc->mc_packet; struct mly_command_scsi_small *ss = (struct mly_command_scsi_small *)mc->mc_packet; struct mly_command_scsi_large *sl = (struct mly_command_scsi_large *)mc->mc_packet; struct mly_command_ioctl *io = (struct mly_command_ioctl *)mc->mc_packet; int transfer; mly_printf(sc, " command_id %d\n", ge->command_id); mly_printf(sc, " opcode %d\n", ge->opcode); mly_printf(sc, " command_control fua %d dpo %d est %d dd %s nas %d ddis %d\n", ge->command_control.force_unit_access, ge->command_control.disable_page_out, ge->command_control.extended_sg_table, (ge->command_control.data_direction == MLY_CCB_WRITE) ? "WRITE" : "READ", ge->command_control.no_auto_sense, ge->command_control.disable_disconnect); mly_printf(sc, " data_size %d\n", ge->data_size); mly_printf(sc, " sense_buffer_address 0x%llx\n", ge->sense_buffer_address); mly_printf(sc, " lun %d\n", ge->addr.phys.lun); mly_printf(sc, " target %d\n", ge->addr.phys.target); mly_printf(sc, " channel %d\n", ge->addr.phys.channel); mly_printf(sc, " logical device %d\n", ge->addr.log.logdev); mly_printf(sc, " controller %d\n", ge->addr.phys.controller); mly_printf(sc, " timeout %d %s\n", ge->timeout.value, (ge->timeout.scale == MLY_TIMEOUT_SECONDS) ? "seconds" : ((ge->timeout.scale == MLY_TIMEOUT_MINUTES) ? "minutes" : "hours")); mly_printf(sc, " maximum_sense_size %d\n", ge->maximum_sense_size); switch(ge->opcode) { case MDACMD_SCSIPT: case MDACMD_SCSI: mly_printf(sc, " cdb length %d\n", ss->cdb_length); mly_printf(sc, " cdb %*D\n", ss->cdb_length, ss->cdb, " "); transfer = 1; break; case MDACMD_SCSILC: case MDACMD_SCSILCPT: mly_printf(sc, " cdb length %d\n", sl->cdb_length); mly_printf(sc, " cdb 0x%llx\n", sl->cdb_physaddr); transfer = 1; break; case MDACMD_IOCTL: mly_printf(sc, " sub_ioctl 0x%x\n", io->sub_ioctl); switch(io->sub_ioctl) { case MDACIOCTL_SETMEMORYMAILBOX: mly_printf(sc, " health_buffer_size %d\n", io->param.setmemorymailbox.health_buffer_size); mly_printf(sc, " health_buffer_phys 0x%llx\n", io->param.setmemorymailbox.health_buffer_physaddr); mly_printf(sc, " command_mailbox 0x%llx\n", io->param.setmemorymailbox.command_mailbox_physaddr); mly_printf(sc, " status_mailbox 0x%llx\n", io->param.setmemorymailbox.status_mailbox_physaddr); transfer = 0; break; case MDACIOCTL_SETREALTIMECLOCK: case MDACIOCTL_GETHEALTHSTATUS: case MDACIOCTL_GETCONTROLLERINFO: case MDACIOCTL_GETLOGDEVINFOVALID: case MDACIOCTL_GETPHYSDEVINFOVALID: case MDACIOCTL_GETPHYSDEVSTATISTICS: case MDACIOCTL_GETLOGDEVSTATISTICS: case MDACIOCTL_GETCONTROLLERSTATISTICS: case MDACIOCTL_GETBDT_FOR_SYSDRIVE: case MDACIOCTL_CREATENEWCONF: case MDACIOCTL_ADDNEWCONF: case MDACIOCTL_GETDEVCONFINFO: case MDACIOCTL_GETFREESPACELIST: case MDACIOCTL_MORE: case MDACIOCTL_SETPHYSDEVPARAMETER: case MDACIOCTL_GETPHYSDEVPARAMETER: case MDACIOCTL_GETLOGDEVPARAMETER: case MDACIOCTL_SETLOGDEVPARAMETER: mly_printf(sc, " param %10D\n", io->param.data.param, " "); transfer = 1; break; case MDACIOCTL_GETEVENT: mly_printf(sc, " event %d\n", io->param.getevent.sequence_number_low + ((u_int32_t)io->addr.log.logdev << 16)); transfer = 1; break; case MDACIOCTL_SETRAIDDEVSTATE: mly_printf(sc, " state %d\n", io->param.setraiddevstate.state); transfer = 0; break; case MDACIOCTL_XLATEPHYSDEVTORAIDDEV: mly_printf(sc, " raid_device %d\n", io->param.xlatephysdevtoraiddev.raid_device); mly_printf(sc, " controller %d\n", 
io->param.xlatephysdevtoraiddev.controller); mly_printf(sc, " channel %d\n", io->param.xlatephysdevtoraiddev.channel); mly_printf(sc, " target %d\n", io->param.xlatephysdevtoraiddev.target); mly_printf(sc, " lun %d\n", io->param.xlatephysdevtoraiddev.lun); transfer = 0; break; case MDACIOCTL_GETGROUPCONFINFO: mly_printf(sc, " group %d\n", io->param.getgroupconfinfo.group); transfer = 1; break; case MDACIOCTL_GET_SUBSYSTEM_DATA: case MDACIOCTL_SET_SUBSYSTEM_DATA: case MDACIOCTL_STARTDISOCVERY: case MDACIOCTL_INITPHYSDEVSTART: case MDACIOCTL_INITPHYSDEVSTOP: case MDACIOCTL_INITRAIDDEVSTART: case MDACIOCTL_INITRAIDDEVSTOP: case MDACIOCTL_REBUILDRAIDDEVSTART: case MDACIOCTL_REBUILDRAIDDEVSTOP: case MDACIOCTL_MAKECONSISTENTDATASTART: case MDACIOCTL_MAKECONSISTENTDATASTOP: case MDACIOCTL_CONSISTENCYCHECKSTART: case MDACIOCTL_CONSISTENCYCHECKSTOP: case MDACIOCTL_RESETDEVICE: case MDACIOCTL_FLUSHDEVICEDATA: case MDACIOCTL_PAUSEDEVICE: case MDACIOCTL_UNPAUSEDEVICE: case MDACIOCTL_LOCATEDEVICE: case MDACIOCTL_SETMASTERSLAVEMODE: case MDACIOCTL_DELETERAIDDEV: case MDACIOCTL_REPLACEINTERNALDEV: case MDACIOCTL_CLEARCONF: case MDACIOCTL_GETCONTROLLERPARAMETER: case MDACIOCTL_SETCONTRLLERPARAMETER: case MDACIOCTL_CLEARCONFSUSPMODE: case MDACIOCTL_STOREIMAGE: case MDACIOCTL_READIMAGE: case MDACIOCTL_FLASHIMAGES: case MDACIOCTL_RENAMERAIDDEV: default: /* no idea what to print */ transfer = 0; break; } break; case MDACMD_IOCTLCHECK: case MDACMD_MEMCOPY: default: transfer = 0; break; /* print nothing */ } if (transfer) { if (ge->command_control.extended_sg_table) { mly_printf(sc, " sg table 0x%llx/%d\n", ge->transfer.indirect.table_physaddr[0], ge->transfer.indirect.entries[0]); } else { mly_printf(sc, " 0000 0x%llx/%lld\n", ge->transfer.direct.sg[0].physaddr, ge->transfer.direct.sg[0].length); mly_printf(sc, " 0001 0x%llx/%lld\n", ge->transfer.direct.sg[1].physaddr, ge->transfer.direct.sg[1].length); } } } /******************************************************************************** * Panic in a slightly informative fashion */ static void mly_panic(struct mly_softc *sc, char *reason) { mly_printstate(sc); panic(reason); } /******************************************************************************** * Print queue statistics, callable from DDB. */ void mly_print_controller(int controller) { struct mly_softc *sc; if ((sc = devclass_get_softc(devclass_find("mly"), controller)) == NULL) { printf("mly: controller %d invalid\n", controller); } else { device_printf(sc->mly_dev, "queue curr max\n"); device_printf(sc->mly_dev, "free %04d/%04d\n", sc->mly_qstat[MLYQ_FREE].q_length, sc->mly_qstat[MLYQ_FREE].q_max); device_printf(sc->mly_dev, "busy %04d/%04d\n", sc->mly_qstat[MLYQ_BUSY].q_length, sc->mly_qstat[MLYQ_BUSY].q_max); device_printf(sc->mly_dev, "complete %04d/%04d\n", sc->mly_qstat[MLYQ_COMPLETE].q_length, sc->mly_qstat[MLYQ_COMPLETE].q_max); } } #endif /******************************************************************************** ******************************************************************************** Control device interface ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Accept an open operation on the control device. 
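 *     (Before the handlers themselves, a rough picture of how userland drives
 *     this interface: open the controller's control node and issue MLYIO_COMMAND
 *     or MLYIO_HEALTH ioctls.  The disabled sketch below is illustrative only;
 *     the /dev/mly0 node name and the <dev/mly/mlyio.h> include path are
 *     assumptions, not taken from this file.)
 */
#if 0	/* illustrative userland sketch, deliberately not compiled */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <dev/mly/mlyio.h>
#include <fcntl.h>
#include <unistd.h>

static int
mly_issue_user_command(struct mly_user_command *uc)
{
	int fd, error;

	if ((fd = open("/dev/mly0", O_RDWR)) == -1)
		return (-1);
	/* serviced by mly_user_ioctl() -> mly_user_command() below */
	error = ioctl(fd, MLYIO_COMMAND, uc);
	close(fd);
	return (error);
}
#endif
/*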
*/ static int mly_user_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct mly_softc *sc = dev->si_drv1; sc->mly_state |= MLY_STATE_OPEN; return(0); } /******************************************************************************** * Accept the last close on the control device. */ static int mly_user_close(struct cdev *dev, int flags, int fmt, struct thread *td) { struct mly_softc *sc = dev->si_drv1; sc->mly_state &= ~MLY_STATE_OPEN; return (0); } /******************************************************************************** * Handle controller-specific control operations. */ static int mly_user_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td) { struct mly_softc *sc = (struct mly_softc *)dev->si_drv1; struct mly_user_command *uc = (struct mly_user_command *)addr; struct mly_user_health *uh = (struct mly_user_health *)addr; switch(cmd) { case MLYIO_COMMAND: return(mly_user_command(sc, uc)); case MLYIO_HEALTH: return(mly_user_health(sc, uh)); default: return(ENOIOCTL); } } /******************************************************************************** * Execute a command passed in from userspace. * * The control structure contains the actual command for the controller, as well * as the user-space data pointer and data size, and an optional sense buffer * size/pointer. On completion, the data size is adjusted to the command * residual, and the sense buffer size to the size of the returned sense data. * */ static int mly_user_command(struct mly_softc *sc, struct mly_user_command *uc) { struct mly_command *mc; int error, s; /* allocate a command */ if (mly_alloc_command(sc, &mc)) { error = ENOMEM; goto out; /* XXX Linux version will wait for a command */ } /* handle data size/direction */ mc->mc_length = (uc->DataTransferLength >= 0) ? uc->DataTransferLength : -uc->DataTransferLength; if (mc->mc_length > 0) { if ((mc->mc_data = malloc(mc->mc_length, M_DEVBUF, M_NOWAIT)) == NULL) { error = ENOMEM; goto out; } } if (uc->DataTransferLength > 0) { mc->mc_flags |= MLY_CMD_DATAIN; bzero(mc->mc_data, mc->mc_length); } if (uc->DataTransferLength < 0) { mc->mc_flags |= MLY_CMD_DATAOUT; if ((error = copyin(uc->DataTransferBuffer, mc->mc_data, mc->mc_length)) != 0) goto out; } /* copy the controller command */ bcopy(&uc->CommandMailbox, mc->mc_packet, sizeof(uc->CommandMailbox)); /* clear command completion handler so that we get woken up */ mc->mc_complete = NULL; /* execute the command */ if ((error = mly_start(mc)) != 0) goto out; s = splcam(); while (!(mc->mc_flags & MLY_CMD_COMPLETE)) tsleep(mc, PRIBIO, "mlyioctl", 0); splx(s); /* return the data to userspace */ if (uc->DataTransferLength > 0) if ((error = copyout(mc->mc_data, uc->DataTransferBuffer, mc->mc_length)) != 0) goto out; /* return the sense buffer to userspace */ if ((uc->RequestSenseLength > 0) && (mc->mc_sense > 0)) { if ((error = copyout(mc->mc_packet, uc->RequestSenseBuffer, min(uc->RequestSenseLength, mc->mc_sense))) != 0) goto out; } /* return command results to userspace (caller will copy out) */ uc->DataTransferLength = mc->mc_resid; uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense); uc->CommandStatus = mc->mc_status; error = 0; out: if (mc->mc_data != NULL) free(mc->mc_data, M_DEVBUF); if (mc != NULL) mly_release_command(mc); return(error); } /******************************************************************************** * Return health status to userspace. 
If the health change index in the user * structure does not match that currently exported by the controller, we * return the current status immediately. Otherwise, we block until either * interrupted or new status is delivered. */ static int mly_user_health(struct mly_softc *sc, struct mly_user_health *uh) { struct mly_health_status mh; int error, s; /* fetch the current health status from userspace */ if ((error = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh))) != 0) return(error); /* spin waiting for a status update */ s = splcam(); error = EWOULDBLOCK; while ((error != 0) && (sc->mly_event_change == mh.change_counter)) error = tsleep(&sc->mly_event_change, PRIBIO | PCATCH, "mlyhealth", 0); splx(s); /* copy the controller's health status buffer out (there is a race here if it changes again) */ error = copyout(&sc->mly_mmbox->mmm_health.status, uh->HealthStatusBuffer, sizeof(uh->HealthStatusBuffer)); return(error); } #ifdef MLY_DEBUG static int mly_timeout(struct mly_softc *sc) { struct mly_command *mc; int deadline; deadline = time_second - MLY_CMD_TIMEOUT; TAILQ_FOREACH(mc, &sc->mly_busy, mc_link) { if ((mc->mc_timestamp < deadline)) { device_printf(sc->mly_dev, "COMMAND %p TIMEOUT AFTER %d SECONDS\n", mc, (int)(time_second - mc->mc_timestamp)); } } timeout((timeout_t *)mly_timeout, sc, MLY_CMD_TIMEOUT * hz); return (0); } #endif Index: projects/physbio/sys/dev/mps/mps_sas.c =================================================================== --- projects/physbio/sys/dev/mps/mps_sas.c (revision 243875) +++ projects/physbio/sys/dev/mps/mps_sas.c (revision 243876) @@ -1,3606 +1,3612 @@ /*- * Copyright (c) 2009 Yahoo! Inc. * Copyright (c) 2011, 2012 LSI Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * LSI MPT-Fusion Host Adapter FreeBSD * * $FreeBSD$ */ #include __FBSDID("$FreeBSD$"); /* Communications core for LSI MPT2 */ /* TODO Move headers to mpsvar */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version >= 900026 #include #endif #include #include #include #include #include #include #include #include #include #include #include #define MPSSAS_DISCOVERY_TIMEOUT 20 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */ /* * static array to check SCSI OpCode for EEDP protection bits */ #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP static uint8_t op_code_prot[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory"); static void mpssas_discovery_timeout(void *data); static void mpssas_remove_device(struct mps_softc *, struct mps_command *); static void mpssas_remove_complete(struct mps_softc *, struct mps_command *); static void mpssas_action(struct cam_sim *sim, union ccb *ccb); static void mpssas_poll(struct cam_sim *sim); static void mpssas_scsiio_timeout(void *data); static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm); static void mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm, union ccb *ccb); static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *); static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *); static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *); #if __FreeBSD_version >= 900026 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm); static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr); static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb); #endif //FreeBSD_version >= 900026 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *); static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm); static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type); static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb); static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb); static void mpssas_scanner_thread(void *arg); #if __FreeBSD_version >= 1000006 static void mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); #else static void mpssas_check_eedp(struct mpssas_softc *sassc); 
static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb); #endif static int mpssas_send_portenable(struct mps_softc *sc); static void mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm); struct mpssas_target * mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle) { struct mpssas_target *target; int i; for (i = start; i < sassc->sc->facts->MaxTargets; i++) { target = &sassc->targets[i]; if (target->handle == handle) return (target); } return (NULL); } /* we need to freeze the simq during attach and diag reset, to avoid failing * commands before device handles have been found by discovery. Since * discovery involves reading config pages and possibly sending commands, * discovery actions may continue even after we receive the end of discovery * event, so refcount discovery actions instead of assuming we can unfreeze * the simq when we get the event. */ void mpssas_startup_increment(struct mpssas_softc *sassc) { if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) { if (sassc->startup_refcount++ == 0) { /* just starting, freeze the simq */ mps_dprint(sassc->sc, MPS_INFO, "%s freezing simq\n", __func__); xpt_freeze_simq(sassc->sim, 1); } mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__, sassc->startup_refcount); } } void mpssas_startup_decrement(struct mpssas_softc *sassc) { if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) { if (--sassc->startup_refcount == 0) { /* finished all discovery-related actions, release * the simq and rescan for the latest topology. */ mps_dprint(sassc->sc, MPS_INFO, "%s releasing simq\n", __func__); sassc->flags &= ~MPSSAS_IN_STARTUP; xpt_release_simq(sassc->sim, 1); mpssas_rescan_target(sassc->sc, NULL); } mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__, sassc->startup_refcount); } } /* LSI's firmware requires us to stop sending commands when we're doing task * management, so refcount the TMs and keep the simq frozen when any are in * use. */ struct mps_command * mpssas_alloc_tm(struct mps_softc *sc) { struct mps_command *tm; tm = mps_alloc_high_priority_command(sc); if (tm != NULL) { if (sc->sassc->tm_count++ == 0) { mps_printf(sc, "%s freezing simq\n", __func__); xpt_freeze_simq(sc->sassc->sim, 1); } mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__, sc->sassc->tm_count); } return tm; } void mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm) { if (tm == NULL) return; /* if there are no TMs in use, we can release the simq. We use our * own refcount so that it's easier for a diag reset to cleanup and * release the simq. */ if (--sc->sassc->tm_count == 0) { mps_printf(sc, "%s releasing simq\n", __func__); xpt_release_simq(sc->sassc->sim, 1); } mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__, sc->sassc->tm_count); mps_free_high_priority_command(sc, tm); } void mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ) { struct mpssas_softc *sassc = sc->sassc; path_id_t pathid; target_id_t targetid; union ccb *ccb; pathid = cam_sim_path(sassc->sim); if (targ == NULL) targetid = CAM_TARGET_WILDCARD; else targetid = targ - sassc->targets; /* * Allocate a CCB and schedule a rescan. 
*/ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mps_dprint(sc, MPS_FAULT, "unable to alloc CCB for rescan\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n"); xpt_free_ccb(ccb); return; } if (targetid == CAM_TARGET_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_BUS; else ccb->ccb_h.func_code = XPT_SCAN_TGT; mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid); mpssas_rescan(sassc, ccb); } static void mpssas_log_command(struct mps_command *cm, const char *fmt, ...) { struct sbuf sb; va_list ap; char str[192]; char path_str[64]; if (cm == NULL) return; sbuf_new(&sb, str, sizeof(str), 0); va_start(ap, fmt); if (cm->cm_ccb != NULL) { xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str, sizeof(path_str)); sbuf_cat(&sb, path_str); if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) { scsi_command_string(&cm->cm_ccb->csio, &sb); sbuf_printf(&sb, "length %d ", cm->cm_ccb->csio.dxfer_len); } } else { sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ", cam_sim_name(cm->cm_sc->sassc->sim), cam_sim_unit(cm->cm_sc->sassc->sim), cam_sim_bus(cm->cm_sc->sassc->sim), cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF, cm->cm_lun); } sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID); sbuf_vprintf(&sb, fmt, ap); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); va_end(ap); } static void mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *reply; struct mpssas_target *targ; uint16_t handle; mps_dprint(sc, MPS_INFO, "%s\n", __func__); reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; handle = (uint16_t)(uintptr_t)tm->cm_complete_data; targ = tm->cm_targ; if (reply == NULL) { /* XXX retry the remove after the diag reset completes? */ mps_printf(sc, "%s NULL reply reseting device 0x%04x\n", __func__, handle); mpssas_free_tm(sc, tm); return; } if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) { mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n", reply->IOCStatus, handle); mpssas_free_tm(sc, tm); return; } mps_printf(sc, "Reset aborted %u commands\n", reply->TerminationCount); mps_free_reply(sc, tm->cm_reply_data); tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ mps_printf(sc, "clearing target %u handle 0x%04x\n", targ->tid, handle); /* * Don't clear target if remove fails because things will get confusing. * Leave the devname and sasaddr intact so that we know to avoid reusing * this target id if possible, and so we can assign the same target id * to this device if it comes back in the future. */ if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) { targ = tm->cm_targ; targ->handle = 0x0; targ->encl_handle = 0x0; targ->encl_slot = 0x0; targ->exp_dev_handle = 0x0; targ->phy_num = 0x0; targ->linkrate = 0x0; targ->devinfo = 0x0; targ->flags = 0x0; } mpssas_free_tm(sc, tm); } /* * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal. * Otherwise Volume Delete is same as Bare Drive Removal. */ void mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mps_softc *sc; struct mps_command *cm; struct mpssas_target *targ = NULL; mps_dprint(sassc->sc, MPS_INFO, "%s\n", __func__); sc = sassc->sc; #ifdef WD_SUPPORT /* * If this is a WD controller, determine if the disk should be exposed * to the OS or not. If disk should be exposed, return from this * function without doing anything. 
*/ if (sc->WD_available && (sc->WD_hide_expose == MPS_WD_EXPOSE_ALWAYS)) { return; } #endif //WD_SUPPORT targ = mpssas_find_target_by_handle(sassc, 0, handle); if (targ == NULL) { /* FIXME: what is the action? */ /* We don't know about this device? */ printf("%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle); return; } targ->flags |= MPSSAS_TARGET_INREMOVAL; cm = mpssas_alloc_tm(sc); if (cm == NULL) { mps_printf(sc, "%s: command alloc failure\n", __func__); return; } mpssas_rescan_target(sc, targ); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; req->DevHandle = targ->handle; req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; /* SAS Hard Link Reset / SATA Link Reset */ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; cm->cm_targ = targ; cm->cm_data = NULL; cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; cm->cm_complete = mpssas_remove_volume; cm->cm_complete_data = (void *)(uintptr_t)handle; mps_map_command(sc, cm); } /* * The MPT2 firmware performs debounce on the link to avoid transient link * errors and false removals. When it does decide that link has been lost * and a device need to go away, it expects that the host will perform a * target reset and then an op remove. The reset has the side-effect of * aborting any outstanding requests for the device, which is required for * the op-remove to succeed. It's not clear if the host should check for * the device coming back alive after the reset. */ void mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mps_softc *sc; struct mps_command *cm; struct mpssas_target *targ = NULL; mps_dprint(sassc->sc, MPS_INFO, "%s\n", __func__); sc = sassc->sc; targ = mpssas_find_target_by_handle(sassc, 0, handle); if (targ == NULL) { /* FIXME: what is the action? */ /* We don't know about this device? */ printf("%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle); return; } targ->flags |= MPSSAS_TARGET_INREMOVAL; cm = mpssas_alloc_tm(sc); if (cm == NULL) { mps_printf(sc, "%s: command alloc failure\n", __func__); return; } mpssas_rescan_target(sc, targ); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; memset(req, 0, sizeof(*req)); req->DevHandle = htole16(targ->handle); req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; /* SAS Hard Link Reset / SATA Link Reset */ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; cm->cm_targ = targ; cm->cm_data = NULL; cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; cm->cm_complete = mpssas_remove_device; cm->cm_complete_data = (void *)(uintptr_t)handle; mps_map_command(sc, cm); } static void mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *reply; MPI2_SAS_IOUNIT_CONTROL_REQUEST *req; struct mpssas_target *targ; struct mps_command *next_cm; uint16_t handle; mps_dprint(sc, MPS_INFO, "%s\n", __func__); reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; handle = (uint16_t)(uintptr_t)tm->cm_complete_data; targ = tm->cm_targ; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! 
" "This should not happen!\n", __func__, tm->cm_flags, handle); mpssas_free_tm(sc, tm); return; } if (reply == NULL) { /* XXX retry the remove after the diag reset completes? */ mps_printf(sc, "%s NULL reply reseting device 0x%04x\n", __func__, handle); mpssas_free_tm(sc, tm); return; } if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) { mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n", le16toh(reply->IOCStatus), handle); mpssas_free_tm(sc, tm); return; } mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n", le32toh(reply->TerminationCount)); mps_free_reply(sc, tm->cm_reply_data); tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ /* Reuse the existing command */ req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req; memset(req, 0, sizeof(*req)); req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; req->Operation = MPI2_SAS_OP_REMOVE_DEVICE; req->DevHandle = htole16(handle); tm->cm_data = NULL; tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; tm->cm_complete = mpssas_remove_complete; tm->cm_complete_data = (void *)(uintptr_t)handle; mps_map_command(sc, tm); mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n", targ->tid, handle); TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) { union ccb *ccb; mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm); ccb = tm->cm_complete_data; ccb->ccb_h.status = CAM_DEV_NOT_THERE; mpssas_scsiio_complete(sc, tm); } } static void mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm) { MPI2_SAS_IOUNIT_CONTROL_REPLY *reply; uint16_t handle; struct mpssas_target *targ; struct mpssas_lun *lun; mps_dprint(sc, MPS_INFO, "%s\n", __func__); reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply; handle = (uint16_t)(uintptr_t)tm->cm_complete_data; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! " "This should not happen!\n", __func__, tm->cm_flags, handle); mpssas_free_tm(sc, tm); return; } if (reply == NULL) { /* most likely a chip reset */ mps_printf(sc, "%s NULL reply removing device 0x%04x\n", __func__, handle); mpssas_free_tm(sc, tm); return; } mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__, handle, le16toh(reply->IOCStatus)); /* * Don't clear target if remove fails because things will get confusing. * Leave the devname and sasaddr intact so that we know to avoid reusing * this target id if possible, and so we can assign the same target id * to this device if it comes back in the future. 
*/ if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) { targ = tm->cm_targ; targ->handle = 0x0; targ->encl_handle = 0x0; targ->encl_slot = 0x0; targ->exp_dev_handle = 0x0; targ->phy_num = 0x0; targ->linkrate = 0x0; targ->devinfo = 0x0; targ->flags = 0x0; while(!SLIST_EMPTY(&targ->luns)) { lun = SLIST_FIRST(&targ->luns); SLIST_REMOVE_HEAD(&targ->luns, lun_link); free(lun, M_MPT2); } } mpssas_free_tm(sc, tm); } static int mpssas_register_events(struct mps_softc *sc) { u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; bzero(events, 16); setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); setbit(events, MPI2_EVENT_SAS_DISCOVERY); setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE); setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE); setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW); setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST); setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST); setbit(events, MPI2_EVENT_IR_VOLUME); setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK); setbit(events, MPI2_EVENT_IR_OPERATION_STATUS); setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED); mps_register_events(sc, events, mpssas_evt_handler, NULL, &sc->sassc->mpssas_eh); return (0); } int mps_attach_sas(struct mps_softc *sc) { struct mpssas_softc *sassc; #if __FreeBSD_version >= 1000006 cam_status status; #endif int unit, error = 0; mps_dprint(sc, MPS_TRACE, "%s\n", __func__); sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO); if(!sassc) { device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n", __func__, __LINE__); return (ENOMEM); } sassc->targets = malloc(sizeof(struct mpssas_target) * sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO); if(!sassc->targets) { device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n", __func__, __LINE__); free(sassc, M_MPT2); return (ENOMEM); } sc->sassc = sassc; sassc->sc = sc; if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) { mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n"); error = ENOMEM; goto out; } unit = device_get_unit(sc->mps_dev); sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc, unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq); if (sassc->sim == NULL) { mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n"); error = EINVAL; goto out; } TAILQ_INIT(&sassc->ev_queue); /* Initialize taskqueue for Event Handling */ TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc); sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO, taskqueue_thread_enqueue, &sassc->ev_tq); /* Run the task queue with lowest priority */ taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq", device_get_nameunit(sc->mps_dev)); TAILQ_INIT(&sassc->ccb_scanq); error = mps_kproc_create(mpssas_scanner_thread, sassc, &sassc->rescan_thread, 0, 0, "mps_scan%d", unit); if (error) { mps_printf(sc, "Error %d starting rescan thread\n", error); goto out; } mps_lock(sc); sassc->flags |= MPSSAS_SCANTHREAD; /* * XXX There should be a bus for every port on the adapter, but since * we're just going to fake the topology for now, we'll pretend that * everything is just a target on a single bus. */ if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) { mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n", error); mps_unlock(sc); goto out; } /* * Assume that discovery events will start right away. Freezing * the simq will prevent the CAM boottime scanner from running * before discovery is complete. 
*/ sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY; xpt_freeze_simq(sassc->sim, 1); sc->sassc->startup_refcount = 0; callout_init(&sassc->discovery_callout, 1 /*mpsafe*/); sassc->discovery_timeouts = 0; sassc->tm_count = 0; #if __FreeBSD_version >= 1000006 status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL); if (status != CAM_REQ_CMP) { mps_printf(sc, "Error %#x registering async handler for " "AC_ADVINFO_CHANGED events\n", status); } #endif mps_unlock(sc); mpssas_register_events(sc); out: if (error) mps_detach_sas(sc); return (error); } int mps_detach_sas(struct mps_softc *sc) { struct mpssas_softc *sassc; struct mpssas_lun *lun, *lun_tmp; struct mpssas_target *targ; int i; mps_dprint(sc, MPS_INFO, "%s\n", __func__); if (sc->sassc == NULL) return (0); sassc = sc->sassc; mps_deregister_events(sc, sassc->mpssas_eh); /* * Drain and free the event handling taskqueue with the lock * unheld so that any parallel processing tasks drain properly * without deadlocking. */ if (sassc->ev_tq != NULL) taskqueue_free(sassc->ev_tq); /* Make sure CAM doesn't wedge if we had to bail out early. */ mps_lock(sc); /* Deregister our async handler */ #if __FreeBSD_version >= 1000006 xpt_register_async(0, mpssas_async, sc, NULL); #endif if (sassc->flags & MPSSAS_IN_STARTUP) xpt_release_simq(sassc->sim, 1); if (sassc->sim != NULL) { xpt_bus_deregister(cam_sim_path(sassc->sim)); cam_sim_free(sassc->sim, FALSE); } if (sassc->flags & MPSSAS_SCANTHREAD) { sassc->flags |= MPSSAS_SHUTDOWN; wakeup(&sassc->ccb_scanq); if (sassc->flags & MPSSAS_SCANTHREAD) { msleep(&sassc->flags, &sc->mps_mtx, PRIBIO, "mps_shutdown", 30 * hz); } } mps_unlock(sc); mps_dprint(sc, MPS_INFO, "%s:%d\n", __func__,__LINE__); if (sassc->devq != NULL) cam_simq_free(sassc->devq); for(i=0; i< sc->facts->MaxTargets ;i++) { targ = &sassc->targets[i]; SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) { free(lun, M_MPT2); } } free(sassc->targets, M_MPT2); free(sassc, M_MPT2); sc->sassc = NULL; return (0); } void mpssas_discovery_end(struct mpssas_softc *sassc) { struct mps_softc *sc = sassc->sc; mps_dprint(sc, MPS_TRACE, "%s\n", __func__); if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING) callout_stop(&sassc->discovery_callout); } static void mpssas_discovery_timeout(void *data) { struct mpssas_softc *sassc = data; struct mps_softc *sc; sc = sassc->sc; mps_dprint(sc, MPS_TRACE, "%s\n", __func__); mps_lock(sc); mps_printf(sc, "Timeout waiting for discovery, interrupts may not be working!\n"); sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING; /* Poll the hardware for events in case interrupts aren't working */ mps_intr_locked(sc); mps_printf(sassc->sc, "Finished polling after discovery timeout at %d\n", ticks); if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) { mpssas_discovery_end(sassc); } else { if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) { sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING; callout_reset(&sassc->discovery_callout, MPSSAS_DISCOVERY_TIMEOUT * hz, mpssas_discovery_timeout, sassc); sassc->discovery_timeouts++; } else { mps_dprint(sassc->sc, MPS_FAULT, "Discovery timed out, continuing.\n"); sassc->flags &= ~MPSSAS_IN_DISCOVERY; mpssas_discovery_end(sassc); } } mps_unlock(sc); } static void mpssas_action(struct cam_sim *sim, union ccb *ccb) { struct mpssas_softc *sassc; sassc = cam_sim_softc(sim); mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__, ccb->ccb_h.func_code); mtx_assert(&sassc->sc->mps_mtx, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_PATH_INQ: { struct 
ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = sassc->sc->facts->MaxTargets - 1; cpi->max_lun = 255; cpi->initiator_id = sassc->sc->facts->MaxTargets - 1; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 150000; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC; #if __FreeBSD_version >= 800001 /* * XXX KDM where does this number come from? */ cpi->maxio = 256 * 1024; #endif cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; struct ccb_trans_settings_sas *sas; struct ccb_trans_settings_scsi *scsi; struct mpssas_target *targ; cts = &ccb->cts; sas = &cts->xport_specific.sas; scsi = &cts->proto_specific.scsi; targ = &sassc->targets[cts->ccb_h.target_id]; if (targ->handle == 0x0) { cts->ccb_h.status = CAM_SEL_TIMEOUT; break; } cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_SAS; cts->transport_version = 0; sas->valid = CTS_SAS_VALID_SPEED; switch (targ->linkrate) { case 0x08: sas->bitrate = 150000; break; case 0x09: sas->bitrate = 300000; break; case 0x0a: sas->bitrate = 600000; break; default: sas->valid = 0; } cts->protocol = PROTO_SCSI; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; cts->ccb_h.status = CAM_REQ_CMP; break; } case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, /*extended*/1); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_RESET_DEV: mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n"); mpssas_action_resetdev(sassc, ccb); return; case XPT_RESET_BUS: case XPT_ABORT: case XPT_TERM_IO: mps_printf(sassc->sc, "mpssas_action faking success for " "abort or reset\n"); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_SCSI_IO: mpssas_action_scsiio(sassc, ccb); return; #if __FreeBSD_version >= 900026 case XPT_SMP_IO: mpssas_action_smpio(sassc, ccb); return; #endif default: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; } xpt_done(ccb); } static void mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code, target_id_t target_id, lun_id_t lun_id) { path_id_t path_id = cam_sim_path(sc->sassc->sim); struct cam_path *path; mps_printf(sc, "%s code %x target %d lun %d\n", __func__, ac_code, target_id, lun_id); if (xpt_create_path(&path, NULL, path_id, target_id, lun_id) != CAM_REQ_CMP) { mps_printf(sc, "unable to create path for reset " "notification\n"); return; } xpt_async(ac_code, path, NULL); xpt_free_path(path); } static void mpssas_complete_all_commands(struct mps_softc *sc) { struct mps_command *cm; int i; int completed; mps_printf(sc, "%s\n", __func__); mtx_assert(&sc->mps_mtx, MA_OWNED); /* complete all commands with a NULL reply */ for (i = 1; i < sc->num_reqs; i++) { cm = &sc->commands[i]; cm->cm_reply = NULL; completed = 0; if (cm->cm_flags & MPS_CM_FLAGS_POLLED) cm->cm_flags |= MPS_CM_FLAGS_COMPLETE; if (cm->cm_complete != NULL) { mpssas_log_command(cm, "completing cm %p state %x ccb %p for diag reset\n", cm, cm->cm_state, cm->cm_ccb); cm->cm_complete(sc, cm); completed = 1; } if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) { mpssas_log_command(cm, "waking up cm %p state %x ccb %p for diag reset\n", cm, cm->cm_state, cm->cm_ccb); wakeup(cm); completed = 1; } if ((completed == 0) && 
(cm->cm_state != MPS_CM_STATE_FREE)) { /* this should never happen, but if it does, log */ mpssas_log_command(cm, "cm %p state %x flags 0x%x ccb %p during diag " "reset\n", cm, cm->cm_state, cm->cm_flags, cm->cm_ccb); } } } void mpssas_handle_reinit(struct mps_softc *sc) { int i; /* Go back into startup mode and freeze the simq, so that CAM * doesn't send any commands until after we've rediscovered all * targets and found the proper device handles for them. * * After the reset, portenable will trigger discovery, and after all * discovery-related activities have finished, the simq will be * released. */ mps_printf(sc, "%s startup\n", __func__); sc->sassc->flags |= MPSSAS_IN_STARTUP; sc->sassc->flags |= MPSSAS_IN_DISCOVERY; xpt_freeze_simq(sc->sassc->sim, 1); /* notify CAM of a bus reset */ mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); /* complete and cleanup after all outstanding commands */ mpssas_complete_all_commands(sc); mps_printf(sc, "%s startup %u tm %u after command completion\n", __func__, sc->sassc->startup_refcount, sc->sassc->tm_count); /* * The simq was explicitly frozen above, so set the refcount to 0. * The simq will be explicitly released after port enable completes. */ sc->sassc->startup_refcount = 0; /* zero all the target handles, since they may change after the * reset, and we have to rediscover all the targets and use the new * handles. */ for (i = 0; i < sc->facts->MaxTargets; i++) { if (sc->sassc->targets[i].outstanding != 0) mps_printf(sc, "target %u outstanding %u\n", i, sc->sassc->targets[i].outstanding); sc->sassc->targets[i].handle = 0x0; sc->sassc->targets[i].exp_dev_handle = 0x0; sc->sassc->targets[i].outstanding = 0; sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET; } } static void mpssas_tm_timeout(void *data) { struct mps_command *tm = data; struct mps_softc *sc = tm->cm_sc; mtx_assert(&sc->mps_mtx, MA_OWNED); mpssas_log_command(tm, "task mgmt %p timed out\n", tm); mps_reinit(sc); } static void mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *reply; MPI2_SCSI_TASK_MANAGE_REQUEST *req; unsigned int cm_count = 0; struct mps_command *cm; struct mpssas_target *targ; callout_stop(&tm->cm_callout); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; targ = tm->cm_targ; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { mps_printf(sc, "%s: cm_flags = %#x for LUN reset! " "This should not happen!\n", __func__, tm->cm_flags); mpssas_free_tm(sc, tm); return; } if (reply == NULL) { mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm); if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) { /* this completion was due to a reset, just cleanup */ targ->flags &= ~MPSSAS_TARGET_INRESET; targ->tm = NULL; mpssas_free_tm(sc, tm); } else { /* we should have gotten a reply. */ mps_reinit(sc); } return; } mpssas_log_command(tm, "logical unit reset status 0x%x code 0x%x count %u\n", le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), le32toh(reply->TerminationCount)); /* See if there are any outstanding commands for this LUN. * This could be made more efficient by using a per-LU data * structure of some sort. 
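 * (For instance, a per-LUN outstanding-command count kept in struct mpssas_lun
 * would avoid walking the whole target queue; the sketch below is illustrative
 * only and the "outstanding" field it reads is hypothetical.)
 */
#if 0	/* illustrative sketch, deliberately not compiled */
	{
		struct mpssas_lun *lun;

		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == tm->cm_lun)
				cm_count = lun->outstanding;	/* hypothetical */
		}
	}
#endif
/*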
*/ TAILQ_FOREACH(cm, &targ->commands, cm_link) { if (cm->cm_lun == tm->cm_lun) cm_count++; } if (cm_count == 0) { mpssas_log_command(tm, "logical unit %u finished recovery after reset\n", tm->cm_lun, tm); mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, tm->cm_lun); /* we've finished recovery for this logical unit. check and * see if some other logical unit has a timedout command * that needs to be processed. */ cm = TAILQ_FIRST(&targ->timedout_commands); if (cm) { mpssas_send_abort(sc, tm, cm); } else { targ->tm = NULL; mpssas_free_tm(sc, tm); } } else { /* if we still have commands for this LUN, the reset * effectively failed, regardless of the status reported. * Escalate to a target reset. */ mpssas_log_command(tm, "logical unit reset complete for tm %p, but still have %u command(s)\n", tm, cm_count); mpssas_send_reset(sc, tm, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET); } } static void mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *reply; MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mpssas_target *targ; callout_stop(&tm->cm_callout); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; targ = tm->cm_targ; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { mps_printf(sc, "%s: cm_flags = %#x for target reset! " "This should not happen!\n", __func__, tm->cm_flags); mpssas_free_tm(sc, tm); return; } if (reply == NULL) { mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm); if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) { /* this completion was due to a reset, just cleanup */ targ->flags &= ~MPSSAS_TARGET_INRESET; targ->tm = NULL; mpssas_free_tm(sc, tm); } else { /* we should have gotten a reply. */ mps_reinit(sc); } return; } mpssas_log_command(tm, "target reset status 0x%x code 0x%x count %u\n", le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), le32toh(reply->TerminationCount)); targ->flags &= ~MPSSAS_TARGET_INRESET; if (targ->outstanding == 0) { /* we've finished recovery for this target and all * of its logical units. */ mpssas_log_command(tm, "recovery finished after target reset\n"); mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, CAM_LUN_WILDCARD); targ->tm = NULL; mpssas_free_tm(sc, tm); } else { /* after a target reset, if this target still has * outstanding commands, the reset effectively failed, * regardless of the status reported. escalate. 
*/ mpssas_log_command(tm, "target reset complete for tm %p, but still have %u command(s)\n", tm, targ->outstanding); mps_reinit(sc); } } #define MPS_RESET_TIMEOUT 30 static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mpssas_target *target; int err; target = tm->cm_targ; if (target->handle == 0) { mps_printf(sc, "%s null devhandle for target_id %d\n", __func__, target->tid); return -1; } req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; req->DevHandle = htole16(target->handle); req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; req->TaskType = type; if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) { /* XXX Need to handle invalid LUNs */ MPS_SET_LUN(req->LUN, tm->cm_lun); tm->cm_targ->logical_unit_resets++; mpssas_log_command(tm, "sending logical unit reset\n"); tm->cm_complete = mpssas_logical_unit_reset_complete; } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) { /* Target reset method = SAS Hard Link Reset / SATA Link Reset */ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; tm->cm_targ->target_resets++; tm->cm_targ->flags |= MPSSAS_TARGET_INRESET; mpssas_log_command(tm, "sending target reset\n"); tm->cm_complete = mpssas_target_reset_complete; } else { mps_printf(sc, "unexpected reset type 0x%x\n", type); return -1; } tm->cm_data = NULL; tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; tm->cm_complete_data = (void *)tm; callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz, mpssas_tm_timeout, tm); err = mps_map_command(sc, tm); if (err) mpssas_log_command(tm, "error %d sending reset type %u\n", err, type); return err; } static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm) { struct mps_command *cm; MPI2_SCSI_TASK_MANAGE_REPLY *reply; MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mpssas_target *targ; callout_stop(&tm->cm_callout); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; targ = tm->cm_targ; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { mpssas_log_command(tm, "cm_flags = %#x for abort %p TaskMID %u!\n", tm->cm_flags, tm, le16toh(req->TaskMID)); mpssas_free_tm(sc, tm); return; } if (reply == NULL) { mpssas_log_command(tm, "NULL abort reply for tm %p TaskMID %u\n", tm, le16toh(req->TaskMID)); if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) { /* this completion was due to a reset, just cleanup */ targ->tm = NULL; mpssas_free_tm(sc, tm); } else { /* we should have gotten a reply. */ mps_reinit(sc); } return; } mpssas_log_command(tm, "abort TaskMID %u status 0x%x code 0x%x count %u\n", le16toh(req->TaskMID), le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), le32toh(reply->TerminationCount)); cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands); if (cm == NULL) { /* if there are no more timedout commands, we're done with * error recovery for this target. 
*/ mpssas_log_command(tm, "finished recovery after aborting TaskMID %u\n", le16toh(req->TaskMID)); targ->tm = NULL; mpssas_free_tm(sc, tm); } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) { /* abort success, but we have more timedout commands to abort */ mpssas_log_command(tm, "continuing recovery after aborting TaskMID %u\n", le16toh(req->TaskMID)); mpssas_send_abort(sc, tm, cm); } else { /* we didn't get a command completion, so the abort * failed as far as we're concerned. escalate. */ mpssas_log_command(tm, "abort failed for TaskMID %u tm %p\n", le16toh(req->TaskMID), tm); mpssas_send_reset(sc, tm, MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET); } } #define MPS_ABORT_TIMEOUT 5 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mpssas_target *targ; int err; targ = cm->cm_targ; if (targ->handle == 0) { mps_printf(sc, "%s null devhandle for target_id %d\n", __func__, cm->cm_ccb->ccb_h.target_id); return -1; } req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; req->DevHandle = htole16(targ->handle); req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; /* XXX Need to handle invalid LUNs */ MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun); req->TaskMID = htole16(cm->cm_desc.Default.SMID); tm->cm_data = NULL; tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; tm->cm_complete = mpssas_abort_complete; tm->cm_complete_data = (void *)tm; tm->cm_targ = cm->cm_targ; tm->cm_lun = cm->cm_lun; callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz, mpssas_tm_timeout, tm); targ->aborts++; err = mps_map_command(sc, tm); if (err) mpssas_log_command(tm, "error %d sending abort for cm %p SMID %u\n", err, cm, req->TaskMID); return err; } static void mpssas_scsiio_timeout(void *data) { struct mps_softc *sc; struct mps_command *cm; struct mpssas_target *targ; cm = (struct mps_command *)data; sc = cm->cm_sc; mtx_assert(&sc->mps_mtx, MA_OWNED); mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm); /* * Run the interrupt handler to make sure it's not pending. This * isn't perfect because the command could have already completed * and been re-used, though this is unlikely. */ mps_intr_locked(sc); if (cm->cm_state == MPS_CM_STATE_FREE) { mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc); return; } if (cm->cm_ccb == NULL) { mps_printf(sc, "command timeout with NULL ccb\n"); return; } mpssas_log_command(cm, "command timeout cm %p ccb %p\n", cm, cm->cm_ccb); targ = cm->cm_targ; targ->timeouts++; /* XXX first, check the firmware state, to see if it's still * operational. if not, do a diag reset. */ cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT; cm->cm_state = MPS_CM_STATE_TIMEDOUT; TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery); if (targ->tm != NULL) { /* target already in recovery, just queue up another * timedout command to be processed later. */ mps_printf(sc, "queued timedout cm %p for processing by tm %p\n", cm, targ->tm); } else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) { mps_printf(sc, "timedout cm %p allocated tm %p\n", cm, targ->tm); /* start recovery by aborting the first timedout command */ mpssas_send_abort(sc, targ->tm, cm); } else { /* XXX queue this target up for recovery once a TM becomes * available. The firmware only has a limited number of * HighPriority credits for the high priority requests used * for task management, and we ran out. 
* * Isilon: don't worry about this for now, since we have * more credits than disks in an enclosure, and limit * ourselves to one TM per target for recovery. */ mps_printf(sc, "timedout cm %p failed to allocate a tm\n", cm); } } static void mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb) { MPI2_SCSI_IO_REQUEST *req; struct ccb_scsiio *csio; struct mps_softc *sc; struct mpssas_target *targ; struct mpssas_lun *lun; struct mps_command *cm; uint8_t i, lba_byte, *ref_tag_addr; uint16_t eedp_flags; uint32_t mpi_control; sc = sassc->sc; mtx_assert(&sc->mps_mtx, MA_OWNED); csio = &ccb->csio; targ = &sassc->targets[csio->ccb_h.target_id]; mps_dprint(sc, MPS_TRACE, "%s ccb %p target flag %x\n", __func__, ccb, targ->flags); if (targ->handle == 0x0) { mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n", __func__, csio->ccb_h.target_id); csio->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) { mps_dprint(sc, MPS_TRACE, "%s Raid component no SCSI IO supported %u\n", __func__, csio->ccb_h.target_id); csio->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } /* * If devinfo is 0 this will be a volume. In that case don't tell CAM * that the volume has timed out. We want volumes to be enumerated * until they are deleted/removed, not just failed. */ if (targ->flags & MPSSAS_TARGET_INREMOVAL) { if (targ->devinfo == 0) csio->ccb_h.status = CAM_REQ_CMP; else csio->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) { mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__); csio->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } cm = mps_alloc_command(sc); if (cm == NULL) { if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) { xpt_freeze_simq(sassc->sim, 1); sassc->flags |= MPSSAS_QUEUE_FROZEN; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status |= CAM_REQUEUE_REQ; xpt_done(ccb); return; } req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req; bzero(req, sizeof(*req)); req->DevHandle = htole16(targ->handle); req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; req->MsgFlags = 0; req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr); req->SenseBufferLength = MPS_SENSE_LEN; req->SGLFlags = 0; req->ChainOffset = 0; req->SGLOffset0 = 24; /* 32bit word offset to the SGL */ req->SGLOffset1= 0; req->SGLOffset2= 0; req->SGLOffset3= 0; req->SkipCount = 0; req->DataLength = htole32(csio->dxfer_len); req->BidirectionalDataLength = 0; req->IoFlags = htole16(csio->cdb_len); req->EEDPFlags = 0; /* Note: BiDirectional transfers are not supported */ switch (csio->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: mpi_control = MPI2_SCSIIO_CONTROL_READ; cm->cm_flags |= MPS_CM_FLAGS_DATAIN; break; case CAM_DIR_OUT: mpi_control = MPI2_SCSIIO_CONTROL_WRITE; cm->cm_flags |= MPS_CM_FLAGS_DATAOUT; break; case CAM_DIR_NONE: default: mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; break; } if (csio->cdb_len == 32) mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; /* * It looks like the hardware doesn't require an explicit tag * number for each transaction. SAM Task Management not supported * at the moment. 
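 * The switch below only maps the CAM tag message type (head-of-queue,
 * ordered, ACA or simple) onto the matching MPI2_SCSIIO_CONTROL_*
 * queueing bits, falling back to a simple queue tag by default.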
*/ switch (csio->tag_action) { case MSG_HEAD_OF_Q_TAG: mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ; break; case MSG_ORDERED_Q_TAG: mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ; break; case MSG_ACA_TASK: mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ; break; case CAM_TAG_ACTION_NONE: case MSG_SIMPLE_Q_TAG: default: mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; break; } mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits; req->Control = htole32(mpi_control); if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) { mps_free_command(sc, cm); ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return; } if (csio->ccb_h.flags & CAM_CDB_POINTER) bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len); else bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len); req->IoFlags = htole16(csio->cdb_len); /* * Check if EEDP is supported and enabled. If it is then check if the * SCSI opcode could be using EEDP. If so, make sure the LUN exists and * is formatted for EEDP support. If all of this is true, set CDB up * for EEDP transfer. */ eedp_flags = op_code_prot[req->CDB.CDB32[0]]; if (sc->eedp_enabled && eedp_flags) { SLIST_FOREACH(lun, &targ->luns, lun_link) { if (lun->lun_id == csio->ccb_h.target_lun) { break; } } if ((lun != NULL) && (lun->eedp_formatted)) { req->EEDPBlockSize = htole16(lun->eedp_block_size); eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); req->EEDPFlags = htole16(eedp_flags); /* * If CDB less than 32, fill in Primary Ref Tag with * low 4 bytes of LBA. If CDB is 32, tag stuff is * already there. Also, set protection bit. FreeBSD * currently does not support CDBs bigger than 16, but * the code doesn't hurt, and will be here for the * future. */ if (csio->cdb_len != 32) { lba_byte = (csio->cdb_len == 16) ? 6 : 2; ref_tag_addr = (uint8_t *)&req->CDB.EEDP32. PrimaryReferenceTag; for (i = 0; i < 4; i++) { *ref_tag_addr = req->CDB.CDB32[lba_byte + i]; ref_tag_addr++; } req->CDB.EEDP32.PrimaryReferenceTag = htole32(req->CDB.EEDP32.PrimaryReferenceTag); req->CDB.EEDP32.PrimaryApplicationTagMask = 0xFFFF; req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) | 0x20; } else { eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG; req->EEDPFlags = htole16(eedp_flags); req->CDB.CDB32[10] = (req->CDB.CDB32[10] & 0x1F) | 0x20; } } } - cm->cm_data = ccb; - cm->cm_flags |= MPS_CM_FLAGS_USE_CCB; cm->cm_length = csio->dxfer_len; + if (cm->cm_length != 0) { + cm->cm_data = ccb; + cm->cm_flags |= MPS_CM_FLAGS_USE_CCB; + } else { + cm->cm_data = NULL; + } cm->cm_sge = &req->SGL; cm->cm_sglsize = (32 - 24) * 4; cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle); cm->cm_complete = mpssas_scsiio_complete; cm->cm_complete_data = ccb; cm->cm_targ = targ; cm->cm_lun = csio->ccb_h.target_lun; cm->cm_ccb = ccb; /* * If HBA is a WD and the command is not for a retry, try to build a * direct I/O message. If failed, or the command is for a retry, send * the I/O to the IR volume itself. 
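 * MPS_WD_RETRY is a driver-defined ccb status value: when a direct drive
 * I/O comes back with an error, mpssas_scsiio_complete() frees the
 * command, sets this status on the CCB and resubmits it through this
 * function so that the retry goes to the IR volume itself.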
*/ if (sc->WD_valid_config) { if (ccb->ccb_h.status != MPS_WD_RETRY) { mpssas_direct_drive_io(sassc, cm, ccb); } else { ccb->ccb_h.status = CAM_REQ_INPROG; } } callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000, mpssas_scsiio_timeout, cm); targ->issued++; targ->outstanding++; TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link); if ((sc->mps_debug & MPS_TRACE) != 0) mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n", __func__, cm, ccb, targ->outstanding); mps_map_command(sc, cm); return; } static void mps_response_code(struct mps_softc *sc, u8 response_code) { char *desc; switch (response_code) { case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: desc = "task management request completed"; break; case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: desc = "invalid frame"; break; case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: desc = "task management request not supported"; break; case MPI2_SCSITASKMGMT_RSP_TM_FAILED: desc = "task management request failed"; break; case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: desc = "task management request succeeded"; break; case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: desc = "invalid lun"; break; case 0xA: desc = "overlapped tag attempted"; break; case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: desc = "task queued, however not sent to target"; break; default: desc = "unknown"; break; } mps_dprint(sc, MPS_INFO, "response_code(0x%01x): %s\n", response_code, desc); } /** * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request */ static void mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio, Mpi2SCSIIOReply_t *mpi_reply) { u32 response_info; u8 *response_bytes; u16 ioc_status = le16toh(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; u8 scsi_state = mpi_reply->SCSIState; u8 scsi_status = mpi_reply->SCSIStatus; char *desc_ioc_state = NULL; char *desc_scsi_status = NULL; char *desc_scsi_state = sc->tmp_string; u32 log_info = le32toh(mpi_reply->IOCLogInfo); if (log_info == 0x31170000) return; switch (ioc_status) { case MPI2_IOCSTATUS_SUCCESS: desc_ioc_state = "success"; break; case MPI2_IOCSTATUS_INVALID_FUNCTION: desc_ioc_state = "invalid function"; break; case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: desc_ioc_state = "scsi recovered error"; break; case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: desc_ioc_state = "scsi invalid dev handle"; break; case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: desc_ioc_state = "scsi device not there"; break; case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: desc_ioc_state = "scsi data overrun"; break; case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: desc_ioc_state = "scsi data underrun"; break; case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: desc_ioc_state = "scsi io data error"; break; case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: desc_ioc_state = "scsi protocol error"; break; case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: desc_ioc_state = "scsi task terminated"; break; case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: desc_ioc_state = "scsi residual mismatch"; break; case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: desc_ioc_state = "scsi task mgmt failed"; break; case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: desc_ioc_state = "scsi ioc terminated"; break; case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: desc_ioc_state = "scsi ext terminated"; break; case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: desc_ioc_state = "eedp guard error"; break; case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: desc_ioc_state = "eedp ref tag error"; break; case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: desc_ioc_state = "eedp app tag error"; break; default: desc_ioc_state = "unknown"; break; } switch (scsi_status) { case MPI2_SCSI_STATUS_GOOD: 
desc_scsi_status = "good"; break; case MPI2_SCSI_STATUS_CHECK_CONDITION: desc_scsi_status = "check condition"; break; case MPI2_SCSI_STATUS_CONDITION_MET: desc_scsi_status = "condition met"; break; case MPI2_SCSI_STATUS_BUSY: desc_scsi_status = "busy"; break; case MPI2_SCSI_STATUS_INTERMEDIATE: desc_scsi_status = "intermediate"; break; case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET: desc_scsi_status = "intermediate condmet"; break; case MPI2_SCSI_STATUS_RESERVATION_CONFLICT: desc_scsi_status = "reservation conflict"; break; case MPI2_SCSI_STATUS_COMMAND_TERMINATED: desc_scsi_status = "command terminated"; break; case MPI2_SCSI_STATUS_TASK_SET_FULL: desc_scsi_status = "task set full"; break; case MPI2_SCSI_STATUS_ACA_ACTIVE: desc_scsi_status = "aca active"; break; case MPI2_SCSI_STATUS_TASK_ABORTED: desc_scsi_status = "task aborted"; break; default: desc_scsi_status = "unknown"; break; } desc_scsi_state[0] = '\0'; if (!scsi_state) desc_scsi_state = " "; if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) strcat(desc_scsi_state, "response info "); if (scsi_state & MPI2_SCSI_STATE_TERMINATED) strcat(desc_scsi_state, "state terminated "); if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) strcat(desc_scsi_state, "no status "); if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED) strcat(desc_scsi_state, "autosense failed "); if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) strcat(desc_scsi_state, "autosense valid "); mps_dprint(sc, MPS_INFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x), \n", le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status); /* We can add more detail about underflow data here * TO-DO * */ mps_dprint(sc, MPS_INFO, "\tscsi_status(%s)(0x%02x), " "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status, desc_scsi_state, scsi_state); if (sc->mps_debug & MPS_INFO && scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { mps_dprint(sc, MPS_INFO, "-> Sense Buffer Data : Start :\n"); scsi_sense_print(csio); mps_dprint(sc, MPS_INFO, "-> Sense Buffer Data : End :\n"); } if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { response_info = le32toh(mpi_reply->ResponseInfo); response_bytes = (u8 *)&response_info; mps_response_code(sc,response_bytes[0]); } } static void mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm) { MPI2_SCSI_IO_REPLY *rep; union ccb *ccb; struct ccb_scsiio *csio; struct mpssas_softc *sassc; struct scsi_vpd_supported_page_list *vpd_list = NULL; u8 *TLR_bits, TLR_on; int dir = 0, i; u16 alloc_len; mps_dprint(sc, MPS_TRACE, "%s cm %p SMID %u ccb %p reply %p outstanding %u\n", __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply, cm->cm_targ->outstanding); callout_stop(&cm->cm_callout); mtx_assert(&sc->mps_mtx, MA_OWNED); sassc = sc->sassc; ccb = cm->cm_complete_data; csio = &ccb->csio; rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply; /* * XXX KDM if the chain allocation fails, does it matter if we do * the sync and unload here? It is simpler to do it in every case, * assuming it doesn't cause problems. 
*/ if (cm->cm_data != NULL) { if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) dir = BUS_DMASYNC_POSTREAD; else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT) dir = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); } cm->cm_targ->completed++; cm->cm_targ->outstanding--; TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link); if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) { TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery); if (cm->cm_reply != NULL) mpssas_log_command(cm, "completed timedout cm %p ccb %p during recovery " "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb, le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); else mpssas_log_command(cm, "completed timedout cm %p ccb %p during recovery\n", cm, cm->cm_ccb); } else if (cm->cm_targ->tm != NULL) { if (cm->cm_reply != NULL) mpssas_log_command(cm, "completed cm %p ccb %p during recovery " "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb, le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); else mpssas_log_command(cm, "completed cm %p ccb %p during recovery\n", cm, cm->cm_ccb); } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) { mpssas_log_command(cm, "reset completed cm %p ccb %p\n", cm, cm->cm_ccb); } if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { /* * We ran into an error after we tried to map the command, * so we're getting a callback without queueing the command * to the hardware. So we set the status here, and it will * be retained below. We'll go through the "fast path", * because there can be no reply when we haven't actually * gone out to the hardware. */ ccb->ccb_h.status |= CAM_REQUEUE_REQ; /* * Currently the only error included in the mask is * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of * chain frames. We need to freeze the queue until we get * a command that completed without this error, which will * hopefully have some chain frames attached that we can * use. If we wanted to get smarter about it, we would * only unfreeze the queue in this condition when we're * sure that we're getting some chain frames back. That's * probably unnecessary. */ if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) { xpt_freeze_simq(sassc->sim, 1); sassc->flags |= MPSSAS_QUEUE_FROZEN; mps_dprint(sc, MPS_INFO, "Error sending command, " "freezing SIM queue\n"); } } /* Take the fast path to completion */ if (cm->cm_reply == NULL) { if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) ccb->ccb_h.status = CAM_SCSI_BUS_RESET; else { ccb->ccb_h.status = CAM_REQ_CMP; ccb->csio.scsi_status = SCSI_STATUS_OK; } if (sassc->flags & MPSSAS_QUEUE_FROZEN) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; sassc->flags &= ~MPSSAS_QUEUE_FROZEN; mps_dprint(sc, MPS_INFO, "Unfreezing SIM queue\n"); } } /* * There are two scenarios where the status won't be * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is * set, the second is in the MPS_FLAGS_DIAGRESET above. */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { /* * Freeze the dev queue so that commands are * executed in the correct order with after error * recovery. 
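 * Setting CAM_DEV_QFRZN in the returned status tells the peripheral
 * driver that the device queue was frozen here and has to be released
 * again (cam_release_devq()) once it has dealt with the error.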
*/ ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1); } mps_free_command(sc, cm); xpt_done(ccb); return; } if (sc->mps_debug & MPS_TRACE) mpssas_log_command(cm, "ioc %x scsi %x state %x xfer %u\n", le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); /* * If this is a Direct Drive I/O, reissue the I/O to the original IR * Volume if an error occurred (normal I/O retry). Use the original * CCB, but set a flag that this will be a retry so that it's sent to * the original volume. Free the command but reuse the CCB. */ if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) { mps_free_command(sc, cm); ccb->ccb_h.status = MPS_WD_RETRY; mpssas_action_scsiio(sassc, ccb); return; } switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) { case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: csio->resid = cm->cm_length - le32toh(rep->TransferCount); /* FALLTHROUGH */ case MPI2_IOCSTATUS_SUCCESS: case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR) mpssas_log_command(cm, "recovered error\n"); /* Completion failed at the transport level. */ if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS | MPI2_SCSI_STATE_TERMINATED)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; break; } /* In a modern packetized environment, an autosense failure * implies that there's not much else that can be done to * recover the command. */ if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) { ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; break; } /* * CAM doesn't care about SAS Response Info data, but if this is * the state check if TLR should be done. If not, clear the * TLR_bits for the target. */ if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) && ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) == MPS_SCSI_RI_INVALID_FRAME)) { sc->mapping_table[csio->ccb_h.target_id].TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR; } /* * Intentionally override the normal SCSI status reporting * for these two cases. These are likely to happen in a * multi-initiator environment, and we want to make sure that * CAM retries these commands rather than fail them. */ if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) || (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) { ccb->ccb_h.status = CAM_REQ_ABORTED; break; } /* Handle normal status and sense */ csio->scsi_status = rep->SCSIStatus; if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD) ccb->ccb_h.status = CAM_REQ_CMP; else ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) { int sense_len, returned_sense_len; returned_sense_len = min(le32toh(rep->SenseCount), sizeof(struct scsi_sense_data)); if (returned_sense_len < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - returned_sense_len; else ccb->csio.sense_resid = 0; sense_len = min(returned_sense_len, ccb->csio.sense_len - ccb->csio.sense_resid); bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data)); bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len); ccb->ccb_h.status |= CAM_AUTOSNS_VALID; } /* * Check if this is an INQUIRY command. If it's a VPD inquiry, * and it's page code 0 (Supported Page List), and there is * inquiry data, and this is for a sequential access device, and * the device is an SSP target, and TLR is supported by the * controller, turn the TLR_bits value ON if page 0x90 is * supported. 
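 * TLR is the SAS Transport Layer Retries feature; it is only enabled for
 * SSP sequential-access (tape) devices whose supported-VPD-page list
 * advertises page 0x90, the protocol-specific logical unit information
 * page.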
*/ if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) && (csio->cdb_io.cdb_bytes[1] & SI_EVPD) && (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) && (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] == T_SEQUENTIAL) && (sc->control_TLR) && (sc->mapping_table[csio->ccb_h.target_id].device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)) { vpd_list = (struct scsi_vpd_supported_page_list *) csio->data_ptr; TLR_bits = &sc->mapping_table[csio->ccb_h.target_id]. TLR_bits; *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR; TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON; alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) + csio->cdb_io.cdb_bytes[4]; for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) { if (vpd_list->list[i] == 0x90) { *TLR_bits = TLR_on; break; } } } break; case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* * If devinfo is 0 this will be a volume. In that case don't * tell CAM that the volume is not there. We want volumes to * be enumerated until they are deleted/removed, not just * failed. */ if (cm->cm_targ->devinfo == 0) ccb->ccb_h.status = CAM_REQ_CMP; else ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case MPI2_IOCSTATUS_INVALID_SGL: mps_print_scsiio_cmd(sc, cm); ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; break; case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: /* * This is one of the responses that comes back when an I/O * has been aborted. If it is because of a timeout that we * initiated, just set the status to CAM_CMD_TIMEOUT. * Otherwise set it to CAM_REQ_ABORTED. The effect on the * command is the same (it gets retried, subject to the * retry counter), the only difference is what gets printed * on the console. */ if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) ccb->ccb_h.status = CAM_CMD_TIMEOUT; else ccb->ccb_h.status = CAM_REQ_ABORTED; break; case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: /* resid is ignored for this condition */ csio->resid = 0; ccb->ccb_h.status = CAM_DATA_RUN_ERR; break; case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: /* * Since these are generally external (i.e. hopefully * transient transport-related) errors, retry these without * decrementing the retry count. 
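 * CAM_REQUEUE_REQ makes CAM requeue the request unconditionally, so these
 * terminations are retried without consuming one of the CCB's retries.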
*/ ccb->ccb_h.status = CAM_REQUEUE_REQ; mpssas_log_command(cm, "terminated ioc %x scsi %x state %x xfer %u\n", le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); break; case MPI2_IOCSTATUS_INVALID_FUNCTION: case MPI2_IOCSTATUS_INTERNAL_ERROR: case MPI2_IOCSTATUS_INVALID_VPID: case MPI2_IOCSTATUS_INVALID_FIELD: case MPI2_IOCSTATUS_INVALID_STATE: case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED: case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: default: mpssas_log_command(cm, "completed ioc %x scsi %x state %x xfer %u\n", le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); csio->resid = cm->cm_length; ccb->ccb_h.status = CAM_REQ_CMP_ERR; break; } mps_sc_failed_io_info(sc,csio,rep); if (sassc->flags & MPSSAS_QUEUE_FROZEN) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; sassc->flags &= ~MPSSAS_QUEUE_FROZEN; mps_dprint(sc, MPS_INFO, "Command completed, " "unfreezing SIM queue\n"); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1); } mps_free_command(sc, cm); xpt_done(ccb); } /* All Request reached here are Endian safe */ static void mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm, union ccb *ccb) { pMpi2SCSIIORequest_t pIO_req; struct mps_softc *sc = sassc->sc; uint64_t virtLBA; uint32_t physLBA, stripe_offset, stripe_unit; uint32_t io_size, column; uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB; /* * If this is a valid SCSI command (Read6, Read10, Read16, Write6, * Write10, or Write16), build a direct I/O message. Otherwise, the I/O * will be sent to the IR volume itself. Since Read6 and Write6 are a * bit different than the 10/16 CDBs, handle them separately. */ pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req; CDB = pIO_req->CDB.CDB32; /* * Handle 6 byte CDBs. */ if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) || (CDB[0] == WRITE_6))) { /* * Get the transfer size in blocks. */ io_size = (cm->cm_length >> sc->DD_block_exponent); /* * Get virtual LBA given in the CDB. */ virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) | ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3]; /* * Check that LBA range for I/O does not exceed volume's * MaxLBA. */ if ((virtLBA + (uint64_t)io_size - 1) <= sc->DD_max_lba) { /* * Check if the I/O crosses a stripe boundary. If not, * translate the virtual LBA to a physical LBA and set * the DevHandle for the PhysDisk to be used. If it * does cross a boundry, do normal I/O. To get the * right DevHandle to use, get the map number for the * column, then use that map number to look up the * DevHandle of the PhysDisk. */ stripe_offset = (uint32_t)virtLBA & (sc->DD_stripe_size - 1); if ((stripe_offset + io_size) <= sc->DD_stripe_size) { physLBA = (uint32_t)virtLBA >> sc->DD_stripe_exponent; stripe_unit = physLBA / sc->DD_num_phys_disks; column = physLBA % sc->DD_num_phys_disks; pIO_req->DevHandle = htole16(sc->DD_column_map[column].dev_handle); /* ???? 
Is this endian safe*/ cm->cm_desc.SCSIIO.DevHandle = pIO_req->DevHandle; physLBA = (stripe_unit << sc->DD_stripe_exponent) + stripe_offset; ptrLBA = &pIO_req->CDB.CDB32[1]; physLBA_byte = (uint8_t)(physLBA >> 16); *ptrLBA = physLBA_byte; ptrLBA = &pIO_req->CDB.CDB32[2]; physLBA_byte = (uint8_t)(physLBA >> 8); *ptrLBA = physLBA_byte; ptrLBA = &pIO_req->CDB.CDB32[3]; physLBA_byte = (uint8_t)physLBA; *ptrLBA = physLBA_byte; /* * Set flag that Direct Drive I/O is * being done. */ cm->cm_flags |= MPS_CM_FLAGS_DD_IO; } } return; } /* * Handle 10, 12 or 16 byte CDBs. */ if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) || (CDB[0] == WRITE_10) || (CDB[0] == READ_16) || (CDB[0] == WRITE_16) || (CDB[0] == READ_12) || (CDB[0] == WRITE_12))) { /* * For 16-byte CDB's, verify that the upper 4 bytes of the CDB * are 0. If not, this is accessing beyond 2TB so handle it in * the else section. 10-byte and 12-byte CDB's are OK. * FreeBSD sends very rare 12 byte READ/WRITE, but driver is * ready to accept 12byte CDB for Direct IOs. */ if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) || (CDB[0] == READ_12 || CDB[0] == WRITE_12) || !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) { /* * Get the transfer size in blocks. */ io_size = (cm->cm_length >> sc->DD_block_exponent); /* * Get virtual LBA. Point to correct lower 4 bytes of * LBA in the CDB depending on command. */ lba_idx = ((CDB[0] == READ_12) || (CDB[0] == WRITE_12) || (CDB[0] == READ_10) || (CDB[0] == WRITE_10))? 2 : 6; virtLBA = ((uint64_t)CDB[lba_idx] << 24) | ((uint64_t)CDB[lba_idx + 1] << 16) | ((uint64_t)CDB[lba_idx + 2] << 8) | (uint64_t)CDB[lba_idx + 3]; /* * Check that LBA range for I/O does not exceed volume's * MaxLBA. */ if ((virtLBA + (uint64_t)io_size - 1) <= sc->DD_max_lba) { /* * Check if the I/O crosses a stripe boundary. * If not, translate the virtual LBA to a * physical LBA and set the DevHandle for the * PhysDisk to be used. If it does cross a * boundry, do normal I/O. To get the right * DevHandle to use, get the map number for the * column, then use that map number to look up * the DevHandle of the PhysDisk. */ stripe_offset = (uint32_t)virtLBA & (sc->DD_stripe_size - 1); if ((stripe_offset + io_size) <= sc->DD_stripe_size) { physLBA = (uint32_t)virtLBA >> sc->DD_stripe_exponent; stripe_unit = physLBA / sc->DD_num_phys_disks; column = physLBA % sc->DD_num_phys_disks; pIO_req->DevHandle = htole16(sc->DD_column_map[column]. dev_handle); cm->cm_desc.SCSIIO.DevHandle = pIO_req->DevHandle; physLBA = (stripe_unit << sc->DD_stripe_exponent) + stripe_offset; ptrLBA = &pIO_req->CDB.CDB32[lba_idx]; physLBA_byte = (uint8_t)(physLBA >> 24); *ptrLBA = physLBA_byte; ptrLBA = &pIO_req->CDB.CDB32[lba_idx + 1]; physLBA_byte = (uint8_t)(physLBA >> 16); *ptrLBA = physLBA_byte; ptrLBA = &pIO_req->CDB.CDB32[lba_idx + 2]; physLBA_byte = (uint8_t)(physLBA >> 8); *ptrLBA = physLBA_byte; ptrLBA = &pIO_req->CDB.CDB32[lba_idx + 3]; physLBA_byte = (uint8_t)physLBA; *ptrLBA = physLBA_byte; /* * Set flag that Direct Drive I/O is * being done. */ cm->cm_flags |= MPS_CM_FLAGS_DD_IO; } } } else { /* * 16-byte CDB and the upper 4 bytes of the CDB are not * 0. Get the transfer size in blocks. */ io_size = (cm->cm_length >> sc->DD_block_exponent); /* * Get virtual LBA. */ virtLBA = ((uint64_t)CDB[2] << 54) | ((uint64_t)CDB[3] << 48) | ((uint64_t)CDB[4] << 40) | ((uint64_t)CDB[5] << 32) | ((uint64_t)CDB[6] << 24) | ((uint64_t)CDB[7] << 16) | ((uint64_t)CDB[8] << 8) | (uint64_t)CDB[9]; /* * Check that LBA range for I/O does not exceed volume's * MaxLBA. 
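 * As an illustrative sketch only (not driver code), the stripe
 * translation below can be summarized by the helper that follows; the
 * identifier names are hypothetical and it assumes that
 * DD_stripe_size == (1 << DD_stripe_exponent), which the driver's own
 * mask and shift usage implies.  (Separately, the CDB[2] shift of 54 a
 * few lines above looks suspect: a big-endian 64-bit LBA in bytes 2..9
 * of a 16-byte CDB would use a shift of 56.)
 */
#if 0	/* illustrative sketch, never compiled */
static uint32_t
example_direct_drive_lba(uint64_t virt_lba, uint32_t stripe_exponent,
    uint32_t num_phys_disks, uint32_t *column)
{
	/* Offset of the start of the I/O within its stripe. */
	uint32_t stripe_offset = (uint32_t)virt_lba &
	    ((1u << stripe_exponent) - 1);
	/* Index of the stripe, then row and disk (column) within the set. */
	uint32_t stripe_num = (uint32_t)(virt_lba >> stripe_exponent);
	uint32_t stripe_unit = stripe_num / num_phys_disks;

	*column = stripe_num % num_phys_disks;
	/* Physical LBA on the disk identified by *column. */
	return ((stripe_unit << stripe_exponent) + stripe_offset);
}
#endif
/*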
*/ if ((virtLBA + (uint64_t)io_size - 1) <= sc->DD_max_lba) { /* * Check if the I/O crosses a stripe boundary. * If not, translate the virtual LBA to a * physical LBA and set the DevHandle for the * PhysDisk to be used. If it does cross a * boundry, do normal I/O. To get the right * DevHandle to use, get the map number for the * column, then use that map number to look up * the DevHandle of the PhysDisk. */ stripe_offset = (uint32_t)virtLBA & (sc->DD_stripe_size - 1); if ((stripe_offset + io_size) <= sc->DD_stripe_size) { physLBA = (uint32_t)(virtLBA >> sc->DD_stripe_exponent); stripe_unit = physLBA / sc->DD_num_phys_disks; column = physLBA % sc->DD_num_phys_disks; pIO_req->DevHandle = htole16(sc->DD_column_map[column]. dev_handle); cm->cm_desc.SCSIIO.DevHandle = pIO_req->DevHandle; physLBA = (stripe_unit << sc->DD_stripe_exponent) + stripe_offset; /* * Set upper 4 bytes of LBA to 0. We * assume that the phys disks are less * than 2 TB's in size. Then, set the * lower 4 bytes. */ pIO_req->CDB.CDB32[2] = 0; pIO_req->CDB.CDB32[3] = 0; pIO_req->CDB.CDB32[4] = 0; pIO_req->CDB.CDB32[5] = 0; ptrLBA = &pIO_req->CDB.CDB32[6]; physLBA_byte = (uint8_t)(physLBA >> 24); *ptrLBA = physLBA_byte; ptrLBA = &pIO_req->CDB.CDB32[7]; physLBA_byte = (uint8_t)(physLBA >> 16); *ptrLBA = physLBA_byte; ptrLBA = &pIO_req->CDB.CDB32[8]; physLBA_byte = (uint8_t)(physLBA >> 8); *ptrLBA = physLBA_byte; ptrLBA = &pIO_req->CDB.CDB32[9]; physLBA_byte = (uint8_t)physLBA; *ptrLBA = physLBA_byte; /* * Set flag that Direct Drive I/O is * being done. */ cm->cm_flags |= MPS_CM_FLAGS_DD_IO; } } } } } #if __FreeBSD_version >= 900026 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm) { MPI2_SMP_PASSTHROUGH_REPLY *rpl; MPI2_SMP_PASSTHROUGH_REQUEST *req; uint64_t sasaddr; union ccb *ccb; ccb = cm->cm_complete_data; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and SMP * commands require two S/G elements only. That should be handled * in the standard request size. */ if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n", __func__, cm->cm_flags); ccb->ccb_h.status = CAM_REQ_CMP_ERR; goto bailout; } rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply; if (rpl == NULL) { mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__); ccb->ccb_h.status = CAM_REQ_CMP_ERR; goto bailout; } req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req; sasaddr = le32toh(req->SASAddress.Low); sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32; if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS || rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) { mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n", __func__, le16toh(rpl->IOCStatus), rpl->SASStatus); ccb->ccb_h.status = CAM_REQ_CMP_ERR; goto bailout; } mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address " "%#jx completed successfully\n", __func__, (uintmax_t)sasaddr); if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED) ccb->ccb_h.status = CAM_REQ_CMP; else ccb->ccb_h.status = CAM_SMP_STATUS_ERROR; bailout: /* * We sync in both directions because we had DMAs in the S/G list * in both directions. 
*/ bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); mps_free_command(sc, cm); xpt_done(ccb); } static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr) { struct mps_command *cm; uint8_t *request, *response; MPI2_SMP_PASSTHROUGH_REQUEST *req; struct mps_softc *sc; struct sglist *sg; int error; sc = sassc->sc; sg = NULL; error = 0; /* * XXX We don't yet support physical addresses here. */ - if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) { + switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) { + case CAM_DATA_PADDR: + case CAM_DATA_SG_PADDR: mps_printf(sc, "%s: physical addresses not supported\n", __func__); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; - } - - /* - * If the user wants to send an S/G list, check to make sure they - * have single buffers. - */ - if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { + case CAM_DATA_SG: /* * The chip does not support more than one buffer for the * request or response. */ if ((ccb->smpio.smp_request_sglist_cnt > 1) || (ccb->smpio.smp_response_sglist_cnt > 1)) { mps_printf(sc, "%s: multiple request or response " "buffer segments not supported for SMP\n", __func__); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } /* * The CAM_SCATTER_VALID flag was originally implemented * for the XPT_SCSI_IO CCB, which only has one data pointer. * We have two. So, just take that flag to mean that we * might have S/G lists, and look at the S/G segment count * to figure out whether that is the case for each individual * buffer. */ if (ccb->smpio.smp_request_sglist_cnt != 0) { bus_dma_segment_t *req_sg; req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request; request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr; } else request = ccb->smpio.smp_request; if (ccb->smpio.smp_response_sglist_cnt != 0) { bus_dma_segment_t *rsp_sg; rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response; response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr; } else response = ccb->smpio.smp_response; - } else { + break; + case CAM_DATA_VADDR: request = ccb->smpio.smp_request; response = ccb->smpio.smp_response; + break; + default: + ccb->ccb_h.status = CAM_REQ_INVALID; + xpt_done(ccb); + return; } cm = mps_alloc_command(sc); if (cm == NULL) { mps_printf(sc, "%s: cannot allocate command\n", __func__); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; } req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req; bzero(req, sizeof(*req)); req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; /* Allow the chip to use any route to this SAS address. */ req->PhysicalPort = 0xff; req->RequestDataLength = htole16(ccb->smpio.smp_request_len); req->SGLFlags = MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI; mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS " "address %#jx\n", __func__, (uintmax_t)sasaddr); mpi_init_sge(cm, req, &req->SGL); /* * Set up a uio to pass into mps_map_command(). This allows us to * do one map command, and one busdma call in there. */ cm->cm_uio.uio_iov = cm->cm_iovec; cm->cm_uio.uio_iovcnt = 2; cm->cm_uio.uio_segflg = UIO_SYSSPACE; /* * The read/write flag isn't used by busdma, but set it just in * case. This isn't exactly accurate, either, since we're going in * both directions. 
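 * The two iovecs filled in below (request first, then response) let
 * mps_map_command() map the whole bidirectional transfer with a single
 * busdma call; cm_max_segs is set to 2 further down to match.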
*/ cm->cm_uio.uio_rw = UIO_WRITE; cm->cm_iovec[0].iov_base = request; cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength); cm->cm_iovec[1].iov_base = response; cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len; cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len + cm->cm_iovec[1].iov_len; /* * Trigger a warning message in mps_data_cb() for the user if we * wind up exceeding two S/G segments. The chip expects one * segment for the request and another for the response. */ cm->cm_max_segs = 2; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_complete = mpssas_smpio_complete; cm->cm_complete_data = ccb; /* * Tell the mapping code that we're using a uio, and that this is * an SMP passthrough request. There is a little special-case * logic there (in mps_data_cb()) to handle the bidirectional * transfer. */ cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS | MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT; /* The chip data format is little endian. */ req->SASAddress.High = htole32(sasaddr >> 32); req->SASAddress.Low = htole32(sasaddr); /* * XXX Note that we don't have a timeout/abort mechanism here. * From the manual, it looks like task management requests only * work for SCSI IO and SATA passthrough requests. We may need to * have a mechanism to retry requests in the event of a chip reset * at least. Hopefully the chip will insure that any errors short * of that are relayed back to the driver. */ error = mps_map_command(sc, cm); if ((error != 0) && (error != EINPROGRESS)) { mps_printf(sc, "%s: error %d returned from mps_map_command()\n", __func__, error); goto bailout_error; } return; bailout_error: mps_free_command(sc, cm); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; } static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb) { struct mps_softc *sc; struct mpssas_target *targ; uint64_t sasaddr = 0; sc = sassc->sc; /* * Make sure the target exists. */ targ = &sassc->targets[ccb->ccb_h.target_id]; if (targ->handle == 0x0) { mps_printf(sc, "%s: target %d does not exist!\n", __func__, ccb->ccb_h.target_id); ccb->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } /* * If this device has an embedded SMP target, we'll talk to it * directly. * figure out what the expander's address is. */ if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0) sasaddr = targ->sasaddr; /* * If we don't have a SAS address for the expander yet, try * grabbing it from the page 0x83 information cached in the * transport layer for this target. LSI expanders report the * expander SAS address as the port-associated SAS address in * Inquiry VPD page 0x83. Maxim expanders don't report it in page * 0x83. * * XXX KDM disable this for now, but leave it commented out so that * it is obvious that this is another possible way to get the SAS * address. * * The parent handle method below is a little more reliable, and * the other benefit is that it works for devices other than SES * devices. So you can send a SMP request to a da(4) device and it * will get routed to the expander that device is attached to. * (Assuming the da(4) device doesn't contain an SMP target...) */ #if 0 if (sasaddr == 0) sasaddr = xpt_path_sas_addr(ccb->ccb_h.path); #endif /* * If we still don't have a SAS address for the expander, look for * the parent device of this device, which is probably the expander. 
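 * With the newer (non-OLD_MPS_PROBE) code the parent's device info and
 * SAS address are already cached on the target, so no separate lookup of
 * the parent target structure is needed.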
*/ if (sasaddr == 0) { #ifdef OLD_MPS_PROBE struct mpssas_target *parent_target; #endif if (targ->parent_handle == 0x0) { mps_printf(sc, "%s: handle %d does not have a valid " "parent handle!\n", __func__, targ->handle); ccb->ccb_h.status = CAM_REQ_INVALID; goto bailout; } #ifdef OLD_MPS_PROBE parent_target = mpssas_find_target_by_handle(sassc, 0, targ->parent_handle); if (parent_target == NULL) { mps_printf(sc, "%s: handle %d does not have a valid " "parent target!\n", __func__, targ->handle); ccb->ccb_h.status = CAM_REQ_INVALID; goto bailout; } if ((parent_target->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) { mps_printf(sc, "%s: handle %d parent %d does not " "have an SMP target!\n", __func__, targ->handle, parent_target->handle); ccb->ccb_h.status = CAM_REQ_INVALID; goto bailout; } sasaddr = parent_target->sasaddr; #else /* OLD_MPS_PROBE */ if ((targ->parent_devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) { mps_printf(sc, "%s: handle %d parent %d does not " "have an SMP target!\n", __func__, targ->handle, targ->parent_handle); ccb->ccb_h.status = CAM_REQ_INVALID; goto bailout; } if (targ->parent_sasaddr == 0x0) { mps_printf(sc, "%s: handle %d parent handle %d does " "not have a valid SAS address!\n", __func__, targ->handle, targ->parent_handle); ccb->ccb_h.status = CAM_REQ_INVALID; goto bailout; } sasaddr = targ->parent_sasaddr; #endif /* OLD_MPS_PROBE */ } if (sasaddr == 0) { mps_printf(sc, "%s: unable to find SAS address for handle %d\n", __func__, targ->handle); ccb->ccb_h.status = CAM_REQ_INVALID; goto bailout; } mpssas_send_smpcmd(sassc, ccb, sasaddr); return; bailout: xpt_done(ccb); } #endif //__FreeBSD_version >= 900026 static void mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mps_softc *sc; struct mps_command *tm; struct mpssas_target *targ; mps_dprint(sassc->sc, MPS_TRACE, __func__); mtx_assert(&sassc->sc->mps_mtx, MA_OWNED); sc = sassc->sc; tm = mps_alloc_command(sc); if (tm == NULL) { mps_printf(sc, "comand alloc failure in mpssas_action_resetdev\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; } targ = &sassc->targets[ccb->ccb_h.target_id]; req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; req->DevHandle = htole16(targ->handle); req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; /* SAS Hard Link Reset / SATA Link Reset */ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; tm->cm_data = NULL; tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; tm->cm_complete = mpssas_resetdev_complete; tm->cm_complete_data = ccb; tm->cm_targ = targ; mps_map_command(sc, tm); } static void mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *resp; union ccb *ccb; mps_dprint(sc, MPS_TRACE, __func__); mtx_assert(&sc->mps_mtx, MA_OWNED); resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; ccb = tm->cm_complete_data; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! 
" "This should not happen!\n", __func__, tm->cm_flags, req->DevHandle); ccb->ccb_h.status = CAM_REQ_CMP_ERR; goto bailout; } printf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode)); if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) { ccb->ccb_h.status = CAM_REQ_CMP; mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, CAM_LUN_WILDCARD); } else ccb->ccb_h.status = CAM_REQ_CMP_ERR; bailout: mpssas_free_tm(sc, tm); xpt_done(ccb); } static void mpssas_poll(struct cam_sim *sim) { struct mpssas_softc *sassc; sassc = cam_sim_softc(sim); if (sassc->sc->mps_debug & MPS_TRACE) { /* frequent debug messages during a panic just slow * everything down too much. */ mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__); sassc->sc->mps_debug &= ~MPS_TRACE; } mps_intr_locked(sassc->sc); } static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb) { struct mpssas_softc *sassc; char path_str[64]; if (done_ccb == NULL) return; sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1; mtx_assert(&sassc->sc->mps_mtx, MA_OWNED); xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str)); mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str); xpt_free_path(done_ccb->ccb_h.path); xpt_free_ccb(done_ccb); #if __FreeBSD_version < 1000006 /* * Before completing scan, get EEDP stuff for all of the existing * targets. */ mpssas_check_eedp(sassc); #endif } /* thread to handle bus rescans */ static void mpssas_scanner_thread(void *arg) { struct mpssas_softc *sassc; struct mps_softc *sc; union ccb *ccb; sassc = (struct mpssas_softc *)arg; sc = sassc->sc; mps_dprint(sc, MPS_TRACE, "%s\n", __func__); mps_lock(sc); for (;;) { /* Sleep for 1 second and check the queue status*/ msleep(&sassc->ccb_scanq, &sc->mps_mtx, PRIBIO, "mps_scanq", 1 * hz); if (sassc->flags & MPSSAS_SHUTDOWN) { mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n"); break; } next_work: // Get first work. ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq); if (ccb == NULL) continue; // Got first work. TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe); xpt_action(ccb); if (sassc->flags & MPSSAS_SHUTDOWN) { mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n"); break; } goto next_work; } sassc->flags &= ~MPSSAS_SCANTHREAD; wakeup(&sassc->flags); mps_unlock(sc); mps_dprint(sc, MPS_TRACE, "Scanner exiting\n"); mps_kproc_exit(0); } /* * This function will send READ_CAP_16 to find out EEDP protection mode. * It will check inquiry data before sending READ_CAP_16. * Callback for READ_CAP_16 is "mpssas_read_cap_done". * This is insternal scsi command and we need to take care release of devq, if * CAM_DEV_QFRZN is set. Driver needs to release devq if it has frozen any. * xpt_release_devq is called from mpssas_read_cap_done. * * All other commands will be handled by periph layer and there it will * check for "CAM_DEV_QFRZN" and release of devq will be done. 
*/ static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb) { char path_str[64]; mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__); mtx_assert(&sassc->sc->mps_mtx, MA_OWNED); if (ccb == NULL) return; xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str)); mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str); /* Prepare request */ ccb->ccb_h.ppriv_ptr1 = sassc; ccb->ccb_h.cbfcnp = mpssas_rescan_done; xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT); TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe); wakeup(&sassc->ccb_scanq); } #if __FreeBSD_version >= 1000006 static void mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct mps_softc *sc; sc = (struct mps_softc *)callback_arg; switch (code) { case AC_ADVINFO_CHANGED: { struct mpssas_target *target; struct mpssas_softc *sassc; struct scsi_read_capacity_data_long rcap_buf; struct ccb_dev_advinfo cdai; struct mpssas_lun *lun; lun_id_t lunid; int found_lun; uintptr_t buftype; buftype = (uintptr_t)arg; found_lun = 0; sassc = sc->sassc; /* * We're only interested in read capacity data changes. */ if (buftype != CDAI_TYPE_RCAPLONG) break; /* * We're only interested in devices that are attached to * this controller. */ if (xpt_path_path_id(path) != sassc->sim->path_id) break; /* * We should have a handle for this, but check to make sure. */ target = &sassc->targets[xpt_path_target_id(path)]; if (target->handle == 0) break; lunid = xpt_path_lun_id(path); SLIST_FOREACH(lun, &target->luns, lun_link) { if (lun->lun_id == lunid) { found_lun = 1; break; } } if (found_lun == 0) { lun = malloc(sizeof(struct mpssas_lun), M_MPT2, M_NOWAIT | M_ZERO); if (lun == NULL) { mps_dprint(sc, MPS_FAULT, "Unable to alloc " "LUN for EEDP support.\n"); break; } lun->lun_id = lunid; SLIST_INSERT_HEAD(&target->luns, lun, lun_link); } bzero(&rcap_buf, sizeof(rcap_buf)); xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.ccb_h.flags = CAM_DIR_IN; cdai.buftype = CDAI_TYPE_RCAPLONG; cdai.flags = 0; cdai.bufsiz = sizeof(rcap_buf); cdai.buf = (uint8_t *)&rcap_buf; xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) && (rcap_buf.prot & SRC16_PROT_EN)) { lun->eedp_formatted = TRUE; lun->eedp_block_size = scsi_4btoul(rcap_buf.length); } else { lun->eedp_formatted = FALSE; lun->eedp_block_size = 0; } break; } default: break; } } #else /* __FreeBSD_version >= 1000006 */ static void mpssas_check_eedp(struct mpssas_softc *sassc) { struct mps_softc *sc = sassc->sc; struct ccb_scsiio *csio; struct scsi_read_capacity_16 *scsi_cmd; struct scsi_read_capacity_eedp *rcap_buf; union ccb *ccb; path_id_t pathid = cam_sim_path(sassc->sim); target_id_t targetid; lun_id_t lunid; struct cam_periph *found_periph; struct mpssas_target *target; struct mpssas_lun *lun; uint8_t found_lun; struct ccb_getdev cgd; char path_str[64]; /* * Issue a READ CAPACITY 16 command to each LUN of each target. This * info is used to determine if the LUN is formatted for EEDP support. 
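 * A LUN is then marked EEDP-formatted when the returned READ CAPACITY(16)
 * data has the protection-enable bit set (rcap_buf->protect & 0x01,
 * evaluated in mpssas_read_cap_done()).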
*/ for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) { target = &sassc->targets[targetid]; if (target->handle == 0x0) { continue; } lunid = 0; do { ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mps_dprint(sc, MPS_FAULT, "Unable to alloc CCB " "for EEDP support.\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, targetid, lunid) != CAM_REQ_CMP) { mps_dprint(sc, MPS_FAULT, "Unable to create " "path for EEDP support\n"); xpt_free_ccb(ccb); return; } /* * If a periph is returned, the LUN exists. Create an * entry in the target's LUN list. */ if ((found_periph = cam_periph_find(ccb->ccb_h.path, NULL)) != NULL) { /* * If LUN is already in list, don't create a new * one. */ found_lun = FALSE; SLIST_FOREACH(lun, &target->luns, lun_link) { if (lun->lun_id == lunid) { found_lun = TRUE; break; } } if (!found_lun) { lun = malloc(sizeof(struct mpssas_lun), M_MPT2, M_NOWAIT | M_ZERO); if (lun == NULL) { mps_dprint(sc, MPS_FAULT, "Unable to alloc LUN for " "EEDP support.\n"); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } lun->lun_id = lunid; SLIST_INSERT_HEAD(&target->luns, lun, lun_link); } lunid++; /* Before Issuing READ CAPACITY 16, * check Device type. */ xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); /* * If this flag is set in the inquiry data, * the device supports protection information, * and must support the 16 byte read * capacity command, otherwise continue without * sending read cap 16 */ xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str)); if ((cgd.inq_data.spc3_flags & SPC3_SID_PROTECT) == 0) { xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); continue; } mps_dprint(sc, MPS_INFO, "Sending read cap: path %s" " handle %d\n", path_str, target->handle ); /* * Issue a READ CAPACITY 16 command for the LUN. * The mpssas_read_cap_done function will load * the read cap info into the LUN struct. */ rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPT2, M_NOWAIT| M_ZERO); if (rcap_buf == NULL) { mps_dprint(sc, MPS_FAULT, "Unable to alloc read " "capacity buffer for EEDP support.\n"); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } csio = &ccb->csio; csio->ccb_h.func_code = XPT_SCSI_IO; csio->ccb_h.flags = CAM_DIR_IN; csio->ccb_h.retry_count = 4; csio->ccb_h.cbfcnp = mpssas_read_cap_done; csio->ccb_h.timeout = 60000; csio->data_ptr = (uint8_t *)rcap_buf; csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp); csio->sense_len = MPS_SENSE_LEN; csio->cdb_len = sizeof(*scsi_cmd); csio->tag_action = MSG_SIMPLE_Q_TAG; scsi_cmd = (struct scsi_read_capacity_16 *) &csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = 0x9E; scsi_cmd->service_action = SRC16_SERVICE_ACTION; ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp); /* * Set the path, target and lun IDs for the READ * CAPACITY request. 
*/ ccb->ccb_h.path_id = xpt_path_path_id(ccb->ccb_h.path); ccb->ccb_h.target_id = xpt_path_target_id(ccb->ccb_h.path); ccb->ccb_h.target_lun = xpt_path_lun_id(ccb->ccb_h.path); ccb->ccb_h.ppriv_ptr1 = sassc; xpt_action(ccb); } else { xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); } } while (found_periph); } } static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb) { struct mpssas_softc *sassc; struct mpssas_target *target; struct mpssas_lun *lun; struct scsi_read_capacity_eedp *rcap_buf; if (done_ccb == NULL) return; /* Driver need to release devq, it Scsi command is * generated by driver internally. * Currently there is a single place where driver * calls scsi command internally. In future if driver * calls more scsi command internally, it needs to release * devq internally, since those command will not go back to * cam_periph. */ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) { done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; xpt_release_devq(done_ccb->ccb_h.path, /*count*/ 1, /*run_queue*/TRUE); } rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr; /* * Get the LUN ID for the path and look it up in the LUN list for the * target. */ sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1; target = &sassc->targets[done_ccb->ccb_h.target_id]; SLIST_FOREACH(lun, &target->luns, lun_link) { if (lun->lun_id != done_ccb->ccb_h.target_lun) continue; /* * Got the LUN in the target's LUN list. Fill it in * with EEDP info. If the READ CAP 16 command had some * SCSI error (common if command is not supported), mark * the lun as not supporting EEDP and set the block size * to 0. */ if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) { lun->eedp_formatted = FALSE; lun->eedp_block_size = 0; break; } if (rcap_buf->protect & 0x01) { lun->eedp_formatted = TRUE; lun->eedp_block_size = scsi_4btoul(rcap_buf->length); } break; } // Finished with this CCB and path. free(rcap_buf, M_MPT2); xpt_free_path(done_ccb->ccb_h.path); xpt_free_ccb(done_ccb); } #endif /* __FreeBSD_version >= 1000006 */ int mpssas_startup(struct mps_softc *sc) { struct mpssas_softc *sassc; /* * Send the port enable message and set the wait_for_port_enable flag. * This flag helps to keep the simq frozen until all discovery events * are processed. */ sassc = sc->sassc; mpssas_startup_increment(sassc); sc->wait_for_port_enable = 1; mpssas_send_portenable(sc); return (0); } static int mpssas_send_portenable(struct mps_softc *sc) { MPI2_PORT_ENABLE_REQUEST *request; struct mps_command *cm; mps_dprint(sc, MPS_TRACE, "%s\n", __func__); if ((cm = mps_alloc_command(sc)) == NULL) return (EBUSY); request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req; request->Function = MPI2_FUNCTION_PORT_ENABLE; request->MsgFlags = 0; request->VP_ID = 0; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_complete = mpssas_portenable_complete; cm->cm_data = NULL; cm->cm_sge = NULL; mps_map_command(sc, cm); mps_dprint(sc, MPS_TRACE, "mps_send_portenable finished cm %p req %p complete %p\n", cm, cm->cm_req, cm->cm_complete); return (0); } static void mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm) { MPI2_PORT_ENABLE_REPLY *reply; struct mpssas_softc *sassc; mps_dprint(sc, MPS_TRACE, "%s\n", __func__); sassc = sc->sassc; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * port enable commands don't have S/G lists. 
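 * The reply itself is only sanity-checked below; the interesting side
 * effects are tearing down the config intrhook, fetching the WarpDrive
 * configuration pages, and waking whoever is waiting on
 * port_enable_complete before the simq is released.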
*/ if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { mps_printf(sc, "%s: cm_flags = %#x for port enable! " "This should not happen!\n", __func__, cm->cm_flags); } reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply; if (reply == NULL) mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n"); else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) mps_dprint(sc, MPS_FAULT, "Portenable failed\n"); mps_free_command(sc, cm); if (sc->mps_ich.ich_arg != NULL) { mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n"); config_intrhook_disestablish(&sc->mps_ich); sc->mps_ich.ich_arg = NULL; } /* * Get WarpDrive info after discovery is complete but before the scan * starts. At this point, all devices are ready to be exposed to the * OS. If devices should be hidden instead, take them out of the * 'targets' array before the scan. The devinfo for a disk will have * some info and a volume's will be 0. Use that to remove disks. */ mps_wd_config_pages(sc); /* * Done waiting for port enable to complete. Decrement the refcount. * If refcount is 0, discovery is complete and a rescan of the bus can * take place. Since the simq was explicitly frozen before port * enable, it must be explicitly released here to keep the * freeze/release count in sync. */ sc->wait_for_port_enable = 0; sc->port_enable_complete = 1; wakeup(&sc->port_enable_complete); mpssas_startup_decrement(sassc); xpt_release_simq(sassc->sim, 1); } Index: projects/physbio/sys/dev/mpt/mpt_cam.c =================================================================== --- projects/physbio/sys/dev/mpt/mpt_cam.c (revision 243875) +++ projects/physbio/sys/dev/mpt/mpt_cam.c (revision 243876) @@ -1,5448 +1,5448 @@ /*- * FreeBSD/CAM specific routines for LSI '909 FC adapters. * FreeBSD Version. * * Copyright (c) 2000, 2001 by Greg Ansley * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2002, 2006 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Support from Chris Ellsworth in order to make SAS adapters work * is gratefully acknowledged. * * Support from LSI-Logic has also gone a great deal toward making this a * workable subsystem and is gratefully acknowledged. */ /*- * Copyright (c) 2004, Avid Technology, Inc. and its contributors. * Copyright (c) 2005, WHEEL Sp. z o.o. * Copyright (c) 2004, 2005 Justin T. Gibbs * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! 
*/ #include "dev/mpt/mpilib/mpi_init.h" #include "dev/mpt/mpilib/mpi_targ.h" #include "dev/mpt/mpilib/mpi_fc.h" #include "dev/mpt/mpilib/mpi_sas.h" #include #include #include #if __FreeBSD_version >= 700025 #ifndef CAM_NEW_TRAN_CODE #define CAM_NEW_TRAN_CODE 1 #endif #endif static void mpt_poll(struct cam_sim *); static timeout_t mpt_timeout; static void mpt_action(struct cam_sim *, union ccb *); static int mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *); static void mpt_setwidth(struct mpt_softc *, int, int); static void mpt_setsync(struct mpt_softc *, int, int, int); static int mpt_update_spi_config(struct mpt_softc *, int); static mpt_reply_handler_t mpt_scsi_reply_handler; static mpt_reply_handler_t mpt_scsi_tmf_reply_handler; static mpt_reply_handler_t mpt_fc_els_reply_handler; static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *, MSG_DEFAULT_REPLY *); static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int); static int mpt_fc_reset_link(struct mpt_softc *, int); static int mpt_spawn_recovery_thread(struct mpt_softc *mpt); static void mpt_terminate_recovery_thread(struct mpt_softc *mpt); static void mpt_recovery_thread(void *arg); static void mpt_recover_commands(struct mpt_softc *mpt); static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int, u_int, u_int, u_int, int); static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int); static void mpt_post_target_command(struct mpt_softc *, request_t *, int); static int mpt_add_els_buffers(struct mpt_softc *mpt); static int mpt_add_target_commands(struct mpt_softc *mpt); static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t); static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t); static void mpt_target_start_io(struct mpt_softc *, union ccb *); static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *); static int mpt_abort_target_cmd(struct mpt_softc *, request_t *); static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *, uint8_t, uint8_t const *); static void mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t, tgt_resource_t *, int); static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *); static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *); static mpt_reply_handler_t mpt_scsi_tgt_reply_handler; static mpt_reply_handler_t mpt_sata_pass_reply_handler; static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE; static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE; static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE; static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE; static mpt_probe_handler_t mpt_cam_probe; static mpt_attach_handler_t mpt_cam_attach; static mpt_enable_handler_t mpt_cam_enable; static mpt_ready_handler_t mpt_cam_ready; static mpt_event_handler_t mpt_cam_event; static mpt_reset_handler_t mpt_cam_ioc_reset; static mpt_detach_handler_t mpt_cam_detach; static struct mpt_personality mpt_cam_personality = { .name = "mpt_cam", .probe = mpt_cam_probe, .attach = mpt_cam_attach, .enable = mpt_cam_enable, .ready = mpt_cam_ready, .event = mpt_cam_event, .reset = mpt_cam_ioc_reset, .detach = mpt_cam_detach, }; DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND); MODULE_DEPEND(mpt_cam, cam, 1, 1, 1); int mpt_enable_sata_wc = -1; TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc); static int mpt_cam_probe(struct mpt_softc *mpt) { int role; /* * Only attach to nodes that support the initiator or target role * (or want to) or have 
RAID physical devices that need CAM pass-thru * support. */ if (mpt->do_cfg_role) { role = mpt->cfg_role; } else { role = mpt->role; } if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 || (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) { return (0); } return (ENODEV); } static int mpt_cam_attach(struct mpt_softc *mpt) { struct cam_devq *devq; mpt_handler_t handler; int maxq; int error; MPT_LOCK(mpt); TAILQ_INIT(&mpt->request_timeout_list); maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))? mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt); handler.reply_handler = mpt_scsi_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &scsi_io_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } handler.reply_handler = mpt_scsi_tmf_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &scsi_tmf_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } /* * If we're fibre channel and could support target mode, we register * an ELS reply handler and give it resources. */ if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { handler.reply_handler = mpt_fc_els_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &fc_els_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } if (mpt_add_els_buffers(mpt) == FALSE) { error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } maxq -= mpt->els_cmds_allocated; } /* * If we support target mode, we register a reply handler for it, * but don't add command resources until we actually enable target * mode. */ if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { handler.reply_handler = mpt_scsi_tgt_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &mpt->scsi_tgt_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } } if (mpt->is_sas) { handler.reply_handler = mpt_sata_pass_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &sata_pass_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } } /* * We keep one request reserved for timeout TMF requests. */ mpt->tmf_req = mpt_get_request(mpt, FALSE); if (mpt->tmf_req == NULL) { mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } /* * Mark the request as free even though not on the free list. * There is only one TMF request allowed to be outstanding at * a time and the TMF routines perform their own allocation * tracking using the standard state flags. */ mpt->tmf_req->state = REQ_STATE_FREE; maxq--; /* * The rest of this is CAM foo, for which we need to drop our lock */ MPT_UNLOCK(mpt); if (mpt_spawn_recovery_thread(mpt) != 0) { mpt_prt(mpt, "Unable to spawn recovery thread!\n"); error = ENOMEM; goto cleanup; } /* * Create the device queue for our SIM(s). */ devq = cam_simq_alloc(maxq); if (devq == NULL) { mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n"); error = ENOMEM; goto cleanup; } /* * Construct our SIM entry. */ mpt->sim = mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); if (mpt->sim == NULL) { mpt_prt(mpt, "Unable to allocate CAM SIM!\n"); cam_simq_free(devq); error = ENOMEM; goto cleanup; } /* * Register exactly this bus. 
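 *
 * This registers bus 0 for the ordinary initiator/target path.  A
 * second bus for RAID physical disks is only registered further down,
 * and only when IOC Page 2 reports a nonzero MaxPhysDisks.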
*/ MPT_LOCK(mpt); if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) { mpt_prt(mpt, "Bus registration Failed!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "Unable to allocate Path!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } MPT_UNLOCK(mpt); /* * Only register a second bus for RAID physical * devices if the controller supports RAID. */ if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) { return (0); } /* * Create a "bus" to export all hidden disks to CAM. */ mpt->phydisk_sim = mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); if (mpt->phydisk_sim == NULL) { mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n"); error = ENOMEM; goto cleanup; } /* * Register this bus. */ MPT_LOCK(mpt); if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) != CAM_SUCCESS) { mpt_prt(mpt, "Physical Disk Bus registration Failed!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } if (xpt_create_path(&mpt->phydisk_path, NULL, cam_sim_path(mpt->phydisk_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } MPT_UNLOCK(mpt); mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n"); return (0); cleanup: mpt_cam_detach(mpt); return (error); } /* * Read FC configuration information */ static int mpt_read_config_info_fc(struct mpt_softc *mpt) { struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; char *topology = NULL; int rv; rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0, 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n", mpt->mpt_fcport_page0.Header.PageVersion, mpt->mpt_fcport_page0.Header.PageLength, mpt->mpt_fcport_page0.Header.PageNumber, mpt->mpt_fcport_page0.Header.PageType); rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header, sizeof(mpt->mpt_fcport_page0), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read FC Port Page 0\n"); return (-1); } mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0); mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed; switch (mpt->mpt_fcport_page0.Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) { case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT: mpt->mpt_fcport_speed = 0; topology = ""; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT: topology = "N-Port"; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP: topology = "NL-Port"; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT: topology = "F-Port"; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP: topology = "FL-Port"; break; default: mpt->mpt_fcport_speed = 0; topology = "?"; break; } mpt_lprt(mpt, MPT_PRT_INFO, "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x " "Speed %u-Gbit\n", topology, mpt->mpt_fcport_page0.WWNN.High, mpt->mpt_fcport_page0.WWNN.Low, mpt->mpt_fcport_page0.WWPN.High, mpt->mpt_fcport_page0.WWPN.Low, mpt->mpt_fcport_speed); MPT_UNLOCK(mpt); ctx = device_get_sysctl_ctx(mpt->dev); tree = device_get_sysctl_tree(mpt->dev); snprintf(mpt->scinfo.fc.wwnn, sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x", mpt->mpt_fcport_page0.WWNN.High, mpt->mpt_fcport_page0.WWNN.Low); snprintf(mpt->scinfo.fc.wwpn, sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x", mpt->mpt_fcport_page0.WWPN.High, mpt->mpt_fcport_page0.WWPN.Low); SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwnn", 
CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0, "World Wide Node Name"); SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0, "World Wide Port Name"); MPT_LOCK(mpt); return (0); } /* * Set FC configuration information. */ static int mpt_set_initial_config_fc(struct mpt_softc *mpt) { CONFIG_PAGE_FC_PORT_1 fc; U32 fl; int r, doit = 0; int role; r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0, &fc.Header, FALSE, 5000); if (r) { mpt_prt(mpt, "failed to read FC page 1 header\n"); return (mpt_fc_reset_link(mpt, 1)); } r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0, &fc.Header, sizeof (fc), FALSE, 5000); if (r) { mpt_prt(mpt, "failed to read FC page 1\n"); return (mpt_fc_reset_link(mpt, 1)); } mpt2host_config_page_fc_port_1(&fc); /* * Check our flags to make sure we support the role we want. */ doit = 0; role = 0; fl = fc.Flags; if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) { role |= MPT_ROLE_INITIATOR; } if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { role |= MPT_ROLE_TARGET; } fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK; if (mpt->do_cfg_role == 0) { role = mpt->cfg_role; } else { mpt->do_cfg_role = 0; } if (role != mpt->cfg_role) { if (mpt->cfg_role & MPT_ROLE_INITIATOR) { if ((role & MPT_ROLE_INITIATOR) == 0) { mpt_prt(mpt, "adding initiator role\n"); fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT; doit++; } else { mpt_prt(mpt, "keeping initiator role\n"); } } else if (role & MPT_ROLE_INITIATOR) { mpt_prt(mpt, "removing initiator role\n"); doit++; } if (mpt->cfg_role & MPT_ROLE_TARGET) { if ((role & MPT_ROLE_TARGET) == 0) { mpt_prt(mpt, "adding target role\n"); fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG; doit++; } else { mpt_prt(mpt, "keeping target role\n"); } } else if (role & MPT_ROLE_TARGET) { mpt_prt(mpt, "removing target role\n"); doit++; } mpt->role = mpt->cfg_role; } if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) { mpt_prt(mpt, "adding OXID option\n"); fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID; doit++; } } if (doit) { fc.Flags = fl; host2mpt_config_page_fc_port_1(&fc); r = mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header, sizeof(fc), FALSE, 5000); if (r != 0) { mpt_prt(mpt, "failed to update NVRAM with changes\n"); return (0); } mpt_prt(mpt, "NOTE: NVRAM changes will not take " "effect until next reboot or IOC reset\n"); } return (0); } static int mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo) { ConfigExtendedPageHeader_t hdr; struct mptsas_phyinfo *phyinfo; SasIOUnitPage0_t *buffer; int error, len, i; error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION, 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, &hdr, 0, 10000); if (error) goto out; if (hdr.ExtPageLength == 0) { error = ENXIO; goto out; } len = hdr.ExtPageLength * 4; buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO); if (buffer == NULL) { error = ENOMEM; goto out; } error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 0, &hdr, buffer, len, 0, 10000); if (error) { free(buffer, M_DEVBUF); goto out; } portinfo->num_phys = buffer->NumPhys; portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) * portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO); if (portinfo->phy_info == NULL) { free(buffer, M_DEVBUF); error = ENOMEM; goto out; } for (i = 0; i < portinfo->num_phys; i++) { phyinfo = &portinfo->phy_info[i]; phyinfo->phy_num = i; phyinfo->port_id = buffer->PhyData[i].Port; phyinfo->negotiated_link_rate = buffer->PhyData[i].NegotiatedLinkRate; 
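/*
 * The controller device handle comes back little-endian in the config
 * page reply; convert it to host order before caching it for the later
 * SAS Device Page 0 lookups.
 */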
phyinfo->handle = le16toh(buffer->PhyData[i].ControllerDevHandle); } free(buffer, M_DEVBUF); out: return (error); } static int mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info, uint32_t form, uint32_t form_specific) { ConfigExtendedPageHeader_t hdr; SasPhyPage0_t *buffer; int error; error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr, 0, 10000); if (error) goto out; if (hdr.ExtPageLength == 0) { error = ENXIO; goto out; } buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); if (buffer == NULL) { error = ENOMEM; goto out; } error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, form + form_specific, &hdr, buffer, sizeof(SasPhyPage0_t), 0, 10000); if (error) { free(buffer, M_DEVBUF); goto out; } phy_info->hw_link_rate = buffer->HwLinkRate; phy_info->programmed_link_rate = buffer->ProgrammedLinkRate; phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle); phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle); free(buffer, M_DEVBUF); out: return (error); } static int mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info, uint32_t form, uint32_t form_specific) { ConfigExtendedPageHeader_t hdr; SasDevicePage0_t *buffer; uint64_t sas_address; int error = 0; bzero(device_info, sizeof(*device_info)); error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, &hdr, 0, 10000); if (error) goto out; if (hdr.ExtPageLength == 0) { error = ENXIO; goto out; } buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); if (buffer == NULL) { error = ENOMEM; goto out; } error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, form + form_specific, &hdr, buffer, sizeof(SasDevicePage0_t), 0, 10000); if (error) { free(buffer, M_DEVBUF); goto out; } device_info->dev_handle = le16toh(buffer->DevHandle); device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle); device_info->enclosure_handle = le16toh(buffer->EnclosureHandle); device_info->slot = le16toh(buffer->Slot); device_info->phy_num = buffer->PhyNum; device_info->physical_port = buffer->PhysicalPort; device_info->target_id = buffer->TargetID; device_info->bus = buffer->Bus; bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t)); device_info->sas_address = le64toh(sas_address); device_info->device_info = le32toh(buffer->DeviceInfo); free(buffer, M_DEVBUF); out: return (error); } /* * Read SAS configuration information. Nothing to do yet. 
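 *
 * In practice the routine below reads SAS IO Unit Page 0 and then, for
 * each phy, SAS PHY Page 0 plus SAS Device Page 0 for both the phy's own
 * identify info and whatever device is attached to it.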
*/ static int mpt_read_config_info_sas(struct mpt_softc *mpt) { struct mptsas_portinfo *portinfo; struct mptsas_phyinfo *phyinfo; int error, i; portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO); if (portinfo == NULL) return (ENOMEM); error = mptsas_sas_io_unit_pg0(mpt, portinfo); if (error) { free(portinfo, M_DEVBUF); return (0); } for (i = 0; i < portinfo->num_phys; i++) { phyinfo = &portinfo->phy_info[i]; error = mptsas_sas_phy_pg0(mpt, phyinfo, (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER << MPI_SAS_PHY_PGAD_FORM_SHIFT), i); if (error) break; error = mptsas_sas_device_pg0(mpt, &phyinfo->identify, (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << MPI_SAS_DEVICE_PGAD_FORM_SHIFT), phyinfo->handle); if (error) break; phyinfo->identify.phy_num = phyinfo->phy_num = i; if (phyinfo->attached.dev_handle) error = mptsas_sas_device_pg0(mpt, &phyinfo->attached, (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << MPI_SAS_DEVICE_PGAD_FORM_SHIFT), phyinfo->attached.dev_handle); if (error) break; } mpt->sas_portinfo = portinfo; return (0); } static void mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo, int enabled) { SataPassthroughRequest_t *pass; request_t *req; int error, status; req = mpt_get_request(mpt, 0); if (req == NULL) return; pass = req->req_vbuf; bzero(pass, sizeof(SataPassthroughRequest_t)); pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH; pass->TargetID = devinfo->target_id; pass->Bus = devinfo->bus; pass->PassthroughFlags = 0; pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED; pass->DataLength = 0; pass->MsgContext = htole32(req->index | sata_pass_handler_id); pass->CommandFIS[0] = 0x27; pass->CommandFIS[1] = 0x80; pass->CommandFIS[2] = 0xef; pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82; pass->CommandFIS[7] = 0x40; pass->CommandFIS[15] = 0x08; mpt_check_doorbell(mpt); mpt_send_cmd(mpt, req); error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0, 10 * 1000); if (error) { mpt_free_request(mpt, req); printf("error %d sending passthrough\n", error); return; } status = le16toh(req->IOCStatus); if (status != MPI_IOCSTATUS_SUCCESS) { mpt_free_request(mpt, req); printf("IOCSTATUS %d\n", status); return; } mpt_free_request(mpt, req); } /* * Set SAS configuration information. Nothing to do yet. */ static int mpt_set_initial_config_sas(struct mpt_softc *mpt) { struct mptsas_phyinfo *phyinfo; int i; if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) { for (i = 0; i < mpt->sas_portinfo->num_phys; i++) { phyinfo = &mpt->sas_portinfo->phy_info[i]; if (phyinfo->attached.dev_handle == 0) continue; if ((phyinfo->attached.device_info & MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0) continue; if (bootverbose) device_printf(mpt->dev, "%sabling SATA WC on phy %d\n", (mpt_enable_sata_wc) ? 
"En" : "Dis", i); mptsas_set_sata_wc(mpt, &phyinfo->attached, mpt_enable_sata_wc); } } return (0); } static int mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { if (req != NULL) { if (reply_frame != NULL) { req->IOCStatus = le16toh(reply_frame->IOCStatus); } req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; TAILQ_REMOVE(&mpt->request_pending_list, req, links); if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { wakeup(req); } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { /* * Whew- we can free this request (late completion) */ mpt_free_request(mpt, req); } } return (TRUE); } /* * Read SCSI configuration information */ static int mpt_read_config_info_spi(struct mpt_softc *mpt) { int rv, i; rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0, &mpt->mpt_port_page0.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n", mpt->mpt_port_page0.Header.PageVersion, mpt->mpt_port_page0.Header.PageLength, mpt->mpt_port_page0.Header.PageNumber, mpt->mpt_port_page0.Header.PageType); rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0, &mpt->mpt_port_page1.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", mpt->mpt_port_page1.Header.PageVersion, mpt->mpt_port_page1.Header.PageLength, mpt->mpt_port_page1.Header.PageNumber, mpt->mpt_port_page1.Header.PageType); rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0, &mpt->mpt_port_page2.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n", mpt->mpt_port_page2.Header.PageVersion, mpt->mpt_port_page2.Header.PageLength, mpt->mpt_port_page2.Header.PageNumber, mpt->mpt_port_page2.Header.PageType); for (i = 0; i < 16; i++) { rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i, mpt->mpt_dev_page0[i].Header.PageVersion, mpt->mpt_dev_page0[i].Header.PageLength, mpt->mpt_dev_page0[i].Header.PageNumber, mpt->mpt_dev_page0[i].Header.PageType); rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i, mpt->mpt_dev_page1[i].Header.PageVersion, mpt->mpt_dev_page1[i].Header.PageLength, mpt->mpt_dev_page1[i].Header.PageNumber, mpt->mpt_dev_page1[i].Header.PageType); } /* * At this point, we don't *have* to fail. As long as we have * valid config header information, we can (barely) lurch * along. 
*/ rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header, sizeof(mpt->mpt_port_page0), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read SPI Port Page 0\n"); } else { mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0); mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n", mpt->mpt_port_page0.Capabilities, mpt->mpt_port_page0.PhysicalInterface); } rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header, sizeof(mpt->mpt_port_page1), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read SPI Port Page 1\n"); } else { mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1); mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n", mpt->mpt_port_page1.Configuration, mpt->mpt_port_page1.OnBusTimerValue); } rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header, sizeof(mpt->mpt_port_page2), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read SPI Port Page 2\n"); } else { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "Port Page 2: Flags %x Settings %x\n", mpt->mpt_port_page2.PortFlags, mpt->mpt_port_page2.PortSettings); mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2); for (i = 0; i < 16; i++) { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n", i, mpt->mpt_port_page2.DeviceSettings[i].Timeout, mpt->mpt_port_page2.DeviceSettings[i].SyncFactor, mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags); } } for (i = 0; i < 16; i++) { rv = mpt_read_cur_cfg_page(mpt, i, &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0), FALSE, 5000); if (rv) { mpt_prt(mpt, "cannot read SPI Target %d Device Page 0\n", i); continue; } mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]); mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "target %d page 0: Negotiated Params %x Information %x\n", i, mpt->mpt_dev_page0[i].NegotiatedParameters, mpt->mpt_dev_page0[i].Information); rv = mpt_read_cur_cfg_page(mpt, i, &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1), FALSE, 5000); if (rv) { mpt_prt(mpt, "cannot read SPI Target %d Device Page 1\n", i); continue; } mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]); mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "target %d page 1: Requested Params %x Configuration %x\n", i, mpt->mpt_dev_page1[i].RequestedParameters, mpt->mpt_dev_page1[i].Configuration); } return (0); } /* * Validate SPI configuration information. * * In particular, validate SPI Port Page 1. */ static int mpt_set_initial_config_spi(struct mpt_softc *mpt) { int error, i, pp1val; mpt->mpt_disc_enable = 0xff; mpt->mpt_tag_enable = 0; pp1val = ((1 << mpt->mpt_ini_id) << MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id; if (mpt->mpt_port_page1.Configuration != pp1val) { CONFIG_PAGE_SCSI_PORT_1 tmp; mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should " "be %x\n", mpt->mpt_port_page1.Configuration, pp1val); tmp = mpt->mpt_port_page1; tmp.Configuration = pp1val; host2mpt_config_page_scsi_port_1(&tmp); error = mpt_write_cur_cfg_page(mpt, 0, &tmp.Header, sizeof(tmp), FALSE, 5000); if (error) { return (-1); } error = mpt_read_cur_cfg_page(mpt, 0, &tmp.Header, sizeof(tmp), FALSE, 5000); if (error) { return (-1); } mpt2host_config_page_scsi_port_1(&tmp); if (tmp.Configuration != pp1val) { mpt_prt(mpt, "failed to reset SPI Port Page 1 Config value\n"); return (-1); } mpt->mpt_port_page1 = tmp; } /* * The purpose of this exercise is to get * all targets back to async/narrow. * * We skip this step if the BIOS has already negotiated * speeds with the targets. 
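/*
 * "Already negotiated" means Port Page 2 reports
 * MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS; otherwise each of the
 * 16 targets has its requested parameters zeroed and pushed out via
 * mpt_update_spi_config().
 */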
*/ i = mpt->mpt_port_page2.PortSettings & MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "honoring BIOS transfer negotiations\n"); } else { for (i = 0; i < 16; i++) { mpt->mpt_dev_page1[i].RequestedParameters = 0; mpt->mpt_dev_page1[i].Configuration = 0; (void) mpt_update_spi_config(mpt, i); } } return (0); } static int mpt_cam_enable(struct mpt_softc *mpt) { int error; MPT_LOCK(mpt); error = EIO; if (mpt->is_fc) { if (mpt_read_config_info_fc(mpt)) { goto out; } if (mpt_set_initial_config_fc(mpt)) { goto out; } } else if (mpt->is_sas) { if (mpt_read_config_info_sas(mpt)) { goto out; } if (mpt_set_initial_config_sas(mpt)) { goto out; } } else if (mpt->is_spi) { if (mpt_read_config_info_spi(mpt)) { goto out; } if (mpt_set_initial_config_spi(mpt)) { goto out; } } error = 0; out: MPT_UNLOCK(mpt); return (error); } static void mpt_cam_ready(struct mpt_softc *mpt) { /* * If we're in target mode, hang out resources now * so we don't cause the world to hang talking to us. */ if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { /* * Try to add some target command resources */ MPT_LOCK(mpt); if (mpt_add_target_commands(mpt) == FALSE) { mpt_prt(mpt, "failed to add target commands\n"); } MPT_UNLOCK(mpt); } mpt->ready = 1; } static void mpt_cam_detach(struct mpt_softc *mpt) { mpt_handler_t handler; MPT_LOCK(mpt); mpt->ready = 0; mpt_terminate_recovery_thread(mpt); handler.reply_handler = mpt_scsi_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, scsi_io_handler_id); handler.reply_handler = mpt_scsi_tmf_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, scsi_tmf_handler_id); handler.reply_handler = mpt_fc_els_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, fc_els_handler_id); handler.reply_handler = mpt_scsi_tgt_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, mpt->scsi_tgt_handler_id); handler.reply_handler = mpt_sata_pass_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, sata_pass_handler_id); if (mpt->tmf_req != NULL) { mpt->tmf_req->state = REQ_STATE_ALLOCATED; mpt_free_request(mpt, mpt->tmf_req); mpt->tmf_req = NULL; } if (mpt->sas_portinfo != NULL) { free(mpt->sas_portinfo, M_DEVBUF); mpt->sas_portinfo = NULL; } if (mpt->sim != NULL) { xpt_free_path(mpt->path); xpt_bus_deregister(cam_sim_path(mpt->sim)); cam_sim_free(mpt->sim, TRUE); mpt->sim = NULL; } if (mpt->phydisk_sim != NULL) { xpt_free_path(mpt->phydisk_path); xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim)); cam_sim_free(mpt->phydisk_sim, TRUE); mpt->phydisk_sim = NULL; } MPT_UNLOCK(mpt); } /* This routine is used after a system crash to dump core onto the swap device. */ static void mpt_poll(struct cam_sim *sim) { struct mpt_softc *mpt; mpt = (struct mpt_softc *)cam_sim_softc(sim); mpt_intr(mpt); } /* * Watchdog timeout routine for SCSI requests. */ static void mpt_timeout(void *arg) { union ccb *ccb; struct mpt_softc *mpt; request_t *req; ccb = (union ccb *)arg; mpt = ccb->ccb_h.ccb_mpt_ptr; MPT_LOCK_ASSERT(mpt); req = ccb->ccb_h.ccb_req_ptr; mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req, req->serno, ccb, req->ccb); /* XXX: WHAT ARE WE TRYING TO DO HERE? 
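 *
 * What the code below does: if the request is still on the pending
 * list, move it to the timeout list, mark it REQ_STATE_TIMEDOUT and
 * wake the recovery thread; actual cleanup happens there.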
*/ if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) { TAILQ_REMOVE(&mpt->request_pending_list, req, links); TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links); req->state |= REQ_STATE_TIMEDOUT; mpt_wakeup_recovery_thread(mpt); } } /* * Callback routine from "bus_dmamap_load" or, in simple cases, called directly. * * Takes a list of physical segments and builds the SGL for SCSI IO command * and forwards the commard to the IOC after one last check that CAM has not * aborted the transaction. */ static void mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { request_t *req, *trq; char *mpt_off; union ccb *ccb; struct mpt_softc *mpt; bus_addr_t chain_list_addr; int first_lim, seg, this_seg_lim; uint32_t addr, cur_off, flags, nxt_off, tf; void *sglp = NULL; MSG_REQUEST_HEADER *hdrp; SGE_SIMPLE64 *se; SGE_CHAIN64 *ce; int istgt = 0; req = (request_t *)arg; ccb = req->ccb; mpt = ccb->ccb_h.ccb_mpt_ptr; req = ccb->ccb_h.ccb_req_ptr; hdrp = req->req_vbuf; mpt_off = req->req_vbuf; if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; } if (error == 0) { switch (hdrp->Function) { case MPI_FUNCTION_SCSI_IO_REQUEST: case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: istgt = 0; sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; break; case MPI_FUNCTION_TARGET_ASSIST: istgt = 1; sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; break; default: mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n", hdrp->Function); error = EINVAL; break; } } if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; mpt_prt(mpt, "segment count %d too large (max %u)\n", nseg, mpt->max_seg_cnt); } bad: if (error != 0) { if (error != EFBIG && error != ENOMEM) { mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { cam_status status; mpt_freeze_ccb(ccb); if (error == EFBIG) { status = CAM_REQ_TOO_BIG; } else if (error == ENOMEM) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } status = CAM_REQUEUE_REQ; } else { status = CAM_REQ_CMP_ERR; } mpt_set_ccb_status(ccb, status); } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); mpt_free_request(mpt, req); return; } /* * No data to transfer? * Just make a single simple SGL with zero length. 
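/*
 * The debug-only memset below just poisons the SGL area with 0xff so
 * that stale entries stand out in request dumps; the real zero-length
 * SGL for the nseg == 0 case is built right after it.
 */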
*/ if (mpt->verbose >= MPT_PRT_DEBUG) { int tidx = ((char *)sglp) - mpt_off; memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); } if (nseg == 0) { SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; MPI_pSGE_SET_FLAGS(se1, (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); se1->FlagsLength = htole32(se1->FlagsLength); goto out; } flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; if (istgt == 0) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } - if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { + if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if (istgt == 0) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREWRITE; } else { op = BUS_DMASYNC_PREREAD; } } bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); } /* * Okay, fill in what we can at the end of the command frame. * If we have up to MPT_NSGL_FIRST, we can fit them all into * the command frame. * * Otherwise, we fill up through MPT_NSGL_FIRST less one * SIMPLE64 pointers and start doing CHAIN64 entries after * that. */ if (nseg < MPT_NSGL_FIRST(mpt)) { first_lim = nseg; } else { /* * Leave room for CHAIN element */ first_lim = MPT_NSGL_FIRST(mpt) - 1; } se = (SGE_SIMPLE64 *) sglp; for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { tf = flags; memset(se, 0, sizeof (*se)); MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); if (sizeof(bus_addr_t) > 4) { addr = ((uint64_t)dm_segs->ds_addr) >> 32; /* SAS1078 36GB limitation WAR */ if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr + MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) { addr |= (1 << 31); tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; } se->Address.High = htole32(addr); } if (seg == first_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se->FlagsLength = htole32(se->FlagsLength); } if (seg == nseg) { goto out; } /* * Tell the IOC where to find the first chain element. */ hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; nxt_off = MPT_RQSL(mpt); trq = req; /* * Make up the rest of the data segments out of a chain element * (contained in the current request frame) which points to * SIMPLE64 elements in the next request frame, possibly ending * with *another* chain element (if there's more). */ while (seg < nseg) { /* * Point to the chain descriptor. Note that the chain * descriptor is at the end of the *previous* list (whether * chain or simple). */ ce = (SGE_CHAIN64 *) se; /* * Before we change our current pointer, make sure we won't * overflow the request area with this frame. Note that we * test against 'greater than' here as it's okay in this case * to have next offset be just outside the request area. */ if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { nxt_off = MPT_REQUEST_AREA; goto next_chain; } /* * Set our SGE element pointer to the beginning of the chain * list and update our next chain list offset. */ se = (SGE_SIMPLE64 *) &mpt_off[nxt_off]; cur_off = nxt_off; nxt_off += MPT_RQSL(mpt); /* * Now initialize the chain descriptor. 
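/*
 * Each CHAIN64 element records the bus address of the next SGE list,
 * its length in bytes, and, when yet another chain follows, a
 * NextChainOffset expressed in 32-bit words.
 */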
*/ memset(ce, 0, sizeof (*ce)); /* * Get the physical address of the chain list. */ chain_list_addr = trq->req_pbuf; chain_list_addr += cur_off; if (sizeof (bus_addr_t) > 4) { ce->Address.High = htole32(((uint64_t)chain_list_addr) >> 32); } ce->Address.Low = htole32(chain_list_addr & 0xffffffff); ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; /* * If we have more than a frame's worth of segments left, * set up the chain list to have the last element be another * chain descriptor. */ if ((nseg - seg) > MPT_NSGL(mpt)) { this_seg_lim = seg + MPT_NSGL(mpt) - 1; /* * The length of the chain is the length in bytes of the * number of segments plus the next chain element. * * The next chain descriptor offset is the length, * in words, of the number of segments. */ ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE64); ce->NextChainOffset = ce->Length >> 2; ce->Length += sizeof (SGE_CHAIN64); } else { this_seg_lim = nseg; ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE64); } ce->Length = htole16(ce->Length); /* * Fill in the chain list SGE elements with our segment data. * * If we're the last element in this chain list, set the last * element flag. If we're the completely last element period, * set the end of list and end of buffer flags. */ while (seg < this_seg_lim) { tf = flags; memset(se, 0, sizeof (*se)); MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); if (sizeof (bus_addr_t) > 4) { addr = ((uint64_t)dm_segs->ds_addr) >> 32; /* SAS1078 36GB limitation WAR */ if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr + MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) { addr |= (1 << 31); tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; } se->Address.High = htole32(addr); } if (seg == this_seg_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se->FlagsLength = htole32(se->FlagsLength); se++; seg++; dm_segs++; } next_chain: /* * If we have more segments to do and we've used up all of * the space in a request area, go allocate another one * and chain to that. */ if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { request_t *nrq; nrq = mpt_get_request(mpt, FALSE); if (nrq == NULL) { error = ENOMEM; goto bad; } /* * Append the new request area on the tail of our list. */ if ((trq = req->chain) == NULL) { req->chain = nrq; } else { while (trq->chain != NULL) { trq = trq->chain; } trq->chain = nrq; } trq = nrq; mpt_off = trq->req_vbuf; if (mpt->verbose >= MPT_PRT_DEBUG) { memset(mpt_off, 0xff, MPT_REQUEST_AREA); } nxt_off = 0; } } out: /* * Last time we need to check if this CCB needs to be aborted. 
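/*
 * If CAM aborted the CCB while the map was being loaded, unload the
 * DMA map (when there was data) and complete the CCB here instead of
 * handing the request to the IOC.
 */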
*/ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } mpt_prt(mpt, "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n", ccb->ccb_h.status & CAM_STATUS_MASK); - if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { + if (nseg) { bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); mpt_free_request(mpt, req); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, mpt_timeout, ccb); } if (mpt->verbose > MPT_PRT_DEBUG) { int nc = 0; mpt_print_request(req->req_vbuf); for (trq = req->chain; trq; trq = trq->chain) { printf(" Additional Chain Area %d\n", nc++); mpt_dump_sgl(trq->req_vbuf, 0); } } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); #ifdef WE_TRUST_AUTO_GOOD_STATUS if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; } else { tgt->state = TGT_STATE_MOVING_DATA; } #else tgt->state = TGT_STATE_MOVING_DATA; #endif } mpt_send_cmd(mpt, req); } static void mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { request_t *req, *trq; char *mpt_off; union ccb *ccb; struct mpt_softc *mpt; int seg, first_lim; uint32_t flags, nxt_off; void *sglp = NULL; MSG_REQUEST_HEADER *hdrp; SGE_SIMPLE32 *se; SGE_CHAIN32 *ce; int istgt = 0; req = (request_t *)arg; ccb = req->ccb; mpt = ccb->ccb_h.ccb_mpt_ptr; req = ccb->ccb_h.ccb_req_ptr; hdrp = req->req_vbuf; mpt_off = req->req_vbuf; if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; } if (error == 0) { switch (hdrp->Function) { case MPI_FUNCTION_SCSI_IO_REQUEST: case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; break; case MPI_FUNCTION_TARGET_ASSIST: istgt = 1; sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; break; default: mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n", hdrp->Function); error = EINVAL; break; } } if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; mpt_prt(mpt, "segment count %d too large (max %u)\n", nseg, mpt->max_seg_cnt); } bad: if (error != 0) { if (error != EFBIG && error != ENOMEM) { mpt_prt(mpt, "mpt_execute_req: err %d\n", error); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { cam_status status; mpt_freeze_ccb(ccb); if (error == EFBIG) { status = CAM_REQ_TOO_BIG; } else if (error == ENOMEM) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } status = CAM_REQUEUE_REQ; } else { status = CAM_REQ_CMP_ERR; } mpt_set_ccb_status(ccb, status); } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); mpt_free_request(mpt, req); return; } /* * No data to transfer? 
* Just make a single simple SGL with zero length. */ if (mpt->verbose >= MPT_PRT_DEBUG) { int tidx = ((char *)sglp) - mpt_off; memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); } if (nseg == 0) { SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; MPI_pSGE_SET_FLAGS(se1, (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); se1->FlagsLength = htole32(se1->FlagsLength); goto out; } flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; if (istgt == 0) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } - if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { + if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if (istgt) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREWRITE; } else { op = BUS_DMASYNC_PREREAD; } } bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); } /* * Okay, fill in what we can at the end of the command frame. * If we have up to MPT_NSGL_FIRST, we can fit them all into * the command frame. * * Otherwise, we fill up through MPT_NSGL_FIRST less one * SIMPLE32 pointers and start doing CHAIN32 entries after * that. */ if (nseg < MPT_NSGL_FIRST(mpt)) { first_lim = nseg; } else { /* * Leave room for CHAIN element */ first_lim = MPT_NSGL_FIRST(mpt) - 1; } se = (SGE_SIMPLE32 *) sglp; for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { uint32_t tf; memset(se, 0,sizeof (*se)); se->Address = htole32(dm_segs->ds_addr); MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); tf = flags; if (seg == first_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se->FlagsLength = htole32(se->FlagsLength); } if (seg == nseg) { goto out; } /* * Tell the IOC where to find the first chain element. */ hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; nxt_off = MPT_RQSL(mpt); trq = req; /* * Make up the rest of the data segments out of a chain element * (contained in the current request frame) which points to * SIMPLE32 elements in the next request frame, possibly ending * with *another* chain element (if there's more). */ while (seg < nseg) { int this_seg_lim; uint32_t tf, cur_off; bus_addr_t chain_list_addr; /* * Point to the chain descriptor. Note that the chain * descriptor is at the end of the *previous* list (whether * chain or simple). */ ce = (SGE_CHAIN32 *) se; /* * Before we change our current pointer, make sure we won't * overflow the request area with this frame. Note that we * test against 'greater than' here as it's okay in this case * to have next offset be just outside the request area. */ if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { nxt_off = MPT_REQUEST_AREA; goto next_chain; } /* * Set our SGE element pointer to the beginning of the chain * list and update our next chain list offset. */ se = (SGE_SIMPLE32 *) &mpt_off[nxt_off]; cur_off = nxt_off; nxt_off += MPT_RQSL(mpt); /* * Now initialize the chain descriptor. */ memset(ce, 0, sizeof (*ce)); /* * Get the physical address of the chain list. 
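/*
 * Same layout as the 64-bit path, except the SGE_CHAIN32 element only
 * carries the low 32 bits of the chain list's bus address.
 */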
*/ chain_list_addr = trq->req_pbuf; chain_list_addr += cur_off; ce->Address = htole32(chain_list_addr); ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; /* * If we have more than a frame's worth of segments left, * set up the chain list to have the last element be another * chain descriptor. */ if ((nseg - seg) > MPT_NSGL(mpt)) { this_seg_lim = seg + MPT_NSGL(mpt) - 1; /* * The length of the chain is the length in bytes of the * number of segments plus the next chain element. * * The next chain descriptor offset is the length, * in words, of the number of segments. */ ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE32); ce->NextChainOffset = ce->Length >> 2; ce->Length += sizeof (SGE_CHAIN32); } else { this_seg_lim = nseg; ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE32); } ce->Length = htole16(ce->Length); /* * Fill in the chain list SGE elements with our segment data. * * If we're the last element in this chain list, set the last * element flag. If we're the completely last element period, * set the end of list and end of buffer flags. */ while (seg < this_seg_lim) { memset(se, 0, sizeof (*se)); se->Address = htole32(dm_segs->ds_addr); MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); tf = flags; if (seg == this_seg_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se->FlagsLength = htole32(se->FlagsLength); se++; seg++; dm_segs++; } next_chain: /* * If we have more segments to do and we've used up all of * the space in a request area, go allocate another one * and chain to that. */ if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { request_t *nrq; nrq = mpt_get_request(mpt, FALSE); if (nrq == NULL) { error = ENOMEM; goto bad; } /* * Append the new request area on the tail of our list. */ if ((trq = req->chain) == NULL) { req->chain = nrq; } else { while (trq->chain != NULL) { trq = trq->chain; } trq->chain = nrq; } trq = nrq; mpt_off = trq->req_vbuf; if (mpt->verbose >= MPT_PRT_DEBUG) { memset(mpt_off, 0xff, MPT_REQUEST_AREA); } nxt_off = 0; } } out: /* * Last time we need to check if this CCB needs to be aborted. 
*/ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } mpt_prt(mpt, "mpt_execute_req: I/O cancelled (status 0x%x)\n", ccb->ccb_h.status & CAM_STATUS_MASK); - if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { + if (nseg) { bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); mpt_free_request(mpt, req); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, mpt_timeout, ccb); } if (mpt->verbose > MPT_PRT_DEBUG) { int nc = 0; mpt_print_request(req->req_vbuf); for (trq = req->chain; trq; trq = trq->chain) { printf(" Additional Chain Area %d\n", nc++); mpt_dump_sgl(trq->req_vbuf, 0); } } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); #ifdef WE_TRUST_AUTO_GOOD_STATUS if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; } else { tgt->state = TGT_STATE_MOVING_DATA; } #else tgt->state = TGT_STATE_MOVING_DATA; #endif } mpt_send_cmd(mpt, req); } static void mpt_start(struct cam_sim *sim, union ccb *ccb) { request_t *req; struct mpt_softc *mpt; MSG_SCSI_IO_REQUEST *mpt_req; struct ccb_scsiio *csio = &ccb->csio; struct ccb_hdr *ccbh = &ccb->ccb_h; bus_dmamap_callback_t *cb; target_id_t tgt; int raid_passthru; int error; /* Get the pointer for the physical addapter */ mpt = ccb->ccb_h.ccb_mpt_ptr; raid_passthru = (sim == mpt->phydisk_sim); if ((req = mpt_get_request(mpt, FALSE)) == NULL) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_done(ccb); return; } #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__); #endif if (sizeof (bus_addr_t) > 4) { cb = mpt_execute_req_a64; } else { cb = mpt_execute_req; } /* * Link the ccb and the request structure so we can find * the other knowing either the request or the ccb */ req->ccb = ccb; ccb->ccb_h.ccb_req_ptr = req; /* Now we build the command for the IOC */ mpt_req = req->req_vbuf; memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST)); mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST; if (raid_passthru) { mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } mpt_req->Bus = 0; /* we never set bus here */ } else { tgt = ccb->ccb_h.target_id; mpt_req->Bus = 0; /* XXX */ } mpt_req->SenseBufferLength = (csio->sense_len < MPT_SENSE_SIZE) ? csio->sense_len : MPT_SENSE_SIZE; /* * We use the message context to find the request structure when we * Get the command completion interrupt from the IOC. 
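/*
 * The context is simply the request index OR'd with the reply-handler
 * id registered for SCSI I/O, byte-swapped to little-endian for the
 * IOC; the reply path uses it to find both again.
 */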
*/ mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id); /* Which physical device to do the I/O on */ mpt_req->TargetID = tgt; /* We assume a single level LUN type */ if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) { mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f); mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff; } else { mpt_req->LUN[1] = ccb->ccb_h.target_lun; } /* Set the direction of the transfer */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { mpt_req->Control = MPI_SCSIIO_CONTROL_READ; } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; } else { mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; } if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { switch(ccb->csio.tag_action) { case MSG_HEAD_OF_Q_TAG: mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; break; case MSG_ACA_TASK: mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; break; case MSG_ORDERED_Q_TAG: mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; break; case MSG_SIMPLE_Q_TAG: default: mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; break; } } else { if (mpt->is_fc || mpt->is_sas) { mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; } else { /* XXX No such thing for a target doing packetized. */ mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; } } if (mpt->is_spi) { if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; } } mpt_req->Control = htole32(mpt_req->Control); /* Copy the scsi command block into place */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len); } else { bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len); } mpt_req->CDBLength = csio->cdb_len; mpt_req->DataLength = htole32(csio->dxfer_len); mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf); /* * Do a *short* print here if we're set to MPT_PRT_DEBUG */ if (mpt->verbose == MPT_PRT_DEBUG) { U32 df; mpt_prt(mpt, "mpt_start: %s op 0x%x ", (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)? "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]); df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) { mpt_prtc(mpt, "(%s %u byte%s ", (df == MPI_SCSIIO_CONTROL_READ)? "read" : "write", csio->dxfer_len, (csio->dxfer_len == 1)? ")" : "s)"); } mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt, ccb->ccb_h.target_lun, req, req->serno); } error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb, req, 0); if (error == EINPROGRESS) { /* * So as to maintain ordering, freeze the controller queue * until our mapping is returned. */ xpt_freeze_simq(mpt->sim, 1); ccbh->status |= CAM_RELEASE_SIMQ; } } static int mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, int sleep_ok) { int error; uint16_t status; uint8_t response; error = mpt_scsi_send_tmf(mpt, (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, 0, /* XXX How do I get the channel ID? */ tgt != CAM_TARGET_WILDCARD ? tgt : 0, lun != CAM_LUN_WILDCARD ? lun : 0, 0, sleep_ok); if (error != 0) { /* * mpt_scsi_send_tmf hard resets on failure, so no * need to do so here. */ mpt_prt(mpt, "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); return (EIO); } /* Wait for bus reset to be processed by the IOC. 
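/*
 * The bounded wait below can fail three ways: a timeout, a bad
 * IOCStatus, or an unexpected TMF response code; each is treated as
 * fatal and the controller is hard reset.
 */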
*/ error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, REQ_STATE_DONE, sleep_ok, 5000); status = le16toh(mpt->tmf_req->IOCStatus); response = mpt->tmf_req->ResponseCode; mpt->tmf_req->state = REQ_STATE_FREE; if (error) { mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. " "Resetting controller.\n"); mpt_reset(mpt, TRUE); return (ETIMEDOUT); } if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " "Resetting controller.\n", status); mpt_reset(mpt, TRUE); return (EIO); } if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. " "Resetting controller.\n", response); mpt_reset(mpt, TRUE); return (EIO); } return (0); } static int mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) { int r = 0; request_t *req; PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; req = mpt_get_request(mpt, FALSE); if (req == NULL) { return (ENOMEM); } fc = req->req_vbuf; memset(fc, 0, sizeof(*fc)); fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; fc->MsgContext = htole32(req->index | fc_els_handler_id); mpt_send_cmd(mpt, req); if (dowait) { r = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, FALSE, 60 * 1000); if (r == 0) { mpt_free_request(mpt, req); } } return (r); } static int mpt_cam_event(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) { uint32_t data0, data1; data0 = le32toh(msg->Data[0]); data1 = le32toh(msg->Data[1]); switch(msg->Event & 0xFF) { case MPI_EVENT_UNIT_ATTENTION: mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", (data0 >> 8) & 0xff, data0 & 0xff); break; case MPI_EVENT_IOC_BUS_RESET: /* We generated a bus reset */ mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", (data0 >> 8) & 0xff); xpt_async(AC_BUS_RESET, mpt->path, NULL); break; case MPI_EVENT_EXT_BUS_RESET: /* Someone else generated a bus reset */ mpt_prt(mpt, "External Bus Reset Detected\n"); /* * These replies don't return EventData like the MPI * spec says they do */ xpt_async(AC_BUS_RESET, mpt->path, NULL); break; case MPI_EVENT_RESCAN: #if __FreeBSD_version >= 600000 { union ccb *ccb; uint32_t pathid; /* * In general this means a device has been added to the loop. */ mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); if (mpt->ready == 0) { break; } if (mpt->phydisk_sim) { pathid = cam_sim_path(mpt->phydisk_sim); } else { pathid = cam_sim_path(mpt->sim); } /* * Allocate a CCB, create a wildcard path for this bus, * and schedule a rescan. */ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mpt_prt(mpt, "unable to alloc CCB for rescan\n"); break; } if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create path for rescan\n"); xpt_free_ccb(ccb); break; } xpt_rescan(ccb); break; } #else mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); break; #endif case MPI_EVENT_LINK_STATUS_CHANGE: mpt_prt(mpt, "Port %d: LinkState: %s\n", (data1 >> 8) & 0xff, ((data0 & 0xff) == 0)? 
"Failed" : "Active"); break; case MPI_EVENT_LOOP_STATE_CHANGE: switch ((data0 >> 16) & 0xff) { case 0x01: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " "(Loop Initialization)\n", (data1 >> 8) & 0xff, (data0 >> 8) & 0xff, (data0 ) & 0xff); switch ((data0 >> 8) & 0xff) { case 0xF7: if ((data0 & 0xff) == 0xF7) { mpt_prt(mpt, "Device needs AL_PA\n"); } else { mpt_prt(mpt, "Device %02x doesn't like " "FC performance\n", data0 & 0xFF); } break; case 0xF8: if ((data0 & 0xff) == 0xF7) { mpt_prt(mpt, "Device had loop failure " "at its receiver prior to acquiring" " AL_PA\n"); } else { mpt_prt(mpt, "Device %02x detected loop" " failure at its receiver\n", data0 & 0xFF); } break; default: mpt_prt(mpt, "Device %02x requests that device " "%02x reset itself\n", data0 & 0xFF, (data0 >> 8) & 0xFF); break; } break; case 0x02: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " "LPE(%02x,%02x) (Loop Port Enable)\n", (data1 >> 8) & 0xff, /* Port */ (data0 >> 8) & 0xff, /* Character 3 */ (data0 ) & 0xff /* Character 4 */); break; case 0x03: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " "LPB(%02x,%02x) (Loop Port Bypass)\n", (data1 >> 8) & 0xff, /* Port */ (data0 >> 8) & 0xff, /* Character 3 */ (data0 ) & 0xff /* Character 4 */); break; default: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " "FC event (%02x %02x %02x)\n", (data1 >> 8) & 0xff, /* Port */ (data0 >> 16) & 0xff, /* Event */ (data0 >> 8) & 0xff, /* Character 3 */ (data0 ) & 0xff /* Character 4 */); } break; case MPI_EVENT_LOGOUT: mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", (data1 >> 8) & 0xff, data0); break; case MPI_EVENT_QUEUE_FULL: { struct cam_sim *sim; struct cam_path *tmppath; struct ccb_relsim crs; PTR_EVENT_DATA_QUEUE_FULL pqf; lun_id_t lun_id; pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data; pqf->CurrentDepth = le16toh(pqf->CurrentDepth); mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth " "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth); if (mpt->phydisk_sim && mpt_is_raid_member(mpt, pqf->TargetID) != 0) { sim = mpt->phydisk_sim; } else { sim = mpt->sim; } for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), pqf->TargetID, lun_id) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create a path to send " "XPT_REL_SIMQ"); break; } xpt_setup_ccb(&crs.ccb_h, tmppath, 5); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.ccb_h.flags = CAM_DEV_QFREEZE; crs.release_flags = RELSIM_ADJUST_OPENINGS; crs.openings = pqf->CurrentDepth - 1; xpt_action((union ccb *)&crs); if (crs.ccb_h.status != CAM_REQ_CMP) { mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); } xpt_free_path(tmppath); } break; } case MPI_EVENT_IR_RESYNC_UPDATE: mpt_prt(mpt, "IR resync update %d completed\n", (data0 >> 16) & 0xff); break; case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: { union ccb *ccb; struct cam_sim *sim; struct cam_path *tmppath; PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc; psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data; if (mpt->phydisk_sim && mpt_is_raid_member(mpt, psdsc->TargetID) != 0) sim = mpt->phydisk_sim; else sim = mpt->sim; switch(psdsc->ReasonCode) { case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mpt_prt(mpt, "unable to alloc CCB for rescan\n"); break; } if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim), psdsc->TargetID, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create path for rescan\n"); xpt_free_ccb(ccb); break; } xpt_rescan(ccb); break; case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: if (xpt_create_path(&tmppath, NULL, 
cam_sim_path(sim), psdsc->TargetID, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create path for async event"); break; } xpt_async(AC_LOST_DEVICE, tmppath, NULL); xpt_free_path(tmppath); break; case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET: case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL: case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: break; default: mpt_lprt(mpt, MPT_PRT_WARN, "SAS device status change: Bus: 0x%02x TargetID: " "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus, psdsc->TargetID, psdsc->ReasonCode); break; } break; } case MPI_EVENT_SAS_DISCOVERY_ERROR: { PTR_EVENT_DATA_DISCOVERY_ERROR pde; pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data; pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus); mpt_lprt(mpt, MPT_PRT_WARN, "SAS discovery error: Port: 0x%02x Status: 0x%08x\n", pde->Port, pde->DiscoveryStatus); break; } case MPI_EVENT_EVENT_CHANGE: case MPI_EVENT_INTEGRATED_RAID: case MPI_EVENT_IR2: case MPI_EVENT_LOG_ENTRY_ADDED: case MPI_EVENT_SAS_DISCOVERY: case MPI_EVENT_SAS_PHY_LINK_STATUS: case MPI_EVENT_SAS_SES: break; default: mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", msg->Event & 0xFF); return (0); } return (1); } /* * Reply path for all SCSI I/O requests, called from our * interrupt handler by extracting our handler index from * the MsgContext field of the reply from the IOC. * * This routine is optimized for the common case of a * completion without error. All exception handling is * offloaded to non-inlined helper routines to minimize * cache footprint. */ static int mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { MSG_SCSI_IO_REQUEST *scsi_req; union ccb *ccb; if (req->state == REQ_STATE_FREE) { mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); return (TRUE); } scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; ccb = req->ccb; if (ccb == NULL) { mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", req, req->serno); return (TRUE); } mpt_req_untimeout(req, mpt_timeout, ccb); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } if (reply_frame == NULL) { /* * Context only reply, completion without error status. */ ccb->csio.resid = 0; mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->csio.scsi_status = SCSI_STATUS_OK; } else { mpt_scsi_reply_frame_handler(mpt, req, reply_frame); } if (mpt->outofbeer) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { struct scsi_inquiry_data *iq = (struct scsi_inquiry_data *)ccb->csio.data_ptr; if (scsi_req->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { /* * Fake out the device type so that only the * pass-thru device will attach. 
*/ iq->device &= ~0x1F; iq->device |= T_NODEVICE; } } if (mpt->verbose == MPT_PRT_DEBUG) { mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", req, req->serno); } KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); if ((req->state & REQ_STATE_TIMEDOUT) == 0) { TAILQ_REMOVE(&mpt->request_pending_list, req, links); } else { mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", req, req->serno); TAILQ_REMOVE(&mpt->request_timeout_list, req, links); } KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, ("CCB req needed wakeup")); #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); #endif mpt_free_request(mpt, req); return (TRUE); } static int mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); #endif tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; /* Record IOC Status and Response Code of TMF for any waiters. */ req->IOCStatus = le16toh(tmf_reply->IOCStatus); req->ResponseCode = tmf_reply->ResponseCode; mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n", req, req->serno, le16toh(tmf_reply->IOCStatus)); TAILQ_REMOVE(&mpt->request_pending_list, req, links); if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { req->state |= REQ_STATE_DONE; wakeup(req); } else { mpt->tmf_req->state = REQ_STATE_FREE; } return (TRUE); } /* * XXX: Move to definitions file */ #define ELS 0x22 #define FC4LS 0x32 #define ABTS 0x81 #define BA_ACC 0x84 #define LS_RJT 0x01 #define LS_ACC 0x02 #define PLOGI 0x03 #define LOGO 0x05 #define SRR 0x14 #define PRLI 0x20 #define PRLO 0x21 #define ADISC 0x52 #define RSCN 0x61 static void mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req, PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length) { uint32_t fl; MSG_LINK_SERVICE_RSP_REQUEST tmp; PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp; /* * We are going to reuse the ELS request to send this response back. */ rsp = &tmp; memset(rsp, 0, sizeof(*rsp)); #ifdef USE_IMMEDIATE_LINK_DATA /* * Apparently the IMMEDIATE stuff doesn't seem to work. */ rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE; #endif rsp->RspLength = length; rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP; rsp->MsgContext = htole32(req->index | fc_els_handler_id); /* * Copy over information from the original reply frame to * it's correct place in the response. */ memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24); /* * And now copy back the temporary area to the original frame. */ memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST)); rsp = req->req_vbuf; #ifdef USE_IMMEDIATE_LINK_DATA memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length); #else { PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL; bus_addr_t paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); fl = MPI_SGE_FLAGS_HOST_TO_IOC | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; fl <<= MPI_SGE_FLAGS_SHIFT; fl |= (length); se->FlagsLength = htole32(fl); se->Address = htole32((uint32_t) paddr); } #endif /* * Send it on... 
*/ mpt_send_cmd(mpt, req); } static int mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp = (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame; U8 rctl; U8 type; U8 cmd; U16 status = le16toh(reply_frame->IOCStatus); U32 *elsbuf; int ioindex; int do_refresh = TRUE; #ifdef INVARIANTS KASSERT(mpt_req_on_free_list(mpt, req) == 0, ("fc_els_reply_handler: req %p:%u for function %x on freelist!", req, req->serno, rp->Function)); if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) { mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__); } else { mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__); } #endif mpt_lprt(mpt, MPT_PRT_DEBUG, "FC_ELS Complete: req %p:%u, reply %p function %x\n", req, req->serno, reply_frame, reply_frame->Function); if (status != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n", status, reply_frame->Function); if (status == MPI_IOCSTATUS_INVALID_STATE) { /* * XXX: to get around shutdown issue */ mpt->disabled = 1; return (TRUE); } return (TRUE); } /* * If the function of a link service response, we recycle the * response to be a refresh for a new link service request. * * The request pointer is bogus in this case and we have to fetch * it based upon the TransactionContext. */ if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) { /* Freddie Uncle Charlie Katie */ /* We don't get the IOINDEX as part of the Link Svc Rsp */ for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++) if (mpt->els_cmd_ptrs[ioindex] == req) { break; } KASSERT(ioindex < mpt->els_cmds_allocated, ("can't find my mommie!")); /* remove from active list as we're going to re-post it */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_post_els(mpt, req, ioindex); return (TRUE); } if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) { /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; if (req->state & REQ_STATE_TIMEDOUT) { mpt_lprt(mpt, MPT_PRT_DEBUG, "Sync Primitive Send Completed After Timeout\n"); mpt_free_request(mpt, req); } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) { mpt_lprt(mpt, MPT_PRT_DEBUG, "Async Primitive Send Complete\n"); mpt_free_request(mpt, req); } else { mpt_lprt(mpt, MPT_PRT_DEBUG, "Sync Primitive Send Complete- Waking Waiter\n"); wakeup(req); } return (TRUE); } if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) { mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x " "Length %d Message Flags %x\n", rp->Function, rp->Flags, rp->MsgLength, rp->MsgFlags); return (TRUE); } if (rp->MsgLength <= 5) { /* * This is just a ack of an original ELS buffer post */ mpt_lprt(mpt, MPT_PRT_DEBUG, "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno); return (TRUE); } rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT; type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT; elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)]; cmd = be32toh(elsbuf[0]) >> 24; if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) { mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n"); return (TRUE); } ioindex = le32toh(rp->TransactionContext); req = mpt->els_cmd_ptrs[ioindex]; if (rctl == ELS && type == 1) { switch (cmd) { case PRLI: /* * Send back a PRLI ACC */ mpt_prt(mpt, "PRLI from 0x%08x%08x\n", 
le32toh(rp->Wwn.PortNameHigh), le32toh(rp->Wwn.PortNameLow)); elsbuf[0] = htobe32(0x02100014); elsbuf[1] |= htobe32(0x00000100); elsbuf[4] = htobe32(0x00000002); if (mpt->role & MPT_ROLE_TARGET) elsbuf[4] |= htobe32(0x00000010); if (mpt->role & MPT_ROLE_INITIATOR) elsbuf[4] |= htobe32(0x00000020); /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_els_send_response(mpt, req, rp, 20); do_refresh = FALSE; break; case PRLO: memset(elsbuf, 0, 5 * (sizeof (U32))); elsbuf[0] = htobe32(0x02100014); elsbuf[1] = htobe32(0x08000100); mpt_prt(mpt, "PRLO from 0x%08x%08x\n", le32toh(rp->Wwn.PortNameHigh), le32toh(rp->Wwn.PortNameLow)); /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_els_send_response(mpt, req, rp, 20); do_refresh = FALSE; break; default: mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd); break; } } else if (rctl == ABTS && type == 0) { uint16_t rx_id = le16toh(rp->Rxid); uint16_t ox_id = le16toh(rp->Oxid); request_t *tgt_req = NULL; mpt_prt(mpt, "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n", ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh), le32toh(rp->Wwn.PortNameLow)); if (rx_id >= mpt->mpt_max_tgtcmds) { mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id); } else if (mpt->tgt_cmd_ptrs == NULL) { mpt_prt(mpt, "No TGT CMD PTRS\n"); } else { tgt_req = mpt->tgt_cmd_ptrs[rx_id]; } if (tgt_req) { mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req); union ccb *ccb; uint32_t ct_id; /* * Check to make sure we have the correct command * The reply descriptor in the target state should * should contain an IoIndex that should match the * RX_ID. * * It'd be nice to have OX_ID to crosscheck with * as well. */ ct_id = GET_IO_INDEX(tgt->reply_desc); if (ct_id != rx_id) { mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " "RX_ID received=0x%x; RX_ID in cmd=0x%x\n", rx_id, ct_id); goto skip; } ccb = tgt->ccb; if (ccb) { mpt_prt(mpt, "CCB (%p): lun %u flags %x status %x\n", ccb, ccb->ccb_h.target_lun, ccb->ccb_h.flags, ccb->ccb_h.status); } mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd " "%x nxfers %x\n", tgt->state, tgt->resid, tgt->bytes_xfered, tgt->reply_desc, tgt->nxfers); skip: if (mpt_abort_target_cmd(mpt, tgt_req)) { mpt_prt(mpt, "unable to start TargetAbort\n"); } } else { mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id); } memset(elsbuf, 0, 5 * (sizeof (U32))); elsbuf[0] = htobe32(0); elsbuf[1] = htobe32((ox_id << 16) | rx_id); elsbuf[2] = htobe32(0x000ffff); /* * Dork with the reply frame so that the response to it * will be correct. */ rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT); /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_els_send_response(mpt, req, rp, 12); do_refresh = FALSE; } else { mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd); } if (do_refresh == TRUE) { /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_post_els(mpt, req, ioindex); } return (TRUE); } /* * Clean up all SCSI Initiator personality state in response * to a controller reset. */ static void mpt_cam_ioc_reset(struct mpt_softc *mpt, int type) { /* * The pending list is already run down by * the generic handler. 
Perform the same * operation on the timed out request list. */ mpt_complete_request_chain(mpt, &mpt->request_timeout_list, MPI_IOCSTATUS_INVALID_STATE); /* * XXX: We need to repost ELS and Target Command Buffers? */ /* * Inform the XPT that a bus reset has occurred. */ xpt_async(AC_BUS_RESET, mpt->path, NULL); } /* * Parse additional completion information in the reply * frame for SCSI I/O requests. */ static int mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req, MSG_DEFAULT_REPLY *reply_frame) { union ccb *ccb; MSG_SCSI_IO_REPLY *scsi_io_reply; u_int ioc_status; u_int sstate; MPT_DUMP_REPLY_FRAME(mpt, reply_frame); KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, ("MPT SCSI I/O Handler called with incorrect reply type")); KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0, ("MPT SCSI I/O Handler called with continuation reply")); scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame; ioc_status = le16toh(scsi_io_reply->IOCStatus); ioc_status &= MPI_IOCSTATUS_MASK; sstate = scsi_io_reply->SCSIState; ccb = req->ccb; ccb->csio.resid = ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount); if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) { uint32_t sense_returned; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; sense_returned = le32toh(scsi_io_reply->SenseCount); if (sense_returned < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - sense_returned; else ccb->csio.sense_resid = 0; bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data)); bcopy(req->sense_vbuf, &ccb->csio.sense_data, min(ccb->csio.sense_len, sense_returned)); } if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) { /* * Tag messages rejected, but non-tagged retry * was successful. XXXX mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE); */ } switch(ioc_status) { case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* * XXX * Linux driver indicates that a zero * transfer length with this error code * indicates a CRC error. * * No need to swap the bytes for checking * against zero. */ if (scsi_io_reply->TransferCount == 0) { mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); break; } /* FALLTHROUGH */ case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: case MPI_IOCSTATUS_SUCCESS: case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) { /* * Status was never returned for this transaction. */ mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE); } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) { ccb->csio.scsi_status = scsi_io_reply->SCSIStatus; mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR); if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0) mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL); } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) { /* XXX Handle SPI-Packet and FCP-2 response info. */ mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } else mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR); break; case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); break; case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* * Since selection timeouts and "device really not * there" are grouped into this error code, report * selection timeout. Selection timeouts are * typically retried before giving up on the device * whereas "device not there" errors are considered * unretryable. 
*/ mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); break; case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL); break; case MPI_IOCSTATUS_SCSI_INVALID_BUS: mpt_set_ccb_status(ccb, CAM_PATH_INVALID); break; case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: mpt_set_ccb_status(ccb, CAM_TID_INVALID); break; case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: ccb->ccb_h.status = CAM_UA_TERMIO; break; case MPI_IOCSTATUS_INVALID_STATE: /* * The IOC has been reset. Emulate a bus reset. */ /* FALLTHROUGH */ case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: ccb->ccb_h.status = CAM_SCSI_BUS_RESET; break; case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* * Don't clobber any timeout status that has * already been set for this transaction. We * want the SCSI layer to be able to differentiate * between the command we aborted due to timeout * and any innocent bystanders. */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) break; mpt_set_ccb_status(ccb, CAM_REQ_TERMIO); break; case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL); break; case MPI_IOCSTATUS_BUSY: mpt_set_ccb_status(ccb, CAM_BUSY); break; case MPI_IOCSTATUS_INVALID_FUNCTION: case MPI_IOCSTATUS_INVALID_SGL: case MPI_IOCSTATUS_INTERNAL_ERROR: case MPI_IOCSTATUS_INVALID_FIELD: default: /* XXX * Some of the above may need to kick * of a recovery action!!!! */ ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; break; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { mpt_freeze_ccb(ccb); } return (TRUE); } static void mpt_action(struct cam_sim *sim, union ccb *ccb) { struct mpt_softc *mpt; struct ccb_trans_settings *cts; target_id_t tgt; lun_id_t lun; int raid_passthru; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n")); mpt = (struct mpt_softc *)cam_sim_softc(sim); raid_passthru = (sim == mpt->phydisk_sim); MPT_LOCK_ASSERT(mpt); tgt = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; if (raid_passthru && ccb->ccb_h.func_code != XPT_PATH_INQ && ccb->ccb_h.func_code != XPT_RESET_BUS && ccb->ccb_h.func_code != XPT_RESET_DEV) { if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } } ccb->ccb_h.ccb_mpt_ptr = mpt; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ /* * Do a couple of preliminary checks... 
*/ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } } /* Max supported CDB length is 16 bytes */ /* XXX Unless we implement the new 32byte message type */ if (ccb->csio.cdb_len > sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } #ifdef MPT_TEST_MULTIPATH if (mpt->failure_id == ccb->ccb_h.target_id) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); break; } #endif ccb->csio.scsi_status = SCSI_STATUS_OK; mpt_start(sim, ccb); return; case XPT_RESET_BUS: if (raid_passthru) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } case XPT_RESET_DEV: if (ccb->ccb_h.func_code == XPT_RESET_BUS) { if (bootverbose) { xpt_print(ccb->ccb_h.path, "reset bus\n"); } } else { xpt_print(ccb->ccb_h.path, "reset device\n"); } (void) mpt_bus_reset(mpt, tgt, lun, FALSE); /* * mpt_bus_reset is always successful in that it * will fall back to a hard reset should a bus * reset attempt fail. */ ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; case XPT_ABORT: { union ccb *accb = ccb->cab.abort_ccb; switch (accb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMEDIATE_NOTIFY: ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb); break; case XPT_CONT_TARGET_IO: mpt_prt(mpt, "cannot abort active CTIOs yet\n"); ccb->ccb_h.status = CAM_UA_ABORT; break; case XPT_SCSI_IO: ccb->ccb_h.status = CAM_UA_ABORT; break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } break; } #ifdef CAM_NEW_TRAN_CODE #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS) #else #define IS_CURRENT_SETTINGS(c) ((c)->flags & CCB_TRANS_CURRENT_SETTINGS) #endif #define DP_DISC_ENABLE 0x1 #define DP_DISC_DISABL 0x2 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL) #define DP_TQING_ENABLE 0x4 #define DP_TQING_DISABL 0x8 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL) #define DP_WIDE 0x10 #define DP_NARROW 0x20 #define DP_WIDTH (DP_WIDE|DP_NARROW) #define DP_SYNC 0x40 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ { #ifdef CAM_NEW_TRAN_CODE struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; #endif uint8_t dval; u_int period; u_int offset; int i, j; cts = &ccb->cts; if (mpt->is_fc || mpt->is_sas) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } #ifdef CAM_NEW_TRAN_CODE scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; /* * We can be called just to valid transport and proto versions */ if (scsi->valid == 0 && spi->valid == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } #endif /* * Skip attempting settings on RAID volume disks. * Other devices on the bus get the normal treatment. 
*/ if (mpt->phydisk_sim && raid_passthru == 0 && mpt_is_raid_volume(mpt, tgt) != 0) { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "no transfer settings for RAID vols\n"); mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } i = mpt->mpt_port_page2.PortSettings & MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; j = mpt->mpt_port_page2.PortFlags & MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS && j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) { mpt_lprt(mpt, MPT_PRT_ALWAYS, "honoring BIOS transfer negotiations\n"); mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } dval = 0; period = 0; offset = 0; #ifndef CAM_NEW_TRAN_CODE if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { dval |= (cts->flags & CCB_TRANS_DISC_ENB) ? DP_DISC_ENABLE : DP_DISC_DISABL; } if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { dval |= (cts->flags & CCB_TRANS_TAG_ENB) ? DP_TQING_ENABLE : DP_TQING_DISABL; } if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { dval |= cts->bus_width ? DP_WIDE : DP_NARROW; } if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) { dval |= DP_SYNC; period = cts->sync_period; offset = cts->sync_offset; } #else if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ? DP_DISC_ENABLE : DP_DISC_DISABL; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ? DP_TQING_ENABLE : DP_TQING_DISABL; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ? DP_WIDE : DP_NARROW; } if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { dval |= DP_SYNC; offset = spi->sync_offset; } else { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = &mpt->mpt_dev_page1[tgt]; offset = ptr->RequestedParameters; offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; } if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { dval |= DP_SYNC; period = spi->sync_period; } else { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = &mpt->mpt_dev_page1[tgt]; period = ptr->RequestedParameters; period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; } #endif if (dval & DP_DISC_ENABLE) { mpt->mpt_disc_enable |= (1 << tgt); } else if (dval & DP_DISC_DISABL) { mpt->mpt_disc_enable &= ~(1 << tgt); } if (dval & DP_TQING_ENABLE) { mpt->mpt_tag_enable |= (1 << tgt); } else if (dval & DP_TQING_DISABL) { mpt->mpt_tag_enable &= ~(1 << tgt); } if (dval & DP_WIDTH) { mpt_setwidth(mpt, tgt, 1); } if (dval & DP_SYNC) { mpt_setsync(mpt, tgt, period, offset); } if (dval == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "set [%d]: 0x%x period 0x%x offset %d\n", tgt, dval, period, offset); if (mpt_update_spi_config(mpt, tgt)) { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } else { mpt_set_ccb_status(ccb, CAM_REQ_CMP); } break; } case XPT_GET_TRAN_SETTINGS: { #ifdef CAM_NEW_TRAN_CODE struct ccb_trans_settings_scsi *scsi; cts = &ccb->cts; cts->protocol = PROTO_SCSI; if (mpt->is_fc) { struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; cts->protocol_version = SCSI_REV_SPC; cts->transport = XPORT_FC; cts->transport_version = 0; fc->valid = CTS_FC_VALID_SPEED; fc->bitrate = 100000; } else if (mpt->is_sas) { struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_SAS; cts->transport_version = 0; sas->valid = CTS_SAS_VALID_SPEED; sas->bitrate = 300000; } else { cts->protocol_version = SCSI_REV_2; 
cts->transport = XPORT_SPI; cts->transport_version = 2; if (mpt_get_spi_settings(mpt, cts) != 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); break; } } scsi = &cts->proto_specific.scsi; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; #else cts = &ccb->cts; if (mpt->is_fc) { cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } else if (mpt->is_sas) { cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } else if (mpt_get_spi_settings(mpt, cts) != 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); break; } #endif mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; ccg = &ccb->ccg; if (ccg->block_size == 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } cam_calc_geometry(ccg, /* extended */ 1); KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); break; } case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->target_sprt = 0; cpi->hba_eng_cnt = 0; cpi->max_target = mpt->port_facts[0].MaxDevices - 1; cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE; /* * FC cards report MAX_DEVICES of 512, but * the MSG_SCSI_IO_REQUEST target id field * is only 8 bits. Until we fix the driver * to support 'channels' for bus overflow, * just limit it. */ if (cpi->max_target > 255) { cpi->max_target = 255; } /* * VMware ESX reports > 16 devices and then dies when we probe. */ if (mpt->is_spi && cpi->max_target > 15) { cpi->max_target = 15; } if (mpt->is_spi) cpi->max_lun = 7; else cpi->max_lun = MPT_MAX_LUNS; cpi->initiator_id = mpt->mpt_ini_id; cpi->bus_id = cam_sim_bus(sim); /* * The base speed is the speed of the underlying connection. */ #ifdef CAM_NEW_TRAN_CODE cpi->protocol = PROTO_SCSI; if (mpt->is_fc) { cpi->hba_misc = PIM_NOBUSRESET; cpi->base_transfer_speed = 100000; cpi->hba_inquiry = PI_TAG_ABLE; cpi->transport = XPORT_FC; cpi->transport_version = 0; cpi->protocol_version = SCSI_REV_SPC; } else if (mpt->is_sas) { cpi->hba_misc = PIM_NOBUSRESET; cpi->base_transfer_speed = 300000; cpi->hba_inquiry = PI_TAG_ABLE; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol_version = SCSI_REV_SPC2; } else { cpi->hba_misc = PIM_SEQSCAN; cpi->base_transfer_speed = 3300; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol_version = SCSI_REV_2; } #else if (mpt->is_fc) { cpi->hba_misc = PIM_NOBUSRESET; cpi->base_transfer_speed = 100000; cpi->hba_inquiry = PI_TAG_ABLE; } else if (mpt->is_sas) { cpi->hba_misc = PIM_NOBUSRESET; cpi->base_transfer_speed = 300000; cpi->hba_inquiry = PI_TAG_ABLE; } else { cpi->hba_misc = PIM_SEQSCAN; cpi->base_transfer_speed = 3300; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; } #endif /* * We give our fake RAID passhtru bus a width that is MaxVolumes * wide and restrict it to one lun. 
*/ if (raid_passthru) { cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1; cpi->initiator_id = cpi->max_target + 1; cpi->max_lun = 0; } if ((mpt->role & MPT_ROLE_INITIATOR) == 0) { cpi->hba_misc |= PIM_NOINITIATOR; } if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; } else { cpi->target_sprt = 0; } strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "LSI", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_EN_LUN: /* Enable LUN as a target */ { int result; if (ccb->cel.enable) result = mpt_enable_lun(mpt, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); else result = mpt_disable_lun(mpt, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); if (result == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); } else { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } break; } case XPT_NOTIFY_ACKNOWLEDGE: /* recycle notify ack */ case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */ case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ { tgt_resource_t *trtp; lun_id_t lun = ccb->ccb_h.target_lun; ccb->ccb_h.sim_priv.entries[0].field = 0; ccb->ccb_h.sim_priv.entries[1].ptr = mpt; ccb->ccb_h.flags = 0; if (lun == CAM_LUN_WILDCARD) { if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } trtp = &mpt->trt_wildcard; } else if (lun >= MPT_MAX_LUNS) { mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } else { trtp = &mpt->trt[lun]; } if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { mpt_lprt(mpt, MPT_PRT_DEBUG1, "Put FREE ATIO %p lun %d\n", ccb, lun); STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h, sim_links.stqe); } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { mpt_lprt(mpt, MPT_PRT_DEBUG1, "Put FREE INOT lun %d\n", lun); STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h, sim_links.stqe); } else { mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n"); } mpt_set_ccb_status(ccb, CAM_REQ_INPROG); return; } case XPT_CONT_TARGET_IO: mpt_target_start_io(mpt, ccb); return; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } static int mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts) { #ifdef CAM_NEW_TRAN_CODE struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; #endif target_id_t tgt; uint32_t dval, pval, oval; int rv; if (IS_CURRENT_SETTINGS(cts) == 0) { tgt = cts->ccb_h.target_id; } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) { if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) { return (-1); } } else { tgt = cts->ccb_h.target_id; } /* * We aren't looking at Port Page 2 BIOS settings here- * sometimes these have been known to be bogus XXX. * * For user settings, we pick the max from port page 0 * * For current settings we read the current settings out from * device page 0 for that target. */ if (IS_CURRENT_SETTINGS(cts)) { CONFIG_PAGE_SCSI_DEVICE_0 tmp; dval = 0; tmp = mpt->mpt_dev_page0[tgt]; rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp), FALSE, 5000); if (rv) { mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt); return (rv); } mpt2host_config_page_scsi_device_0(&tmp); mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt, tmp.NegotiatedParameters, tmp.Information); dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ? DP_WIDE : DP_NARROW; dval |= (mpt->mpt_disc_enable & (1 << tgt)) ? 
DP_DISC_ENABLE : DP_DISC_DISABL; dval |= (mpt->mpt_tag_enable & (1 << tgt)) ? DP_TQING_ENABLE : DP_TQING_DISABL; oval = tmp.NegotiatedParameters; oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK; oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET; pval = tmp.NegotiatedParameters; pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK; pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD; mpt->mpt_dev_page0[tgt] = tmp; } else { dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC; oval = mpt->mpt_port_page0.Capabilities; oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval); pval = mpt->mpt_port_page0.Capabilities; pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval); } #ifndef CAM_NEW_TRAN_CODE cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); cts->valid = 0; cts->sync_period = pval; cts->sync_offset = oval; cts->valid |= CCB_TRANS_SYNC_RATE_VALID; cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID; cts->valid |= CCB_TRANS_BUS_WIDTH_VALID; if (dval & DP_WIDE) { cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else { cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; if (dval & DP_DISC_ENABLE) { cts->flags |= CCB_TRANS_DISC_ENB; } if (dval & DP_TQING_ENABLE) { cts->flags |= CCB_TRANS_TAG_ENB; } } #else spi->valid = 0; scsi->valid = 0; spi->flags = 0; scsi->flags = 0; spi->sync_offset = oval; spi->sync_period = pval; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->valid |= CTS_SPI_VALID_BUS_WIDTH; if (dval & DP_WIDE) { spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else { spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; if (dval & DP_TQING_ENABLE) { scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } spi->valid |= CTS_SPI_VALID_DISC; if (dval & DP_DISC_ENABLE) { spi->flags |= CTS_SPI_FLAGS_DISC_ENB; } } #endif mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt, IS_CURRENT_SETTINGS(cts)? 
"ACTIVE" : "NVRAM ", dval, pval, oval); return (0); } static void mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; ptr = &mpt->mpt_dev_page1[tgt]; if (onoff) { ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; } else { ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; } } static void mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; ptr = &mpt->mpt_dev_page1[tgt]; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; if (period == 0) { return; } ptr->RequestedParameters |= period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; ptr->RequestedParameters |= offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; if (period < 0xa) { ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; } if (period < 0x9) { ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; } } static int mpt_update_spi_config(struct mpt_softc *mpt, int tgt) { CONFIG_PAGE_SCSI_DEVICE_1 tmp; int rv; mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); tmp = mpt->mpt_dev_page1[tgt]; host2mpt_config_page_scsi_device_1(&tmp); rv = mpt_write_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp), FALSE, 5000); if (rv) { mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); return (-1); } return (0); } /****************************** Timeout Recovery ******************************/ static int mpt_spawn_recovery_thread(struct mpt_softc *mpt) { int error; error = mpt_kthread_create(mpt_recovery_thread, mpt, &mpt->recovery_thread, /*flags*/0, /*altstack*/0, "mpt_recovery%d", mpt->unit); return (error); } static void mpt_terminate_recovery_thread(struct mpt_softc *mpt) { if (mpt->recovery_thread == NULL) { return; } mpt->shutdwn_recovery = 1; wakeup(mpt); /* * Sleep on a slightly different location * for this interlock just for added safety. */ mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0); } static void mpt_recovery_thread(void *arg) { struct mpt_softc *mpt; mpt = (struct mpt_softc *)arg; MPT_LOCK(mpt); for (;;) { if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { if (mpt->shutdwn_recovery == 0) { mpt_sleep(mpt, mpt, PUSER, "idle", 0); } } if (mpt->shutdwn_recovery != 0) { break; } mpt_recover_commands(mpt); } mpt->recovery_thread = NULL; wakeup(&mpt->recovery_thread); MPT_UNLOCK(mpt); mpt_kthread_exit(0); } static int mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags, u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok) { MSG_SCSI_TASK_MGMT *tmf_req; int error; /* * Wait for any current TMF request to complete. * We're only allowed to issue one TMF at a time. 
*/ error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE, sleep_ok, MPT_TMF_MAX_TIMEOUT); if (error != 0) { mpt_reset(mpt, TRUE); return (ETIMEDOUT); } mpt_assign_serno(mpt, mpt->tmf_req); mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED; tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf; memset(tmf_req, 0, sizeof(*tmf_req)); tmf_req->TargetID = target; tmf_req->Bus = channel; tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; tmf_req->TaskType = type; tmf_req->MsgFlags = flags; tmf_req->MsgContext = htole32(mpt->tmf_req->index | scsi_tmf_handler_id); if (lun > MPT_MAX_LUNS) { tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); tmf_req->LUN[1] = lun & 0xff; } else { tmf_req->LUN[1] = lun; } tmf_req->TaskMsgContext = abort_ctx; mpt_lprt(mpt, MPT_PRT_DEBUG, "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req, mpt->tmf_req->serno, tmf_req->MsgContext); if (mpt->verbose > MPT_PRT_DEBUG) { mpt_print_request(tmf_req); } KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0, ("mpt_scsi_send_tmf: tmf_req already on pending list")); TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links); error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req); if (error != MPT_OK) { TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links); mpt->tmf_req->state = REQ_STATE_FREE; mpt_reset(mpt, TRUE); } return (error); } /* * When a command times out, it is placed on the request_timeout_list * and we wake our recovery thread. The MPT-Fusion architecture supports * only a single TMF operation at a time, so we serially abort/bdr, etc, * the timedout transactions. The next TMF is issued either by the * completion handler of the current TMF waking our recovery thread, * or the TMF timeout handler causing a hard reset sequence. */ static void mpt_recover_commands(struct mpt_softc *mpt) { request_t *req; union ccb *ccb; int error; if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { /* * No work to do- leave. */ mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); return; } /* * Flush any commands whose completion coincides with their timeout. */ mpt_intr(mpt); if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { /* * The timedout commands have already * completed. This typically means * that either the timeout value was on * the hairy edge of what the device * requires or - more likely - interrupts * are not happening. */ mpt_prt(mpt, "Timedout requests already complete. " "Interrupts may not be functioning.\n"); mpt_enable_ints(mpt); return; } /* * We have no visibility into the current state of the * controller, so attempt to abort the commands in the * order they timed-out. For initiator commands, we * depend on the reply handler pulling requests off * the timeout list. */ while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { uint16_t status; uint8_t response; MSG_REQUEST_HEADER *hdrp = req->req_vbuf; mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", req, req->serno, hdrp->Function); ccb = req->ccb; if (ccb == NULL) { mpt_prt(mpt, "null ccb in timed out request. " "Resetting Controller.\n"); mpt_reset(mpt, TRUE); continue; } mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); /* * Check to see if this is not an initiator command and * deal with it differently if it is. */ switch (hdrp->Function) { case MPI_FUNCTION_SCSI_IO_REQUEST: case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: break; default: /* * XXX: FIX ME: need to abort target assists... 
*/ mpt_prt(mpt, "just putting it back on the pend q\n"); TAILQ_REMOVE(&mpt->request_timeout_list, req, links); TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links); continue; } error = mpt_scsi_send_tmf(mpt, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, htole32(req->index | scsi_io_handler_id), TRUE); if (error != 0) { /* * mpt_scsi_send_tmf hard resets on failure, so no * need to do so here. Our queue should be emptied * by the hard reset. */ continue; } error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE, 500); status = le16toh(mpt->tmf_req->IOCStatus); response = mpt->tmf_req->ResponseCode; mpt->tmf_req->state = REQ_STATE_FREE; if (error != 0) { /* * If we've errored out,, reset the controller. */ mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " "Resetting controller\n"); mpt_reset(mpt, TRUE); continue; } if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " "Resetting controller.\n", status); mpt_reset(mpt, TRUE); continue; } if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. " "Resetting controller.\n", response); mpt_reset(mpt, TRUE); continue; } mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); } } /************************ Target Mode Support ****************************/ static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) { MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; PTR_SGE_TRANSACTION32 tep; PTR_SGE_SIMPLE32 se; bus_addr_t paddr; uint32_t fl; paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); fc = req->req_vbuf; memset(fc, 0, MPT_REQUEST_AREA); fc->BufferCount = 1; fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; fc->MsgContext = htole32(req->index | fc_els_handler_id); /* * Okay, set up ELS buffer pointers. ELS buffer pointers * consist of a TE SGL element (with details length of zero) * followed by a SIMPLE SGL element which holds the address * of the buffer. 
*/ tep = (PTR_SGE_TRANSACTION32) &fc->SGL; tep->ContextSize = 4; tep->Flags = 0; tep->TransactionContext[0] = htole32(ioindex); se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; fl = MPI_SGE_FLAGS_HOST_TO_IOC | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; fl <<= MPI_SGE_FLAGS_SHIFT; fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); se->FlagsLength = htole32(fl); se->Address = htole32((uint32_t) paddr); mpt_lprt(mpt, MPT_PRT_DEBUG, "add ELS index %d ioindex %d for %p:%u\n", req->index, ioindex, req, req->serno); KASSERT(((req->state & REQ_STATE_LOCKED) != 0), ("mpt_fc_post_els: request not locked")); mpt_send_cmd(mpt, req); } static void mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) { PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; PTR_CMD_BUFFER_DESCRIPTOR cb; bus_addr_t paddr; paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); memset(req->req_vbuf, 0, MPT_REQUEST_AREA); MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; fc = req->req_vbuf; fc->BufferCount = 1; fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); cb = &fc->Buffer[0]; cb->IoIndex = htole16(ioindex); cb->u.PhysicalAddress32 = htole32((U32) paddr); mpt_check_doorbell(mpt); mpt_send_cmd(mpt, req); } static int mpt_add_els_buffers(struct mpt_softc *mpt) { int i; if (mpt->is_fc == 0) { return (TRUE); } if (mpt->els_cmds_allocated) { return (TRUE); } mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->els_cmd_ptrs == NULL) { return (FALSE); } /* * Feed the chip some ELS buffer resources */ for (i = 0; i < MPT_MAX_ELS; i++) { request_t *req = mpt_get_request(mpt, FALSE); if (req == NULL) { break; } req->state |= REQ_STATE_LOCKED; mpt->els_cmd_ptrs[i] = req; mpt_fc_post_els(mpt, req, i); } if (i == 0) { mpt_prt(mpt, "unable to add ELS buffer resources\n"); free(mpt->els_cmd_ptrs, M_DEVBUF); mpt->els_cmd_ptrs = NULL; return (FALSE); } if (i != MPT_MAX_ELS) { mpt_lprt(mpt, MPT_PRT_INFO, "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); } mpt->els_cmds_allocated = i; return(TRUE); } static int mpt_add_target_commands(struct mpt_softc *mpt) { int i, max; if (mpt->tgt_cmd_ptrs) { return (TRUE); } max = MPT_MAX_REQUESTS(mpt) >> 1; if (max > mpt->mpt_max_tgtcmds) { max = mpt->mpt_max_tgtcmds; } mpt->tgt_cmd_ptrs = malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->tgt_cmd_ptrs == NULL) { mpt_prt(mpt, "mpt_add_target_commands: could not allocate cmd ptrs\n"); return (FALSE); } for (i = 0; i < max; i++) { request_t *req; req = mpt_get_request(mpt, FALSE); if (req == NULL) { break; } req->state |= REQ_STATE_LOCKED; mpt->tgt_cmd_ptrs[i] = req; mpt_post_target_command(mpt, req, i); } if (i == 0) { mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); free(mpt->tgt_cmd_ptrs, M_DEVBUF); mpt->tgt_cmd_ptrs = NULL; return (FALSE); } mpt->tgt_cmds_allocated = i; if (i < max) { mpt_lprt(mpt, MPT_PRT_INFO, "added %d of %d target bufs\n", i, max); } return (i); } static int mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) { if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { mpt->twildcard = 1; } else if (lun >= MPT_MAX_LUNS) { return (EINVAL); } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { return (EINVAL); } if (mpt->tenabled == 0) { if (mpt->is_fc) { (void) mpt_fc_reset_link(mpt, 0); } mpt->tenabled = 1; } if (lun == CAM_LUN_WILDCARD) { mpt->trt_wildcard.enabled = 1; } else { 
mpt->trt[lun].enabled = 1; } return (0); } static int mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) { int i; if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { mpt->twildcard = 0; } else if (lun >= MPT_MAX_LUNS) { return (EINVAL); } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { return (EINVAL); } if (lun == CAM_LUN_WILDCARD) { mpt->trt_wildcard.enabled = 0; } else { mpt->trt[lun].enabled = 0; } for (i = 0; i < MPT_MAX_LUNS; i++) { if (mpt->trt[lun].enabled) { break; } } if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { if (mpt->is_fc) { (void) mpt_fc_reset_link(mpt, 0); } mpt->tenabled = 0; } return (0); } /* * Called with MPT lock held */ static void mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) { struct ccb_scsiio *csio = &ccb->csio; request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); switch (tgt->state) { case TGT_STATE_IN_CAM: break; case TGT_STATE_MOVING_DATA: mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_freeze_simq(mpt->sim, 1); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; xpt_done(ccb); return; default: mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); mpt_tgt_dump_req_state(mpt, cmd_req); mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); xpt_done(ccb); return; } if (csio->dxfer_len) { bus_dmamap_callback_t *cb; PTR_MSG_TARGET_ASSIST_REQUEST ta; request_t *req; int error; KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, ("dxfer_len %u but direction is NONE", csio->dxfer_len)); if ((req = mpt_get_request(mpt, FALSE)) == NULL) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_done(ccb); return; } ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; if (sizeof (bus_addr_t) > 4) { cb = mpt_execute_req_a64; } else { cb = mpt_execute_req; } req->ccb = ccb; ccb->ccb_h.ccb_req_ptr = req; /* * Record the currently active ccb and the * request for it in our target state area. */ tgt->ccb = ccb; tgt->req = req; memset(req->req_vbuf, 0, MPT_RQSL(mpt)); ta = req->req_vbuf; if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; ta->QueueTag = ssp->InitiatorTag; } else if (mpt->is_spi) { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; ta->QueueTag = sp->Tag; } ta->Function = MPI_FUNCTION_TARGET_ASSIST; ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); ta->ReplyWord = htole32(tgt->reply_desc); if (csio->ccb_h.target_lun > MPT_MAX_LUNS) { ta->LUN[0] = 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f); ta->LUN[1] = csio->ccb_h.target_lun & 0xff; } else { ta->LUN[1] = csio->ccb_h.target_lun; } ta->RelativeOffset = tgt->bytes_xfered; ta->DataLength = ccb->csio.dxfer_len; if (ta->DataLength > tgt->resid) { ta->DataLength = tgt->resid; } /* * XXX Should be done after data transfer completes? 
*/ tgt->resid -= csio->dxfer_len; tgt->bytes_xfered += csio->dxfer_len; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; } #ifdef WE_TRUST_AUTO_GOOD_STATUS if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_AUTO_STATUS; } #endif tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; mpt_lprt(mpt, MPT_PRT_DEBUG, "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb, req, 0); if (error == EINPROGRESS) { xpt_freeze_simq(mpt->sim, 1); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } } else { uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; /* * XXX: I don't know why this seems to happen, but * XXX: completing the CCB seems to make things happy. * XXX: This seems to happen if the initiator requests * XXX: enough data that we have to do multiple CTIOs. */ if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { mpt_lprt(mpt, MPT_PRT_DEBUG, "Meaningless STATUS CCB (%p): flags %x status %x " "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; xpt_done(ccb); return; } if (ccb->ccb_h.flags & CAM_SEND_SENSE) { sp = sense; memcpy(sp, &csio->sense_data, min(csio->sense_len, MPT_SENSE_SIZE)); } mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp); } } static void mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, uint32_t lun, int send, uint8_t *data, size_t length) { mpt_tgt_state_t *tgt; PTR_MSG_TARGET_ASSIST_REQUEST ta; SGE_SIMPLE32 *se; uint32_t flags; uint8_t *dptr; bus_addr_t pptr; request_t *req; /* * We enter with resid set to the data load for the command. 
*/ tgt = MPT_TGT_STATE(mpt, cmd_req); if (length == 0 || tgt->resid == 0) { tgt->resid = 0; mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL); return; } if ((req = mpt_get_request(mpt, FALSE)) == NULL) { mpt_prt(mpt, "out of resources- dropping local response\n"); return; } tgt->is_local = 1; memset(req->req_vbuf, 0, MPT_RQSL(mpt)); ta = req->req_vbuf; if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; ta->QueueTag = ssp->InitiatorTag; } else if (mpt->is_spi) { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; ta->QueueTag = sp->Tag; } ta->Function = MPI_FUNCTION_TARGET_ASSIST; ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); ta->ReplyWord = htole32(tgt->reply_desc); if (lun > MPT_MAX_LUNS) { ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); ta->LUN[1] = lun & 0xff; } else { ta->LUN[1] = lun; } ta->RelativeOffset = 0; ta->DataLength = length; dptr = req->req_vbuf; dptr += MPT_RQSL(mpt); pptr = req->req_pbuf; pptr += MPT_RQSL(mpt); memcpy(dptr, data, min(length, MPT_RQSL(mpt))); se = (SGE_SIMPLE32 *) &ta->SGL[0]; memset(se, 0,sizeof (*se)); flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; if (send) { ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } se->Address = pptr; MPI_pSGE_SET_LENGTH(se, length); flags |= MPI_SGE_FLAGS_LAST_ELEMENT; flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; MPI_pSGE_SET_FLAGS(se, flags); tgt->ccb = NULL; tgt->req = req; tgt->resid -= length; tgt->bytes_xfered = length; #ifdef WE_TRUST_AUTO_GOOD_STATUS tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; #else tgt->state = TGT_STATE_MOVING_DATA; #endif mpt_send_cmd(mpt, req); } /* * Abort queued up CCBs */ static cam_status mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) { struct mpt_hdr_stailq *lp; struct ccb_hdr *srch; int found = 0; union ccb *accb = ccb->cab.abort_ccb; tgt_resource_t *trtp; mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { trtp = &mpt->trt_wildcard; } else { trtp = &mpt->trt[ccb->ccb_h.target_lun]; } if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { lp = &trtp->atios; } else if (accb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { lp = &trtp->inots; } else { return (CAM_REQ_INVALID); } STAILQ_FOREACH(srch, lp, sim_links.stqe) { if (srch == &accb->ccb_h) { found = 1; STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); break; } } if (found) { accb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(accb); return (CAM_REQ_CMP); } mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb); return (CAM_PATH_INVALID); } /* * Ask the MPT to abort the current target command */ static int mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) { int error; request_t *req; PTR_MSG_TARGET_MODE_ABORT abtp; req = mpt_get_request(mpt, FALSE); if (req == NULL) { return (-1); } abtp = req->req_vbuf; memset(abtp, 0, sizeof (*abtp)); abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); error = 0; if (mpt->is_fc || mpt->is_sas) { mpt_send_cmd(mpt, req); } else { error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); } return (error); } /* * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the * FC929 to set bogus FC_RSP fields (nonzero residuals * but w/o RESID fields set). 
This causes QLogic initiators * to think maybe that a frame was lost. * * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because * we use allocated requests to do TARGET_ASSIST and we * need to know when to release them. */ static void mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, uint8_t status, uint8_t const *sense_data) { uint8_t *cmd_vbuf; mpt_tgt_state_t *tgt; PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; request_t *req; bus_addr_t paddr; int resplen = 0; uint32_t fl; cmd_vbuf = cmd_req->req_vbuf; cmd_vbuf += MPT_RQSL(mpt); tgt = MPT_TGT_STATE(mpt, cmd_req); if ((req = mpt_get_request(mpt, FALSE)) == NULL) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } if (ccb) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_done(ccb); } else { mpt_prt(mpt, "could not allocate status request- dropping\n"); } return; } req->ccb = ccb; if (ccb) { ccb->ccb_h.ccb_mpt_ptr = mpt; ccb->ccb_h.ccb_req_ptr = req; } /* * Record the currently active ccb, if any, and the * request for it in our target state area. */ tgt->ccb = ccb; tgt->req = req; tgt->state = TGT_STATE_SENDING_STATUS; tp = req->req_vbuf; paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); memset(tp, 0, sizeof (*tp)); tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; if (mpt->is_fc) { PTR_MPI_TARGET_FCP_CMD_BUFFER fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; uint8_t *sts_vbuf; uint32_t *rsp; sts_vbuf = req->req_vbuf; sts_vbuf += MPT_RQSL(mpt); rsp = (uint32_t *) sts_vbuf; memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); /* * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. * It has to be big-endian in memory and is organized * in 32 bit words, which are much easier to deal with * as words which are swizzled as needed. * * All we're filling here is the FC_RSP payload. * We may just have the chip synthesize it if * we have no residual and an OK status. * */ memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); rsp[2] = status; if (tgt->resid) { rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */ rsp[3] = htobe32(tgt->resid); #ifdef WE_TRUST_AUTO_GOOD_STATUS resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); #endif } if (status == SCSI_STATUS_CHECK_COND) { int i; rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!! 
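 *
 * Reading these magic numbers against the FCP_RSP IU layout (our
 * interpretation; the MPI headers do not supply mnemonics for them):
 * the rsp[] words get byte-swapped to big endian before going out, so
 * word 2 lands in bytes 8-11 of the IU with the FCP flags byte at byte
 * 10 and the SCSI status at byte 11.  In that flags byte 0x08 is
 * FCP_RESID_UNDER and 0x02 is FCP_SNS_LEN_VALID, which as whole-word
 * values are the 0x800 used above and the 0x200 used here.  The rest of
 * the buffer is laid out as:
 *
 *	word 2 (bytes  8-11): 00 00 <fcp flags> <scsi status>
 *	word 3 (bytes 12-15): FCP_RESID
 *	word 4 (bytes 16-19): FCP_SNS_LEN
 *	word 8 (byte 32) on : sense data, copied in just below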
*/ rsp[4] = htobe32(MPT_SENSE_SIZE); if (sense_data) { memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE); } else { mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI" "TION but no sense data?\n"); memset(&rsp, 0, MPT_SENSE_SIZE); } for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) { rsp[i] = htobe32(rsp[i]); } #ifdef WE_TRUST_AUTO_GOOD_STATUS resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); #endif } #ifndef WE_TRUST_AUTO_GOOD_STATUS resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); #endif rsp[2] = htobe32(rsp[2]); } else if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf; memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN)); } else { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf; tp->StatusCode = status; tp->QueueTag = htole16(sp->Tag); memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN)); } tp->ReplyWord = htole32(tgt->reply_desc); tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); #ifdef WE_CAN_USE_AUTO_REPOST tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER; #endif if (status == SCSI_STATUS_OK && resplen == 0) { tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS; } else { tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr); fl = MPI_SGE_FLAGS_HOST_TO_IOC | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; fl <<= MPI_SGE_FLAGS_SHIFT; fl |= resplen; tp->StatusDataSGE.FlagsLength = htole32(fl); } mpt_lprt(mpt, MPT_PRT_DEBUG, "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n", ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req, req->serno, tgt->resid); if (ccb) { ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb); } mpt_send_cmd(mpt, req); } static void mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc, tgt_resource_t *trtp, int init_id) { struct ccb_immediate_notify *inot; mpt_tgt_state_t *tgt; tgt = MPT_TGT_STATE(mpt, req); inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots); if (inot == NULL) { mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n"); mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL); return; } STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe); mpt_lprt(mpt, MPT_PRT_DEBUG1, "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun); inot->initiator_id = init_id; /* XXX */ /* * This is a somewhat grotesque attempt to map from task management * to old style SCSI messages. God help us all. */ switch (fc) { case MPT_ABORT_TASK_SET: inot->arg = MSG_ABORT_TAG; break; case MPT_CLEAR_TASK_SET: inot->arg = MSG_CLEAR_TASK_SET; break; case MPT_TARGET_RESET: inot->arg = MSG_TARGET_RESET; break; case MPT_CLEAR_ACA: inot->arg = MSG_CLEAR_ACA; break; case MPT_TERMINATE_TASK: inot->arg = MSG_ABORT_TAG; break; default: inot->arg = MSG_NOOP; break; } /* * XXX KDM we need the sequence/tag number for the target of the * task management operation, especially if it is an abort. 
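 *
 * For regular commands that plumbing does exist: mpt_scsi_tgt_atio()
 * below stamps the ATIO with
 *
 *	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
 *
 * and when CAM later sends the corresponding CCB back down,
 * mpt_target_start_io() recovers the original command buffer request
 * with
 *
 *	cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
 *
 * (both macros live elsewhere in the driver); the tag is the only piece
 * of state CAM needs to carry for us.  The immediate notify built here
 * has no equivalent handle yet, which appears to be what this XXX is
 * about.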
*/ tgt->ccb = (union ccb *) inot; inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; xpt_done((union ccb *)inot); } static void mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) { static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', '0', '0', '0', '1' }; struct ccb_accept_tio *atiop; lun_id_t lun; int tag_action = 0; mpt_tgt_state_t *tgt; tgt_resource_t *trtp = NULL; U8 *lunptr; U8 *vbuf; U16 itag; U16 ioindex; mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; uint8_t *cdbp; /* * Stash info for the current command where we can get at it later. */ vbuf = req->req_vbuf; vbuf += MPT_RQSL(mpt); /* * Get our state pointer set up. */ tgt = MPT_TGT_STATE(mpt, req); if (tgt->state != TGT_STATE_LOADED) { mpt_tgt_dump_req_state(mpt, req); panic("bad target state in mpt_scsi_tgt_atio"); } memset(tgt, 0, sizeof (mpt_tgt_state_t)); tgt->state = TGT_STATE_IN_CAM; tgt->reply_desc = reply_desc; ioindex = GET_IO_INDEX(reply_desc); if (mpt->verbose >= MPT_PRT_DEBUG) { mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); } if (mpt->is_fc) { PTR_MPI_TARGET_FCP_CMD_BUFFER fc; fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; if (fc->FcpCntl[2]) { /* * Task Management Request */ switch (fc->FcpCntl[2]) { case 0x2: fct = MPT_ABORT_TASK_SET; break; case 0x4: fct = MPT_CLEAR_TASK_SET; break; case 0x20: fct = MPT_TARGET_RESET; break; case 0x40: fct = MPT_CLEAR_ACA; break; case 0x80: fct = MPT_TERMINATE_TASK; break; default: mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", fc->FcpCntl[2]); mpt_scsi_tgt_status(mpt, 0, req, SCSI_STATUS_OK, 0); return; } } else { switch (fc->FcpCntl[1]) { case 0: tag_action = MSG_SIMPLE_Q_TAG; break; case 1: tag_action = MSG_HEAD_OF_Q_TAG; break; case 2: tag_action = MSG_ORDERED_Q_TAG; break; default: /* * Bah. Ignore Untagged Queing and ACA */ tag_action = MSG_SIMPLE_Q_TAG; break; } } tgt->resid = be32toh(fc->FcpDl); cdbp = fc->FcpCdb; lunptr = fc->FcpLun; itag = be16toh(fc->OptionalOxid); } else if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; cdbp = ssp->CDB; lunptr = ssp->LogicalUnitNumber; itag = ssp->InitiatorTag; } else { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; cdbp = sp->CDB; lunptr = sp->LogicalUnitNumber; itag = sp->Tag; } /* * Generate a simple lun */ switch (lunptr[0] & 0xc0) { case 0x40: lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1]; break; case 0: lun = lunptr[1]; break; default: mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n"); lun = 0xffff; break; } /* * Deal with non-enabled or bad luns here. */ if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || mpt->trt[lun].enabled == 0) { if (mpt->twildcard) { trtp = &mpt->trt_wildcard; } else if (fct == MPT_NIL_TMT_VALUE) { /* * In this case, we haven't got an upstream listener * for either a specific lun or wildcard luns. We * have to make some sensible response. For regular * inquiry, just return some NOT HERE inquiry data. * For VPD inquiry, report illegal field in cdb. * For REQUEST SENSE, just return NO SENSE data. * REPORT LUNS gets illegal command. * All other commands get 'no such device'. 
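 *
 * Those responses are built as fixed format sense data just below.
 * For reference, the bytes being set there follow the standard SPC
 * sense layout:
 *
 *	buf[0]  = 0xf0  response code 0x70 (current error) + VALID bit
 *	buf[2]  = 0x05  sense key ILLEGAL REQUEST
 *	buf[7]  = 0x08  additional sense length
 *	buf[12] = ASC   (0x25 is LOGICAL UNIT NOT SUPPORTED)
 *	buf[13] = ASCQ
 *
 * with the REQUEST SENSE case alone resetting the sense key to NO SENSE
 * before the buffer is handed back as data.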
*/ uint8_t *sp, cond, buf[MPT_SENSE_SIZE]; size_t len; memset(buf, 0, MPT_SENSE_SIZE); cond = SCSI_STATUS_CHECK_COND; buf[0] = 0xf0; buf[2] = 0x5; buf[7] = 0x8; sp = buf; tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); switch (cdbp[0]) { case INQUIRY: { if (cdbp[1] != 0) { buf[12] = 0x26; buf[13] = 0x01; break; } len = min(tgt->resid, cdbp[4]); len = min(len, sizeof (null_iqd)); mpt_lprt(mpt, MPT_PRT_DEBUG, "local inquiry %ld bytes\n", (long) len); mpt_scsi_tgt_local(mpt, req, lun, 1, null_iqd, len); return; } case REQUEST_SENSE: { buf[2] = 0x0; len = min(tgt->resid, cdbp[4]); len = min(len, sizeof (buf)); mpt_lprt(mpt, MPT_PRT_DEBUG, "local reqsense %ld bytes\n", (long) len); mpt_scsi_tgt_local(mpt, req, lun, 1, buf, len); return; } case REPORT_LUNS: mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); buf[12] = 0x26; return; default: mpt_lprt(mpt, MPT_PRT_DEBUG, "CMD 0x%x to unmanaged lun %u\n", cdbp[0], lun); buf[12] = 0x25; break; } mpt_scsi_tgt_status(mpt, NULL, req, cond, sp); return; } /* otherwise, leave trtp NULL */ } else { trtp = &mpt->trt[lun]; } /* * Deal with any task management */ if (fct != MPT_NIL_TMT_VALUE) { if (trtp == NULL) { mpt_prt(mpt, "task mgmt function %x but no listener\n", fct); mpt_scsi_tgt_status(mpt, 0, req, SCSI_STATUS_OK, 0); } else { mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, GET_INITIATOR_INDEX(reply_desc)); } return; } atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); if (atiop == NULL) { mpt_lprt(mpt, MPT_PRT_WARN, "no ATIOs for lun %u- sending back %s\n", lun, mpt->tenabled? "QUEUE FULL" : "BUSY"); mpt_scsi_tgt_status(mpt, NULL, req, mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, NULL); return; } STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); mpt_lprt(mpt, MPT_PRT_DEBUG1, "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun); atiop->ccb_h.ccb_mpt_ptr = mpt; atiop->ccb_h.status = CAM_CDB_RECVD; atiop->ccb_h.target_lun = lun; atiop->sense_len = 0; atiop->init_id = GET_INITIATOR_INDEX(reply_desc); atiop->cdb_len = mpt_cdblen(cdbp[0], 16); memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); /* * The tag we construct here allows us to find the * original request that the command came in with. * * This way we don't have to depend on anything but the * tag to find things when CCBs show back up from CAM. */ atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); tgt->tag_id = atiop->tag_id; if (tag_action) { atiop->tag_action = tag_action; atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; } if (mpt->verbose >= MPT_PRT_DEBUG) { int i; mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop, atiop->ccb_h.target_lun); for (i = 0; i < atiop->cdb_len; i++) { mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, (i == (atiop->cdb_len - 1))? 
'>' : ' '); } mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", itag, atiop->tag_id, tgt->reply_desc, tgt->resid); } xpt_done((union ccb *)atiop); } static void mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) { mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc, tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, tgt->tag_id, tgt->state); } static void mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) { mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, req->index, req->index, req->state); mpt_tgt_dump_tgt_state(mpt, req); } static int mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { int dbg; union ccb *ccb; U16 status; if (reply_frame == NULL) { /* * Figure out what the state of the command is. */ mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); #ifdef INVARIANTS mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); if (tgt->req) { mpt_req_not_spcl(mpt, tgt->req, "turbo scsi_tgt_reply associated req", __LINE__); } #endif switch(tgt->state) { case TGT_STATE_LOADED: /* * This is a new command starting. */ mpt_scsi_tgt_atio(mpt, req, reply_desc); break; case TGT_STATE_MOVING_DATA: { uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; ccb = tgt->ccb; if (tgt->req == NULL) { panic("mpt: turbo target reply with null " "associated request moving data"); /* NOTREACHED */ } if (ccb == NULL) { if (tgt->is_local == 0) { panic("mpt: turbo target reply with " "null associated ccb moving data"); /* NOTREACHED */ } mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_ASSIST local done\n"); TAILQ_REMOVE(&mpt->request_pending_list, tgt->req, links); mpt_free_request(mpt, tgt->req); tgt->req = NULL; mpt_scsi_tgt_status(mpt, NULL, req, 0, NULL); return (TRUE); } tgt->ccb = NULL; tgt->nxfers++; mpt_req_untimeout(req, mpt_timeout, ccb); mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); /* * Free the Target Assist Request */ KASSERT(tgt->req->ccb == ccb, ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, tgt->req->serno, tgt->req->ccb)); TAILQ_REMOVE(&mpt->request_pending_list, tgt->req, links); mpt_free_request(mpt, tgt->req); tgt->req = NULL; /* * Do we need to send status now? That is, are * we done with all our data transfers? 
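 *
 * (For orientation -- this is a summary of the driver code, not anything
 * the firmware defines: a command buffer goes TGT_STATE_LOADING to
 * TGT_STATE_LOADED when its buffer post completes, LOADED to IN_CAM when
 * mpt_scsi_tgt_atio() hands the ATIO up, IN_CAM to
 * SETTING_UP_FOR_DATA/MOVING_DATA while a TARGET_ASSIST is outstanding,
 * SENDING_STATUS once mpt_scsi_tgt_status() is queued, and back to
 * LOADED when the buffer is reposted at the end of the status case
 * below.  The CAM_SEND_STATUS test here is what decides whether we
 * drop back to IN_CAM to wait for another data CCB or go on to send
 * status.)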
*/ if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); tgt->state = TGT_STATE_IN_CAM; if (mpt->outofbeer) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } xpt_done(ccb); break; } /* * Otherwise, send status (and sense) */ if (ccb->ccb_h.flags & CAM_SEND_SENSE) { sp = sense; memcpy(sp, &ccb->csio.sense_data, min(ccb->csio.sense_len, MPT_SENSE_SIZE)); } mpt_scsi_tgt_status(mpt, ccb, req, ccb->csio.scsi_status, sp); break; } case TGT_STATE_SENDING_STATUS: case TGT_STATE_MOVING_DATA_AND_STATUS: { int ioindex; ccb = tgt->ccb; if (tgt->req == NULL) { panic("mpt: turbo target reply with null " "associated request sending status"); /* NOTREACHED */ } if (ccb) { tgt->ccb = NULL; if (tgt->state == TGT_STATE_MOVING_DATA_AND_STATUS) { tgt->nxfers++; } mpt_req_untimeout(req, mpt_timeout, ccb); if (ccb->ccb_h.flags & CAM_SEND_SENSE) { ccb->ccb_h.status |= CAM_SENT_SENSE; } mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_STATUS tag %x sts %x flgs %x req " "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, ccb->ccb_h.flags, tgt->req); /* * Free the Target Send Status Request */ KASSERT(tgt->req->ccb == ccb, ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, tgt->req->serno, tgt->req->ccb)); /* * Notify CAM that we're done */ mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("ZERO ccb sts at %d", __LINE__)); tgt->ccb = NULL; } else { mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_STATUS non-CAM for req %p:%u\n", tgt->req, tgt->req->serno); } TAILQ_REMOVE(&mpt->request_pending_list, tgt->req, links); mpt_free_request(mpt, tgt->req); tgt->req = NULL; /* * And re-post the Command Buffer. * This will reset the state. */ ioindex = GET_IO_INDEX(reply_desc); TAILQ_REMOVE(&mpt->request_pending_list, req, links); tgt->is_local = 0; mpt_post_target_command(mpt, req, ioindex); /* * And post a done for anyone who cares */ if (ccb) { if (mpt->outofbeer) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } xpt_done(ccb); } break; } case TGT_STATE_NIL: /* XXX This Never Happens XXX */ tgt->state = TGT_STATE_LOADED; break; default: mpt_prt(mpt, "Unknown Target State 0x%x in Context " "Reply Function\n", tgt->state); } return (TRUE); } status = le16toh(reply_frame->IOCStatus); if (status != MPI_IOCSTATUS_SUCCESS) { dbg = MPT_PRT_ERROR; } else { dbg = MPT_PRT_DEBUG1; } mpt_lprt(mpt, dbg, "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", req, req->serno, reply_frame, reply_frame->Function, status); switch (reply_frame->Function) { case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: { mpt_tgt_state_t *tgt; #ifdef INVARIANTS mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); #endif if (status != MPI_IOCSTATUS_SUCCESS) { /* * XXX What to do? 
*/ break; } tgt = MPT_TGT_STATE(mpt, req); KASSERT(tgt->state == TGT_STATE_LOADING, ("bad state 0x%x on reply to buffer post", tgt->state)); mpt_assign_serno(mpt, req); tgt->state = TGT_STATE_LOADED; break; } case MPI_FUNCTION_TARGET_ASSIST: #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); #endif mpt_prt(mpt, "target assist completion\n"); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); break; case MPI_FUNCTION_TARGET_STATUS_SEND: #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); #endif mpt_prt(mpt, "status send completion\n"); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); break; case MPI_FUNCTION_TARGET_MODE_ABORT: { PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; PTR_MSG_TARGET_MODE_ABORT abtp = (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); #endif mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); break; } default: mpt_prt(mpt, "Unknown Target Address Reply Function code: " "0x%x\n", reply_frame->Function); break; } return (TRUE); } Index: projects/physbio/sys/kern/subr_busdma.c =================================================================== --- projects/physbio/sys/kern/subr_busdma.c (revision 243875) +++ projects/physbio/sys/kern/subr_busdma.c (revision 243876) @@ -1,109 +1,128 @@ /*- * Copyright (c) 2012 EMC Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_bus.h" #include #include #include #include #include #include +#include #include #include #include int bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { struct ccb_ataio *ataio; struct ccb_scsiio *csio; struct ccb_hdr *ccb_h; void *data_ptr; uint32_t dxfer_len; uint16_t sglist_cnt; ccb_h = &ccb->ccb_h; if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) { callback(callback_arg, NULL, 0, 0); } switch (ccb_h->func_code) { case XPT_SCSI_IO: csio = &ccb->csio; data_ptr = csio->data_ptr; dxfer_len = csio->dxfer_len; sglist_cnt = csio->sglist_cnt; break; case XPT_ATA_IO: ataio = &ccb->ataio; data_ptr = ataio->data_ptr; dxfer_len = ataio->dxfer_len; sglist_cnt = 0; break; default: panic("bus_dmamap_load_ccb: Unsupported func code %d", ccb_h->func_code); } - if ((ccb_h->flags & CAM_SCATTER_VALID) != 0) { - struct bus_dma_segment *segs; - - if ((ccb_h->flags & CAM_DATA_PHYS) != 0) - panic("bus_dmamap_load_ccb - Physical segment " - "pointers unsupported"); - - if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) - panic("bus_dmamap_load_ccb - Virtual segment " - "addresses unsupported"); - - /* Just use the segments provided */ - segs = (struct bus_dma_segment *)data_ptr; - callback(callback_arg, segs, sglist_cnt, 0); - } else if ((ccb_h->flags & CAM_DATA_PHYS) != 0) { - struct bus_dma_segment seg; - - seg.ds_addr = (bus_addr_t)(vm_offset_t)data_ptr; - seg.ds_len = dxfer_len; - callback(callback_arg, &seg, 1, 0); - } else { + switch ((ccb_h->flags & CAM_DATA_MASK)) { + case CAM_DATA_VADDR: return bus_dmamap_load(dmat, map, data_ptr, dxfer_len, callback, callback_arg, /*flags*/0); + case CAM_DATA_PADDR: { + struct bus_dma_segment seg; + + seg.ds_addr = (bus_addr_t)(vm_offset_t)data_ptr; + seg.ds_len = dxfer_len; + callback(callback_arg, &seg, 1, 0); + break; + } + case CAM_DATA_SG: { +#if 0 + struct uio sguio; + KASSERT((sizeof (sguio.uio_iov) == sizeof (data_ptr) && + sizeof (sguio.uio_iovcnt) >= sizeof (sglist_cnt) && + sizeof (sguio.uio_resid) >= sizeof (dxfer_len)), + ("uio won't fit csio data")); + sguio.uio_iov = (struct iovec *)data_ptr; + sguio.uio_iovcnt = csio->sglist_cnt; + sguio.uio_resid = csio->dxfer_len; + sguio.uio_segflg = UIO_SYSSPACE; + return bus_dmamap_load_uio(dmat, map, &sguio, callback, + callback_arg, 0); +#else + panic("bus_dmamap_load_ccb: flags 0x%X unimplemented", + ccb_h->flags); +#endif + } + case CAM_DATA_SG_PADDR: { + struct bus_dma_segment *segs; + /* Just use the segments provided */ + segs = (struct bus_dma_segment *)data_ptr; + callback(callback_arg, segs, sglist_cnt, 0); + break; + } + case CAM_DATA_BIO: + default: + panic("bus_dmamap_load_ccb: flags 0x%X unimplemented", + ccb_h->flags); } return (0); }
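
/*
 * The calling pattern the mpt target code above already follows is the
 * one intended for SIMs converted to the CAM_DATA_* scheme: no per-type
 * branching in the driver, just one bus_dmamap_load_ccb() call plus
 * EINPROGRESS handling, with subr_busdma.c dispatching on CAM_DATA_MASK
 * internally.  A minimal sketch follows; struct foo_softc, struct
 * foo_request and foo_dma_callback() are hypothetical stand-ins and not
 * part of this change.
 */
static void
foo_start_io(struct foo_softc *sc, struct foo_request *req, union ccb *ccb)
{
	int error;

	error = bus_dmamap_load_ccb(sc->buffer_dmat, req->dmap, ccb,
	    foo_dma_callback, req, /*flags*/0);
	if (error == EINPROGRESS) {
		/*
		 * Resources were not immediately available; hold the SIM
		 * queue until the deferred callback finishes the CCB.
		 */
		xpt_freeze_simq(sc->sim, 1);
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	}
}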