Index: head/sys/cam/cam_ccb.h =================================================================== --- head/sys/cam/cam_ccb.h (revision 196007) +++ head/sys/cam/cam_ccb.h (revision 196008) @@ -1,1038 +1,1138 @@ /*- * Data structures and definitions for CAM Control Blocks (CCBs). * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _CAM_CAM_CCB_H #define _CAM_CAM_CCB_H 1 #include #include #include #include #ifndef _KERNEL #include #endif #include #include #include /* General allocation length definitions for CCB structures */ #define IOCDBLEN CAM_MAX_CDBLEN /* Space for CDB bytes/pointer */ #define VUHBALEN 14 /* Vendor Unique HBA length */ #define SIM_IDLEN 16 /* ASCII string len for SIM ID */ #define HBA_IDLEN 16 /* ASCII string len for HBA ID */ #define DEV_IDLEN 16 /* ASCII string len for device names */ #define CCB_PERIPH_PRIV_SIZE 2 /* size of peripheral private area */ #define CCB_SIM_PRIV_SIZE 2 /* size of sim private area */ /* Struct definitions for CAM control blocks */ /* Common CCB header */ /* CAM CCB flags */ typedef enum { CAM_CDB_POINTER = 0x00000001,/* The CDB field is a pointer */ CAM_QUEUE_ENABLE = 0x00000002,/* SIM queue actions are enabled */ CAM_CDB_LINKED = 0x00000004,/* CCB contains a linked CDB */ CAM_NEGOTIATE = 0x00000008,/* * Perform transport negotiation * with this command. */ CAM_SCATTER_VALID = 0x00000010,/* Scatter/gather list is valid */ CAM_DIS_AUTOSENSE = 0x00000020,/* Disable autosense feature */ CAM_DIR_RESV = 0x00000000,/* Data direction (00:reserved) */ CAM_DIR_IN = 0x00000040,/* Data direction (01:DATA IN) */ CAM_DIR_OUT = 0x00000080,/* Data direction (10:DATA OUT) */ CAM_DIR_NONE = 0x000000C0,/* Data direction (11:no data) */ CAM_DIR_MASK = 0x000000C0,/* Data direction Mask */ CAM_SOFT_RST_OP = 0x00000100,/* Use Soft reset alternative */ CAM_ENG_SYNC = 0x00000200,/* Flush resid bytes on complete */ CAM_DEV_QFRZDIS = 0x00000400,/* Disable DEV Q freezing */ CAM_DEV_QFREEZE = 0x00000800,/* Freeze DEV Q on execution */ CAM_HIGH_POWER = 0x00001000,/* Command takes a lot of power */ CAM_SENSE_PTR = 0x00002000,/* Sense data is a pointer */ CAM_SENSE_PHYS = 0x00004000,/* Sense pointer is physical addr*/ CAM_TAG_ACTION_VALID = 0x00008000,/* Use the tag action in this ccb*/ CAM_PASS_ERR_RECOVER = 0x00010000,/* Pass driver does err. 
recovery*/ CAM_DIS_DISCONNECT = 0x00020000,/* Disable disconnect */ CAM_SG_LIST_PHYS = 0x00040000,/* SG list has physical addrs. */ CAM_MSG_BUF_PHYS = 0x00080000,/* Message buffer ptr is physical*/ CAM_SNS_BUF_PHYS = 0x00100000,/* Autosense data ptr is physical*/ CAM_DATA_PHYS = 0x00200000,/* SG/Buffer data ptrs are phys. */ CAM_CDB_PHYS = 0x00400000,/* CDB poiner is physical */ CAM_ENG_SGLIST = 0x00800000,/* SG list is for the HBA engine */ /* Phase cognizant mode flags */ CAM_DIS_AUTOSRP = 0x01000000,/* Disable autosave/restore ptrs */ CAM_DIS_AUTODISC = 0x02000000,/* Disable auto disconnect */ CAM_TGT_CCB_AVAIL = 0x04000000,/* Target CCB available */ CAM_TGT_PHASE_MODE = 0x08000000,/* The SIM runs in phase mode */ CAM_MSGB_VALID = 0x10000000,/* Message buffer valid */ CAM_STATUS_VALID = 0x20000000,/* Status buffer valid */ CAM_DATAB_VALID = 0x40000000,/* Data buffer valid */ /* Host target Mode flags */ CAM_SEND_SENSE = 0x08000000,/* Send sense data with status */ CAM_TERM_IO = 0x10000000,/* Terminate I/O Message sup. 
*/ CAM_DISCONNECT = 0x20000000,/* Disconnects are mandatory */ CAM_SEND_STATUS = 0x40000000 /* Send status after data phase */ } ccb_flags; /* XPT Opcodes for xpt_action */ typedef enum { /* Function code flags are bits greater than 0xff */ XPT_FC_QUEUED = 0x100, /* Non-immediate function code */ XPT_FC_USER_CCB = 0x200, XPT_FC_XPT_ONLY = 0x400, /* Only for the transport layer device */ XPT_FC_DEV_QUEUED = 0x800 | XPT_FC_QUEUED, /* Passes through the device queues */ /* Common function commands: 0x00->0x0F */ XPT_NOOP = 0x00, /* Execute Nothing */ XPT_SCSI_IO = 0x01 | XPT_FC_DEV_QUEUED, /* Execute the requested I/O operation */ XPT_GDEV_TYPE = 0x02, /* Get type information for specified device */ XPT_GDEVLIST = 0x03, /* Get a list of peripheral devices */ XPT_PATH_INQ = 0x04, /* Path routing inquiry */ XPT_REL_SIMQ = 0x05, /* Release a frozen SIM queue */ XPT_SASYNC_CB = 0x06, /* Set Asynchronous Callback Parameters */ XPT_SDEV_TYPE = 0x07, /* Set device type information */ XPT_SCAN_BUS = 0x08 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* (Re)Scan the SCSI Bus */ XPT_DEV_MATCH = 0x09 | XPT_FC_XPT_ONLY, /* Get EDT entries matching the given pattern */ XPT_DEBUG = 0x0a, /* Turn on debugging for a bus, target or lun */ XPT_PATH_STATS = 0x0b, /* Path statistics (error counts, etc.) */ XPT_GDEV_STATS = 0x0c, /* Device statistics (error counts, etc.) 
*/ /* SCSI Control Functions: 0x10->0x1F */ XPT_ABORT = 0x10, /* Abort the specified CCB */ XPT_RESET_BUS = 0x11 | XPT_FC_XPT_ONLY, /* Reset the specified SCSI bus */ XPT_RESET_DEV = 0x12 | XPT_FC_DEV_QUEUED, /* Bus Device Reset the specified SCSI device */ XPT_TERM_IO = 0x13, /* Terminate the I/O process */ XPT_SCAN_LUN = 0x14 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Logical Unit */ XPT_GET_TRAN_SETTINGS = 0x15, /* * Get default/user transfer settings * for the target */ XPT_SET_TRAN_SETTINGS = 0x16, /* * Set transfer rate/width * negotiation settings */ XPT_CALC_GEOMETRY = 0x17, /* * Calculate the geometry parameters for * a device give the sector size and * volume size. */ XPT_ATA_IO = 0x18 | XPT_FC_DEV_QUEUED, /* Execute the requested ATA I/O operation */ + XPT_GET_SIM_KNOB = 0x18, + /* + * Get SIM specific knob values. + */ + + XPT_SET_SIM_KNOB = 0x19, + /* + * Set SIM specific knob values. + */ /* HBA engine commands 0x20->0x2F */ XPT_ENG_INQ = 0x20 | XPT_FC_XPT_ONLY, /* HBA engine feature inquiry */ XPT_ENG_EXEC = 0x21 | XPT_FC_DEV_QUEUED, /* HBA execute engine request */ /* Target mode commands: 0x30->0x3F */ XPT_EN_LUN = 0x30, /* Enable LUN as a target */ XPT_TARGET_IO = 0x31 | XPT_FC_DEV_QUEUED, /* Execute target I/O request */ XPT_ACCEPT_TARGET_IO = 0x32 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Accept Host Target Mode CDB */ XPT_CONT_TARGET_IO = 0x33 | XPT_FC_DEV_QUEUED, /* Continue Host Target I/O Connection */ XPT_IMMED_NOTIFY = 0x34 | XPT_FC_QUEUED | XPT_FC_USER_CCB, - /* Notify Host Target driver of event */ + /* Notify Host Target driver of event (obsolete) */ XPT_NOTIFY_ACK = 0x35, + /* Acknowledgement of event (obsolete) */ + XPT_IMMEDIATE_NOTIFY = 0x36 | XPT_FC_QUEUED | XPT_FC_USER_CCB, + /* Notify Host Target driver of event */ + XPT_NOTIFY_ACKNOWLEDGE = 0x37 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Acknowledgement of event */ /* Vendor Unique codes: 0x80->0x8F */ XPT_VUNIQUE = 0x80 } xpt_opcode; #define XPT_FC_GROUP_MASK 0xF0 
#define XPT_FC_GROUP(op) ((op) & XPT_FC_GROUP_MASK) #define XPT_FC_GROUP_COMMON 0x00 #define XPT_FC_GROUP_SCSI_CONTROL 0x10 #define XPT_FC_GROUP_HBA_ENGINE 0x20 #define XPT_FC_GROUP_TMODE 0x30 #define XPT_FC_GROUP_VENDOR_UNIQUE 0x80 #define XPT_FC_IS_DEV_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_DEV_QUEUED) == XPT_FC_DEV_QUEUED) #define XPT_FC_IS_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_QUEUED) != 0) typedef enum { PROTO_UNKNOWN, PROTO_UNSPECIFIED, PROTO_SCSI, /* Small Computer System Interface */ PROTO_ATA, /* AT Attachment */ PROTO_ATAPI, /* AT Attachment Packetized Interface */ PROTO_SATAPM, /* SATA Port Multiplier */ } cam_proto; typedef enum { XPORT_UNKNOWN, XPORT_UNSPECIFIED, XPORT_SPI, /* SCSI Parallel Interface */ XPORT_FC, /* Fiber Channel */ XPORT_SSA, /* Serial Storage Architecture */ XPORT_USB, /* Universal Serial Bus */ XPORT_PPB, /* Parallel Port Bus */ XPORT_ATA, /* AT Attachment */ XPORT_SAS, /* Serial Attached SCSI */ XPORT_SATA, /* Serial AT Attachment */ } cam_xport; #define PROTO_VERSION_UNKNOWN (UINT_MAX - 1) #define PROTO_VERSION_UNSPECIFIED UINT_MAX #define XPORT_VERSION_UNKNOWN (UINT_MAX - 1) #define XPORT_VERSION_UNSPECIFIED UINT_MAX typedef union { LIST_ENTRY(ccb_hdr) le; SLIST_ENTRY(ccb_hdr) sle; TAILQ_ENTRY(ccb_hdr) tqe; STAILQ_ENTRY(ccb_hdr) stqe; } camq_entry; typedef union { void *ptr; u_long field; u_int8_t bytes[sizeof(uintptr_t)]; } ccb_priv_entry; typedef union { ccb_priv_entry entries[CCB_PERIPH_PRIV_SIZE]; u_int8_t bytes[CCB_PERIPH_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_ppriv_area; typedef union { ccb_priv_entry entries[CCB_SIM_PRIV_SIZE]; u_int8_t bytes[CCB_SIM_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_spriv_area; struct ccb_hdr { cam_pinfo pinfo; /* Info for priority scheduling */ camq_entry xpt_links; /* For chaining in the XPT layer */ camq_entry sim_links; /* For chaining in the SIM layer */ camq_entry periph_links; /* For chaining in the type driver */ u_int32_t retry_count; void (*cbfcnp)(struct 
cam_periph *, union ccb *); /* Callback on completion function */ xpt_opcode func_code; /* XPT function code */ u_int32_t status; /* Status returned by CAM subsystem */ struct cam_path *path; /* Compiled path for this ccb */ path_id_t path_id; /* Path ID for the request */ target_id_t target_id; /* Target device ID */ lun_id_t target_lun; /* Target LUN number */ u_int32_t flags; /* ccb_flags */ ccb_ppriv_area periph_priv; ccb_spriv_area sim_priv; u_int32_t timeout; /* Timeout value */ /* * Deprecated, only for use by non-MPSAFE SIMs. All others must * allocate and initialize their own callout storage. */ struct callout_handle timeout_ch; }; /* Get Device Information CCB */ struct ccb_getdev { struct ccb_hdr ccb_h; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; u_int8_t serial_num[252]; u_int8_t reserved; u_int8_t serial_num_len; }; /* Device Statistics CCB */ struct ccb_getdevstats { struct ccb_hdr ccb_h; int dev_openings; /* Space left for more work on device*/ int dev_active; /* Transactions running on the device */ int devq_openings; /* Space left for more queued work */ int devq_queued; /* Transactions queued to be sent */ int held; /* * CCBs held by peripheral drivers * for this device */ int maxtags; /* * Boundary conditions for number of * tagged operations */ int mintags; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { CAM_GDEVLIST_LAST_DEVICE, CAM_GDEVLIST_LIST_CHANGED, CAM_GDEVLIST_MORE_DEVS, CAM_GDEVLIST_ERROR } ccb_getdevlist_status_e; struct ccb_getdevlist { struct ccb_hdr ccb_h; char periph_name[DEV_IDLEN]; u_int32_t unit_number; unsigned int generation; u_int32_t index; ccb_getdevlist_status_e status; }; typedef enum { PERIPH_MATCH_NONE = 0x000, PERIPH_MATCH_PATH = 0x001, PERIPH_MATCH_TARGET = 0x002, PERIPH_MATCH_LUN = 0x004, PERIPH_MATCH_NAME = 0x008, PERIPH_MATCH_UNIT = 0x010, PERIPH_MATCH_ANY = 0x01f } periph_pattern_flags; struct periph_match_pattern { char 
periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; periph_pattern_flags flags; }; typedef enum { DEV_MATCH_NONE = 0x000, DEV_MATCH_PATH = 0x001, DEV_MATCH_TARGET = 0x002, DEV_MATCH_LUN = 0x004, DEV_MATCH_INQUIRY = 0x008, DEV_MATCH_ANY = 0x00f } dev_pattern_flags; struct device_match_pattern { path_id_t path_id; target_id_t target_id; lun_id_t target_lun; struct scsi_static_inquiry_pattern inq_pat; dev_pattern_flags flags; }; typedef enum { BUS_MATCH_NONE = 0x000, BUS_MATCH_PATH = 0x001, BUS_MATCH_NAME = 0x002, BUS_MATCH_UNIT = 0x004, BUS_MATCH_BUS_ID = 0x008, BUS_MATCH_ANY = 0x00f } bus_pattern_flags; struct bus_match_pattern { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; bus_pattern_flags flags; }; union match_pattern { struct periph_match_pattern periph_pattern; struct device_match_pattern device_pattern; struct bus_match_pattern bus_pattern; }; typedef enum { DEV_MATCH_PERIPH, DEV_MATCH_DEVICE, DEV_MATCH_BUS } dev_match_type; struct dev_match_pattern { dev_match_type type; union match_pattern pattern; }; struct periph_match_result { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; }; typedef enum { DEV_RESULT_NOFLAG = 0x00, DEV_RESULT_UNCONFIGURED = 0x01 } dev_result_flags; struct device_match_result { path_id_t path_id; target_id_t target_id; lun_id_t target_lun; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; dev_result_flags flags; }; struct bus_match_result { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; }; union match_result { struct periph_match_result periph_result; struct device_match_result device_result; struct bus_match_result bus_result; }; struct dev_match_result { dev_match_type type; union match_result result; }; typedef enum { CAM_DEV_MATCH_LAST, CAM_DEV_MATCH_MORE, CAM_DEV_MATCH_LIST_CHANGED, 
CAM_DEV_MATCH_SIZE_ERROR, CAM_DEV_MATCH_ERROR } ccb_dev_match_status; typedef enum { CAM_DEV_POS_NONE = 0x000, CAM_DEV_POS_BUS = 0x001, CAM_DEV_POS_TARGET = 0x002, CAM_DEV_POS_DEVICE = 0x004, CAM_DEV_POS_PERIPH = 0x008, CAM_DEV_POS_PDPTR = 0x010, CAM_DEV_POS_TYPEMASK = 0xf00, CAM_DEV_POS_EDT = 0x100, CAM_DEV_POS_PDRV = 0x200 } dev_pos_type; struct ccb_dm_cookie { void *bus; void *target; void *device; void *periph; void *pdrv; }; struct ccb_dev_position { u_int generations[4]; #define CAM_BUS_GENERATION 0x00 #define CAM_TARGET_GENERATION 0x01 #define CAM_DEV_GENERATION 0x02 #define CAM_PERIPH_GENERATION 0x03 dev_pos_type position_type; struct ccb_dm_cookie cookie; }; struct ccb_dev_match { struct ccb_hdr ccb_h; ccb_dev_match_status status; u_int32_t num_patterns; u_int32_t pattern_buf_len; struct dev_match_pattern *patterns; u_int32_t num_matches; u_int32_t match_buf_len; struct dev_match_result *matches; struct ccb_dev_position pos; }; /* * Definitions for the path inquiry CCB fields. */ #define CAM_VERSION 0x15 /* Hex value for current version */ typedef enum { PI_MDP_ABLE = 0x80, /* Supports MDP message */ PI_WIDE_32 = 0x40, /* Supports 32 bit wide SCSI */ PI_WIDE_16 = 0x20, /* Supports 16 bit wide SCSI */ PI_SDTR_ABLE = 0x10, /* Supports SDTR message */ PI_LINKED_CDB = 0x08, /* Supports linked CDBs */ PI_SATAPM = 0x04, /* Supports SATA PM */ PI_TAG_ABLE = 0x02, /* Supports tag queue messages */ PI_SOFT_RST = 0x01 /* Supports soft reset alternative */ } pi_inqflag; typedef enum { PIT_PROCESSOR = 0x80, /* Target mode processor mode */ PIT_PHASE = 0x40, /* Target mode phase cog. 
mode */ PIT_DISCONNECT = 0x20, /* Disconnects supported in target mode */ PIT_TERM_IO = 0x10, /* Terminate I/O message supported in TM */ PIT_GRP_6 = 0x08, /* Group 6 commands supported */ PIT_GRP_7 = 0x04 /* Group 7 commands supported */ } pi_tmflag; typedef enum { PIM_SCANHILO = 0x80, /* Bus scans from high ID to low ID */ PIM_NOREMOVE = 0x40, /* Removeable devices not included in scan */ PIM_NOINITIATOR = 0x20, /* Initiator role not supported. */ PIM_NOBUSRESET = 0x10, /* User has disabled initial BUS RESET */ PIM_NO_6_BYTE = 0x08, /* Do not send 6-byte commands */ PIM_SEQSCAN = 0x04 /* Do bus scans sequentially, not in parallel */ } pi_miscflag; /* Path Inquiry CCB */ struct ccb_pathinq_settings_spi { u_int8_t ppr_options; }; + struct ccb_pathinq_settings_fc { u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int32_t port; /* 24 bit port id, if known */ u_int32_t bitrate; /* Mbps */ }; + struct ccb_pathinq_settings_sas { u_int32_t bitrate; /* Mbps */ }; #define PATHINQ_SETTINGS_SIZE 128 struct ccb_pathinq { struct ccb_hdr ccb_h; u_int8_t version_num; /* Version number for the SIM/HBA */ u_int8_t hba_inquiry; /* Mimic of INQ byte 7 for the HBA */ u_int8_t target_sprt; /* Flags for target mode support */ u_int8_t hba_misc; /* Misc HBA features */ u_int16_t hba_eng_cnt; /* HBA engine count */ /* Vendor Unique capabilities */ u_int8_t vuhba_flags[VUHBALEN]; u_int32_t max_target; /* Maximum supported Target */ u_int32_t max_lun; /* Maximum supported Lun */ u_int32_t async_flags; /* Installed Async handlers */ path_id_t hpath_id; /* Highest Path ID in the subsystem */ target_id_t initiator_id; /* ID of the HBA on the SCSI bus */ char sim_vid[SIM_IDLEN]; /* Vendor ID of the SIM */ char hba_vid[HBA_IDLEN]; /* Vendor ID of the HBA */ char dev_name[DEV_IDLEN];/* Device name for SIM */ u_int32_t unit_number; /* Unit number for SIM */ u_int32_t bus_id; /* Bus ID for SIM */ u_int32_t base_transfer_speed;/* Base bus speed in KB/sec */ 
cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { struct ccb_pathinq_settings_spi spi; struct ccb_pathinq_settings_fc fc; struct ccb_pathinq_settings_sas sas; char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE]; } xport_specific; u_int maxio; /* Max supported I/O size, in bytes. */ }; /* Path Statistics CCB */ struct ccb_pathstats { struct ccb_hdr ccb_h; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef union { u_int8_t *sense_ptr; /* * Pointer to storage * for sense information */ /* Storage Area for sense information */ struct scsi_sense_data sense_buf; } sense_t; typedef union { u_int8_t *cdb_ptr; /* Pointer to the CDB bytes to send */ /* Area for the CDB send */ u_int8_t cdb_bytes[IOCDBLEN]; } cdb_t; /* * SCSI I/O Request CCB used for the XPT_SCSI_IO and XPT_CONT_TARGET_IO * function codes. */ struct ccb_scsiio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ u_int8_t *req_map; /* Ptr to mapping info */ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ /* Autosense storage */ struct scsi_sense_data sense_data; u_int8_t sense_len; /* Number of bytes to autosense */ u_int8_t cdb_len; /* Number of bytes for the CDB */ u_int16_t sglist_cnt; /* Number of SG list entries */ u_int8_t scsi_status; /* Returned SCSI status */ u_int8_t sense_resid; /* Autosense resid length: 2's comp */ u_int32_t resid; /* Transfer residual length: 2's comp */ cdb_t cdb_io; /* Union for CDB bytes/pointer */ u_int8_t *msg_ptr; /* Pointer to the message buffer */ u_int16_t msg_len; /* Number of bytes for the Message */ u_int8_t tag_action; /* What to do for tag queueing */ /* * The tag action should be either the define below (to send a * non-tagged transaction) or one of the defined scsi tag messages * from scsi_message.h. 
*/ #define CAM_TAG_ACTION_NONE 0x00 u_int tag_id; /* tag id from initator (target mode) */ u_int init_id; /* initiator id of who selected */ }; /* * ATA I/O Request CCB used for the XPT_ATA_IO function code. */ struct ccb_ataio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct ata_cmd cmd; /* ATA command register set */ struct ata_res res; /* ATA result register set */ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ u_int32_t resid; /* Transfer residual length: 2's comp */ u_int8_t tag_action; /* What to do for tag queueing */ /* * The tag action should be either the define below (to send a * non-tagged transaction) or one of the defined scsi tag messages * from scsi_message.h. */ #define CAM_TAG_ACTION_NONE 0x00 u_int tag_id; /* tag id from initator (target mode) */ u_int init_id; /* initiator id of who selected */ }; struct ccb_accept_tio { struct ccb_hdr ccb_h; cdb_t cdb_io; /* Union for CDB bytes/pointer */ u_int8_t cdb_len; /* Number of bytes for the CDB */ u_int8_t tag_action; /* What to do for tag queueing */ u_int8_t sense_len; /* Number of bytes of Sense Data */ u_int tag_id; /* tag id from initator (target mode) */ u_int init_id; /* initiator id of who selected */ struct scsi_sense_data sense_data; }; /* Release SIM Queue */ struct ccb_relsim { struct ccb_hdr ccb_h; u_int32_t release_flags; #define RELSIM_ADJUST_OPENINGS 0x01 #define RELSIM_RELEASE_AFTER_TIMEOUT 0x02 #define RELSIM_RELEASE_AFTER_CMDCMPLT 0x04 #define RELSIM_RELEASE_AFTER_QEMPTY 0x08 u_int32_t openings; u_int32_t release_timeout; u_int32_t qfrozen_cnt; }; /* * Definitions for the asynchronous callback CCB fields. 
*/ typedef enum { + AC_CONTRACT = 0x1000,/* A contractual callback */ AC_GETDEV_CHANGED = 0x800,/* Getdev info might have changed */ AC_INQ_CHANGED = 0x400,/* Inquiry info might have changed */ AC_TRANSFER_NEG = 0x200,/* New transfer settings in effect */ AC_LOST_DEVICE = 0x100,/* A device went away */ AC_FOUND_DEVICE = 0x080,/* A new device was found */ AC_PATH_DEREGISTERED = 0x040,/* A path has de-registered */ AC_PATH_REGISTERED = 0x020,/* A new path has been registered */ AC_SENT_BDR = 0x010,/* A BDR message was sent to target */ AC_SCSI_AEN = 0x008,/* A SCSI AEN has been received */ AC_UNSOL_RESEL = 0x002,/* Unsolicited reselection occurred */ AC_BUS_RESET = 0x001 /* A SCSI bus reset occurred */ } ac_code; typedef void ac_callback_t (void *softc, u_int32_t code, struct cam_path *path, void *args); +/* + * Generic Asynchronous callbacks. + * + * Generic arguments passed back which are then interpreted on a per-system + * basis using the contract number. + */ +#define AC_CONTRACT_DATA_MAX (128 - sizeof (u_int64_t)) +struct ac_contract { + u_int64_t contract_number; + u_int8_t contract_data[AC_CONTRACT_DATA_MAX]; +}; + +#define AC_CONTRACT_DEV_CHG 1 +struct ac_device_changed { + u_int64_t wwpn; + u_int32_t port; + target_id_t target; + u_int8_t arrived; +}; + /* Set Asynchronous Callback CCB */ struct ccb_setasync { struct ccb_hdr ccb_h; u_int32_t event_enable; /* Async Event enables */ ac_callback_t *callback; void *callback_arg; }; /* Set Device Type CCB */ struct ccb_setdev { struct ccb_hdr ccb_h; u_int8_t dev_type; /* Value for dev type field in EDT */ }; /* SCSI Control Functions */ /* Abort XPT request CCB */ struct ccb_abort { struct ccb_hdr ccb_h; union ccb *abort_ccb; /* Pointer to CCB to abort */ }; /* Reset SCSI Bus CCB */ struct ccb_resetbus { struct ccb_hdr ccb_h; }; /* Reset SCSI Device CCB */ struct ccb_resetdev { struct ccb_hdr ccb_h; }; /* Terminate I/O Process Request CCB */ struct ccb_termio { struct ccb_hdr ccb_h; union ccb *termio_ccb; /* Pointer to CCB 
to terminate */ }; typedef enum { CTS_TYPE_CURRENT_SETTINGS, CTS_TYPE_USER_SETTINGS } cts_type; struct ccb_trans_settings_scsi { u_int valid; /* Which fields to honor */ #define CTS_SCSI_VALID_TQ 0x01 u_int flags; #define CTS_SCSI_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_spi { u_int valid; /* Which fields to honor */ #define CTS_SPI_VALID_SYNC_RATE 0x01 #define CTS_SPI_VALID_SYNC_OFFSET 0x02 #define CTS_SPI_VALID_BUS_WIDTH 0x04 #define CTS_SPI_VALID_DISC 0x08 #define CTS_SPI_VALID_PPR_OPTIONS 0x10 u_int flags; #define CTS_SPI_FLAGS_DISC_ENB 0x01 u_int sync_period; u_int sync_offset; u_int bus_width; u_int ppr_options; }; struct ccb_trans_settings_fc { u_int valid; /* Which fields to honor */ #define CTS_FC_VALID_WWNN 0x8000 #define CTS_FC_VALID_WWPN 0x4000 #define CTS_FC_VALID_PORT 0x2000 #define CTS_FC_VALID_SPEED 0x1000 u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int32_t port; /* 24 bit port id, if known */ u_int32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_sas { u_int valid; /* Which fields to honor */ #define CTS_SAS_VALID_SPEED 0x1000 u_int32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_sata { u_int valid; /* Which fields to honor */ #define CTS_SATA_VALID_SPEED 0x01 #define CTS_SATA_VALID_PM 0x02 u_int32_t bitrate; /* Mbps */ u_int pm_present; /* PM is present (XPT->SIM) */ }; /* Get/Set transfer rate/width/disconnection/tag queueing settings */ struct ccb_trans_settings { struct ccb_hdr ccb_h; cts_type type; /* Current or User settings */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_scsi scsi; } proto_specific; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_spi spi; struct ccb_trans_settings_fc fc; struct ccb_trans_settings_sas sas; struct ccb_trans_settings_sata sata; } xport_specific; }; /* * Calculate the geometry parameters for a device * give 
the block size and volume size in blocks. */ struct ccb_calc_geometry { struct ccb_hdr ccb_h; u_int32_t block_size; u_int64_t volume_size; u_int32_t cylinders; u_int8_t heads; u_int8_t secs_per_track; }; /* + * Set or get SIM (and transport) specific knobs + */ + +#define KNOB_VALID_ADDRESS 0x1 +#define KNOB_VALID_ROLE 0x2 + + +#define KNOB_ROLE_NONE 0x0 +#define KNOB_ROLE_INITIATOR 0x1 +#define KNOB_ROLE_TARGET 0x2 +#define KNOB_ROLE_BOTH 0x3 + +struct ccb_sim_knob_settings_spi { + u_int valid; + u_int initiator_id; + u_int role; +}; + +struct ccb_sim_knob_settings_fc { + u_int valid; + u_int64_t wwnn; /* world wide node name */ + u_int64_t wwpn; /* world wide port name */ + u_int role; +}; + +struct ccb_sim_knob_settings_sas { + u_int valid; + u_int64_t wwnn; /* world wide node name */ + u_int role; +}; +#define KNOB_SETTINGS_SIZE 128 + +struct ccb_sim_knob { + struct ccb_hdr ccb_h; + union { + u_int valid; /* Which fields to honor */ + struct ccb_sim_knob_settings_spi spi; + struct ccb_sim_knob_settings_fc fc; + struct ccb_sim_knob_settings_sas sas; + char pad[KNOB_SETTINGS_SIZE]; + } xport_specific; +}; + +/* * Rescan the given bus, or bus/target/lun */ struct ccb_rescan { struct ccb_hdr ccb_h; cam_flags flags; }; /* * Turn on debugging for the given bus, bus/target, or bus/target/lun. */ struct ccb_debug { struct ccb_hdr ccb_h; cam_debug_flags flags; }; /* Target mode structures. 
*/ struct ccb_en_lun { struct ccb_hdr ccb_h; u_int16_t grp6_len; /* Group 6 VU CDB length */ u_int16_t grp7_len; /* Group 7 VU CDB length */ u_int8_t enable; }; +/* old, barely used immediate notify, binary compatibility */ struct ccb_immed_notify { struct ccb_hdr ccb_h; struct scsi_sense_data sense_data; u_int8_t sense_len; /* Number of bytes in sense buffer */ u_int8_t initiator_id; /* Id of initiator that selected */ u_int8_t message_args[7]; /* Message Arguments */ }; struct ccb_notify_ack { struct ccb_hdr ccb_h; u_int16_t seq_id; /* Sequence identifier */ u_int8_t event; /* Event flags */ }; +struct ccb_immediate_notify { + struct ccb_hdr ccb_h; + u_int tag_id; /* Tag for immediate notify */ + u_int seq_id; /* Tag for target of notify */ + u_int initiator_id; /* Initiator Identifier */ + u_int arg; /* Function specific */ +}; + +struct ccb_notify_acknowledge { + struct ccb_hdr ccb_h; + u_int tag_id; /* Tag for immediate notify */ + u_int seq_id; /* Tag for target of notify */ + u_int initiator_id; /* Initiator Identifier */ + u_int arg; /* Function specific */ +}; + /* HBA engine structures. 
*/ typedef enum { EIT_BUFFER, /* Engine type: buffer memory */ EIT_LOSSLESS, /* Engine type: lossless compression */ EIT_LOSSY, /* Engine type: lossy compression */ EIT_ENCRYPT /* Engine type: encryption */ } ei_type; typedef enum { EAD_VUNIQUE, /* Engine algorithm ID: vendor unique */ EAD_LZ1V1, /* Engine algorithm ID: LZ1 var.1 */ EAD_LZ2V1, /* Engine algorithm ID: LZ2 var.1 */ EAD_LZ2V2 /* Engine algorithm ID: LZ2 var.2 */ } ei_algo; struct ccb_eng_inq { struct ccb_hdr ccb_h; u_int16_t eng_num; /* The engine number for this inquiry */ ei_type eng_type; /* Returned engine type */ ei_algo eng_algo; /* Returned engine algorithm type */ u_int32_t eng_memeory; /* Returned engine memory size */ }; struct ccb_eng_exec { /* This structure must match SCSIIO size */ struct ccb_hdr ccb_h; u_int8_t *pdrv_ptr; /* Ptr used by the peripheral driver */ u_int8_t *req_map; /* Ptr for mapping info on the req. */ u_int8_t *data_ptr; /* Pointer to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ u_int8_t *engdata_ptr; /* Pointer to the engine buffer data */ u_int16_t sglist_cnt; /* Num of scatter gather list entries */ u_int32_t dmax_len; /* Destination data maximum length */ u_int32_t dest_len; /* Destination data length */ int32_t src_resid; /* Source residual length: 2's comp */ u_int32_t timeout; /* Timeout value */ u_int16_t eng_num; /* Engine number for this request */ u_int16_t vu_flags; /* Vendor Unique flags */ }; /* * Definitions for the timeout field in the SCSI I/O CCB. */ #define CAM_TIME_DEFAULT 0x00000000 /* Use SIM default value */ #define CAM_TIME_INFINITY 0xFFFFFFFF /* Infinite timeout */ #define CAM_SUCCESS 0 /* For signaling general success */ #define CAM_FAILURE 1 /* For signaling general failure */ #define CAM_FALSE 0 #define CAM_TRUE 1 #define XPT_CCB_INVALID -1 /* for signaling a bad CCB to free */ /* * Union of all CCB types for kernel space allocation. 
This union should * never be used for manipulating CCBs - its only use is for the allocation * and deallocation of raw CCB space and is the return type of xpt_ccb_alloc * and the argument to xpt_ccb_free. */ union ccb { struct ccb_hdr ccb_h; /* For convenience */ struct ccb_scsiio csio; struct ccb_getdev cgd; struct ccb_getdevlist cgdl; struct ccb_pathinq cpi; struct ccb_relsim crs; struct ccb_setasync csa; struct ccb_setdev csd; struct ccb_pathstats cpis; struct ccb_getdevstats cgds; struct ccb_dev_match cdm; struct ccb_trans_settings cts; struct ccb_calc_geometry ccg; + struct ccb_sim_knob knob; struct ccb_abort cab; struct ccb_resetbus crb; struct ccb_resetdev crd; struct ccb_termio tio; struct ccb_accept_tio atio; struct ccb_scsiio ctio; struct ccb_en_lun cel; struct ccb_immed_notify cin; struct ccb_notify_ack cna; + struct ccb_immediate_notify cin1; + struct ccb_notify_acknowledge cna2; struct ccb_eng_inq cei; struct ccb_eng_exec cee; struct ccb_rescan crcn; struct ccb_debug cdbg; struct ccb_ataio ataio; }; __BEGIN_DECLS static __inline void cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int8_t cdb_len, u_int32_t timeout); static __inline void cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int tag_id, u_int init_id, u_int scsi_status, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout); static __inline void cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout); static __inline void cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t *data_ptr, 
u_int32_t dxfer_len, u_int8_t sense_len, u_int8_t cdb_len, u_int32_t timeout) { csio->ccb_h.func_code = XPT_SCSI_IO; csio->ccb_h.flags = flags; csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->sense_len = sense_len; csio->cdb_len = cdb_len; csio->tag_action = tag_action; } static __inline void cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int tag_id, u_int init_id, u_int scsi_status, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { csio->ccb_h.func_code = XPT_CONT_TARGET_IO; csio->ccb_h.flags = flags; csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->scsi_status = scsi_status; csio->tag_action = tag_action; csio->tag_id = tag_id; csio->init_id = init_id; } static __inline void cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { ataio->ccb_h.func_code = XPT_ATA_IO; ataio->ccb_h.flags = flags; ataio->ccb_h.retry_count = retries; ataio->ccb_h.cbfcnp = cbfcnp; ataio->ccb_h.timeout = timeout; ataio->data_ptr = data_ptr; ataio->dxfer_len = dxfer_len; ataio->tag_action = tag_action; } void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended); __END_DECLS #endif /* _CAM_CAM_CCB_H */ Index: head/sys/cam/cam_xpt.c =================================================================== --- head/sys/cam/cam_xpt.c (revision 196007) +++ head/sys/cam/cam_xpt.c (revision 196008) @@ -1,4993 +1,4997 @@ /*- * Implementation of the Common Access Method Transport (XPT) layer. * * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef PC98 #include /* geometry translation */ #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for xpt_print below */ #include "opt_cam.h" /* * This is the maximum number of high powered commands (e.g. start unit) * that can be outstanding at a particular time. 
*/ #ifndef CAM_MAX_HIGHPOWER #define CAM_MAX_HIGHPOWER 4 #endif /* Datastructures internal to the xpt layer */ MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers"); /* Object for deferring XPT actions to a taskqueue */ struct xpt_task { struct task task; void *data1; uintptr_t data2; }; typedef enum { XPT_FLAG_OPEN = 0x01 } xpt_flags; struct xpt_softc { xpt_flags flags; u_int32_t xpt_generation; /* number of high powered commands that can go through right now */ STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq; int num_highpower; /* queue for handling async rescan requests. */ TAILQ_HEAD(, ccb_hdr) ccb_scanq; /* Registered busses */ TAILQ_HEAD(,cam_eb) xpt_busses; u_int bus_generation; struct intr_config_hook *xpt_config_hook; struct mtx xpt_topo_lock; struct mtx xpt_lock; }; typedef enum { DM_RET_COPY = 0x01, DM_RET_FLAG_MASK = 0x0f, DM_RET_NONE = 0x00, DM_RET_STOP = 0x10, DM_RET_DESCEND = 0x20, DM_RET_ERROR = 0x30, DM_RET_ACTION_MASK = 0xf0 } dev_match_ret; typedef enum { XPT_DEPTH_BUS, XPT_DEPTH_TARGET, XPT_DEPTH_DEVICE, XPT_DEPTH_PERIPH } xpt_traverse_depth; struct xpt_traverse_config { xpt_traverse_depth depth; void *tr_func; void *tr_arg; }; typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg); typedef int xpt_targetfunc_t (struct cam_et *target, void *arg); typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg); typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg); typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg); /* Transport layer configuration information */ static struct xpt_softc xsoftc; /* Queues for our software interrupt handler */ typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t; typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t; static cam_simq_t cam_simq; static struct mtx cam_simq_lock; /* Pointers to software interrupt handlers */ static void *cambio_ih; struct cam_periph *xpt_periph; static periph_init_t xpt_periph_init; static struct periph_driver xpt_driver = { xpt_periph_init, "xpt",
TAILQ_HEAD_INITIALIZER(xpt_driver.units) }; PERIPHDRIVER_DECLARE(xpt, xpt_driver); static d_open_t xptopen; static d_close_t xptclose; static d_ioctl_t xptioctl; static struct cdevsw xpt_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = xptopen, .d_close = xptclose, .d_ioctl = xptioctl, .d_name = "xpt", }; /* Storage for debugging datastructures */ #ifdef CAMDEBUG struct cam_path *cam_dpath; u_int32_t cam_dflags; u_int32_t cam_debug_delay; #endif /* Our boot-time initialization hook */ static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *); static moduledata_t cam_moduledata = { "cam", cam_module_event_handler, NULL }; static int xpt_init(void *); DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND); MODULE_VERSION(cam, 1); static void xpt_async_bcast(struct async_list *async_head, u_int32_t async_code, struct cam_path *path, void *async_arg); static path_id_t xptnextfreepathid(void); static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus); static union ccb *xpt_get_ccb(struct cam_ed *device); static void xpt_run_dev_allocq(struct cam_eb *bus); static timeout_t xpt_release_devq_timeout; static void xpt_release_simq_timeout(void *arg) __unused; static void xpt_release_bus(struct cam_eb *bus); static void xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue); static struct cam_et* xpt_alloc_target(struct cam_eb *bus, target_id_t target_id); static void xpt_release_target(struct cam_eb *bus, struct cam_et *target); static void xpt_release_device(struct cam_eb *bus, struct cam_et *target, struct cam_ed *device); static struct cam_eb* xpt_find_bus(path_id_t path_id); static struct cam_et* xpt_find_target(struct cam_eb *bus, target_id_t target_id); static struct cam_ed* xpt_find_device(struct cam_et *target, lun_id_t lun_id); static xpt_busfunc_t xptconfigbuscountfunc; static xpt_busfunc_t xptconfigfunc; static void xpt_config(void *arg); static xpt_devicefunc_t xptpassannouncefunc; 
static void xpt_finishconfig(struct cam_periph *periph, union ccb *ccb); static void xptaction(struct cam_sim *sim, union ccb *work_ccb); static void xptpoll(struct cam_sim *sim); static void camisr(void *); static void camisr_runqueue(void *); static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_eb *bus); static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_ed *device); static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_periph *periph); static xpt_busfunc_t xptedtbusfunc; static xpt_targetfunc_t xptedttargetfunc; static xpt_devicefunc_t xptedtdevicefunc; static xpt_periphfunc_t xptedtperiphfunc; static xpt_pdrvfunc_t xptplistpdrvfunc; static xpt_periphfunc_t xptplistperiphfunc; static int xptedtmatch(struct ccb_dev_match *cdm); static int xptperiphlistmatch(struct ccb_dev_match *cdm); static int xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg); static int xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, xpt_targetfunc_t *tr_func, void *arg); static int xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, xpt_devicefunc_t *tr_func, void *arg); static int xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg); static int xptpdrvtraverse(struct periph_driver **start_pdrv, xpt_pdrvfunc_t *tr_func, void *arg); static int xptpdperiphtraverse(struct periph_driver **pdrv, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg); static xpt_busfunc_t xptdefbusfunc; static xpt_targetfunc_t xptdeftargetfunc; static xpt_devicefunc_t xptdefdevicefunc; static xpt_periphfunc_t xptdefperiphfunc; static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg); static int xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg); static void xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, 
struct cam_et *target, struct cam_ed *device, void *async_arg); static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); static xpt_devicefunc_t xptsetasyncfunc; static xpt_busfunc_t xptsetasyncbusfunc; static cam_status xptregister(struct cam_periph *periph, void *arg); static void xpt_start_tags(struct cam_path *path); static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev); static __inline int periph_is_queued(struct cam_periph *periph); static __inline int device_is_alloc_queued(struct cam_ed *device); static __inline int device_is_send_queued(struct cam_ed *device); static __inline int dev_allocq_is_runnable(struct cam_devq *devq); static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev) { int retval; if (dev->ccbq.devq_openings > 0) { if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) { cam_ccbq_resize(&dev->ccbq, dev->ccbq.dev_openings + dev->ccbq.dev_active); dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED; } /* * The priority of a device waiting for CCB resources * is that of the highest priority peripheral driver * enqueued. */ retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue, &dev->alloc_ccb_entry.pinfo, CAMQ_GET_HEAD(&dev->drvq)->priority); } else { retval = 0; } return (retval); } static __inline int periph_is_queued(struct cam_periph *periph) { return (periph->pinfo.index != CAM_UNQUEUED_INDEX); } static __inline int device_is_alloc_queued(struct cam_ed *device) { return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX); } static __inline int device_is_send_queued(struct cam_ed *device) { return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX); } static __inline int dev_allocq_is_runnable(struct cam_devq *devq) { /* * Have work to do. * Have space to do more work. * Allowed to do work.
*/ return ((devq->alloc_queue.qfrozen_cnt == 0) && (devq->alloc_queue.entries > 0) && (devq->alloc_openings > 0)); } static void xpt_periph_init() { make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0"); } static void xptdone(struct cam_periph *periph, union ccb *done_ccb) { /* Caller will release the CCB */ wakeup(&done_ccb->ccb_h.cbfcnp); } static int xptopen(struct cdev *dev, int flags, int fmt, struct thread *td) { /* * Only allow read-write access. */ if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) return(EPERM); /* * We don't allow nonblocking access. */ if ((flags & O_NONBLOCK) != 0) { printf("%s: can't do nonblocking access\n", devtoname(dev)); return(ENODEV); } /* Mark ourselves open */ mtx_lock(&xsoftc.xpt_lock); xsoftc.flags |= XPT_FLAG_OPEN; mtx_unlock(&xsoftc.xpt_lock); return(0); } static int xptclose(struct cdev *dev, int flag, int fmt, struct thread *td) { /* Mark ourselves closed */ mtx_lock(&xsoftc.xpt_lock); xsoftc.flags &= ~XPT_FLAG_OPEN; mtx_unlock(&xsoftc.xpt_lock); return(0); } /* * Don't automatically grab the xpt softc lock here even though this is going * through the xpt device. The xpt device is really just a back door for * accessing other devices and SIMs, so the right thing to do is to grab * the appropriate SIM lock once the bus/SIM is located. */ static int xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { int error; error = 0; switch(cmd) { /* * For the transport layer CAMIOCOMMAND ioctl, we really only want * to accept CCB types that don't quite make sense to send through a * passthrough driver. XPT_PATH_INQ is an exception to this, as stated * in the CAM spec. 
*/ case CAMIOCOMMAND: { union ccb *ccb; union ccb *inccb; struct cam_eb *bus; inccb = (union ccb *)addr; bus = xpt_find_bus(inccb->ccb_h.path_id); if (bus == NULL) { error = EINVAL; break; } switch(inccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_RESET_BUS: if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD) || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) { error = EINVAL; break; } /* FALLTHROUGH */ case XPT_PATH_INQ: case XPT_ENG_INQ: case XPT_SCAN_LUN: ccb = xpt_alloc_ccb(); CAM_SIM_LOCK(bus->sim); /* * Create a path using the bus, target, and lun the * user passed in. */ if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, inccb->ccb_h.path_id, inccb->ccb_h.target_id, inccb->ccb_h.target_lun) != CAM_REQ_CMP){ error = EINVAL; CAM_SIM_UNLOCK(bus->sim); xpt_free_ccb(ccb); break; } /* Ensure all of our fields are correct */ xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, inccb->ccb_h.pinfo.priority); xpt_merge_ccb(ccb, inccb); ccb->ccb_h.cbfcnp = xptdone; cam_periph_runccb(ccb, NULL, 0, 0, NULL); bcopy(ccb, inccb, sizeof(union ccb)); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); CAM_SIM_UNLOCK(bus->sim); break; case XPT_DEBUG: { union ccb ccb; /* * This is an immediate CCB, so it's okay to * allocate it on the stack. */ CAM_SIM_LOCK(bus->sim); /* * Create a path using the bus, target, and lun the * user passed in. */ if (xpt_create_path(&ccb.ccb_h.path, xpt_periph, inccb->ccb_h.path_id, inccb->ccb_h.target_id, inccb->ccb_h.target_lun) != CAM_REQ_CMP){ error = EINVAL; CAM_SIM_UNLOCK(bus->sim); break; } /* Ensure all of our fields are correct */ xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, inccb->ccb_h.pinfo.priority); xpt_merge_ccb(&ccb, inccb); ccb.ccb_h.cbfcnp = xptdone; xpt_action(&ccb); CAM_SIM_UNLOCK(bus->sim); bcopy(&ccb, inccb, sizeof(union ccb)); xpt_free_path(ccb.ccb_h.path); break; } case XPT_DEV_MATCH: { struct cam_periph_map_info mapinfo; struct cam_path *old_path; /* * We can't deal with physical addresses for this * type of transaction. 
*/ if (inccb->ccb_h.flags & CAM_DATA_PHYS) { error = EINVAL; break; } /* * Save this in case the caller had it set to * something in particular. */ old_path = inccb->ccb_h.path; /* * We really don't need a path for the matching * code. The path is needed because of the * debugging statements in xpt_action(). They * assume that the CCB has a valid path. */ inccb->ccb_h.path = xpt_periph->path; bzero(&mapinfo, sizeof(mapinfo)); /* * Map the pattern and match buffers into kernel * virtual address space. */ error = cam_periph_mapmem(inccb, &mapinfo); if (error) { inccb->ccb_h.path = old_path; break; } /* * This is an immediate CCB, we can send it on directly. */ xpt_action(inccb); /* * Map the buffers back into user space. */ cam_periph_unmapmem(inccb, &mapinfo); inccb->ccb_h.path = old_path; error = 0; break; } default: error = ENOTSUP; break; } xpt_release_bus(bus); break; } /* * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input, * with the periphal driver name and unit name filled in. The other * fields don't really matter as input. The passthrough driver name * ("pass"), and unit number are passed back in the ccb. The current * device generation number, and the index into the device peripheral * driver list, and the status are also passed back. Note that * since we do everything in one pass, unlike the XPT_GDEVLIST ccb, * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is * (or rather should be) impossible for the device peripheral driver * list to change since we look at the whole thing in one pass, and * we do it with lock protection. * */ case CAMGETPASSTHRU: { union ccb *ccb; struct cam_periph *periph; struct periph_driver **p_drv; char *name; u_int unit; u_int cur_generation; int base_periph_found; int splbreaknum; ccb = (union ccb *)addr; unit = ccb->cgdl.unit_number; name = ccb->cgdl.periph_name; /* * Every 100 devices, we want to drop our lock protection to * give the software interrupt handler a chance to run. 
* Most systems won't run into this check, but this should * avoid starvation in the software interrupt handler in * large systems. */ splbreaknum = 100; ccb = (union ccb *)addr; base_periph_found = 0; /* * Sanity check -- make sure we don't get a null peripheral * driver name. */ if (*ccb->cgdl.periph_name == '\0') { error = EINVAL; break; } /* Keep the list from changing while we traverse it */ mtx_lock(&xsoftc.xpt_topo_lock); ptstartover: cur_generation = xsoftc.xpt_generation; /* first find our driver in the list of drivers */ for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) if (strcmp((*p_drv)->driver_name, name) == 0) break; if (*p_drv == NULL) { mtx_unlock(&xsoftc.xpt_topo_lock); ccb->ccb_h.status = CAM_REQ_CMP_ERR; ccb->cgdl.status = CAM_GDEVLIST_ERROR; *ccb->cgdl.periph_name = '\0'; ccb->cgdl.unit_number = 0; error = ENOENT; break; } /* * Run through every peripheral instance of this driver * and check to see whether it matches the unit passed * in by the user. If it does, get out of the loops and * find the passthrough driver associated with that * peripheral driver. */ for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL; periph = TAILQ_NEXT(periph, unit_links)) { if (periph->unit_number == unit) { break; } else if (--splbreaknum == 0) { mtx_unlock(&xsoftc.xpt_topo_lock); mtx_lock(&xsoftc.xpt_topo_lock); splbreaknum = 100; if (cur_generation != xsoftc.xpt_generation) goto ptstartover; } } /* * If we found the peripheral driver that the user passed * in, go through all of the peripheral drivers for that * particular device and look for a passthrough driver. */ if (periph != NULL) { struct cam_ed *device; int i; base_periph_found = 1; device = periph->path->device; for (i = 0, periph = SLIST_FIRST(&device->periphs); periph != NULL; periph = SLIST_NEXT(periph, periph_links), i++) { /* * Check to see whether we have a * passthrough device or not. */ if (strcmp(periph->periph_name, "pass") == 0) { /* * Fill in the getdevlist fields. 
*/ strcpy(ccb->cgdl.periph_name, periph->periph_name); ccb->cgdl.unit_number = periph->unit_number; if (SLIST_NEXT(periph, periph_links)) ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS; else ccb->cgdl.status = CAM_GDEVLIST_LAST_DEVICE; ccb->cgdl.generation = device->generation; ccb->cgdl.index = i; /* * Fill in some CCB header fields * that the user may want. */ ccb->ccb_h.path_id = periph->path->bus->path_id; ccb->ccb_h.target_id = periph->path->target->target_id; ccb->ccb_h.target_lun = periph->path->device->lun_id; ccb->ccb_h.status = CAM_REQ_CMP; break; } } } /* * If the periph is null here, one of two things has * happened. The first possibility is that we couldn't * find the unit number of the particular peripheral driver * that the user is asking about. e.g. the user asks for * the passthrough driver for "da11". We find the list of * "da" peripherals all right, but there is no unit 11. * The other possibility is that we went through the list * of peripheral drivers attached to the device structure, * but didn't find one with the name "pass". Either way, * we return ENOENT, since we couldn't find something. */ if (periph == NULL) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; ccb->cgdl.status = CAM_GDEVLIST_ERROR; *ccb->cgdl.periph_name = '\0'; ccb->cgdl.unit_number = 0; error = ENOENT; /* * It is unfortunate that this is even necessary, * but there are many, many clueless users out there. * If this is true, the user is looking for the * passthrough driver, but doesn't have one in his * kernel. 
*/ if (base_periph_found == 1) { printf("xptioctl: pass driver is not in the " "kernel\n"); printf("xptioctl: put \"device pass\" in " "your kernel config file\n"); } } mtx_unlock(&xsoftc.xpt_topo_lock); break; } default: error = ENOTTY; break; } return(error); } static int cam_module_event_handler(module_t mod, int what, void *arg) { int error; switch (what) { case MOD_LOAD: if ((error = xpt_init(NULL)) != 0) return (error); break; case MOD_UNLOAD: return EBUSY; default: return EOPNOTSUPP; } return 0; } /* thread to handle bus rescans */ static void xpt_scanner_thread(void *dummy) { cam_isrq_t queue; union ccb *ccb; struct cam_sim *sim; for (;;) { /* * Wait for a rescan request to come in. When it does, splice * it onto a queue from local storage so that the xpt lock * doesn't need to be held while the requests are being * processed. */ xpt_lock_buses(); msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO, "ccb_scanq", 0); TAILQ_INIT(&queue); TAILQ_CONCAT(&queue, &xsoftc.ccb_scanq, sim_links.tqe); xpt_unlock_buses(); while ((ccb = (union ccb *)TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, &ccb->ccb_h, sim_links.tqe); sim = ccb->ccb_h.path->bus->sim; CAM_SIM_LOCK(sim); ccb->ccb_h.func_code = XPT_SCAN_BUS; ccb->ccb_h.cbfcnp = xptdone; xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5); cam_periph_runccb(ccb, NULL, 0, 0, NULL); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); CAM_SIM_UNLOCK(sim); } } } void xpt_rescan(union ccb *ccb) { struct ccb_hdr *hdr; /* * Don't make duplicate entries for the same paths. 
xpt_lock_buses(); TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) { if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) { xpt_unlock_buses(); xpt_print(ccb->ccb_h.path, "rescan already queued\n"); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } } TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); wakeup(&xsoftc.ccb_scanq); xpt_unlock_buses(); } /* Functions accessed by the peripheral drivers */ static int xpt_init(void *dummy) { struct cam_sim *xpt_sim; struct cam_path *path; struct cam_devq *devq; cam_status status; TAILQ_INIT(&xsoftc.xpt_busses); TAILQ_INIT(&cam_simq); TAILQ_INIT(&xsoftc.ccb_scanq); STAILQ_INIT(&xsoftc.highpowerq); xsoftc.num_highpower = CAM_MAX_HIGHPOWER; mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF); mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF); mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF); /* * The xpt layer is, itself, the equivalent of a SIM. * Allow 16 ccbs in the ccb pool for it. This should * give decent parallelism when we probe busses and * perform other XPT functions. */ devq = cam_simq_alloc(16); xpt_sim = cam_sim_alloc(xptaction, xptpoll, "xpt", /*softc*/NULL, /*unit*/0, /*mtx*/&xsoftc.xpt_lock, /*max_dev_transactions*/0, /*max_tagged_dev_transactions*/0, devq); if (xpt_sim == NULL) return (ENOMEM); xpt_sim->max_ccbs = 16; mtx_lock(&xsoftc.xpt_lock); if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) { printf("xpt_init: xpt_bus_register failed with status %#x," " failing attach\n", status); return (EINVAL); } /* * Looking at the XPT from the SIM layer, the XPT is * the equivalent of a peripheral driver. Allocate * a peripheral driver entry for us.
*/ if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)) != CAM_REQ_CMP) { printf("xpt_init: xpt_create_path failed with status %#x," " failing attach\n", status); return (EINVAL); } cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO, path, NULL, 0, xpt_sim); xpt_free_path(path); mtx_unlock(&xsoftc.xpt_lock); /* * Register a callback for when interrupts are enabled. */ xsoftc.xpt_config_hook = (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook), M_CAMXPT, M_NOWAIT | M_ZERO); if (xsoftc.xpt_config_hook == NULL) { printf("xpt_init: Cannot malloc config hook " "- failing attach\n"); return (ENOMEM); } xsoftc.xpt_config_hook->ich_func = xpt_config; if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) { free (xsoftc.xpt_config_hook, M_CAMXPT); printf("xpt_init: config_intrhook_establish failed " "- failing attach\n"); } /* fire up rescan thread */ if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) { printf("xpt_init: failed to create rescan thread\n"); } /* Install our software interrupt handlers */ swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih); return (0); } static cam_status xptregister(struct cam_periph *periph, void *arg) { struct cam_sim *xpt_sim; if (periph == NULL) { printf("xptregister: periph was NULL!!\n"); return(CAM_REQ_CMP_ERR); } xpt_sim = (struct cam_sim *)arg; xpt_sim->softc = periph; xpt_periph = periph; periph->softc = NULL; return(CAM_REQ_CMP); } int32_t xpt_add_periph(struct cam_periph *periph) { struct cam_ed *device; int32_t status; struct periph_list *periph_head; mtx_assert(periph->sim->mtx, MA_OWNED); device = periph->path->device; periph_head = &device->periphs; status = CAM_REQ_CMP; if (device != NULL) { /* * Make room for this peripheral * so it will fit in the queue * when it's scheduled to run */ status = camq_resize(&device->drvq, device->drvq.array_size + 1); device->generation++; 
SLIST_INSERT_HEAD(periph_head, periph, periph_links); } mtx_lock(&xsoftc.xpt_topo_lock); xsoftc.xpt_generation++; mtx_unlock(&xsoftc.xpt_topo_lock); return (status); } void xpt_remove_periph(struct cam_periph *periph) { struct cam_ed *device; mtx_assert(periph->sim->mtx, MA_OWNED); device = periph->path->device; if (device != NULL) { struct periph_list *periph_head; periph_head = &device->periphs; /* Release the slot for this peripheral */ camq_resize(&device->drvq, device->drvq.array_size - 1); device->generation++; SLIST_REMOVE(periph_head, periph, cam_periph, periph_links); } mtx_lock(&xsoftc.xpt_topo_lock); xsoftc.xpt_generation++; mtx_unlock(&xsoftc.xpt_topo_lock); } void xpt_announce_periph(struct cam_periph *periph, char *announce_string) { struct ccb_pathinq cpi; struct ccb_trans_settings cts; struct cam_path *path; u_int speed; u_int freq; u_int mb; mtx_assert(periph->sim->mtx, MA_OWNED); path = periph->path; /* * To ensure that this is printed in one piece, * mask out CAM interrupts. 
*/ printf("%s%d at %s%d bus %d target %d lun %d\n", periph->periph_name, periph->unit_number, path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id, path->target->target_id, path->device->lun_id); printf("%s%d: ", periph->periph_name, periph->unit_number); if (path->device->protocol == PROTO_SCSI) scsi_print_inquiry(&path->device->inq_data); else if (path->device->protocol == PROTO_ATA || path->device->protocol == PROTO_SATAPM) ata_print_ident(&path->device->ident_data); else printf("Unknown protocol device\n"); if (bootverbose && path->device->serial_num_len > 0) { /* Don't wrap the screen - print only the first 60 chars */ printf("%s%d: Serial Number %.60s\n", periph->periph_name, periph->unit_number, path->device->serial_num); } xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb*)&cts); if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { return; } /* Ask the SIM for its base transfer speed */ xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); speed = cpi.base_transfer_speed; freq = 0; if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi; spi = &cts.xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0 && spi->sync_offset != 0) { freq = scsi_calc_syncsrate(spi->sync_period); speed = freq; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) speed *= (0x01 << spi->bus_width); } if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_SPEED) speed = fc->bitrate; } if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) { struct ccb_trans_settings_sas *sas = &cts.xport_specific.sas; if (sas->valid & CTS_SAS_VALID_SPEED) speed = sas->bitrate; } if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == 
XPORT_SATA) { struct ccb_trans_settings_sata *sata = &cts.xport_specific.sata; if (sata->valid & CTS_SATA_VALID_SPEED) speed = sata->bitrate; } mb = speed / 1000; if (mb > 0) printf("%s%d: %d.%03dMB/s transfers", periph->periph_name, periph->unit_number, mb, speed % 1000); else printf("%s%d: %dKB/s transfers", periph->periph_name, periph->unit_number, speed); /* Report additional information about SPI connections */ if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi; spi = &cts.xport_specific.spi; if (freq != 0) { printf(" (%d.%03dMHz%s, offset %d", freq / 1000, freq % 1000, (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0 ? " DT" : "", spi->sync_offset); } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0 && spi->bus_width > 0) { if (freq != 0) { printf(", "); } else { printf(" ("); } printf("%dbit)", 8 * (0x01 << spi->bus_width)); } else if (freq != 0) { printf(")"); } } if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc; fc = &cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWNN) printf(" WWNN 0x%llx", (long long) fc->wwnn); if (fc->valid & CTS_FC_VALID_WWPN) printf(" WWPN 0x%llx", (long long) fc->wwpn); if (fc->valid & CTS_FC_VALID_PORT) printf(" PortID 0x%x", fc->port); } if (path->device->inq_flags & SID_CmdQue || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { printf("\n%s%d: Command Queueing enabled", periph->periph_name, periph->unit_number); } printf("\n"); /* * We only want to print the caller's announce string if they've * passed one in.. */ if (announce_string != NULL) printf("%s%d: %s\n", periph->periph_name, periph->unit_number, announce_string); } static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_eb *bus) { dev_match_ret retval; int i; retval = DM_RET_NONE; /* * If we aren't given something to match against, that's an error. 
*/ if (bus == NULL) return(DM_RET_ERROR); /* * If there are no match entries, then this bus matches no * matter what. */ if ((patterns == NULL) || (num_patterns == 0)) return(DM_RET_DESCEND | DM_RET_COPY); for (i = 0; i < num_patterns; i++) { struct bus_match_pattern *cur_pattern; /* * If the pattern in question isn't for a bus node, we * aren't interested. However, we do indicate to the * calling routine that we should continue descending the * tree, since the user wants to match against lower-level * EDT elements. */ if (patterns[i].type != DEV_MATCH_BUS) { if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_DESCEND; continue; } cur_pattern = &patterns[i].pattern.bus_pattern; /* * If they want to match any bus node, we give them any * device node. */ if (cur_pattern->flags == BUS_MATCH_ANY) { /* set the copy flag */ retval |= DM_RET_COPY; /* * If we've already decided on an action, go ahead * and return. */ if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE) return(retval); } /* * Not sure why someone would do this... */ if (cur_pattern->flags == BUS_MATCH_NONE) continue; if (((cur_pattern->flags & BUS_MATCH_PATH) != 0) && (cur_pattern->path_id != bus->path_id)) continue; if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0) && (cur_pattern->bus_id != bus->sim->bus_id)) continue; if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0) && (cur_pattern->unit_number != bus->sim->unit_number)) continue; if (((cur_pattern->flags & BUS_MATCH_NAME) != 0) && (strncmp(cur_pattern->dev_name, bus->sim->sim_name, DEV_IDLEN) != 0)) continue; /* * If we get to this point, the user definitely wants * information on this bus. So tell the caller to copy the * data out. */ retval |= DM_RET_COPY; /* * If the return action has been set to descend, then we * know that we've already seen a non-bus matching * expression, therefore we need to further descend the tree. * This won't change by continuing around the loop, so we * go ahead and return. 
If we haven't seen a non-bus * matching expression, we keep going around the loop until * we exhaust the matching expressions. We'll set the stop * flag once we fall out of the loop. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) return(retval); } /* * If the return action hasn't been set to descend yet, that means * we haven't seen anything other than bus matching patterns. So * tell the caller to stop descending the tree -- the user doesn't * want to match against lower level tree elements. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_STOP; return(retval); } static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_ed *device) { dev_match_ret retval; int i; retval = DM_RET_NONE; /* * If we aren't given something to match against, that's an error. */ if (device == NULL) return(DM_RET_ERROR); /* * If there are no match entries, then this device matches no * matter what. */ if ((patterns == NULL) || (num_patterns == 0)) return(DM_RET_DESCEND | DM_RET_COPY); for (i = 0; i < num_patterns; i++) { struct device_match_pattern *cur_pattern; /* * If the pattern in question isn't for a device node, we * aren't interested. */ if (patterns[i].type != DEV_MATCH_DEVICE) { if ((patterns[i].type == DEV_MATCH_PERIPH) && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)) retval |= DM_RET_DESCEND; continue; } cur_pattern = &patterns[i].pattern.device_pattern; /* * If they want to match any device node, we give them any * device node. */ if (cur_pattern->flags == DEV_MATCH_ANY) { /* set the copy flag */ retval |= DM_RET_COPY; /* * If we've already decided on an action, go ahead * and return. */ if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE) return(retval); } /* * Not sure why someone would do this... 
*/ if (cur_pattern->flags == DEV_MATCH_NONE) continue; if (((cur_pattern->flags & DEV_MATCH_PATH) != 0) && (cur_pattern->path_id != device->target->bus->path_id)) continue; if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0) && (cur_pattern->target_id != device->target->target_id)) continue; if (((cur_pattern->flags & DEV_MATCH_LUN) != 0) && (cur_pattern->target_lun != device->lun_id)) continue; if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0) && (cam_quirkmatch((caddr_t)&device->inq_data, (caddr_t)&cur_pattern->inq_pat, 1, sizeof(cur_pattern->inq_pat), scsi_static_inquiry_match) == NULL)) continue; /* * If we get to this point, the user definitely wants * information on this device. So tell the caller to copy * the data out. */ retval |= DM_RET_COPY; /* * If the return action has been set to descend, then we * know that we've already seen a peripheral matching * expression, therefore we need to further descend the tree. * This won't change by continuing around the loop, so we * go ahead and return. If we haven't seen a peripheral * matching expression, we keep going around the loop until * we exhaust the matching expressions. We'll set the stop * flag once we fall out of the loop. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) return(retval); } /* * If the return action hasn't been set to descend yet, that means * we haven't seen any peripheral matching patterns. So tell the * caller to stop descending the tree -- the user doesn't want to * match against lower level tree elements. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_STOP; return(retval); } /* * Match a single peripheral against any number of match patterns. */ static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_periph *periph) { dev_match_ret retval; int i; /* * If we aren't given something to match against, that's an error. 
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 * (a PERIPH_MATCH_NONE pattern can never select anything,
		 * so it is simply skipped)
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}

/*
 * Per-bus callback for the EDT-ordered XPT_DEV_MATCH traversal: match
 * this bus against the user's patterns, copy out a result when requested,
 * and resume/descend into the bus's target list as needed.  Returns 0 to
 * stop the traversal (status already set in *cdm), non-zero to continue.
 */
static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		/*
		 * NOTE(review): spaceleft is a signed int later compared
		 * against an unsigned sizeof(); verify callers bound
		 * match_buf_len so this difference cannot go negative.
		 */
		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strncpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name, DEV_IDLEN);
	}

	/*
	 * If the user is only interested in busses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
	     bus->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	/*
	 * If a saved position points at a specific target on this bus,
	 * resume the traversal there; otherwise start from the first
	 * target on the bus.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		return(xpttargettraverse(bus,
					(struct cam_et *)cdm->pos.cookie.target,
					 xptedttargetfunc, arg));
	else
		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
}

/*
 * Per-target callback for the EDT-ordered XPT_DEV_MATCH traversal:
 * validates the saved device-list generation and continues the walk
 * into the target's device list.
 */
static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
	     target->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	/*
	 * If a saved position points at a specific device on this target,
	 * resume the traversal there; otherwise start from the first
	 * device on the target.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device != NULL))
		return(xptdevicetraverse(target,
					(struct cam_ed *)cdm->pos.cookie.device,
					 xptedtdevicefunc, arg));
	else
		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
}

/*
 * Per-device callback for the EDT-ordered XPT_DEV_MATCH traversal:
 * match this device against the user's patterns, copy out a result when
 * requested, and descend to the device's peripheral list as needed.
 * Returns 0 to stop the traversal (status already set), non-zero to
 * continue.
 */
static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
					device);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this device out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			/* Record the full EDT position down to this device. */
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

			cdm->pos.cookie.bus = device->target->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->pos.cookie.target = device->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				device->target->bus->generation;
			cdm->pos.cookie.device = device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				device->target->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_DEVICE;
		cdm->matches[j].result.device_result.path_id =
			device->target->bus->path_id;
		cdm->matches[j].result.device_result.target_id =
			device->target->target_id;
		cdm->matches[j].result.device_result.target_lun =
			device->lun_id;
		cdm->matches[j].result.device_result.protocol =
			device->protocol;
		bcopy(&device->inq_data,
		      &cdm->matches[j].result.device_result.inq_data,
		      sizeof(struct scsi_inquiry_data));
		bcopy(&device->ident_data,
		      &cdm->matches[j].result.device_result.ident_data,
		      sizeof(struct ata_params));

		/* Let the user know whether this device is unconfigured */
		if (device->flags & CAM_DEV_UNCONFIGURED)
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_UNCONFIGURED;
		else
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_NOFLAG;
	}

	/*
	 * If the user isn't interested in peripherals, don't descend
	 * the tree any further.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a peripheral list generation recorded, make sure
	 * it hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (device->target->bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (device->target == cdm->pos.cookie.target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (device == cdm->pos.cookie.device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
	     device->generation)){
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	/*
	 * If a saved position points at a specific peripheral on this
	 * device, resume the traversal there; otherwise start from the
	 * first peripheral.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == device->target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == device->target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		return(xptperiphtraverse(device,
				(struct cam_periph *)cdm->pos.cookie.periph,
				xptedtperiphfunc, arg));
	else
		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
}

/*
 * Per-peripheral callback for the EDT-ordered XPT_DEV_MATCH traversal:
 * match this peripheral against the user's patterns and copy out a
 * result when requested.  Returns 0 to stop (status already set),
 * non-zero to continue.
 */
static int
xptedtperiphfunc(struct cam_periph *periph, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this peripheral out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			/* Save the full EDT position down to the periph. */
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
				CAM_DEV_POS_PERIPH;

			cdm->pos.cookie.bus = periph->path->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->pos.cookie.target = periph->path->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				periph->path->bus->generation;
			cdm->pos.cookie.device = periph->path->device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				periph->path->target->generation;
			cdm->pos.cookie.periph = periph;
			cdm->pos.generations[CAM_PERIPH_GENERATION] =
				periph->path->device->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}

		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_PERIPH;
		cdm->matches[j].result.periph_result.path_id =
			periph->path->bus->path_id;
		cdm->matches[j].result.periph_result.target_id =
			periph->path->target->target_id;
		cdm->matches[j].result.periph_result.target_lun =
			periph->path->device->lun_id;
		cdm->matches[j].result.periph_result.unit_number =
			periph->unit_number;
		strncpy(cdm->matches[j].result.periph_result.periph_name,
			periph->periph_name, DEV_IDLEN);
	}

	return(1);
}

/*
 * Top-level driver for an EDT-ordered XPT_DEV_MATCH: validates the saved
 * bus-list generation, then walks the bus list (resuming from any saved
 * position).  On a complete walk (ret == 1), marks the match as LAST.
 */
static int
xptedtmatch(struct ccb_dev_match *cdm)
{
	int ret;

	cdm->num_matches = 0;

	/*
	 * Check the bus list generation.  If it has changed, the user
	 * needs to reset everything and start over.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_BUS_GENERATION] !=
	     xsoftc.bus_generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus != NULL))
		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
				     xptedtbusfunc, cdm);
	else
		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);

	/*
	 * If we get back 0, that means that we had to stop before fully
	 * traversing the EDT.  It also means that one of the subroutines
	 * has set the status field to the proper value.  If we get back 1,
	 * we've fully traversed the EDT and copied out any matching entries.
	 */
	if (ret == 1)
		cdm->status = CAM_DEV_MATCH_LAST;

	return(ret);
}

/*
 * Per-driver callback for the peripheral-driver-ordered XPT_DEV_MATCH
 * traversal: validates the saved peripheral generation for this driver
 * and walks the driver's unit list (resuming from any saved position).
 */
static int
xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv == pdrv)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
	     (*pdrv)->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv == pdrv)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		return(xptpdperiphtraverse(pdrv,
				(struct cam_periph *)cdm->pos.cookie.periph,
				xptplistperiphfunc, arg));
	else
		return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
}

/*
 * Per-peripheral callback for the peripheral-driver-ordered XPT_DEV_MATCH
 * traversal: match this peripheral against the user's patterns and copy
 * out a result when requested.  Returns 0 to stop (status already set),
 * non-zero to continue.
 */
static int
xptplistperiphfunc(struct cam_periph *periph, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this peripheral out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			struct periph_driver **pdrv;

			pdrv = NULL;
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
				CAM_DEV_POS_PERIPH;

			/*
			 * This may look a bit non-sensical, but it is
			 * actually quite logical.  There are very few
			 * peripheral drivers, and bloating every peripheral
			 * structure with a pointer back to its parent
			 * peripheral driver linker set entry would cost
			 * more in the long run than doing this quick lookup.
			 */
			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
				if (strcmp((*pdrv)->driver_name,
				    periph->periph_name) == 0)
					break;
			}

			if (*pdrv == NULL) {
				cdm->status = CAM_DEV_MATCH_ERROR;
				return(0);
			}

			cdm->pos.cookie.pdrv = pdrv;
			/*
			 * The periph generation slot does double duty, as
			 * does the periph pointer slot.  They are used for
			 * both edt and pdrv lookups and positioning.
			 */
			cdm->pos.cookie.periph = periph;
			cdm->pos.generations[CAM_PERIPH_GENERATION] =
				(*pdrv)->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}

		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_PERIPH;
		cdm->matches[j].result.periph_result.path_id =
			periph->path->bus->path_id;

		/*
		 * The transport layer peripheral doesn't have a target or
		 * lun.
		 */
		if (periph->path->target)
			cdm->matches[j].result.periph_result.target_id =
				periph->path->target->target_id;
		else
			cdm->matches[j].result.periph_result.target_id = -1;

		if (periph->path->device)
			cdm->matches[j].result.periph_result.target_lun =
				periph->path->device->lun_id;
		else
			cdm->matches[j].result.periph_result.target_lun = -1;

		cdm->matches[j].result.periph_result.unit_number =
			periph->unit_number;
		strncpy(cdm->matches[j].result.periph_result.periph_name,
			periph->periph_name, DEV_IDLEN);
	}

	return(1);
}

/*
 * Top-level driver for a peripheral-driver-ordered XPT_DEV_MATCH: walks
 * the linker-set list of peripheral drivers (resuming from any saved
 * position) and marks the match LAST on a complete walk.
 */
static int
xptperiphlistmatch(struct ccb_dev_match *cdm)
{
	int ret;

	cdm->num_matches = 0;

	/*
	 * At this point in the edt traversal function, we check the bus
	 * list generation to make sure that no busses have been added or
	 * removed since the user last sent a XPT_DEV_MATCH ccb through.
	 * For the peripheral driver list traversal function, however, we
	 * don't have to worry about new peripheral driver types coming or
	 * going; they're in a linker set, and therefore can't change
	 * without a recompile.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv != NULL))
		ret = xptpdrvtraverse(
				(struct periph_driver **)cdm->pos.cookie.pdrv,
				xptplistpdrvfunc, cdm);
	else
		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);

	/*
	 * If we get back 0, that means that we had to stop before fully
	 * traversing the peripheral driver tree.  It also means that one of
	 * the subroutines has set the status field to the proper value.  If
	 * we get back 1, we've fully traversed the EDT and copied out any
	 * matching entries.
	 */
	if (ret == 1)
		cdm->status = CAM_DEV_MATCH_LAST;

	return(ret);
}

/*
 * Invoke tr_func for every bus starting at start_bus (or the head of the
 * bus list when start_bus is NULL).  Stops early and returns 0 when
 * tr_func returns 0.  The topology lock is dropped around each tr_func
 * call, which is made under the bus's SIM lock instead.
 */
static int
xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
{
	struct cam_eb *bus, *next_bus;
	int retval;

	retval = 1;

	mtx_lock(&xsoftc.xpt_topo_lock);
	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
	     bus != NULL;
	     bus = next_bus) {
		/*
		 * Grab the next link before dropping the topology lock.
		 * NOTE(review): next_bus could still be freed while the
		 * lock is dropped for the tr_func call — confirm the
		 * callers' reference/locking model covers this.
		 */
		next_bus = TAILQ_NEXT(bus, links);

		mtx_unlock(&xsoftc.xpt_topo_lock);
		CAM_SIM_LOCK(bus->sim);
		retval = tr_func(bus, arg);
		CAM_SIM_UNLOCK(bus->sim);
		if (retval == 0)
			return(retval);
		mtx_lock(&xsoftc.xpt_topo_lock);
	}
	mtx_unlock(&xsoftc.xpt_topo_lock);

	return(retval);
}

/*
 * Return 1 if any peripheral attached through the given SIM currently
 * holds a reference (i.e. the SIM has open instances), 0 otherwise.
 * The SIM mutex must be held by the caller.
 */
int
xpt_sim_opened(struct cam_sim *sim)
{
	struct cam_eb *bus;
	struct cam_et *target;
	struct cam_ed *device;
	struct cam_periph *periph;

	KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
	mtx_assert(sim->mtx, MA_OWNED);

	mtx_lock(&xsoftc.xpt_topo_lock);
	TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
		if (bus->sim != sim)
			continue;

		TAILQ_FOREACH(target, &bus->et_entries, links) {
			TAILQ_FOREACH(device, &target->ed_entries, links) {
				SLIST_FOREACH(periph, &device->periphs,
				    periph_links) {
					if (periph->refcount > 0) {
						mtx_unlock(&xsoftc.xpt_topo_lock);
						return (1);
					}
				}
			}
		}
	}

	mtx_unlock(&xsoftc.xpt_topo_lock);

	return (0);
}

/*
 * Invoke tr_func for every target on the bus, starting at start_target
 * (or the list head when NULL).  Stops early when tr_func returns 0.
 */
static int
xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
		  xpt_targetfunc_t *tr_func, void *arg)
{
	struct cam_et *target, *next_target;
	int retval;

	retval = 1;
	for (target = (start_target ? start_target :
		       TAILQ_FIRST(&bus->et_entries));
	     target != NULL; target = next_target) {
		next_target = TAILQ_NEXT(target, links);
		retval = tr_func(target, arg);
		if (retval == 0)
			return(retval);
	}
	return(retval);
}

/*
 * Invoke tr_func for every device on the target, starting at
 * start_device (or the list head when NULL).  Stops early when tr_func
 * returns 0.
 */
static int
xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
		  xpt_devicefunc_t *tr_func, void *arg)
{
	struct cam_ed *device, *next_device;
	int retval;

	retval = 1;
	for (device = (start_device ? start_device :
		       TAILQ_FIRST(&target->ed_entries));
	     device != NULL;
	     device = next_device) {
		next_device = TAILQ_NEXT(device, links);
		retval = tr_func(device, arg);
		if (retval == 0)
			return(retval);
	}
	return(retval);
}

/*
 * Invoke tr_func for every peripheral on the device, starting at
 * start_periph (or the list head when NULL).  Stops early when tr_func
 * returns 0.
 */
static int
xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
		  xpt_periphfunc_t *tr_func, void *arg)
{
	struct cam_periph *periph, *next_periph;
	int retval;

	retval = 1;
	for (periph = (start_periph ? start_periph :
		       SLIST_FIRST(&device->periphs));
	     periph != NULL;
	     periph = next_periph) {
		next_periph = SLIST_NEXT(periph, periph_links);
		retval = tr_func(periph, arg);
		if (retval == 0)
			return(retval);
	}
	return(retval);
}

/*
 * Invoke tr_func for every registered peripheral driver, starting at
 * start_pdrv (or the head of the linker set when NULL).  Stops early
 * when tr_func returns 0.
 */
static int
xptpdrvtraverse(struct periph_driver **start_pdrv,
		xpt_pdrvfunc_t *tr_func, void *arg)
{
	struct periph_driver **pdrv;
	int retval;

	retval = 1;

	/*
	 * We don't traverse the peripheral driver list like we do the
	 * other lists, because it is a linker set, and therefore cannot be
	 * changed during runtime.  If the peripheral driver list is ever
	 * re-done to be something other than a linker set (i.e. it can
	 * change while the system is running), the list traversal should
	 * be modified to work like the other traversal functions.
	 */
	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
	     *pdrv != NULL; pdrv++) {
		retval = tr_func(pdrv, arg);
		if (retval == 0)
			return(retval);
	}
	return(retval);
}

/*
 * Invoke tr_func for every unit of the given peripheral driver, starting
 * at start_periph (or the unit list head when NULL).  Stops early when
 * tr_func returns 0.
 */
static int
xptpdperiphtraverse(struct periph_driver **pdrv,
		    struct cam_periph *start_periph,
		    xpt_periphfunc_t *tr_func, void *arg)
{
	struct cam_periph *periph, *next_periph;
	int retval;

	retval = 1;
	for (periph = (start_periph ? start_periph :
	     TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
	     periph = next_periph) {
		next_periph = TAILQ_NEXT(periph, unit_links);
		retval = tr_func(periph, arg);
		if (retval == 0)
			return(retval);
	}
	return(retval);
}

/*
 * Default bus-level dispatcher: run the user's function here when the
 * requested depth is the bus level, otherwise descend to targets.
 */
static int
xptdefbusfunc(struct cam_eb *bus, void *arg)
{
	struct xpt_traverse_config *tr_config;

	tr_config = (struct xpt_traverse_config *)arg;

	if (tr_config->depth == XPT_DEPTH_BUS) {
		xpt_busfunc_t *tr_func;

		tr_func = (xpt_busfunc_t *)tr_config->tr_func;

		return(tr_func(bus, tr_config->tr_arg));
	} else
		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
}

/*
 * Default target-level dispatcher: run the user's function here when the
 * requested depth is the target level, otherwise descend to devices.
 */
static int
xptdeftargetfunc(struct cam_et *target, void *arg)
{
	struct xpt_traverse_config *tr_config;

	tr_config = (struct xpt_traverse_config *)arg;

	if (tr_config->depth == XPT_DEPTH_TARGET) {
		xpt_targetfunc_t *tr_func;

		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;

		return(tr_func(target, tr_config->tr_arg));
	} else
		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
}

/*
 * Default device-level dispatcher: run the user's function here when the
 * requested depth is the device level, otherwise descend to peripherals.
 */
static int
xptdefdevicefunc(struct cam_ed *device, void *arg)
{
	struct xpt_traverse_config *tr_config;

	tr_config = (struct xpt_traverse_config *)arg;

	if (tr_config->depth == XPT_DEPTH_DEVICE) {
		xpt_devicefunc_t *tr_func;

		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;

		return(tr_func(device, tr_config->tr_arg));
	} else
		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
}

static int
xptdefperiphfunc(struct cam_periph *periph, void *arg)
{
	struct xpt_traverse_config *tr_config;
	xpt_periphfunc_t *tr_func;

	tr_config = (struct xpt_traverse_config *)arg;

	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;

	/*
	 * Unlike the other default functions, we
	 * don't check for depth here.  The peripheral driver level is the
	 * last level in the EDT, so if we're here, we should execute the
	 * function in question.
	 */
	return(tr_func(periph, tr_config->tr_arg));
}

/*
 * Execute the given function for every bus in the EDT.
 */
static int
xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
{
	struct xpt_traverse_config tr_config;

	tr_config.depth = XPT_DEPTH_BUS;

	tr_config.tr_func = tr_func;

	tr_config.tr_arg = arg;

	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}

/*
 * Execute the given function for every device in the EDT.
 */
static int
xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
{
	struct xpt_traverse_config tr_config;

	tr_config.depth = XPT_DEPTH_DEVICE;

	tr_config.tr_func = tr_func;

	tr_config.tr_arg = arg;

	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}

/*
 * Per-device callback used when a new async handler registers for
 * AC_FOUND_DEVICE: synthesize an XPT_GDEV_TYPE inquiry for the device
 * and replay an AC_FOUND_DEVICE notification to the new handler.
 * Always returns 1 so the traversal continues.
 */
static int
xptsetasyncfunc(struct cam_ed *device, void *arg)
{
	struct cam_path path;
	struct ccb_getdev cgd;
	struct async_node *cur_entry;

	cur_entry = (struct async_node *)arg;

	/*
	 * Don't report unconfigured devices (Wildcard devs,
	 * devices only for target mode, device instances
	 * that have been invalidated but are waiting for
	 * their last reference count to be released).
	 */
	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
		return (1);

	xpt_compile_path(&path,
			 NULL,
			 device->target->bus->path_id,
			 device->target->target_id,
			 device->lun_id);
	xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
	xpt_action((union ccb *)&cgd);
	cur_entry->callback(cur_entry->callback_arg,
			    AC_FOUND_DEVICE,
			    &path, &cgd);
	xpt_release_path(&path);

	return(1);
}

/*
 * Per-bus callback used when a new async handler registers for
 * AC_PATH_REGISTERED: issue an XPT_PATH_INQ on the bus's wildcard path
 * and replay an AC_PATH_REGISTERED notification to the new handler.
 * Always returns 1 so the traversal continues.
 */
static int
xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
{
	struct cam_path path;
	struct ccb_pathinq cpi;
	struct async_node *cur_entry;

	cur_entry = (struct async_node *)arg;

	xpt_compile_path(&path, /*periph*/NULL,
			 bus->sim->path_id,
			 CAM_TARGET_WILDCARD,
			 CAM_LUN_WILDCARD);
	xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	cur_entry->callback(cur_entry->callback_arg,
			    AC_PATH_REGISTERED,
			    &path, &cpi);
	xpt_release_path(&path);

	return(1);
}

/*
 * Deferred-task body for XPT_SASYNC_CB registration: replays existing
 * device and/or bus discovery events to a freshly registered async
 * handler, then frees the task.  task->data1 is the async_node,
 * task->data2 the set of newly enabled event bits.
 */
static void
xpt_action_sasync_cb(void *context, int pending)
{
	struct async_node *cur_entry;
	struct xpt_task *task;
	uint32_t added;

	task = (struct xpt_task *)context;
	cur_entry = (struct async_node *)task->data1;
	added = task->data2;
	if ((added & AC_FOUND_DEVICE) != 0) {
		/*
		 * Get this peripheral up to date with all
		 * the currently existing devices.
		 */
		xpt_for_all_devices(xptsetasyncfunc, cur_entry);
	}
	if ((added & AC_PATH_REGISTERED) != 0) {
		/*
		 * Get this peripheral up to date with all
		 * the currently existing busses.
*/ xpt_for_all_busses(xptsetasyncbusfunc, cur_entry); } free(task, M_CAMXPT); } void xpt_action(union ccb *start_ccb) { CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n")); start_ccb->ccb_h.status = CAM_REQ_INPROG; (*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb); } void xpt_action_default(union ccb *start_ccb) { CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action_default\n")); switch (start_ccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct cam_ed *device; #ifdef CAMDEBUG char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; struct cam_path *path; path = start_ccb->ccb_h.path; #endif /* * For the sake of compatibility with SCSI-1 * devices that may not understand the identify * message, we include lun information in the * second byte of all commands. SCSI-1 specifies * that luns are a 3 bit value and reserves only 3 * bits for lun information in the CDB. Later * revisions of the SCSI spec allow for more than 8 * luns, but have deprecated lun information in the * CDB. So, if the lun won't fit, we must omit. * * Also be aware that during initial probing for devices, * the inquiry information is unknown but initialized to 0. * This means that this code will be exercised while probing * devices with an ANSI revision greater than 2. */ device = start_ccb->ccb_h.path->device; if (device->protocol_version <= SCSI_REV_2 && start_ccb->ccb_h.target_lun < 8 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { start_ccb->csio.cdb_io.cdb_bytes[1] |= start_ccb->ccb_h.target_lun << 5; } start_ccb->csio.scsi_status = SCSI_STATUS_OK; CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. 
CDB: %s\n", scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0], &path->device->inq_data), scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes, cdb_str, sizeof(cdb_str)))); } /* FALLTHROUGH */ case XPT_TARGET_IO: case XPT_CONT_TARGET_IO: start_ccb->csio.sense_resid = 0; start_ccb->csio.resid = 0; /* FALLTHROUGH */ case XPT_ATA_IO: if (start_ccb->ccb_h.func_code == XPT_ATA_IO) { start_ccb->ataio.resid = 0; } case XPT_RESET_DEV: case XPT_ENG_EXEC: { struct cam_path *path; int runq; path = start_ccb->ccb_h.path; cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); if (path->device->qfrozen_cnt == 0) runq = xpt_schedule_dev_sendq(path->bus, path->device); else runq = 0; if (runq != 0) xpt_run_dev_sendq(path->bus); break; } case XPT_CALC_GEOMETRY: { struct cam_sim *sim; /* Filter out garbage */ if (start_ccb->ccg.block_size == 0 || start_ccb->ccg.volume_size == 0) { start_ccb->ccg.cylinders = 0; start_ccb->ccg.heads = 0; start_ccb->ccg.secs_per_track = 0; start_ccb->ccb_h.status = CAM_REQ_CMP; break; } #ifdef PC98 /* * In a PC-98 system, geometry translation depens on * the "real" device geometry obtained from mode page 4. * SCSI geometry translation is performed in the * initialization routine of the SCSI BIOS and the result * stored in host memory. If the translation is available * in host memory, use it. If not, rely on the default * translation the device driver performs. 
*/ if (scsi_da_bios_params(&start_ccb->ccg) != 0) { start_ccb->ccb_h.status = CAM_REQ_CMP; break; } #endif sim = start_ccb->ccb_h.path->bus->sim; (*(sim->sim_action))(sim, start_ccb); break; } case XPT_ABORT: { union ccb* abort_ccb; abort_ccb = start_ccb->cab.abort_ccb; if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) { if (abort_ccb->ccb_h.pinfo.index >= 0) { struct cam_ccbq *ccbq; ccbq = &abort_ccb->ccb_h.path->device->ccbq; cam_ccbq_remove_ccb(ccbq, abort_ccb); abort_ccb->ccb_h.status = CAM_REQ_ABORTED|CAM_DEV_QFRZN; xpt_freeze_devq(abort_ccb->ccb_h.path, 1); xpt_done(abort_ccb); start_ccb->ccb_h.status = CAM_REQ_CMP; break; } if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { /* * We've caught this ccb en route to * the SIM. Flag it for abort and the * SIM will do so just before starting * real work on the CCB. */ abort_ccb->ccb_h.status = CAM_REQ_ABORTED|CAM_DEV_QFRZN; xpt_freeze_devq(abort_ccb->ccb_h.path, 1); start_ccb->ccb_h.status = CAM_REQ_CMP; break; } } if (XPT_FC_IS_QUEUED(abort_ccb) && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { /* * It's already completed but waiting * for our SWI to get to it. */ start_ccb->ccb_h.status = CAM_UA_ABORT; break; } /* * If we weren't able to take care of the abort request * in the XPT, pass the request down to the SIM for processing. 
*/ } /* FALLTHROUGH */ case XPT_ACCEPT_TARGET_IO: case XPT_EN_LUN: case XPT_IMMED_NOTIFY: case XPT_NOTIFY_ACK: case XPT_RESET_BUS: + case XPT_IMMEDIATE_NOTIFY: + case XPT_NOTIFY_ACKNOWLEDGE: + case XPT_GET_SIM_KNOB: + case XPT_SET_SIM_KNOB: { struct cam_sim *sim; sim = start_ccb->ccb_h.path->bus->sim; (*(sim->sim_action))(sim, start_ccb); break; } case XPT_PATH_INQ: { struct cam_sim *sim; sim = start_ccb->ccb_h.path->bus->sim; (*(sim->sim_action))(sim, start_ccb); break; } case XPT_PATH_STATS: start_ccb->cpis.last_reset = start_ccb->ccb_h.path->bus->last_reset; start_ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_GDEV_TYPE: { struct cam_ed *dev; dev = start_ccb->ccb_h.path->device; if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; } else { struct ccb_getdev *cgd; struct cam_eb *bus; struct cam_et *tar; cgd = &start_ccb->cgd; bus = cgd->ccb_h.path->bus; tar = cgd->ccb_h.path->target; cgd->protocol = dev->protocol; cgd->inq_data = dev->inq_data; cgd->ident_data = dev->ident_data; cgd->ccb_h.status = CAM_REQ_CMP; cgd->serial_num_len = dev->serial_num_len; if ((dev->serial_num_len > 0) && (dev->serial_num != NULL)) bcopy(dev->serial_num, cgd->serial_num, dev->serial_num_len); } break; } case XPT_GDEV_STATS: { struct cam_ed *dev; dev = start_ccb->ccb_h.path->device; if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; } else { struct ccb_getdevstats *cgds; struct cam_eb *bus; struct cam_et *tar; cgds = &start_ccb->cgds; bus = cgds->ccb_h.path->bus; tar = cgds->ccb_h.path->target; cgds->dev_openings = dev->ccbq.dev_openings; cgds->dev_active = dev->ccbq.dev_active; cgds->devq_openings = dev->ccbq.devq_openings; cgds->devq_queued = dev->ccbq.queue.entries; cgds->held = dev->ccbq.held; cgds->last_reset = tar->last_reset; cgds->maxtags = dev->maxtags; cgds->mintags = dev->mintags; if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) cgds->last_reset = bus->last_reset; cgds->ccb_h.status = 
CAM_REQ_CMP; } break; } case XPT_GDEVLIST: { struct cam_periph *nperiph; struct periph_list *periph_head; struct ccb_getdevlist *cgdl; u_int i; struct cam_ed *device; int found; found = 0; /* * Don't want anyone mucking with our data. */ device = start_ccb->ccb_h.path->device; periph_head = &device->periphs; cgdl = &start_ccb->cgdl; /* * Check and see if the list has changed since the user * last requested a list member. If so, tell them that the * list has changed, and therefore they need to start over * from the beginning. */ if ((cgdl->index != 0) && (cgdl->generation != device->generation)) { cgdl->status = CAM_GDEVLIST_LIST_CHANGED; break; } /* * Traverse the list of peripherals and attempt to find * the requested peripheral. */ for (nperiph = SLIST_FIRST(periph_head), i = 0; (nperiph != NULL) && (i <= cgdl->index); nperiph = SLIST_NEXT(nperiph, periph_links), i++) { if (i == cgdl->index) { strncpy(cgdl->periph_name, nperiph->periph_name, DEV_IDLEN); cgdl->unit_number = nperiph->unit_number; found = 1; } } if (found == 0) { cgdl->status = CAM_GDEVLIST_ERROR; break; } if (nperiph == NULL) cgdl->status = CAM_GDEVLIST_LAST_DEVICE; else cgdl->status = CAM_GDEVLIST_MORE_DEVS; cgdl->index++; cgdl->generation = device->generation; cgdl->ccb_h.status = CAM_REQ_CMP; break; } case XPT_DEV_MATCH: { dev_pos_type position_type; struct ccb_dev_match *cdm; cdm = &start_ccb->cdm; /* * There are two ways of getting at information in the EDT. * The first way is via the primary EDT tree. It starts * with a list of busses, then a list of targets on a bus, * then devices/luns on a target, and then peripherals on a * device/lun. The "other" way is by the peripheral driver * lists. The peripheral driver lists are organized by * peripheral driver. (obviously) So it makes sense to * use the peripheral driver list if the user is looking * for something like "da1", or all "da" devices. 
If the * user is looking for something on a particular bus/target * or lun, it's generally better to go through the EDT tree. */ if (cdm->pos.position_type != CAM_DEV_POS_NONE) position_type = cdm->pos.position_type; else { u_int i; position_type = CAM_DEV_POS_NONE; for (i = 0; i < cdm->num_patterns; i++) { if ((cdm->patterns[i].type == DEV_MATCH_BUS) ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ position_type = CAM_DEV_POS_EDT; break; } } if (cdm->num_patterns == 0) position_type = CAM_DEV_POS_EDT; else if (position_type == CAM_DEV_POS_NONE) position_type = CAM_DEV_POS_PDRV; } switch(position_type & CAM_DEV_POS_TYPEMASK) { case CAM_DEV_POS_EDT: xptedtmatch(cdm); break; case CAM_DEV_POS_PDRV: xptperiphlistmatch(cdm); break; default: cdm->status = CAM_DEV_MATCH_ERROR; break; } if (cdm->status == CAM_DEV_MATCH_ERROR) start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; else start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_SASYNC_CB: { struct ccb_setasync *csa; struct async_node *cur_entry; struct async_list *async_head; u_int32_t added; csa = &start_ccb->csa; added = csa->event_enable; async_head = &csa->ccb_h.path->device->asyncs; /* * If there is already an entry for us, simply * update it. */ cur_entry = SLIST_FIRST(async_head); while (cur_entry != NULL) { if ((cur_entry->callback_arg == csa->callback_arg) && (cur_entry->callback == csa->callback)) break; cur_entry = SLIST_NEXT(cur_entry, links); } if (cur_entry != NULL) { /* * If the request has no flags set, * remove the entry. 
*/ added &= ~cur_entry->event_enable; if (csa->event_enable == 0) { SLIST_REMOVE(async_head, cur_entry, async_node, links); csa->ccb_h.path->device->refcount--; free(cur_entry, M_CAMXPT); } else { cur_entry->event_enable = csa->event_enable; } } else { cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, M_NOWAIT); if (cur_entry == NULL) { csa->ccb_h.status = CAM_RESRC_UNAVAIL; break; } cur_entry->event_enable = csa->event_enable; cur_entry->callback_arg = csa->callback_arg; cur_entry->callback = csa->callback; SLIST_INSERT_HEAD(async_head, cur_entry, links); csa->ccb_h.path->device->refcount++; } /* * Need to decouple this operation via a taqskqueue so that * the locking doesn't become a mess. */ if ((added & (AC_FOUND_DEVICE | AC_PATH_REGISTERED)) != 0) { struct xpt_task *task; task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT); if (task == NULL) { csa->ccb_h.status = CAM_RESRC_UNAVAIL; break; } TASK_INIT(&task->task, 0, xpt_action_sasync_cb, task); task->data1 = cur_entry; task->data2 = added; taskqueue_enqueue(taskqueue_thread, &task->task); } start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_REL_SIMQ: { struct ccb_relsim *crs; struct cam_ed *dev; crs = &start_ccb->crs; dev = crs->ccb_h.path->device; if (dev == NULL) { crs->ccb_h.status = CAM_DEV_NOT_THERE; break; } if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) { /* Don't ever go below one opening */ if (crs->openings > 0) { xpt_dev_ccbq_resize(crs->ccb_h.path, crs->openings); if (bootverbose) { xpt_print(crs->ccb_h.path, "tagged openings now %d\n", crs->openings); } } } } if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { /* * Just extend the old timeout and decrement * the freeze count so that a single timeout * is sufficient for releasing the queue. 
*/ start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; callout_stop(&dev->callout); } else { start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } callout_reset(&dev->callout, (crs->release_timeout * hz) / 1000, xpt_release_devq_timeout, dev); dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; } if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { /* * Decrement the freeze count so that a single * completion is still sufficient to unfreeze * the queue. */ start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; } else { dev->flags |= CAM_DEV_REL_ON_COMPLETE; start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } } if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 || (dev->ccbq.dev_active == 0)) { start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; } else { dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } } if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) { xpt_release_devq(crs->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt; start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_DEBUG: { #ifdef CAMDEBUG #ifdef CAM_DEBUG_DELAY cam_debug_delay = CAM_DEBUG_DELAY; #endif cam_dflags = start_ccb->cdbg.flags; if (cam_dpath != NULL) { xpt_free_path(cam_dpath); cam_dpath = NULL; } if (cam_dflags != CAM_DEBUG_NONE) { if (xpt_create_path(&cam_dpath, xpt_periph, start_ccb->ccb_h.path_id, start_ccb->ccb_h.target_id, start_ccb->ccb_h.target_lun) != CAM_REQ_CMP) { start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; cam_dflags = CAM_DEBUG_NONE; } else { start_ccb->ccb_h.status = CAM_REQ_CMP; xpt_print(cam_dpath, "debugging flags now %x\n", cam_dflags); } } else { cam_dpath = NULL; start_ccb->ccb_h.status = CAM_REQ_CMP; } #else /* !CAMDEBUG */ start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; #endif /* CAMDEBUG */ break; } case XPT_NOOP: if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) xpt_freeze_devq(start_ccb->ccb_h.path, 1); 
start_ccb->ccb_h.status = CAM_REQ_CMP; break; default: case XPT_SDEV_TYPE: case XPT_TERM_IO: case XPT_ENG_INQ: /* XXX Implement */ start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; break; } } void xpt_polled_action(union ccb *start_ccb) { u_int32_t timeout; struct cam_sim *sim; struct cam_devq *devq; struct cam_ed *dev; timeout = start_ccb->ccb_h.timeout; sim = start_ccb->ccb_h.path->bus->sim; devq = sim->devq; dev = start_ccb->ccb_h.path->device; mtx_assert(sim->mtx, MA_OWNED); /* * Steal an opening so that no other queued requests * can get it before us while we simulate interrupts. */ dev->ccbq.devq_openings--; dev->ccbq.dev_openings--; while(((devq != NULL && devq->send_openings <= 0) || dev->ccbq.dev_openings < 0) && (--timeout > 0)) { DELAY(1000); (*(sim->sim_poll))(sim); camisr_runqueue(&sim->sim_doneq); } dev->ccbq.devq_openings++; dev->ccbq.dev_openings++; if (timeout != 0) { xpt_action(start_ccb); while(--timeout > 0) { (*(sim->sim_poll))(sim); camisr_runqueue(&sim->sim_doneq); if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) break; DELAY(1000); } if (timeout == 0) { /* * XXX Is it worth adding a sim_timeout entry * point so we can attempt recovery? If * this is only used for dumps, I don't think * it is. */ start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; } } else { start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; } } /* * Schedule a peripheral driver to receive a ccb when it's * target device has space for more transactions. 
*/ void xpt_schedule(struct cam_periph *perph, u_int32_t new_priority) { struct cam_ed *device; int runq; mtx_assert(perph->sim->mtx, MA_OWNED); CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); device = perph->path->device; if (periph_is_queued(perph)) { /* Simply reorder based on new priority */ CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, (" change priority to %d\n", new_priority)); if (new_priority < perph->pinfo.priority) { camq_change_priority(&device->drvq, perph->pinfo.index, new_priority); } runq = 0; } else { /* New entry on the queue */ CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, (" added periph to queue\n")); perph->pinfo.priority = new_priority; perph->pinfo.generation = ++device->drvq.generation; camq_insert(&device->drvq, &perph->pinfo); runq = xpt_schedule_dev_allocq(perph->path->bus, device); } if (runq != 0) { CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, (" calling xpt_run_devq\n")); xpt_run_dev_allocq(perph->path->bus); } } /* * Schedule a device to run on a given queue. * If the device was inserted as a new entry on the queue, * return 1 meaning the device queue should be run. If we * were already queued, implying someone else has already * started the queue, return 0 so the caller doesn't attempt * to run the queue. */ int xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, u_int32_t new_priority) { int retval; u_int32_t old_priority; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); old_priority = pinfo->priority; /* * Are we already queued? 
*/ if (pinfo->index != CAM_UNQUEUED_INDEX) { /* Simply reorder based on new priority */ if (new_priority < old_priority) { camq_change_priority(queue, pinfo->index, new_priority); CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("changed priority to %d\n", new_priority)); } retval = 0; } else { /* New entry on the queue */ if (new_priority < old_priority) pinfo->priority = new_priority; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("Inserting onto queue\n")); pinfo->generation = ++queue->generation; camq_insert(queue, pinfo); retval = 1; } return (retval); } static void xpt_run_dev_allocq(struct cam_eb *bus) { struct cam_devq *devq; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n")); devq = bus->sim->devq; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, (" qfrozen_cnt == 0x%x, entries == %d, " "openings == %d, active == %d\n", devq->alloc_queue.qfrozen_cnt, devq->alloc_queue.entries, devq->alloc_openings, devq->alloc_active)); devq->alloc_queue.qfrozen_cnt++; while ((devq->alloc_queue.entries > 0) && (devq->alloc_openings > 0) && (devq->alloc_queue.qfrozen_cnt <= 1)) { struct cam_ed_qinfo *qinfo; struct cam_ed *device; union ccb *work_ccb; struct cam_periph *drv; struct camq *drvq; qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue, CAMQ_HEAD); device = qinfo->device; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("running device %p\n", device)); drvq = &device->drvq; #ifdef CAMDEBUG if (drvq->entries <= 0) { panic("xpt_run_dev_allocq: " "Device on queue without any work to do"); } #endif if ((work_ccb = xpt_get_ccb(device)) != NULL) { devq->alloc_openings--; devq->alloc_active++; drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD); xpt_setup_ccb(&work_ccb->ccb_h, drv->path, drv->pinfo.priority); CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("calling periph start\n")); drv->periph_start(drv, work_ccb); } else { /* * Malloc failure in alloc_ccb */ /* * XXX add us to a list to be run from free_ccb * if we don't have any ccbs active on this * device queue otherwise we may never get run * again. 
*/ break; } if (drvq->entries > 0) { /* We have more work. Attempt to reschedule */ xpt_schedule_dev_allocq(bus, device); } } devq->alloc_queue.qfrozen_cnt--; } void xpt_run_dev_sendq(struct cam_eb *bus) { struct cam_devq *devq; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n")); devq = bus->sim->devq; devq->send_queue.qfrozen_cnt++; while ((devq->send_queue.entries > 0) && (devq->send_openings > 0)) { struct cam_ed_qinfo *qinfo; struct cam_ed *device; union ccb *work_ccb; struct cam_sim *sim; if (devq->send_queue.qfrozen_cnt > 1) { break; } qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue, CAMQ_HEAD); device = qinfo->device; /* * If the device has been "frozen", don't attempt * to run it. */ if (device->qfrozen_cnt > 0) { continue; } CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("running device %p\n", device)); work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); if (work_ccb == NULL) { printf("device on run queue with no ccbs???\n"); continue; } if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { mtx_lock(&xsoftc.xpt_lock); if (xsoftc.num_highpower <= 0) { /* * We got a high power command, but we * don't have any available slots. Freeze * the device queue until we have a slot * available. */ device->qfrozen_cnt++; STAILQ_INSERT_TAIL(&xsoftc.highpowerq, &work_ccb->ccb_h, xpt_links.stqe); mtx_unlock(&xsoftc.xpt_lock); continue; } else { /* * Consume a high power slot while * this ccb runs. */ xsoftc.num_highpower--; } mtx_unlock(&xsoftc.xpt_lock); } devq->active_dev = device; cam_ccbq_remove_ccb(&device->ccbq, work_ccb); cam_ccbq_send_ccb(&device->ccbq, work_ccb); devq->send_openings--; devq->send_active++; if (device->ccbq.queue.entries > 0) xpt_schedule_dev_sendq(bus, device); if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){ /* * The client wants to freeze the queue * after this CCB is sent. */ device->qfrozen_cnt++; } /* In Target mode, the peripheral driver knows best... 
*/ if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { if ((device->inq_flags & SID_CmdQue) != 0 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; else /* * Clear this in case of a retried CCB that * failed due to a rejected tag. */ work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; } /* * Device queues can be shared among multiple sim instances * that reside on different busses. Use the SIM in the queue * CCB's path, rather than the one in the bus that was passed * into this function. */ sim = work_ccb->ccb_h.path->bus->sim; (*(sim->sim_action))(sim, work_ccb); devq->active_dev = NULL; } devq->send_queue.qfrozen_cnt--; } /* * This function merges stuff from the slave ccb into the master ccb, while * keeping important fields in the master ccb constant. */ void xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb) { /* * Pull fields that are valid for peripheral drivers to set * into the master CCB along with the CCB "payload". */ master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); } void xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); ccb_h->pinfo.priority = priority; ccb_h->path = path; ccb_h->path_id = path->bus->path_id; if (path->target) ccb_h->target_id = path->target->target_id; else ccb_h->target_id = CAM_TARGET_WILDCARD; if (path->device) { ccb_h->target_lun = path->device->lun_id; ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; } else { ccb_h->target_lun = CAM_TARGET_WILDCARD; } ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; ccb_h->flags = 0; } /* Path manipulation functions */ cam_status xpt_create_path(struct cam_path **new_path_ptr, struct 
cam_periph *perph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { struct cam_path *path; cam_status status; path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT); if (path == NULL) { status = CAM_RESRC_UNAVAIL; return(status); } status = xpt_compile_path(path, perph, path_id, target_id, lun_id); if (status != CAM_REQ_CMP) { free(path, M_CAMXPT); path = NULL; } *new_path_ptr = path; return (status); } cam_status xpt_create_path_unlocked(struct cam_path **new_path_ptr, struct cam_periph *periph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { struct cam_path *path; struct cam_eb *bus = NULL; cam_status status; int need_unlock = 0; path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_WAITOK); if (path_id != CAM_BUS_WILDCARD) { bus = xpt_find_bus(path_id); if (bus != NULL) { need_unlock = 1; CAM_SIM_LOCK(bus->sim); } } status = xpt_compile_path(path, periph, path_id, target_id, lun_id); if (need_unlock) CAM_SIM_UNLOCK(bus->sim); if (status != CAM_REQ_CMP) { free(path, M_CAMXPT); path = NULL; } *new_path_ptr = path; return (status); } cam_status xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { struct cam_eb *bus; struct cam_et *target; struct cam_ed *device; cam_status status; status = CAM_REQ_CMP; /* Completed without error */ target = NULL; /* Wildcarded */ device = NULL; /* Wildcarded */ /* * We will potentially modify the EDT, so block interrupts * that may attempt to create cam paths. 
*/ bus = xpt_find_bus(path_id); if (bus == NULL) { status = CAM_PATH_INVALID; } else { target = xpt_find_target(bus, target_id); if (target == NULL) { /* Create one */ struct cam_et *new_target; new_target = xpt_alloc_target(bus, target_id); if (new_target == NULL) { status = CAM_RESRC_UNAVAIL; } else { target = new_target; } } if (target != NULL) { device = xpt_find_device(target, lun_id); if (device == NULL) { /* Create one */ struct cam_ed *new_device; new_device = (*(bus->xport->alloc_device))(bus, target, lun_id); if (new_device == NULL) { status = CAM_RESRC_UNAVAIL; } else { device = new_device; } } } } /* * Only touch the user's data if we are successful. */ if (status == CAM_REQ_CMP) { new_path->periph = perph; new_path->bus = bus; new_path->target = target; new_path->device = device; CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); } else { if (device != NULL) xpt_release_device(bus, target, device); if (target != NULL) xpt_release_target(bus, target); if (bus != NULL) xpt_release_bus(bus); } return (status); } void xpt_release_path(struct cam_path *path) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); if (path->device != NULL) { xpt_release_device(path->bus, path->target, path->device); path->device = NULL; } if (path->target != NULL) { xpt_release_target(path->bus, path->target); path->target = NULL; } if (path->bus != NULL) { xpt_release_bus(path->bus); path->bus = NULL; } } void xpt_free_path(struct cam_path *path) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); xpt_release_path(path); free(path, M_CAMXPT); } /* * Return -1 for failure, 0 for exact match, 1 for match with wildcards * in path1, 2 for match with wildcards in path2. 
*/ int xpt_path_comp(struct cam_path *path1, struct cam_path *path2) { int retval = 0; if (path1->bus != path2->bus) { if (path1->bus->path_id == CAM_BUS_WILDCARD) retval = 1; else if (path2->bus->path_id == CAM_BUS_WILDCARD) retval = 2; else return (-1); } if (path1->target != path2->target) { if (path1->target->target_id == CAM_TARGET_WILDCARD) { if (retval == 0) retval = 1; } else if (path2->target->target_id == CAM_TARGET_WILDCARD) retval = 2; else return (-1); } if (path1->device != path2->device) { if (path1->device->lun_id == CAM_LUN_WILDCARD) { if (retval == 0) retval = 1; } else if (path2->device->lun_id == CAM_LUN_WILDCARD) retval = 2; else return (-1); } return (retval); } void xpt_print_path(struct cam_path *path) { if (path == NULL) printf("(nopath): "); else { if (path->periph != NULL) printf("(%s%d:", path->periph->periph_name, path->periph->unit_number); else printf("(noperiph:"); if (path->bus != NULL) printf("%s%d:%d:", path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id); else printf("nobus:"); if (path->target != NULL) printf("%d:", path->target->target_id); else printf("X:"); if (path->device != NULL) printf("%d): ", path->device->lun_id); else printf("X): "); } } void xpt_print(struct cam_path *path, const char *fmt, ...) 
{ va_list ap; xpt_print_path(path); va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); } int xpt_path_string(struct cam_path *path, char *str, size_t str_len) { struct sbuf sb; #ifdef INVARIANTS if (path != NULL && path->bus != NULL) mtx_assert(path->bus->sim->mtx, MA_OWNED); #endif sbuf_new(&sb, str, str_len, 0); if (path == NULL) sbuf_printf(&sb, "(nopath): "); else { if (path->periph != NULL) sbuf_printf(&sb, "(%s%d:", path->periph->periph_name, path->periph->unit_number); else sbuf_printf(&sb, "(noperiph:"); if (path->bus != NULL) sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id); else sbuf_printf(&sb, "nobus:"); if (path->target != NULL) sbuf_printf(&sb, "%d:", path->target->target_id); else sbuf_printf(&sb, "X:"); if (path->device != NULL) sbuf_printf(&sb, "%d): ", path->device->lun_id); else sbuf_printf(&sb, "X): "); } sbuf_finish(&sb); return(sbuf_len(&sb)); } path_id_t xpt_path_path_id(struct cam_path *path) { mtx_assert(path->bus->sim->mtx, MA_OWNED); return(path->bus->path_id); } target_id_t xpt_path_target_id(struct cam_path *path) { mtx_assert(path->bus->sim->mtx, MA_OWNED); if (path->target != NULL) return (path->target->target_id); else return (CAM_TARGET_WILDCARD); } lun_id_t xpt_path_lun_id(struct cam_path *path) { mtx_assert(path->bus->sim->mtx, MA_OWNED); if (path->device != NULL) return (path->device->lun_id); else return (CAM_LUN_WILDCARD); } struct cam_sim * xpt_path_sim(struct cam_path *path) { return (path->bus->sim); } struct cam_periph* xpt_path_periph(struct cam_path *path) { mtx_assert(path->bus->sim->mtx, MA_OWNED); return (path->periph); } /* * Release a CAM control block for the caller. Remit the cost of the structure * to the device referenced by the path. If the this device had no 'credits' * and peripheral drivers have registered async callbacks for this notification * call them now. 
*/ void xpt_release_ccb(union ccb *free_ccb) { struct cam_path *path; struct cam_ed *device; struct cam_eb *bus; struct cam_sim *sim; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); path = free_ccb->ccb_h.path; device = path->device; bus = path->bus; sim = bus->sim; mtx_assert(sim->mtx, MA_OWNED); cam_ccbq_release_opening(&device->ccbq); if (sim->ccb_count > sim->max_ccbs) { xpt_free_ccb(free_ccb); sim->ccb_count--; } else { SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h, xpt_links.sle); } if (sim->devq == NULL) { return; } sim->devq->alloc_openings++; sim->devq->alloc_active--; /* XXX Turn this into an inline function - xpt_run_device?? */ if ((device_is_alloc_queued(device) == 0) && (device->drvq.entries > 0)) { xpt_schedule_dev_allocq(bus, device); } if (dev_allocq_is_runnable(sim->devq)) xpt_run_dev_allocq(bus); } /* Functions accessed by SIM drivers */ static struct xpt_xport xport_default = { .alloc_device = xpt_alloc_device_default, .action = xpt_action_default, .async = xpt_dev_async_default, }; /* * A sim structure, listing the SIM entry points and instance * identification info is passed to xpt_bus_register to hook the SIM * into the CAM framework. xpt_bus_register creates a cam_eb entry * for this new bus and places it in the array of busses and assigns * it a path_id. The path_id may be influenced by "hard wiring" * information specified by the user. Once interrupt services are * available, the bus will be probed. 
*/ int32_t xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus) { struct cam_eb *new_bus; struct cam_eb *old_bus; struct ccb_pathinq cpi; struct cam_path path; cam_status status; mtx_assert(sim->mtx, MA_OWNED); sim->bus_id = bus; new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), M_CAMXPT, M_NOWAIT); if (new_bus == NULL) { /* Couldn't satisfy request */ return (CAM_RESRC_UNAVAIL); } if (strcmp(sim->sim_name, "xpt") != 0) { sim->path_id = xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); } TAILQ_INIT(&new_bus->et_entries); new_bus->path_id = sim->path_id; cam_sim_hold(sim); new_bus->sim = sim; timevalclear(&new_bus->last_reset); new_bus->flags = 0; new_bus->refcount = 1; /* Held until a bus_deregister event */ new_bus->generation = 0; mtx_lock(&xsoftc.xpt_topo_lock); old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); while (old_bus != NULL && old_bus->path_id < new_bus->path_id) old_bus = TAILQ_NEXT(old_bus, links); if (old_bus != NULL) TAILQ_INSERT_BEFORE(old_bus, new_bus, links); else TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); xsoftc.bus_generation++; mtx_unlock(&xsoftc.xpt_topo_lock); /* * Set a default transport so that a PATH_INQ can be issued to * the SIM. This will then allow for probing and attaching of * a more appropriate transport. 
*/ new_bus->xport = &xport_default; bzero(&path, sizeof(path)); status = xpt_compile_path(&path, /*periph*/NULL, sim->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) printf("xpt_compile_path returned %d\n", status); xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.ccb_h.status == CAM_REQ_CMP) { switch (cpi.transport) { case XPORT_SPI: case XPORT_SAS: case XPORT_FC: case XPORT_USB: new_bus->xport = scsi_get_xport(); break; case XPORT_ATA: case XPORT_SATA: new_bus->xport = ata_get_xport(); break; default: new_bus->xport = &xport_default; break; } } /* Notify interested parties */ if (sim->path_id != CAM_XPT_PATH_ID) { xpt_async(AC_PATH_REGISTERED, &path, &cpi); } xpt_release_path(&path); return (CAM_SUCCESS); } int32_t xpt_bus_deregister(path_id_t pathid) { struct cam_path bus_path; cam_status status; status = xpt_compile_path(&bus_path, NULL, pathid, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) return (status); xpt_async(AC_LOST_DEVICE, &bus_path, NULL); xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); /* Release the reference count held while registered. */ xpt_release_bus(bus_path.bus); xpt_release_path(&bus_path); return (CAM_REQ_CMP); } static path_id_t xptnextfreepathid(void) { struct cam_eb *bus; path_id_t pathid; const char *strval; pathid = 0; mtx_lock(&xsoftc.xpt_topo_lock); bus = TAILQ_FIRST(&xsoftc.xpt_busses); retry: /* Find an unoccupied pathid */ while (bus != NULL && bus->path_id <= pathid) { if (bus->path_id == pathid) pathid++; bus = TAILQ_NEXT(bus, links); } mtx_unlock(&xsoftc.xpt_topo_lock); /* * Ensure that this pathid is not reserved for * a bus that may be registered in the future. 
*/ if (resource_string_value("scbus", pathid, "at", &strval) == 0) { ++pathid; /* Start the search over */ mtx_lock(&xsoftc.xpt_topo_lock); goto retry; } return (pathid); } static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus) { path_id_t pathid; int i, dunit, val; char buf[32]; const char *dname; pathid = CAM_XPT_PATH_ID; snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); i = 0; while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { if (strcmp(dname, "scbus")) { /* Avoid a bit of foot shooting. */ continue; } if (dunit < 0) /* unwired?! */ continue; if (resource_int_value("scbus", dunit, "bus", &val) == 0) { if (sim_bus == val) { pathid = dunit; break; } } else if (sim_bus == 0) { /* Unspecified matches bus 0 */ pathid = dunit; break; } else { printf("Ambiguous scbus configuration for %s%d " "bus %d, cannot wire down. The kernel " "config entry for scbus%d should " "specify a controller bus.\n" "Scbus will be assigned dynamically.\n", sim_name, sim_unit, sim_bus, dunit); break; } } if (pathid == CAM_XPT_PATH_ID) pathid = xptnextfreepathid(); return (pathid); } void xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) { struct cam_eb *bus; struct cam_et *target, *next_target; struct cam_ed *device, *next_device; mtx_assert(path->bus->sim->mtx, MA_OWNED); CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n")); /* * Most async events come from a CAM interrupt context. In * a few cases, the error recovery code at the peripheral layer, * which may run from our SWI or a process context, may signal * deferred events with a call to xpt_async. 
*/ bus = path->bus; if (async_code == AC_BUS_RESET) { /* Update our notion of when the last reset occurred */ microtime(&bus->last_reset); } for (target = TAILQ_FIRST(&bus->et_entries); target != NULL; target = next_target) { next_target = TAILQ_NEXT(target, links); if (path->target != target && path->target->target_id != CAM_TARGET_WILDCARD && target->target_id != CAM_TARGET_WILDCARD) continue; if (async_code == AC_SENT_BDR) { /* Update our notion of when the last reset occurred */ microtime(&path->target->last_reset); } for (device = TAILQ_FIRST(&target->ed_entries); device != NULL; device = next_device) { next_device = TAILQ_NEXT(device, links); if (path->device != device && path->device->lun_id != CAM_LUN_WILDCARD && device->lun_id != CAM_LUN_WILDCARD) continue; (*(bus->xport->async))(async_code, bus, target, device, async_arg); xpt_async_bcast(&device->asyncs, async_code, path, async_arg); } } /* * If this wasn't a fully wildcarded async, tell all * clients that want all async events. */ if (bus != xpt_periph->path->bus) xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code, path, async_arg); } static void xpt_async_bcast(struct async_list *async_head, u_int32_t async_code, struct cam_path *path, void *async_arg) { struct async_node *cur_entry; cur_entry = SLIST_FIRST(async_head); while (cur_entry != NULL) { struct async_node *next_entry; /* * Grab the next list entry before we call the current * entry's callback. This is because the callback function * can delete its async callback entry. 
*/ next_entry = SLIST_NEXT(cur_entry, links); if ((cur_entry->event_enable & async_code) != 0) cur_entry->callback(cur_entry->callback_arg, async_code, path, async_arg); cur_entry = next_entry; } } static void xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg) { printf("xpt_dev_async called\n"); } u_int32_t xpt_freeze_devq(struct cam_path *path, u_int count) { struct ccb_hdr *ccbh; mtx_assert(path->bus->sim->mtx, MA_OWNED); path->device->qfrozen_cnt += count; /* * Mark the last CCB in the queue as needing * to be requeued if the driver hasn't * changed it's state yet. This fixes a race * where a ccb is just about to be queued to * a controller driver when it's interrupt routine * freezes the queue. To completly close the * hole, controller drives must check to see * if a ccb's status is still CAM_REQ_INPROG * just before they queue * the CCB. See ahc_action/ahc_freeze_devq for * an example. */ ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq); if (ccbh && ccbh->status == CAM_REQ_INPROG) ccbh->status = CAM_REQUEUE_REQ; return (path->device->qfrozen_cnt); } u_int32_t xpt_freeze_simq(struct cam_sim *sim, u_int count) { mtx_assert(sim->mtx, MA_OWNED); sim->devq->send_queue.qfrozen_cnt += count; if (sim->devq->active_dev != NULL) { struct ccb_hdr *ccbh; ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs, ccb_hdr_tailq); if (ccbh && ccbh->status == CAM_REQ_INPROG) ccbh->status = CAM_REQUEUE_REQ; } return (sim->devq->send_queue.qfrozen_cnt); } static void xpt_release_devq_timeout(void *arg) { struct cam_ed *device; device = (struct cam_ed *)arg; xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE); } void xpt_release_devq(struct cam_path *path, u_int count, int run_queue) { mtx_assert(path->bus->sim->mtx, MA_OWNED); xpt_release_devq_device(path->device, count, run_queue); } static void xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) { int 
rundevq;

	rundevq = 0;
	if (dev->qfrozen_cnt > 0) {

		/* Never let the count go below zero. */
		count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
		dev->qfrozen_cnt -= count;
		if (dev->qfrozen_cnt == 0) {

			/*
			 * No longer need to wait for a successful
			 * command completion.
			 */
			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;

			/*
			 * Remove any timeouts that might be scheduled
			 * to release this queue.
			 */
			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
				callout_stop(&dev->callout);
				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
			}

			/*
			 * Now that we are unfrozen schedule the
			 * device so any pending transactions are
			 * run.
			 */
			if ((dev->ccbq.queue.entries > 0)
			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
			 && (run_queue != 0)) {
				rundevq = 1;
			}
		}
	}
	if (rundevq != 0)
		xpt_run_dev_sendq(dev->target->bus);
}

/*
 * Drop one freeze count from the SIM send queue; on transition to zero,
 * cancel any pending release timeout and optionally run the send queue.
 * Caller must hold the SIM lock.
 */
void
xpt_release_simq(struct cam_sim *sim, int run_queue)
{
	struct camq *sendq;

	mtx_assert(sim->mtx, MA_OWNED);

	sendq = &(sim->devq->send_queue);
	if (sendq->qfrozen_cnt > 0) {

		sendq->qfrozen_cnt--;
		if (sendq->qfrozen_cnt == 0) {
			struct cam_eb *bus;

			/*
			 * If there is a timeout scheduled to release this
			 * sim queue, remove it.  The queue frozen count is
			 * already at 0.
			 */
			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0) {
				callout_stop(&sim->callout);
				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
			}

			/* xpt_find_bus() takes a bus reference we must drop. */
			bus = xpt_find_bus(sim->path_id);

			if (run_queue) {
				/*
				 * Now that we are unfrozen run the send queue.
				 */
				xpt_run_dev_sendq(bus);
			}
			xpt_release_bus(bus);
		}
	}
}

/*
 * XXX Appears to be unused.
 */
static void
xpt_release_simq_timeout(void *arg)
{
	struct cam_sim *sim;

	sim = (struct cam_sim *)arg;

	xpt_release_simq(sim, /* run_queue */ TRUE);
}

/*
 * Completion entry point for SIMs: queue the finished CCB on the SIM's
 * done queue and kick the CAM SWI (unless the periph is being polled).
 */
void
xpt_done(union ccb *done_ccb)
{
	struct cam_sim *sim;

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
		/*
		 * Queue up the request for handling by our SWI handler
		 * any of the "non-immediate" type of ccbs.
		 */
		sim = done_ccb->ccb_h.path->bus->sim;
		switch (done_ccb->ccb_h.path->periph->type) {
		case CAM_PERIPH_BIO:
			TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
					  sim_links.tqe);
			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
			/* Put the SIM on the global done queue exactly once. */
			if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
				mtx_lock(&cam_simq_lock);
				TAILQ_INSERT_TAIL(&cam_simq, sim, links);
				sim->flags |= CAM_SIM_ON_DONEQ;
				mtx_unlock(&cam_simq_lock);
			}
			if ((done_ccb->ccb_h.path->periph->flags &
			    CAM_PERIPH_POLLED) == 0)
				swi_sched(cambio_ih, 0);
			break;
		default:
			panic("unknown periph type %d",
			    done_ccb->ccb_h.path->periph->type);
		}
	}
}

/* Allocate a zeroed CCB; may sleep.  Caller frees with xpt_free_ccb(). */
union ccb *
xpt_alloc_ccb()
{
	union ccb *new_ccb;

	new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_WAITOK);
	return (new_ccb);
}

/* Allocate a zeroed CCB without sleeping; returns NULL on failure. */
union ccb *
xpt_alloc_ccb_nowait()
{
	union ccb *new_ccb;

	new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_NOWAIT);
	return (new_ccb);
}

/* Release a CCB obtained from xpt_alloc_ccb()/xpt_alloc_ccb_nowait(). */
void
xpt_free_ccb(union ccb *free_ccb)
{
	free(free_ccb, M_CAMXPT);
}

/* Private XPT functions */

/*
 * Get a CAM control block for the caller. Charge the structure to the device
 * referenced by the path.  If the this device has no 'credits' then the
 * device already has the maximum number of outstanding operations under way
 * and we return NULL. If we don't have sufficient resources to allocate more
 * ccbs, we also return NULL.
*/
static union ccb *
xpt_get_ccb(struct cam_ed *device)
{
	union ccb *new_ccb;
	struct cam_sim *sim;

	sim = device->sim;
	if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
		/* Free list empty: grow it by one (non-sleeping). */
		new_ccb = xpt_alloc_ccb_nowait();
		if (new_ccb == NULL) {
			return (NULL);
		}
		if ((sim->flags & CAM_SIM_MPSAFE) == 0)
			callout_handle_init(&new_ccb->ccb_h.timeout_ch);
		SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
				  xpt_links.sle);
		sim->ccb_count++;
	}
	/* Charge the device an opening, then detach the CCB from the list. */
	cam_ccbq_take_opening(&device->ccbq);
	SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
	return (new_ccb);
}

/*
 * Drop a reference on 'bus'; free it (and release its SIM) once the
 * refcount hits zero and it has no targets left.
 */
static void
xpt_release_bus(struct cam_eb *bus)
{

	if ((--bus->refcount == 0)
	 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
		mtx_lock(&xsoftc.xpt_topo_lock);
		TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
		xsoftc.bus_generation++;
		mtx_unlock(&xsoftc.xpt_topo_lock);
		cam_sim_release(bus->sim);
		free(bus, M_CAMXPT);
	}
}

/*
 * Allocate a target node and insertion-sort it (by target id) into the
 * bus's target list.  Returns NULL on allocation failure.
 */
static struct cam_et *
xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
{
	struct cam_et *target;

	target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
	if (target != NULL) {
		struct cam_et *cur_target;

		TAILQ_INIT(&target->ed_entries);
		target->bus = bus;
		target->target_id = target_id;
		target->refcount = 1;
		target->generation = 0;
		timevalclear(&target->last_reset);
		/*
		 * Hold a reference to our parent bus so it
		 * will not go away before we do.
		 */
		bus->refcount++;

		/* Insertion sort into our bus's target list */
		cur_target = TAILQ_FIRST(&bus->et_entries);
		while (cur_target != NULL
		    && cur_target->target_id < target_id)
			cur_target = TAILQ_NEXT(cur_target, links);

		if (cur_target != NULL) {
			TAILQ_INSERT_BEFORE(cur_target, target, links);
		} else {
			TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
		}
		bus->generation++;
	}
	return (target);
}

/*
 * Drop a reference on 'target'; free it (and release the bus reference it
 * held) once the refcount hits zero and it has no devices left.
 */
static void
xpt_release_target(struct cam_eb *bus, struct cam_et *target)
{

	if ((--target->refcount == 0)
	 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
		TAILQ_REMOVE(&bus->et_entries, target, links);
		bus->generation++;
		free(target, M_CAMXPT);
		xpt_release_bus(bus);
	}
}

/*
 * Default transport device allocator: allocate via xpt_alloc_device(),
 * force untagged operation (one tag), and insertion-sort the device (by
 * lun id) into the target's device list.
 */
static struct cam_ed *
xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
			 lun_id_t lun_id)
{
	struct cam_ed *device, *cur_device;

	device = xpt_alloc_device(bus, target, lun_id);
	if (device == NULL)
		return (NULL);

	device->mintags = 1;
	device->maxtags = 1;
	bus->sim->max_ccbs = device->ccbq.devq_openings;
	cur_device = TAILQ_FIRST(&target->ed_entries);
	while (cur_device != NULL && cur_device->lun_id < lun_id)
		cur_device = TAILQ_NEXT(cur_device, links);
	if (cur_device != NULL) {
		TAILQ_INSERT_BEFORE(cur_device, device, links);
	} else {
		TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
	}
	target->generation++;

	return (device);
}

/*
 * Allocate and initialize a device node under 'target', growing the bus's
 * device queue by one slot.  Returns NULL if either the queue resize or
 * the allocation fails.
 */
struct cam_ed *
xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
	struct cam_ed *device;
	struct cam_devq *devq;
	cam_status status;

	/* Make space for us in the device queue on our bus */
	devq = bus->sim->devq;
	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);

	if (status != CAM_REQ_CMP) {
		device = NULL;
	} else {
		device = (struct cam_ed *)malloc(sizeof(*device),
						 M_CAMXPT, M_NOWAIT);
	}

	if (device != NULL) {
		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
		device->alloc_ccb_entry.device = device;
		cam_init_pinfo(&device->send_ccb_entry.pinfo);
		device->send_ccb_entry.device = device;
		device->target = target;
		device->lun_id = lun_id;
		device->sim = bus->sim;
		/* Initialize our queues */
		if (camq_init(&device->drvq, 0) != 0) {
			free(device, M_CAMXPT);
			return (NULL);
		}
		if (cam_ccbq_init(&device->ccbq,
				  bus->sim->max_dev_openings) != 0) {
			camq_fini(&device->drvq);
			free(device, M_CAMXPT);
			return (NULL);
		}
		SLIST_INIT(&device->asyncs);
		SLIST_INIT(&device->periphs);
		device->generation = 0;
		device->owner = NULL;
		device->qfrozen_cnt = 0;
		device->flags = CAM_DEV_UNCONFIGURED;
		device->tag_delay_count = 0;
		device->tag_saved_openings = 0;
		device->refcount = 1;
		if (bus->sim->flags & CAM_SIM_MPSAFE)
			callout_init_mtx(&device->callout, bus->sim->mtx, 0);
		else
			callout_init_mtx(&device->callout, &Giant, 0);

		/*
		 * Hold a reference to our parent target so it
		 * will not go away before we do.
		 */
		target->refcount++;
	}
	return (device);
}

/*
 * Drop a reference on 'device'; once the refcount hits zero and the device
 * is unconfigured, tear down its queues, shrink the bus devq by one slot,
 * free it, and release the target reference it held.
 */
static void
xpt_release_device(struct cam_eb *bus, struct cam_et *target,
		   struct cam_ed *device)
{

	if ((--device->refcount == 0)
	 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
		struct cam_devq *devq;

		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
			panic("Removing device while still queued for ccbs");

		if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
			callout_stop(&device->callout);

		TAILQ_REMOVE(&target->ed_entries, device, links);
		target->generation++;
		bus->sim->max_ccbs -= device->ccbq.devq_openings;
		/* Release our slot in the devq */
		devq = bus->sim->devq;
		cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
		camq_fini(&device->drvq);
		camq_fini(&device->ccbq.queue);
		free(device, M_CAMXPT);
		xpt_release_target(bus, target);
	}
}

/*
 * Resize a device's CCB queue to 'newopenings' openings and adjust the
 * SIM-wide CCB limit by the difference.  Returns the cam_ccbq_resize()
 * result.
 */
u_int32_t
xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
{
	int	diff;
	int	result;
	struct	cam_ed *dev;

	dev = path->device;
	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
	result = cam_ccbq_resize(&dev->ccbq, newopenings);
	if (result == CAM_REQ_CMP && (diff < 0)) {
		/* Queue shrank: flag that a resize pass is still needed. */
		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
	}
	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
	 || (dev->inq_flags & SID_CmdQue) != 0)
		dev->tag_saved_openings = newopenings;
	/* Adjust the global limit */
	dev->sim->max_ccbs += diff;
	return (result);
}

/*
 * Look up a bus by path id.  On success the bus is returned with an extra
 * reference (drop with xpt_release_bus()); NULL if not found.
 */
static struct cam_eb *
xpt_find_bus(path_id_t path_id)
{
	struct cam_eb *bus;

	mtx_lock(&xsoftc.xpt_topo_lock);
	for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
	     bus != NULL;
	     bus = TAILQ_NEXT(bus, links)) {
		if (bus->path_id == path_id) {
			bus->refcount++;
			break;
		}
	}
	mtx_unlock(&xsoftc.xpt_topo_lock);
	return (bus);
}

/*
 * Look up a target on 'bus' by target id; returns it referenced, or NULL.
 */
static struct cam_et *
xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
{
	struct cam_et *target;

	for (target = TAILQ_FIRST(&bus->et_entries);
	     target != NULL;
	     target = TAILQ_NEXT(target, links)) {
		if (target->target_id == target_id) {
			target->refcount++;
			break;
		}
	}
	return (target);
}

/*
 * Look up a device on 'target' by lun id; returns it referenced, or NULL.
 */
static struct cam_ed *
xpt_find_device(struct cam_et *target, lun_id_t lun_id)
{
	struct cam_ed *device;

	for (device = TAILQ_FIRST(&target->ed_entries);
	     device != NULL;
	     device = TAILQ_NEXT(device, links)) {
		if (device->lun_id == lun_id) {
			device->refcount++;
			break;
		}
	}
	return (device);
}

/*
 * Enable tagged queueing on 'path': freeze the devq, resize the CCB queue
 * to the saved (or computed) opening count, and issue an XPT_REL_SIMQ with
 * RELSIM_RELEASE_AFTER_QEMPTY to let outstanding untagged I/O drain first.
 */
static void
xpt_start_tags(struct cam_path *path)
{
	struct ccb_relsim crs;
	struct cam_ed *device;
	struct cam_sim *sim;
	int    newopenings;

	device = path->device;
	sim = path->bus->sim;
	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
	xpt_freeze_devq(path, /*count*/1);
	device->inq_flags |= SID_CmdQue;
	if (device->tag_saved_openings != 0)
		newopenings = device->tag_saved_openings;
	else
		newopenings = min(device->maxtags,
				  sim->max_tagged_dev_openings);
	xpt_dev_ccbq_resize(path, newopenings);
	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.openings
	    = crs.release_timeout
	    = crs.qfrozen_cnt
	    = 0;
	xpt_action((union ccb *)&crs);
}

/* Counters shared by the one-shot boot-time bus configuration pass. */
static int busses_to_config;
static int busses_to_reset;

/*
 * Per-bus callback for the first configuration pass: count the busses to
 * configure and those that will need a bus reset (reset is skipped when
 * the SIM reports PIM_NOBUSRESET or cannot negotiate).
 */
static int
xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
{
	mtx_assert(bus->sim->mtx, MA_OWNED);

	if (bus->path_id != CAM_XPT_PATH_ID) {
		struct cam_path path;
		struct ccb_pathinq
				   cpi;
		int can_negotiate;

		busses_to_config++;
		xpt_compile_path(&path, NULL, bus->path_id,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
		cpi.ccb_h.func_code = XPT_PATH_INQ;
		xpt_action((union ccb *)&cpi);
		can_negotiate = cpi.hba_inquiry;
		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
		if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
		 && can_negotiate)
			busses_to_reset++;
		xpt_release_path(&path);
	}

	return(1);
}

/*
 * Per-bus callback for the second configuration pass: issue a PATH_INQ
 * and, when the SIM permits it, a bus reset.  Each completion (or error
 * path) funnels through xpt_finishconfig() so busses_to_config is always
 * decremented exactly once per bus.
 */
static int
xptconfigfunc(struct cam_eb *bus, void *arg)
{
	struct	cam_path *path;
	union	ccb *work_ccb;

	mtx_assert(bus->sim->mtx, MA_OWNED);

	if (bus->path_id != CAM_XPT_PATH_ID) {
		cam_status status;
		int can_negotiate;

		work_ccb = xpt_alloc_ccb_nowait();
		if (work_ccb == NULL) {
			busses_to_config--;
			xpt_finishconfig(xpt_periph, NULL);
			return(0);
		}
		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
					      CAM_TARGET_WILDCARD,
					      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
			printf("xptconfigfunc: xpt_create_path failed with "
			       "status %#x for bus %d\n", status, bus->path_id);
			printf("xptconfigfunc: halting bus configuration\n");
			xpt_free_ccb(work_ccb);
			busses_to_config--;
			xpt_finishconfig(xpt_periph, NULL);
			return(0);
		}
		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
		xpt_action(work_ccb);
		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
			printf("xptconfigfunc: CPI failed on bus %d "
			       "with status %d\n", bus->path_id,
			       work_ccb->ccb_h.status);
			xpt_finishconfig(xpt_periph, work_ccb);
			return(1);
		}

		can_negotiate = work_ccb->cpi.hba_inquiry;
		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
		if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
		 && (can_negotiate != 0)) {
			xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
			work_ccb->ccb_h.cbfcnp = NULL;
			CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
				  ("Resetting Bus\n"));
			xpt_action(work_ccb);
			xpt_finishconfig(xpt_periph, work_ccb);
		} else {
			/* Act as though we performed a successful BUS RESET */
			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
			xpt_finishconfig(xpt_periph, work_ccb);
		}
	}

	return(1);
}

/*
 * Boot-time entry point: set up optional CAM debugging, count the busses,
 * then start configuration (or finish immediately if there are none).
 */
static void
xpt_config(void *arg)
{
	/*
	 * Now that interrupts are enabled, go find our devices
	 */

#ifdef CAMDEBUG
	/* Setup debugging flags and path */
#ifdef CAM_DEBUG_FLAGS
	cam_dflags = CAM_DEBUG_FLAGS;
#else /* !CAM_DEBUG_FLAGS */
	cam_dflags = CAM_DEBUG_NONE;
#endif /* CAM_DEBUG_FLAGS */
#ifdef CAM_DEBUG_BUS
	if (cam_dflags != CAM_DEBUG_NONE) {
		/*
		 * Locking is specifically omitted here.  No SIMs have
		 * registered yet, so xpt_create_path will only be searching
		 * empty lists of targets and devices.
		 */
		if (xpt_create_path(&cam_dpath, xpt_periph,
				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
			printf("xpt_config: xpt_create_path() failed for debug"
			       " target %d:%d:%d, debugging disabled\n",
			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
			cam_dflags = CAM_DEBUG_NONE;
		}
	} else
		cam_dpath = NULL;
#else /* !CAM_DEBUG_BUS */
	cam_dpath = NULL;
#endif /* CAM_DEBUG_BUS */
#endif /* CAMDEBUG */

	/*
	 * Scan all installed busses.
	 */
	xpt_for_all_busses(xptconfigbuscountfunc, NULL);

	if (busses_to_config == 0) {
		/* Call manually because we don't have any busses */
		xpt_finishconfig(xpt_periph, NULL);
	} else {
		if (busses_to_reset > 0 && scsi_delay >= 2000) {
			printf("Waiting %d seconds for SCSI "
			       "devices to settle\n", scsi_delay/1000);
		}
		xpt_for_all_busses(xptconfigfunc, NULL);
	}
}

/*
 * If the given device only has one peripheral attached to it, and if that
 * peripheral is the passthrough driver, announce it. This insures that the
 * user sees some sort of announcement for every peripheral in their system.
*/
static int
xptpassannouncefunc(struct cam_ed *device, void *arg)
{
	struct cam_periph *periph;
	int i;

	/* Count the peripherals attached to this device (empty loop body). */
	for (periph = SLIST_FIRST(&device->periphs), i = 0;
	     periph != NULL;
	     periph = SLIST_NEXT(periph, periph_links), i++);

	periph = SLIST_FIRST(&device->periphs);
	if ((i == 1)
	 && (strncmp(periph->periph_name, "pass", 4) == 0))
		xpt_announce_periph(periph, NULL);

	return(1);
}

/*
 * Task run once all busses are configured: register the peripheral
 * drivers, announce pass-only devices, and release the boot-time
 * config_intrhook so boot can proceed.  Always frees its task context.
 */
static void
xpt_finishconfig_task(void *context, int pending)
{
	struct	periph_driver **p_drv;
	int	i;

	if (busses_to_config == 0) {
		/* Register all the peripheral drivers */
		/* XXX This will have to change when we have loadable modules */
		p_drv = periph_drivers;
		for (i = 0; p_drv[i] != NULL; i++) {
			(*p_drv[i]->init)();
		}

		/*
		 * Check for devices with no "standard" peripheral driver
		 * attached.  For any devices like that, announce the
		 * passthrough driver so the user will see something.
		 */
		xpt_for_all_devices(xptpassannouncefunc, NULL);

		/* Release our hook so that the boot can continue. */
		config_intrhook_disestablish(xsoftc.xpt_config_hook);
		free(xsoftc.xpt_config_hook, M_CAMXPT);
		xsoftc.xpt_config_hook = NULL;
	}

	free(context, M_CAMXPT);
}

/*
 * Completion handler for the boot configuration pass.  A completed bus
 * reset is turned into a bus scan; a completed (or failed) scan retires
 * the bus.  When the last bus finishes, queue xpt_finishconfig_task().
 */
static void
xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
{
	struct	xpt_task *task;

	if (done_ccb != NULL) {
		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
			  ("xpt_finishconfig\n"));
		switch(done_ccb->ccb_h.func_code) {
		case XPT_RESET_BUS:
			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
				/* Reset done: reuse the CCB to scan the bus. */
				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
				done_ccb->crcn.flags = 0;
				xpt_action(done_ccb);
				return;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_BUS:
		default:
			xpt_free_path(done_ccb->ccb_h.path);
			busses_to_config--;
			break;
		}
	}

	if (busses_to_config == 0) {
		task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
		if (task != NULL) {
			TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
			taskqueue_enqueue(taskqueue_thread, &task->task);
		}
	}

	if (done_ccb != NULL)
		xpt_free_ccb(done_ccb);
}

/*
 * Register an async event callback on 'path' via an XPT_SASYNC_CB CCB.
 * A NULL path means "all events": a wildcard xpt path is built (under the
 * xpt lock) and freed again before returning.
 */
cam_status
xpt_register_async(int event, ac_callback_t *cbfunc,
		   void *cbarg, struct cam_path *path)
{
	struct ccb_setasync csa;
	cam_status status;
	int xptpath = 0;

	if (path == NULL) {
		mtx_lock(&xsoftc.xpt_lock);
		status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
					 CAM_TARGET_WILDCARD,
					 CAM_LUN_WILDCARD);
		if (status != CAM_REQ_CMP) {
			mtx_unlock(&xsoftc.xpt_lock);
			return (status);
		}
		xptpath = 1;
	}

	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = event;
	csa.callback = cbfunc;
	csa.callback_arg = cbarg;
	xpt_action((union ccb *)&csa);
	status = csa.ccb_h.status;
	if (xptpath) {
		xpt_free_path(path);
		mtx_unlock(&xsoftc.xpt_lock);
	}
	return (status);
}

/*
 * Action routine for the xpt's own "SIM": answers path inquiries with a
 * minimal CPI and rejects everything else as CAM_REQ_INVALID.
 */
static void
xptaction(struct cam_sim *sim, union ccb *work_ccb)
{
	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));

	switch (work_ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi;

		cpi = &work_ccb->cpi;
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = 0;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "", HBA_IDLEN);
		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
		cpi->unit_number = sim->unit_number;
		cpi->bus_id = sim->bus_id;
		cpi->base_transfer_speed = 0;
		cpi->protocol = PROTO_UNSPECIFIED;
		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
		cpi->transport = XPORT_UNSPECIFIED;
		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(work_ccb);
		break;
	}
	default:
		work_ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(work_ccb);
		break;
	}
}

/*
 * The xpt as a "controller" has no interrupt sources, so polling
 * is a no-op.
*/
static void
xptpoll(struct cam_sim *sim)
{
}

/* Acquire the global bus-topology lock. */
void
xpt_lock_buses(void)
{
	mtx_lock(&xsoftc.xpt_topo_lock);
}

/* Release the global bus-topology lock. */
void
xpt_unlock_buses(void)
{
	mtx_unlock(&xsoftc.xpt_topo_lock);
}

/*
 * CAM software interrupt handler: atomically steal the global list of SIMs
 * with completed CCBs, then drain each SIM's done queue under its own lock.
 */
static void
camisr(void *dummy)
{
	cam_simq_t queue;
	struct cam_sim *sim;

	mtx_lock(&cam_simq_lock);
	TAILQ_INIT(&queue);
	/* Move the whole pending list local so the lock is held briefly. */
	TAILQ_CONCAT(&queue, &cam_simq, links);
	mtx_unlock(&cam_simq_lock);

	while ((sim = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, sim, links);
		CAM_SIM_LOCK(sim);
		sim->flags &= ~CAM_SIM_ON_DONEQ;
		camisr_runqueue(&sim->sim_doneq);
		CAM_SIM_UNLOCK(sim);
	}
}

/*
 * Drain one SIM's done queue: for each completed CCB, perform high-power
 * accounting, devq/simq bookkeeping and release, then invoke the
 * peripheral driver's completion callback.
 */
static void
camisr_runqueue(void *V_queue)
{
	cam_isrq_t *queue = V_queue;
	struct ccb_hdr *ccb_h;

	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
		int runq;

		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;

		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
			  ("camisr\n"));

		runq = FALSE;

		if (ccb_h->flags & CAM_HIGH_POWER) {
			struct highpowerlist	*hphead;
			union ccb		*send_ccb;

			mtx_lock(&xsoftc.xpt_lock);
			hphead = &xsoftc.highpowerq;

			send_ccb = (union ccb *)STAILQ_FIRST(hphead);

			/*
			 * Increment the count since this command is done.
			 */
			xsoftc.num_highpower++;

			/*
			 * Any high powered commands queued up?
*/ if (send_ccb != NULL) { STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe); mtx_unlock(&xsoftc.xpt_lock); xpt_release_devq(send_ccb->ccb_h.path, /*count*/1, /*runqueue*/TRUE); } else mtx_unlock(&xsoftc.xpt_lock); } if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { struct cam_ed *dev; dev = ccb_h->path->device; cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); ccb_h->path->bus->sim->devq->send_active--; ccb_h->path->bus->sim->devq->send_openings++; if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ) || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 && (dev->ccbq.dev_active == 0))) { xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); } if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 && (--dev->tag_delay_count == 0)) xpt_start_tags(ccb_h->path); if ((dev->ccbq.queue.entries > 0) && (dev->qfrozen_cnt == 0) && (device_is_send_queued(dev) == 0)) { runq = xpt_schedule_dev_sendq(ccb_h->path->bus, dev); } } if (ccb_h->status & CAM_RELEASE_SIMQ) { xpt_release_simq(ccb_h->path->bus->sim, /*run_queue*/TRUE); ccb_h->status &= ~CAM_RELEASE_SIMQ; runq = FALSE; } if ((ccb_h->flags & CAM_DEV_QFRZDIS) && (ccb_h->status & CAM_DEV_QFRZN)) { xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); ccb_h->status &= ~CAM_DEV_QFRZN; } else if (runq) { xpt_run_dev_sendq(ccb_h->path->bus); } /* Call the peripheral driver's callback */ (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); } } Index: head/sys/dev/isp/isp.c =================================================================== --- head/sys/dev/isp/isp.c (revision 196007) +++ head/sys/dev/isp/isp.c (revision 196008) @@ -1,8354 +1,8393 @@ /*- - * Copyright (c) 1997-2007 by Matthew Jacob + * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * + * * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * + * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * */ /* * Machine and OS Independent (well, as best as possible) * code for the Qlogic ISP SCSI and FC-SCSI adapters. */ /* * Inspiration and ideas about this driver are from Erik Moe's Linux driver * (qlogicisp.c) and Dave Miller's SBus version of same (qlogicisp.c). Some * ideas dredged from the Solaris driver. */ /* * Include header file appropriate for platform we're building on. 
*/ #ifdef __NetBSD__ #include __KERNEL_RCSID(0, "$NetBSD$"); #include #endif #ifdef __FreeBSD__ #include __FBSDID("$FreeBSD$"); #include #endif #ifdef __OpenBSD__ #include #endif #ifdef __linux__ #include "isp_linux.h" #endif #ifdef __svr4__ #include "isp_solaris.h" #endif /* * General defines */ #define MBOX_DELAY_COUNT 1000000 / 100 -#define ISP_MARK_PORTDB(a, b) \ - isp_prt(isp, ISP_LOGSANCFG, "line %d: markportdb", __LINE__); \ - isp_mark_portdb(a, b) +#define ISP_MARK_PORTDB(a, b, c) \ + isp_prt(isp, ISP_LOGSANCFG, \ + "Chan %d ISP_MARK_PORTDB@LINE %d", b, __LINE__); \ + isp_mark_portdb(a, b, c) /* * Local static data */ -static const char fconf[] = - "PortDB[%d] changed:\n current =(0x%x@0x%06x 0x%08x%08x 0x%08x%08x)\n" - " database=(0x%x@0x%06x 0x%08x%08x 0x%08x%08x)"; -static const char notresp[] = - "Not RESPONSE in RESPONSE Queue (type 0x%x) @ idx %d (next %d) nlooked %d"; -static const char xact1[] = - "HBA attempted queued transaction with disconnect not set for %d.%d.%d"; -static const char xact2[] = - "HBA attempted queued transaction to target routine %d on target %d bus %d"; -static const char xact3[] = - "HBA attempted queued cmd for %d.%d.%d when queueing disabled"; -static const char pskip[] = - "SCSI phase skipped for target %d.%d.%d"; -static const char topology[] = - "HBA PortID 0x%06x N-Port Handle %d, Connection Topology '%s'"; -static const char ourwwn[] = - "HBA WWNN 0x%08x%08x HBA WWPN 0x%08x%08x"; -static const char finmsg[] = - "%d.%d.%d: FIN dl%d resid %d STS 0x%x SKEY %c XS_ERR=0x%x"; -static const char sc0[] = - "%s CHAN %d FTHRSH %d IID %d RESETD %d RETRYC %d RETRYD %d ASD 0x%x"; -static const char sc1[] = - "%s RAAN 0x%x DLAN 0x%x DDMAB 0x%x CDMAB 0x%x SELTIME %d MQD %d"; -static const char sc2[] = "%s CHAN %d TGT %d FLAGS 0x%x 0x%x/0x%x"; -static const char sc3[] = "Generated"; +static const char fconf[] = "Chan %d PortDB[%d] changed:\n current =(0x%x@0x%06x 0x%08x%08x 0x%08x%08x)\n database=(0x%x@0x%06x 0x%08x%08x 0x%08x%08x)"; 
+static const char notresp[] = "Not RESPONSE in RESPONSE Queue (type 0x%x) @ idx %d (next %d) nlooked %d"; +static const char xact1[] = "HBA attempted queued transaction with disconnect not set for %d.%d.%d"; +static const char xact2[] = "HBA attempted queued transaction to target routine %d on target %d bus %d"; +static const char xact3[] = "HBA attempted queued cmd for %d.%d.%d when queueing disabled"; +static const char pskip[] = "SCSI phase skipped for target %d.%d.%d"; +static const char topology[] = "Chan %d WWPN 0x%08x%08x PortID 0x%06x N-Port Handle %d, Connection '%s'"; +static const char finmsg[] = "%d.%d.%d: FIN dl%d resid %ld STS 0x%x SKEY %c XS_ERR=0x%x"; static const char sc4[] = "NVRAM"; -static const char bun[] = - "bad underrun for %d.%d (count %d, resid %d, status %s)"; +static const char bun[] = "bad underrun for %d.%d (count %d, resid %d, status %s)"; +static const char lipd[] = "Chan %d LIP destroyed %d active commands"; +static const char sacq[] = "unable to acquire scratch area"; +static const uint8_t alpa_map[] = { + 0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, + 0xd9, 0xd6, 0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, + 0xcd, 0xcc, 0xcb, 0xca, 0xc9, 0xc7, 0xc6, 0xc5, + 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5, 0xb4, 0xb3, + 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9, + 0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, + 0x98, 0x97, 0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, + 0x80, 0x7c, 0x7a, 0x79, 0x76, 0x75, 0x74, 0x73, + 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, + 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56, + 0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, + 0x4b, 0x4a, 0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, + 0x3a, 0x39, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, + 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x27, 0x26, + 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17, + 0x10, 0x0f, 0x08, 0x04, 0x02, 0x01, 0x00 +}; + /* * Local function prototypes. 
*/ static int isp_parse_async(ispsoftc_t *, uint16_t); -static int isp_handle_other_response(ispsoftc_t *, int, isphdr_t *, - uint32_t *); -static void -isp_parse_status(ispsoftc_t *, ispstatusreq_t *, XS_T *, long *); -static void +static int isp_handle_other_response(ispsoftc_t *, int, isphdr_t *, uint32_t *); +static void isp_parse_status(ispsoftc_t *, ispstatusreq_t *, XS_T *, long *); static void isp_parse_status_24xx(ispsoftc_t *, isp24xx_statusreq_t *, XS_T *, long *); static void isp_fastpost_complete(ispsoftc_t *, uint16_t); static int isp_mbox_continue(ispsoftc_t *); static void isp_scsi_init(ispsoftc_t *); static void isp_scsi_channel_init(ispsoftc_t *, int); static void isp_fibre_init(ispsoftc_t *); static void isp_fibre_init_2400(ispsoftc_t *); -static void isp_mark_portdb(ispsoftc_t *, int); -static int isp_plogx(ispsoftc_t *, uint16_t, uint32_t, int, int); +static void isp_mark_portdb(ispsoftc_t *, int, int); +static int isp_plogx(ispsoftc_t *, int, uint16_t, uint32_t, int, int); static int isp_port_login(ispsoftc_t *, uint16_t, uint32_t); static int isp_port_logout(ispsoftc_t *, uint16_t, uint32_t); -static int isp_getpdb(ispsoftc_t *, uint16_t, isp_pdb_t *, int); -static uint64_t isp_get_portname(ispsoftc_t *, int, int); -static int isp_fclink_test(ispsoftc_t *, int); -static const char *ispfc_fw_statename(int); -static int isp_pdb_sync(ispsoftc_t *); -static int isp_scan_loop(ispsoftc_t *); -static int isp_gid_ft_sns(ispsoftc_t *); -static int isp_gid_ft_ct_passthru(ispsoftc_t *); -static int isp_scan_fabric(ispsoftc_t *); -static int isp_login_device(ispsoftc_t *, uint32_t, isp_pdb_t *, uint16_t *); -static int isp_register_fc4_type(ispsoftc_t *); -static int isp_register_fc4_type_24xx(ispsoftc_t *); -static uint16_t isp_nxt_handle(ispsoftc_t *, uint16_t); -static void isp_fw_state(ispsoftc_t *); +static int isp_getpdb(ispsoftc_t *, int, uint16_t, isp_pdb_t *, int); +static void isp_dump_chip_portdb(ispsoftc_t *, int, int); +static uint64_t 
isp_get_wwn(ispsoftc_t *, int, int, int); +static int isp_fclink_test(ispsoftc_t *, int, int); +static int isp_pdb_sync(ispsoftc_t *, int); +static int isp_scan_loop(ispsoftc_t *, int); +static int isp_gid_ft_sns(ispsoftc_t *, int); +static int isp_gid_ft_ct_passthru(ispsoftc_t *, int); +static int isp_scan_fabric(ispsoftc_t *, int); +static int isp_login_device(ispsoftc_t *, int, uint32_t, isp_pdb_t *, uint16_t *); +static int isp_register_fc4_type(ispsoftc_t *, int); +static int isp_register_fc4_type_24xx(ispsoftc_t *, int); +static uint16_t isp_nxt_handle(ispsoftc_t *, int, uint16_t); +static void isp_fw_state(ispsoftc_t *, int); static void isp_mboxcmd_qnw(ispsoftc_t *, mbreg_t *, int); static void isp_mboxcmd(ispsoftc_t *, mbreg_t *); -static void isp_update(ispsoftc_t *); -static void isp_update_bus(ispsoftc_t *, int); -static void isp_setdfltparm(ispsoftc_t *, int); -static void isp_setdfltfcparm(ispsoftc_t *); -static int isp_read_nvram(ispsoftc_t *); -static int isp_read_nvram_2400(ispsoftc_t *); +static void isp_spi_update(ispsoftc_t *, int); +static void isp_setdfltsdparm(ispsoftc_t *); +static void isp_setdfltfcparm(ispsoftc_t *, int); +static int isp_read_nvram(ispsoftc_t *, int); +static int isp_read_nvram_2400(ispsoftc_t *, uint8_t *); static void isp_rdnvram_word(ispsoftc_t *, int, uint16_t *); static void isp_rd_2400_nvram(ispsoftc_t *, uint32_t, uint32_t *); static void isp_parse_nvram_1020(ispsoftc_t *, uint8_t *); static void isp_parse_nvram_1080(ispsoftc_t *, int, uint8_t *); static void isp_parse_nvram_12160(ispsoftc_t *, int, uint8_t *); -static void isp_fix_nvram_wwns(ispsoftc_t *); static void isp_parse_nvram_2100(ispsoftc_t *, uint8_t *); static void isp_parse_nvram_2400(ispsoftc_t *, uint8_t *); /* * Reset Hardware. * * Hit the chip over the head, download new f/w if available and set it running. * * Locking done elsewhere. 
*/ void -isp_reset(ispsoftc_t *isp) +isp_reset(ispsoftc_t *isp, int do_load_defaults) { mbreg_t mbs; uint32_t code_org, val; int loops, i, dodnld = 1; - static const char *btype = "????"; + const char *btype = "????"; static const char dcrc[] = "Downloaded RISC Code Checksum Failure"; isp->isp_state = ISP_NILSTATE; + if (isp->isp_dead) { + isp_shutdown(isp); + ISP_DISABLE_INTS(isp); + return; + } /* * Basic types (SCSI, FibreChannel and PCI or SBus) * have been set in the MD code. We figure out more * here. Possibly more refined types based upon PCI * identification. Chip revision has been gathered. * * After we've fired this chip up, zero out the conf1 register * for SCSI adapters and do other settings for the 2100. */ - /* - * Get the current running firmware revision out of the - * chip before we hit it over the head (if this is our - * first time through). Note that we store this as the - * 'ROM' firmware revision- which it may not be. In any - * case, we don't really use this yet, but we may in - * the future. - */ - if (isp->isp_touched == 0) { - /* - * First see whether or not we're sitting in the ISP PROM. - * If we've just been reset, we'll have the string "ISP " - * spread through outgoing mailbox registers 1-3. We do - * this for PCI cards because otherwise we really don't - * know what state the card is in and we could hang if - * we try this command otherwise. - * - * For SBus cards, we just do this because they almost - * certainly will be running firmware by now. - */ - if (ISP_READ(isp, OUTMAILBOX1) != 0x4953 || - ISP_READ(isp, OUTMAILBOX2) != 0x5020 || - ISP_READ(isp, OUTMAILBOX3) != 0x2020) { - /* - * Just in case it was paused... 
- */ - if (IS_24XX(isp)) { - ISP_WRITE(isp, BIU2400_HCCR, - HCCR_2400_CMD_RELEASE); - } else { - ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); - } - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_ABOUT_FIRMWARE; - mbs.logval = MBLOGNONE; - isp_mboxcmd(isp, &mbs); - if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { - isp->isp_romfw_rev[0] = mbs.param[1]; - isp->isp_romfw_rev[1] = mbs.param[2]; - isp->isp_romfw_rev[2] = mbs.param[3]; - } - } - isp->isp_touched = 1; - } - ISP_DISABLE_INTS(isp); /* * Pick an initial maxcmds value which will be used * to allocate xflist pointer space. It may be changed * later by the firmware. */ if (IS_24XX(isp)) { isp->isp_maxcmds = 4096; } else if (IS_2322(isp)) { isp->isp_maxcmds = 2048; } else if (IS_23XX(isp) || IS_2200(isp)) { isp->isp_maxcmds = 1024; } else { isp->isp_maxcmds = 512; } /* - * Set up DMA for the request and result queues. + * Set up DMA for the request and response queues. * * We do this now so we can use the request queue - * for a dma + * for dma to load firmware from. */ if (ISP_MBOXDMASETUP(isp) != 0) { isp_prt(isp, ISP_LOGERR, "Cannot setup DMA"); return; } - /* * Set up default request/response queue in-pointer/out-pointer * register indices. */ if (IS_24XX(isp)) { isp->isp_rqstinrp = BIU2400_REQINP; isp->isp_rqstoutrp = BIU2400_REQOUTP; isp->isp_respinrp = BIU2400_RSPINP; isp->isp_respoutrp = BIU2400_RSPOUTP; - isp->isp_atioinrp = BIU2400_ATIO_RSPINP; - isp->isp_atiooutrp = BIU2400_ATIO_REQINP; } else if (IS_23XX(isp)) { isp->isp_rqstinrp = BIU_REQINP; isp->isp_rqstoutrp = BIU_REQOUTP; isp->isp_respinrp = BIU_RSPINP; isp->isp_respoutrp = BIU_RSPOUTP; } else { isp->isp_rqstinrp = INMAILBOX4; isp->isp_rqstoutrp = OUTMAILBOX4; isp->isp_respinrp = OUTMAILBOX5; isp->isp_respoutrp = INMAILBOX5; } /* * Put the board into PAUSE mode (so we can read the SXP registers * or write FPM/FBM registers). 
*/ if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_HOST_INT); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_PAUSE); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); } if (IS_FC(isp)) { switch (isp->isp_type) { case ISP_HA_FC_2100: btype = "2100"; break; case ISP_HA_FC_2200: btype = "2200"; break; case ISP_HA_FC_2300: btype = "2300"; break; case ISP_HA_FC_2312: btype = "2312"; break; case ISP_HA_FC_2322: btype = "2322"; break; case ISP_HA_FC_2400: btype = "2422"; break; + case ISP_HA_FC_2500: + btype = "2532"; + break; default: break; } if (!IS_24XX(isp)) { /* * While we're paused, reset the FPM module and FBM * fifos. */ ISP_WRITE(isp, BIU2100_CSR, BIU2100_FPM0_REGS); ISP_WRITE(isp, FPM_DIAG_CONFIG, FPM_SOFT_RESET); ISP_WRITE(isp, BIU2100_CSR, BIU2100_FB_REGS); ISP_WRITE(isp, FBM_CMD, FBMCMD_FIFO_RESET_ALL); ISP_WRITE(isp, BIU2100_CSR, BIU2100_RISC_REGS); } } else if (IS_1240(isp)) { - sdparam *sdp = isp->isp_param; + sdparam *sdp; + btype = "1240"; isp->isp_clock = 60; + sdp = SDPARAM(isp, 0); sdp->isp_ultramode = 1; - sdp++; + sdp = SDPARAM(isp, 1); sdp->isp_ultramode = 1; /* * XXX: Should probably do some bus sensing. 
*/ } else if (IS_ULTRA3(isp)) { sdparam *sdp = isp->isp_param; isp->isp_clock = 100; if (IS_10160(isp)) btype = "10160"; else if (IS_12160(isp)) btype = "12160"; else btype = ""; sdp->isp_lvdmode = 1; if (IS_DUALBUS(isp)) { sdp++; sdp->isp_lvdmode = 1; } } else if (IS_ULTRA2(isp)) { static const char m[] = "bus %d is in %s Mode"; uint16_t l; - sdparam *sdp = isp->isp_param; + sdparam *sdp = SDPARAM(isp, 0); isp->isp_clock = 100; if (IS_1280(isp)) btype = "1280"; else if (IS_1080(isp)) btype = "1080"; else btype = ""; l = ISP_READ(isp, SXP_PINS_DIFF) & ISP1080_MODE_MASK; switch (l) { case ISP1080_LVD_MODE: sdp->isp_lvdmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 0, "LVD"); break; case ISP1080_HVD_MODE: sdp->isp_diffmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 0, "Differential"); break; case ISP1080_SE_MODE: sdp->isp_ultramode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 0, "Single-Ended"); break; default: isp_prt(isp, ISP_LOGERR, "unknown mode on bus %d (0x%x)", 0, l); break; } if (IS_DUALBUS(isp)) { - sdp++; + sdp = SDPARAM(isp, 1); l = ISP_READ(isp, SXP_PINS_DIFF|SXP_BANK1_SELECT); l &= ISP1080_MODE_MASK; - switch(l) { + switch (l) { case ISP1080_LVD_MODE: sdp->isp_lvdmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 1, "LVD"); break; case ISP1080_HVD_MODE: sdp->isp_diffmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 1, "Differential"); break; case ISP1080_SE_MODE: sdp->isp_ultramode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 1, "Single-Ended"); break; default: isp_prt(isp, ISP_LOGERR, "unknown mode on bus %d (0x%x)", 1, l); break; } } } else { - sdparam *sdp = isp->isp_param; + sdparam *sdp = SDPARAM(isp, 0); i = ISP_READ(isp, BIU_CONF0) & BIU_CONF0_HW_MASK; switch (i) { default: isp_prt(isp, ISP_LOGALL, "Unknown Chip Type 0x%x", i); /* FALLTHROUGH */ case 1: btype = "1020"; isp->isp_type = ISP_HA_SCSI_1020; isp->isp_clock = 40; break; case 2: /* * Some 1020A chips are Ultra Capable, but don't * run the clock rate up for that unless told to * do so by the Ultra Capable bits being set. 
*/ btype = "1020A"; isp->isp_type = ISP_HA_SCSI_1020A; isp->isp_clock = 40; break; case 3: btype = "1040"; isp->isp_type = ISP_HA_SCSI_1040; isp->isp_clock = 60; break; case 4: btype = "1040A"; isp->isp_type = ISP_HA_SCSI_1040A; isp->isp_clock = 60; break; case 5: btype = "1040B"; isp->isp_type = ISP_HA_SCSI_1040B; isp->isp_clock = 60; break; case 6: btype = "1040C"; isp->isp_type = ISP_HA_SCSI_1040C; isp->isp_clock = 60; break; } /* * Now, while we're at it, gather info about ultra * and/or differential mode. */ if (ISP_READ(isp, SXP_PINS_DIFF) & SXP_PINS_DIFF_MODE) { isp_prt(isp, ISP_LOGCONFIG, "Differential Mode"); sdp->isp_diffmode = 1; } else { sdp->isp_diffmode = 0; } i = ISP_READ(isp, RISC_PSR); if (isp->isp_bustype == ISP_BT_SBUS) { i &= RISC_PSR_SBUS_ULTRA; } else { i &= RISC_PSR_PCI_ULTRA; } if (i != 0) { isp_prt(isp, ISP_LOGCONFIG, "Ultra Mode Capable"); sdp->isp_ultramode = 1; /* * If we're in Ultra Mode, we have to be 60MHz clock- * even for the SBus version. */ isp->isp_clock = 60; } else { sdp->isp_ultramode = 0; /* * Clock is known. Gronk. */ } /* * Machine dependent clock (if set) overrides * our generic determinations. */ if (isp->isp_mdvec->dv_clock) { if (isp->isp_mdvec->dv_clock < isp->isp_clock) { isp->isp_clock = isp->isp_mdvec->dv_clock; } } } /* * Clear instrumentation */ isp->isp_intcnt = isp->isp_intbogus = 0; /* * Do MD specific pre initialization */ ISP_RESET0(isp); /* * Hit the chip over the head with hammer, - * and give the ISP a chance to recover. + * and give it a chance to recover. */ if (IS_SCSI(isp)) { ISP_WRITE(isp, BIU_ICR, BIU_ICR_SOFT_RESET); /* * A slight delay... */ - USEC_DELAY(100); + ISP_DELAY(100); /* * Clear data && control DMA engines. 
*/ - ISP_WRITE(isp, CDMA_CONTROL, - DMA_CNTRL_CLEAR_CHAN | DMA_CNTRL_RESET_INT); - ISP_WRITE(isp, DDMA_CONTROL, - DMA_CNTRL_CLEAR_CHAN | DMA_CNTRL_RESET_INT); + ISP_WRITE(isp, CDMA_CONTROL, DMA_CNTRL_CLEAR_CHAN | DMA_CNTRL_RESET_INT); + ISP_WRITE(isp, DDMA_CONTROL, DMA_CNTRL_CLEAR_CHAN | DMA_CNTRL_RESET_INT); } else if (IS_24XX(isp)) { /* * Stop DMA and wait for it to stop. */ ISP_WRITE(isp, BIU2400_CSR, BIU2400_DMA_STOP|(3 << 4)); for (val = loops = 0; loops < 30000; loops++) { - USEC_DELAY(10); + ISP_DELAY(10); val = ISP_READ(isp, BIU2400_CSR); if ((val & BIU2400_DMA_ACTIVE) == 0) { break; } - } + } if (val & BIU2400_DMA_ACTIVE) { ISP_RESET0(isp); isp_prt(isp, ISP_LOGERR, "DMA Failed to Stop on Reset"); return; } /* * Hold it in SOFT_RESET and STOP state for 100us. */ - ISP_WRITE(isp, BIU2400_CSR, - BIU2400_SOFT_RESET|BIU2400_DMA_STOP|(3 << 4)); - USEC_DELAY(100); + ISP_WRITE(isp, BIU2400_CSR, BIU2400_SOFT_RESET|BIU2400_DMA_STOP|(3 << 4)); + ISP_DELAY(100); for (loops = 0; loops < 10000; loops++) { - USEC_DELAY(5); + ISP_DELAY(5); val = ISP_READ(isp, OUTMAILBOX0); } for (val = loops = 0; loops < 500000; loops ++) { val = ISP_READ(isp, BIU2400_CSR); if ((val & BIU2400_SOFT_RESET) == 0) { break; } } if (val & BIU2400_SOFT_RESET) { ISP_RESET0(isp); isp_prt(isp, ISP_LOGERR, "Failed to come out of reset"); return; } } else { ISP_WRITE(isp, BIU2100_CSR, BIU2100_SOFT_RESET); /* * A slight delay... */ - USEC_DELAY(100); + ISP_DELAY(100); /* * Clear data && control DMA engines. 
*/ - ISP_WRITE(isp, CDMA2100_CONTROL, - DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); - ISP_WRITE(isp, TDMA2100_CONTROL, - DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); - ISP_WRITE(isp, RDMA2100_CONTROL, - DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); + ISP_WRITE(isp, CDMA2100_CONTROL, DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); + ISP_WRITE(isp, TDMA2100_CONTROL, DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); + ISP_WRITE(isp, RDMA2100_CONTROL, DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); } /* * Wait for ISP to be ready to go... */ loops = MBOX_DELAY_COUNT; for (;;) { if (IS_SCSI(isp)) { if (!(ISP_READ(isp, BIU_ICR) & BIU_ICR_SOFT_RESET)) { break; } } else if (IS_24XX(isp)) { if (ISP_READ(isp, OUTMAILBOX0) == 0) { break; } } else { if (!(ISP_READ(isp, BIU2100_CSR) & BIU2100_SOFT_RESET)) break; } - USEC_DELAY(100); + ISP_DELAY(100); if (--loops < 0) { ISP_DUMPREGS(isp, "chip reset timed out"); ISP_RESET0(isp); return; } } /* * After we've fired this chip up, zero out the conf1 register * for SCSI adapters and other settings for the 2100. */ if (IS_SCSI(isp)) { ISP_WRITE(isp, BIU_CONF1, 0); } else if (!IS_24XX(isp)) { ISP_WRITE(isp, BIU2100_CSR, 0); } /* * Reset RISC Processor */ if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RESET); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RELEASE); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RESET); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); - USEC_DELAY(100); + ISP_DELAY(100); ISP_WRITE(isp, BIU_SEMA, 0); } - /* * Post-RISC Reset stuff. */ if (IS_24XX(isp)) { for (val = loops = 0; loops < 5000000; loops++) { - USEC_DELAY(5); + ISP_DELAY(5); val = ISP_READ(isp, OUTMAILBOX0); if (val == 0) { break; } } if (val != 0) { ISP_RESET0(isp); isp_prt(isp, ISP_LOGERR, "reset didn't clear"); return; } } else if (IS_SCSI(isp)) { uint16_t tmp = isp->isp_mdvec->dv_conf1; /* * Busted FIFO. Turn off all but burst enables. 
*/ if (isp->isp_type == ISP_HA_SCSI_1040A) { tmp &= BIU_BURST_ENABLE; } ISP_SETBITS(isp, BIU_CONF1, tmp); if (tmp & BIU_BURST_ENABLE) { ISP_SETBITS(isp, CDMA_CONF, DMA_ENABLE_BURST); ISP_SETBITS(isp, DDMA_CONF, DMA_ENABLE_BURST); } - if (SDPARAM(isp)->isp_ptisp) { - if (SDPARAM(isp)->isp_ultramode) { + if (SDPARAM(isp, 0)->isp_ptisp) { + if (SDPARAM(isp, 0)->isp_ultramode) { while (ISP_READ(isp, RISC_MTR) != 0x1313) { ISP_WRITE(isp, RISC_MTR, 0x1313); ISP_WRITE(isp, HCCR, HCCR_CMD_STEP); } } else { ISP_WRITE(isp, RISC_MTR, 0x1212); } /* * PTI specific register */ ISP_WRITE(isp, RISC_EMB, DUAL_BANK); } else { ISP_WRITE(isp, RISC_MTR, 0x1212); } ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); } else { ISP_WRITE(isp, RISC_MTR2100, 0x1212); if (IS_2200(isp) || IS_23XX(isp)) { ISP_WRITE(isp, HCCR, HCCR_2X00_DISABLE_PARITY_PAUSE); } ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); } ISP_WRITE(isp, isp->isp_rqstinrp, 0); ISP_WRITE(isp, isp->isp_rqstoutrp, 0); ISP_WRITE(isp, isp->isp_respinrp, 0); ISP_WRITE(isp, isp->isp_respoutrp, 0); + if (IS_24XX(isp)) { + ISP_WRITE(isp, BIU2400_PRI_REQINP, 0); + ISP_WRITE(isp, BIU2400_PRI_REQOUTP, 0); + ISP_WRITE(isp, BIU2400_ATIO_RSPINP, 0); + ISP_WRITE(isp, BIU2400_ATIO_RSPOUTP, 0); + } - /* * Do MD specific post initialization */ ISP_RESET1(isp); /* * Wait for everything to finish firing up. * - * Avoid doing this on the 2312 because you can generate a PCI + * Avoid doing this on early 2312s because you can generate a PCI * parity error (chip breakage). */ - if (IS_2312(isp)) { - USEC_DELAY(100); + if (IS_2312(isp) && isp->isp_revision < 2) { + ISP_DELAY(100); } else { loops = MBOX_DELAY_COUNT; while (ISP_READ(isp, OUTMAILBOX0) == MBOX_BUSY) { - USEC_DELAY(100); + ISP_DELAY(100); if (--loops < 0) { ISP_RESET0(isp); isp_prt(isp, ISP_LOGERR, "MBOX_BUSY never cleared on reset"); return; } } } /* * Up until this point we've done everything by just reading or * setting registers. 
From this point on we rely on at least *some* * kind of firmware running in the card. */ /* - * Do some sanity checking. + * Do some sanity checking by running a NOP command. + * If it succeeds, the ROM firmware is now running. */ - MEMZERO(&mbs, sizeof (mbs)); + ISP_MEMZERO(&mbs, sizeof (mbs)); mbs.param[0] = MBOX_NO_OP; mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { + isp_prt(isp, ISP_LOGERR, "NOP ommand failed (%x)", mbs.param[0]); ISP_RESET0(isp); return; } + /* + * Do some operational tests + */ + if (IS_SCSI(isp) || IS_24XX(isp)) { - MEMZERO(&mbs, sizeof (mbs)); + ISP_MEMZERO(&mbs, sizeof (mbs)); mbs.param[0] = MBOX_MAILBOX_REG_TEST; mbs.param[1] = 0xdead; mbs.param[2] = 0xbeef; mbs.param[3] = 0xffff; mbs.param[4] = 0x1111; mbs.param[5] = 0xa5a5; mbs.param[6] = 0x0000; mbs.param[7] = 0x0000; mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { ISP_RESET0(isp); return; } if (mbs.param[1] != 0xdead || mbs.param[2] != 0xbeef || mbs.param[3] != 0xffff || mbs.param[4] != 0x1111 || mbs.param[5] != 0xa5a5) { ISP_RESET0(isp); - isp_prt(isp, ISP_LOGERR, - "Register Test Failed (0x%x 0x%x 0x%x 0x%x 0x%x)", - mbs.param[1], mbs.param[2], mbs.param[3], - mbs.param[4], mbs.param[5]); + isp_prt(isp, ISP_LOGERR, "Register Test Failed (0x%x 0x%x 0x%x 0x%x 0x%x)", mbs.param[1], mbs.param[2], mbs.param[3], mbs.param[4], mbs.param[5]); return; } } /* * Download new Firmware, unless requested not to do so. * This is made slightly trickier in some cases where the * firmware of the ROM revision is newer than the revision * compiled into the driver. So, where we used to compare * versions of our f/w and the ROM f/w, now we just see * whether we have f/w at all and whether a config flag * has disabled our download. 
*/ - if ((isp->isp_mdvec->dv_ispfw == NULL) || - (isp->isp_confopts & ISP_CFG_NORELOAD)) { + if ((isp->isp_mdvec->dv_ispfw == NULL) || (isp->isp_confopts & ISP_CFG_NORELOAD)) { dodnld = 0; } if (IS_24XX(isp)) { code_org = ISP_CODE_ORG_2400; } else if (IS_23XX(isp)) { code_org = ISP_CODE_ORG_2300; } else { code_org = ISP_CODE_ORG; } if (dodnld && IS_24XX(isp)) { const uint32_t *ptr = isp->isp_mdvec->dv_ispfw; /* - * NB: Whatever you do do, do *not* issue the VERIFY FIRMWARE - * NB: command to the 2400 while loading new firmware. This - * NB: causes the new f/w to start and immediately crash back - * NB: to the ROM. - */ - - /* * Keep loading until we run out of f/w. */ code_org = ptr[2]; /* 1st load address is our start addr */ for (;;) { uint32_t la, wi, wl; - isp_prt(isp, ISP_LOGDEBUG0, - "load 0x%x words of code at load address 0x%x", - ptr[3], ptr[2]); + isp_prt(isp, ISP_LOGDEBUG0, "load 0x%x words of code at load address 0x%x", ptr[3], ptr[2]); wi = 0; la = ptr[2]; wl = ptr[3]; while (wi < ptr[3]) { uint32_t *cp; uint32_t nw; nw = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) >> 2; if (nw > wl) { nw = wl; } cp = isp->isp_rquest; for (i = 0; i < nw; i++) { ISP_IOXPUT_32(isp, ptr[wi++], &cp[i]); wl--; } - MEMORYBARRIER(isp, SYNC_REQUEST, - 0, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp))); - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_LOAD_RISC_RAM; - mbs.param[1] = la; - mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); - mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); - mbs.param[4] = nw >> 16; - mbs.param[5] = nw; - mbs.param[6] = DMA_WD3(isp->isp_rquest_dma); - mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); - mbs.param[8] = la >> 16; + MEMORYBARRIER(isp, SYNC_REQUEST, 0, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp))); + ISP_MEMZERO(&mbs, sizeof (mbs)); + if (la < 0x10000 && nw < 0x10000) { + mbs.param[0] = MBOX_LOAD_RISC_RAM_2100; + mbs.param[1] = la; + mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); + mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); + mbs.param[4] = nw; + mbs.param[6] = 
DMA_WD3(isp->isp_rquest_dma); + mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); + } else { + mbs.param[0] = MBOX_LOAD_RISC_RAM; + mbs.param[1] = la; + mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); + mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); + mbs.param[4] = nw >> 16; + mbs.param[5] = nw; + mbs.param[6] = DMA_WD3(isp->isp_rquest_dma); + mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); + mbs.param[8] = la >> 16; + } mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { - isp_prt(isp, ISP_LOGERR, - "F/W Risc Ram Load Failed"); + isp_prt(isp, ISP_LOGERR, "F/W Risc Ram Load Failed"); ISP_RESET0(isp); return; } la += nw; } if (ptr[1] == 0) { break; } ptr += ptr[3]; - } + } isp->isp_loaded_fw = 1; } else if (dodnld && IS_23XX(isp)) { const uint16_t *ptr = isp->isp_mdvec->dv_ispfw; uint16_t wi, wl, segno; uint32_t la; la = code_org; segno = 0; for (;;) { uint32_t nxtaddr; - isp_prt(isp, ISP_LOGDEBUG0, - "load 0x%x words of code at load address 0x%x", - ptr[3], la); + isp_prt(isp, ISP_LOGDEBUG0, "load 0x%x words of code at load address 0x%x", ptr[3], la); wi = 0; wl = ptr[3]; while (wi < ptr[3]) { uint16_t *cp; - uint32_t nw; - + uint16_t nw; + nw = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) >> 1; if (nw > wl) { nw = wl; } if (nw > (1 << 15)) { nw = 1 << 15; } cp = isp->isp_rquest; for (i = 0; i < nw; i++) { ISP_IOXPUT_16(isp, ptr[wi++], &cp[i]); wl--; } - MEMORYBARRIER(isp, SYNC_REQUEST, - 0, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp))); - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_LOAD_RISC_RAM; - mbs.param[1] = la; - mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); - mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); - mbs.param[4] = nw; - mbs.param[6] = DMA_WD3(isp->isp_rquest_dma); - mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); - mbs.param[8] = la >> 16; + MEMORYBARRIER(isp, SYNC_REQUEST, 0, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp))); + ISP_MEMZERO(&mbs, sizeof (mbs)); + if (la < 0x10000) { + mbs.param[0] = MBOX_LOAD_RISC_RAM_2100; + mbs.param[1] = la; 
+ mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); + mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); + mbs.param[4] = nw; + mbs.param[6] = DMA_WD3(isp->isp_rquest_dma); + mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); + } else { + mbs.param[0] = MBOX_LOAD_RISC_RAM; + mbs.param[1] = la; + mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); + mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); + mbs.param[4] = nw; + mbs.param[6] = DMA_WD3(isp->isp_rquest_dma); + mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); + mbs.param[8] = la >> 16; + } mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { - isp_prt(isp, ISP_LOGERR, - "F/W Risc Ram Load Failed"); + isp_prt(isp, ISP_LOGERR, "F/W Risc Ram Load Failed"); ISP_RESET0(isp); return; } la += nw; } if (!IS_2322(isp)) { - /* - * Verify that it downloaded correctly. - */ - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_VERIFY_CHECKSUM; - mbs.param[1] = code_org; - mbs.logval = MBLOGNONE; - isp_mboxcmd(isp, &mbs); - if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { - isp_prt(isp, ISP_LOGERR, dcrc); - ISP_RESET0(isp); - return; - } break; } if (++segno == 3) { break; } /* * If we're a 2322, the firmware actually comes in * three chunks. We loaded the first at the code_org * address. The other two chunks, which follow right * after each other in memory here, get loaded at * addresses specfied at offset 0x9..0xB. 
*/ nxtaddr = ptr[3]; ptr = &ptr[nxtaddr]; la = ptr[5] | ((ptr[4] & 0x3f) << 16); } isp->isp_loaded_fw = 1; } else if (dodnld) { union { const uint16_t *cp; uint16_t *np; - } u; - u.cp = isp->isp_mdvec->dv_ispfw; - isp->isp_mbxworkp = &u.np[1]; - isp->isp_mbxwrk0 = u.np[3] - 1; + } ucd; + ucd.cp = isp->isp_mdvec->dv_ispfw; + isp->isp_mbxworkp = &ucd.np[1]; + isp->isp_mbxwrk0 = ucd.np[3] - 1; isp->isp_mbxwrk1 = code_org + 1; - MEMZERO(&mbs, sizeof (mbs)); + ISP_MEMZERO(&mbs, sizeof (mbs)); mbs.param[0] = MBOX_WRITE_RAM_WORD; mbs.param[1] = code_org; - mbs.param[2] = u.np[0]; + mbs.param[2] = ucd.np[0]; mbs.logval = MBLOGNONE; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { - isp_prt(isp, ISP_LOGERR, - "F/W download failed at word %d", - isp->isp_mbxwrk1 - code_org); + isp_prt(isp, ISP_LOGERR, "F/W download failed at word %d", isp->isp_mbxwrk1 - code_org); ISP_RESET0(isp); return; } - /* - * Verify that it downloaded correctly. - */ - MEMZERO(&mbs, sizeof (mbs)); + } else { + isp->isp_loaded_fw = 0; + isp_prt(isp, ISP_LOGDEBUG2, "skipping f/w download"); + } + + /* + * If we loaded firmware, verify its checksum + */ + if (isp->isp_loaded_fw) { + ISP_MEMZERO(&mbs, sizeof (mbs)); mbs.param[0] = MBOX_VERIFY_CHECKSUM; - mbs.param[1] = code_org; - mbs.logval = MBLOGNONE; + if (IS_24XX(isp)) { + mbs.param[1] = code_org >> 16; + mbs.param[2] = code_org; + } else { + mbs.param[1] = code_org; + } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, dcrc); ISP_RESET0(isp); return; } - isp->isp_loaded_fw = 1; - } else { - isp->isp_loaded_fw = 0; - isp_prt(isp, ISP_LOGDEBUG2, "skipping f/w download"); } /* * Now start it rolling. * * If we didn't actually download f/w, * we still need to (re)start it. 
*/ - MEMZERO(&mbs, sizeof (mbs)); - mbs.timeout = 1000000; - mbs.param[0] = MBOX_EXEC_FIRMWARE; + MBSINIT(&mbs, MBOX_EXEC_FIRMWARE, MBLOGALL, 1000000); if (IS_24XX(isp)) { mbs.param[1] = code_org >> 16; mbs.param[2] = code_org; if (isp->isp_loaded_fw) { mbs.param[3] = 0; } else { mbs.param[3] = 1; } + if (IS_25XX(isp)) { + mbs.ibits |= 0x10; + } } else if (IS_2322(isp)) { mbs.param[1] = code_org; if (isp->isp_loaded_fw) { mbs.param[2] = 0; } else { mbs.param[2] = 1; } } else { mbs.param[1] = code_org; } - - mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (IS_2322(isp) || IS_24XX(isp)) { if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { ISP_RESET0(isp); return; } } /* * Give it a chance to finish starting up. + * Give the 24XX more time. */ - USEC_DELAY(250000); - - if (IS_SCSI(isp)) { + if (IS_24XX(isp)) { + ISP_DELAY(500000); /* - * Set CLOCK RATE, but only if asked to. + * Check to see if the 24XX firmware really started. */ - if (isp->isp_clock) { - mbs.param[0] = MBOX_SET_CLOCK_RATE; - mbs.param[1] = isp->isp_clock; - mbs.logval = MBLOGNONE; - isp_mboxcmd(isp, &mbs); - /* we will try not to care if this fails */ + if (mbs.param[1] == 0xdead) { + isp_prt(isp, ISP_LOGERR, "f/w didn't *really* start"); + ISP_RESET0(isp); + return; } + } else { + ISP_DELAY(250000); + if (IS_SCSI(isp)) { + /* + * Set CLOCK RATE, but only if asked to. + */ + if (isp->isp_clock) { + mbs.param[0] = MBOX_SET_CLOCK_RATE; + mbs.param[1] = isp->isp_clock; + mbs.logval = MBLOGNONE; + isp_mboxcmd(isp, &mbs); + /* we will try not to care if this fails */ + } + } } - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_ABOUT_FIRMWARE; - mbs.logval = MBLOGALL; + /* + * Ask the chip for the current firmware version. + * This should prove that the new firmware is working. 
+ */ + MBSINIT(&mbs, MBOX_ABOUT_FIRMWARE, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { ISP_RESET0(isp); return; } - if (IS_24XX(isp) && mbs.param[1] == 0xdead) { - isp_prt(isp, ISP_LOGERR, "f/w didn't *really* start"); - ISP_RESET0(isp); - return; - } - /* * The SBus firmware that we are using apparently does not return * major, minor, micro revisions in the mailbox registers, which * is really, really, annoying. */ if (ISP_SBUS_SUPPORTED && isp->isp_bustype == ISP_BT_SBUS) { if (dodnld) { #ifdef ISP_TARGET_MODE isp->isp_fwrev[0] = 7; isp->isp_fwrev[1] = 55; #else isp->isp_fwrev[0] = 1; isp->isp_fwrev[1] = 37; #endif isp->isp_fwrev[2] = 0; - } + } } else { isp->isp_fwrev[0] = mbs.param[1]; isp->isp_fwrev[1] = mbs.param[2]; isp->isp_fwrev[2] = mbs.param[3]; } - isp_prt(isp, ISP_LOGALL, - "Board Type %s, Chip Revision 0x%x, %s F/W Revision %d.%d.%d", - btype, isp->isp_revision, dodnld? "loaded" : "resident", - isp->isp_fwrev[0], isp->isp_fwrev[1], isp->isp_fwrev[2]); + isp_prt(isp, ISP_LOGCONFIG, "Board Type %s, Chip Revision 0x%x, %s F/W Revision %d.%d.%d", + btype, isp->isp_revision, dodnld? "loaded" : "resident", isp->isp_fwrev[0], isp->isp_fwrev[1], isp->isp_fwrev[2]); if (IS_FC(isp)) { /* * We do not believe firmware attributes for 2100 code less * than 1.17.0, unless it's the firmware we specifically * are loading. * * Note that all 22XX and later f/w is greater than 1.X.0. 
*/ if ((ISP_FW_OLDER_THAN(isp, 1, 17, 1))) { #ifdef USE_SMALLER_2100_FIRMWARE - FCPARAM(isp)->isp_fwattr = ISP_FW_ATTR_SCCLUN; + isp->isp_fwattr = ISP_FW_ATTR_SCCLUN; #else - FCPARAM(isp)->isp_fwattr = 0; + isp->isp_fwattr = 0; #endif } else { - FCPARAM(isp)->isp_fwattr = mbs.param[6]; - isp_prt(isp, ISP_LOGDEBUG0, - "Firmware Attributes = 0x%x", mbs.param[6]); + isp->isp_fwattr = mbs.param[6]; + isp_prt(isp, ISP_LOGDEBUG0, "Firmware Attributes = 0x%x", mbs.param[6]); } - FCPARAM(isp)->isp_2klogin = 0; - FCPARAM(isp)->isp_sccfw = 0; - FCPARAM(isp)->isp_tmode = 0; - if (IS_24XX(isp)) { - FCPARAM(isp)->isp_2klogin = 1; - FCPARAM(isp)->isp_sccfw = 1; - FCPARAM(isp)->isp_tmode = 1; - } else { - if (FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) { - FCPARAM(isp)->isp_sccfw = 1; - } - if (FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_2KLOGINS) { - FCPARAM(isp)->isp_2klogin = 1; - FCPARAM(isp)->isp_sccfw = 1; - } - if (FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) { - FCPARAM(isp)->isp_tmode = 1; - } - } - if (FCPARAM(isp)->isp_2klogin) { - isp_prt(isp, ISP_LOGCONFIG, "2K Logins Supported"); - } + } else { +#ifndef ISP_TARGET_MODE + isp->isp_fwattr = ISP_FW_ATTR_TMODE; +#else + isp->isp_fwattr = 0; +#endif } - if (isp->isp_romfw_rev[0] || isp->isp_romfw_rev[1] || - isp->isp_romfw_rev[2]) { - isp_prt(isp, ISP_LOGCONFIG, "Last F/W revision was %d.%d.%d", - isp->isp_romfw_rev[0], isp->isp_romfw_rev[1], - isp->isp_romfw_rev[2]); - } - if (!IS_24XX(isp)) { - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_GET_FIRMWARE_STATUS; - mbs.logval = MBLOGALL; + MBSINIT(&mbs, MBOX_GET_FIRMWARE_STATUS, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { ISP_RESET0(isp); return; } if (isp->isp_maxcmds >= mbs.param[2]) { isp->isp_maxcmds = mbs.param[2]; } } - isp_prt(isp, ISP_LOGCONFIG, - "%d max I/O command limit set", isp->isp_maxcmds); - isp_fw_state(isp); + isp_prt(isp, ISP_LOGCONFIG, "%d max I/O command limit set", isp->isp_maxcmds); + /* + * If we don't 
have Multi-ID f/w loaded, we need to restrict channels to one. + * Only make this check for non-SCSI cards (I'm not sure firmware attributes + * work for them). + */ + if (IS_FC(isp) && ISP_CAP_MULTI_ID(isp) == 0 && isp->isp_nchan > 1) { + isp_prt(isp, ISP_LOGWARN, "non-MULTIID f/w loaded, only can enable 1 of %d channels", isp->isp_nchan); + isp->isp_nchan = 1; + } + + for (i = 0; i < isp->isp_nchan; i++) { + isp_fw_state(isp, i); + } + if (isp->isp_dead) { + isp_shutdown(isp); + ISP_DISABLE_INTS(isp); + return; + } + isp->isp_state = ISP_RESETSTATE; /* * Okay- now that we have new firmware running, we now (re)set our * notion of how many luns we support. This is somewhat tricky because * if we haven't loaded firmware, we sometimes do not have an easy way * of knowing how many luns we support. * * Expanded lun firmware gives you 32 luns for SCSI cards and * 16384 luns for Fibre Channel cards. * * It turns out that even for QLogic 2100s with ROM 1.10 and above * we do get a firmware attributes word returned in mailbox register 6. * * Because the lun is in a different position in the Request Queue * Entry structure for Fibre Channel with expanded lun firmware, we * can only support one lun (lun zero) when we don't know what kind * of firmware we're running. */ if (IS_SCSI(isp)) { if (dodnld) { if (IS_ULTRA2(isp) || IS_ULTRA3(isp)) { isp->isp_maxluns = 32; } else { isp->isp_maxluns = 8; } } else { isp->isp_maxluns = 8; } } else { - if (FCPARAM(isp)->isp_sccfw) { + if (ISP_CAP_SCCFW(isp)) { isp->isp_maxluns = 16384; } else { isp->isp_maxluns = 16; } } + /* - * Must do this first to get defaults established. + * We get some default values established. As a side + * effect, NVRAM is read here (unless overriden by + * a configuration flag). 
*/ - if (IS_SCSI(isp)) { - isp_setdfltparm(isp, 0); - if (IS_DUALBUS(isp)) { - isp_setdfltparm(isp, 1); + if (do_load_defaults) { + if (IS_SCSI(isp)) { + isp_setdfltsdparm(isp); + } else { + for (i = 0; i < isp->isp_nchan; i++) { + isp_setdfltfcparm(isp, i); + } } - } else { - isp_setdfltfcparm(isp); } - } /* * Initialize Parameters of Hardware to a known state. * * Locks are held before coming here. */ void isp_init(ispsoftc_t *isp) { if (IS_FC(isp)) { - /* - * Do this *before* initializing the firmware. - */ - ISP_MARK_PORTDB(isp, 0); - FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; - FCPARAM(isp)->isp_loopstate = LOOP_NIL; - - if (isp->isp_role != ISP_ROLE_NONE) { - if (IS_24XX(isp)) { - isp_fibre_init_2400(isp); - } else { - isp_fibre_init(isp); - } + if (IS_24XX(isp)) { + isp_fibre_init_2400(isp); + } else { + isp_fibre_init(isp); } } else { isp_scsi_init(isp); } + GET_NANOTIME(&isp->isp_init_time); } static void isp_scsi_init(ispsoftc_t *isp) { sdparam *sdp_chan0, *sdp_chan1; mbreg_t mbs; - sdp_chan0 = isp->isp_param; + sdp_chan0 = SDPARAM(isp, 0); sdp_chan1 = sdp_chan0; if (IS_DUALBUS(isp)) { - sdp_chan1++; + sdp_chan1 = SDPARAM(isp, 1); } - /* - * If we have no role (neither target nor initiator), return. - */ - if (isp->isp_role == ISP_ROLE_NONE) { - return; - } - /* First do overall per-card settings. */ /* * If we have fast memory timing enabled, turn it on. */ if (sdp_chan0->isp_fast_mttr) { ISP_WRITE(isp, RISC_MTR, 0x1313); } /* * Set Retry Delay and Count. * You set both channels at the same time. */ - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_SET_RETRY_COUNT; + MBSINIT(&mbs, MBOX_SET_RETRY_COUNT, MBLOGALL, 0); mbs.param[1] = sdp_chan0->isp_retry_count; mbs.param[2] = sdp_chan0->isp_retry_delay; mbs.param[6] = sdp_chan1->isp_retry_count; mbs.param[7] = sdp_chan1->isp_retry_delay; - mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } /* * Set ASYNC DATA SETUP time. This is very important. 
*/ - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_SET_ASYNC_DATA_SETUP_TIME; + MBSINIT(&mbs, MBOX_SET_ASYNC_DATA_SETUP_TIME, MBLOGALL, 0); mbs.param[1] = sdp_chan0->isp_async_data_setup; mbs.param[2] = sdp_chan1->isp_async_data_setup; - mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } /* * Set ACTIVE Negation State. */ - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_SET_ACT_NEG_STATE; + MBSINIT(&mbs, MBOX_SET_ACT_NEG_STATE, MBLOGNONE, 0); mbs.param[1] = (sdp_chan0->isp_req_ack_active_neg << 4) | (sdp_chan0->isp_data_line_active_neg << 5); mbs.param[2] = (sdp_chan1->isp_req_ack_active_neg << 4) | (sdp_chan1->isp_data_line_active_neg << 5); - mbs.logval = MBLOGNONE; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "failed to set active negation state (%d,%d), (%d,%d)", sdp_chan0->isp_req_ack_active_neg, sdp_chan0->isp_data_line_active_neg, sdp_chan1->isp_req_ack_active_neg, sdp_chan1->isp_data_line_active_neg); /* * But don't return. */ } /* * Set the Tag Aging limit */ - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_SET_TAG_AGE_LIMIT; + MBSINIT(&mbs, MBOX_SET_TAG_AGE_LIMIT, MBLOGALL, 0); mbs.param[1] = sdp_chan0->isp_tag_aging; mbs.param[2] = sdp_chan1->isp_tag_aging; - mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "failed to set tag age limit (%d,%d)", sdp_chan0->isp_tag_aging, sdp_chan1->isp_tag_aging); return; } /* * Set selection timeout. 
*/ - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_SET_SELECT_TIMEOUT; + MBSINIT(&mbs, MBOX_SET_SELECT_TIMEOUT, MBLOGALL, 0); mbs.param[1] = sdp_chan0->isp_selection_timeout; mbs.param[2] = sdp_chan1->isp_selection_timeout; - mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } /* now do per-channel settings */ isp_scsi_channel_init(isp, 0); if (IS_DUALBUS(isp)) isp_scsi_channel_init(isp, 1); /* * Now enable request/response queues */ if (IS_ULTRA2(isp) || IS_1240(isp)) { - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_INIT_RES_QUEUE_A64; + MBSINIT(&mbs, MBOX_INIT_RES_QUEUE_A64, MBLOGALL, 0); mbs.param[1] = RESULT_QUEUE_LEN(isp); mbs.param[2] = DMA_WD1(isp->isp_result_dma); mbs.param[3] = DMA_WD0(isp->isp_result_dma); mbs.param[4] = 0; mbs.param[6] = DMA_WD3(isp->isp_result_dma); mbs.param[7] = DMA_WD2(isp->isp_result_dma); - mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_residx = mbs.param[5]; - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_INIT_REQ_QUEUE_A64; + MBSINIT(&mbs, MBOX_INIT_REQ_QUEUE_A64, MBLOGALL, 0); mbs.param[1] = RQUEST_QUEUE_LEN(isp); mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[5] = 0; mbs.param[6] = DMA_WD3(isp->isp_result_dma); mbs.param[7] = DMA_WD2(isp->isp_result_dma); - mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_reqidx = isp->isp_reqodx = mbs.param[4]; } else { - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_INIT_RES_QUEUE; + MBSINIT(&mbs, MBOX_INIT_RES_QUEUE, MBLOGALL, 0); mbs.param[1] = RESULT_QUEUE_LEN(isp); mbs.param[2] = DMA_WD1(isp->isp_result_dma); mbs.param[3] = DMA_WD0(isp->isp_result_dma); mbs.param[4] = 0; - mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_residx = mbs.param[5]; - MEMZERO(&mbs, sizeof (mbs)); - 
mbs.param[0] = MBOX_INIT_REQ_QUEUE; + MBSINIT(&mbs, MBOX_INIT_REQ_QUEUE, MBLOGALL, 0); mbs.param[1] = RQUEST_QUEUE_LEN(isp); mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[5] = 0; - mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_reqidx = isp->isp_reqodx = mbs.param[4]; } /* * Turn on Fast Posting, LVD transitions * * Ultra2 F/W always has had fast posting (and LVD transitions) * * Ultra and older (i.e., SBus) cards may not. It's just safer * to assume not for them. */ - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_SET_FW_FEATURES; - mbs.param[1] = 0; + MBSINIT(&mbs, MBOX_SET_FW_FEATURES, MBLOGALL, 0); if (IS_ULTRA2(isp)) mbs.param[1] |= FW_FEATURE_LVD_NOTIFY; #ifndef ISP_NO_RIO if (IS_ULTRA2(isp) || IS_1240(isp)) mbs.param[1] |= FW_FEATURE_RIO_16BIT; #else if (IS_ULTRA2(isp) || IS_1240(isp)) mbs.param[1] |= FW_FEATURE_FAST_POST; #endif if (mbs.param[1] != 0) { uint16_t sfeat = mbs.param[1]; - mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGINFO, "Enabled FW features (0x%x)", sfeat); } } - /* - * Let the outer layers decide whether to issue a SCSI bus reset. - */ isp->isp_state = ISP_INITSTATE; } static void -isp_scsi_channel_init(ispsoftc_t *isp, int channel) +isp_scsi_channel_init(ispsoftc_t *isp, int chan) { sdparam *sdp; mbreg_t mbs; int tgt; - sdp = isp->isp_param; - sdp += channel; + sdp = SDPARAM(isp, chan); /* * Set (possibly new) Initiator ID. 
*/ - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_SET_INIT_SCSI_ID; - mbs.param[1] = (channel << 7) | sdp->isp_initiator_id; - mbs.logval = MBLOGALL; + MBSINIT(&mbs, MBOX_SET_INIT_SCSI_ID, MBLOGALL, 0); + mbs.param[1] = (chan << 7) | sdp->isp_initiator_id; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } - isp_prt(isp, ISP_LOGINFO, "Initiator ID is %d on Channel %d", - sdp->isp_initiator_id, channel); + isp_prt(isp, ISP_LOGINFO, "Chan %d Initiator ID is %d", + chan, sdp->isp_initiator_id); /* * Set current per-target parameters to an initial safe minimum. */ for (tgt = 0; tgt < MAX_TARGETS; tgt++) { int lun; uint16_t sdf; if (sdp->isp_devparam[tgt].dev_enable == 0) { continue; } #ifndef ISP_TARGET_MODE sdf = sdp->isp_devparam[tgt].goal_flags; sdf &= DPARM_SAFE_DFLT; /* * It is not quite clear when this changed over so that * we could force narrow and async for 1000/1020 cards, * but assume that this is only the case for loaded * firmware. */ if (isp->isp_loaded_fw) { sdf |= DPARM_NARROW | DPARM_ASYNC; } #else /* * The !$*!)$!$)* f/w uses the same index into some * internal table to decide how to respond to negotiations, * so if we've said "let's be safe" for ID X, and ID X * selects *us*, the negotiations will back to 'safe' * (as in narrow/async). What the f/w *should* do is * use the initiator id settings to decide how to respond. 
*/ sdp->isp_devparam[tgt].goal_flags = sdf = DPARM_DEFAULT; #endif - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_SET_TARGET_PARAMS; - mbs.param[1] = (channel << 15) | (tgt << 8); + MBSINIT(&mbs, MBOX_SET_TARGET_PARAMS, MBLOGNONE, 0); + mbs.param[1] = (chan << 15) | (tgt << 8); mbs.param[2] = sdf; if ((sdf & DPARM_SYNC) == 0) { mbs.param[3] = 0; } else { mbs.param[3] = (sdp->isp_devparam[tgt].goal_offset << 8) | (sdp->isp_devparam[tgt].goal_period); } isp_prt(isp, ISP_LOGDEBUG0, "Initial Settings bus%d tgt%d flags 0x%x off 0x%x per 0x%x", - channel, tgt, mbs.param[2], mbs.param[3] >> 8, + chan, tgt, mbs.param[2], mbs.param[3] >> 8, mbs.param[3] & 0xff); - mbs.logval = MBLOGNONE; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { sdf = DPARM_SAFE_DFLT; - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_SET_TARGET_PARAMS; - mbs.param[1] = (tgt << 8) | (channel << 15); + MBSINIT(&mbs, MBOX_SET_TARGET_PARAMS, MBLOGALL, 0); + mbs.param[1] = (tgt << 8) | (chan << 15); mbs.param[2] = sdf; mbs.param[3] = 0; - mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { continue; } } /* * We don't update any information directly from the f/w * because we need to run at least one command to cause a * new state to be latched up. So, we just assume that we * converge to the values we just had set. * * Ensure that we don't believe tagged queuing is enabled yet. * It turns out that sometimes the ISP just ignores our * attempts to set parameters for devices that it hasn't * seen yet. 
*/ sdp->isp_devparam[tgt].actv_flags = sdf & ~DPARM_TQING; for (lun = 0; lun < (int) isp->isp_maxluns; lun++) { - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_SET_DEV_QUEUE_PARAMS; - mbs.param[1] = (channel << 15) | (tgt << 8) | lun; + MBSINIT(&mbs, MBOX_SET_DEV_QUEUE_PARAMS, MBLOGALL, 0); + mbs.param[1] = (chan << 15) | (tgt << 8) | lun; mbs.param[2] = sdp->isp_max_queue_depth; mbs.param[3] = sdp->isp_devparam[tgt].exc_throttle; - mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { break; } } } for (tgt = 0; tgt < MAX_TARGETS; tgt++) { if (sdp->isp_devparam[tgt].dev_refresh) { - isp->isp_sendmarker |= (1 << channel); - isp->isp_update |= (1 << channel); + sdp->sendmarker = 1; + sdp->update = 1; break; } } } /* * Fibre Channel specific initialization. */ static void isp_fibre_init(ispsoftc_t *isp) { fcparam *fcp; isp_icb_t local, *icbp = &local; mbreg_t mbs; int ownloopid; - uint64_t nwwn, pwwn; - fcp = isp->isp_param; + /* + * We only support one channel on non-24XX cards + */ + fcp = FCPARAM(isp, 0); + if (fcp->role == ISP_ROLE_NONE) { + isp->isp_state = ISP_INITSTATE; + return; + } - MEMZERO(icbp, sizeof (*icbp)); + ISP_MEMZERO(icbp, sizeof (*icbp)); icbp->icb_version = ICB_VERSION1; icbp->icb_fwoptions = fcp->isp_fwoptions; /* * Firmware Options are either retrieved from NVRAM or * are patched elsewhere. We check them for sanity here * and make changes based on board revision, but otherwise * let others decide policy. */ /* * If this is a 2100 < revision 5, we have to turn off FAIRNESS. */ if (IS_2100(isp) && isp->isp_revision < 5) { icbp->icb_fwoptions &= ~ICBOPT_FAIRNESS; } /* * We have to use FULL LOGIN even though it resets the loop too much * because otherwise port database entries don't get updated after * a LIP- this is a known f/w bug for 2100 f/w less than 1.17.0. 
*/ if (!ISP_FW_NEWER_THAN(isp, 1, 17, 0)) { icbp->icb_fwoptions |= ICBOPT_FULL_LOGIN; } /* * Insist on Port Database Update Async notifications */ icbp->icb_fwoptions |= ICBOPT_PDBCHANGE_AE; /* * Make sure that target role reflects into fwoptions. */ - if (isp->isp_role & ISP_ROLE_TARGET) { + if (fcp->role & ISP_ROLE_TARGET) { icbp->icb_fwoptions |= ICBOPT_TGT_ENABLE; } else { icbp->icb_fwoptions &= ~ICBOPT_TGT_ENABLE; } - if (isp->isp_role & ISP_ROLE_INITIATOR) { + if (fcp->role & ISP_ROLE_INITIATOR) { icbp->icb_fwoptions &= ~ICBOPT_INI_DISABLE; } else { icbp->icb_fwoptions |= ICBOPT_INI_DISABLE; } - icbp->icb_maxfrmlen = fcp->isp_maxfrmlen; + icbp->icb_maxfrmlen = DEFAULT_FRAMESIZE(isp); if (icbp->icb_maxfrmlen < ICB_MIN_FRMLEN || icbp->icb_maxfrmlen > ICB_MAX_FRMLEN) { isp_prt(isp, ISP_LOGERR, "bad frame length (%d) from NVRAM- using %d", - fcp->isp_maxfrmlen, ICB_DFLT_FRMLEN); + DEFAULT_FRAMESIZE(isp), ICB_DFLT_FRMLEN); icbp->icb_maxfrmlen = ICB_DFLT_FRMLEN; } icbp->icb_maxalloc = fcp->isp_maxalloc; if (icbp->icb_maxalloc < 1) { isp_prt(isp, ISP_LOGERR, "bad maximum allocation (%d)- using 16", fcp->isp_maxalloc); icbp->icb_maxalloc = 16; } - icbp->icb_execthrottle = fcp->isp_execthrottle; + icbp->icb_execthrottle = DEFAULT_EXEC_THROTTLE(isp); if (icbp->icb_execthrottle < 1) { isp_prt(isp, ISP_LOGERR, - "bad execution throttle of %d- using 16", - fcp->isp_execthrottle); + "bad execution throttle of %d- using %d", + DEFAULT_EXEC_THROTTLE(isp), ICB_DFLT_THROTTLE); icbp->icb_execthrottle = ICB_DFLT_THROTTLE; } icbp->icb_retry_delay = fcp->isp_retry_delay; icbp->icb_retry_count = fcp->isp_retry_count; icbp->icb_hardaddr = fcp->isp_loopid; ownloopid = (isp->isp_confopts & ISP_CFG_OWNLOOPID) != 0; - if (icbp->icb_hardaddr > 125) { + if (icbp->icb_hardaddr >= LOCAL_LOOP_LIM) { icbp->icb_hardaddr = 0; ownloopid = 0; } /* * Our life seems so much better with 2200s and later with * the latest f/w if we set Hard Address. 
*/ if (ownloopid || ISP_FW_NEWER_THAN(isp, 2, 2, 5)) { icbp->icb_fwoptions |= ICBOPT_HARD_ADDRESS; } /* * Right now we just set extended options to prefer point-to-point * over loop based upon some soft config options. - * + * * NB: for the 2300, ICBOPT_EXTENDED is required. */ if (IS_2200(isp) || IS_23XX(isp)) { icbp->icb_fwoptions |= ICBOPT_EXTENDED; /* * Prefer or force Point-To-Point instead Loop? */ - switch(isp->isp_confopts & ISP_CFG_PORT_PREF) { + switch (isp->isp_confopts & ISP_CFG_PORT_PREF) { case ISP_CFG_NPORT: icbp->icb_xfwoptions |= ICBXOPT_PTP_2_LOOP; break; case ISP_CFG_NPORT_ONLY: icbp->icb_xfwoptions |= ICBXOPT_PTP_ONLY; break; case ISP_CFG_LPORT_ONLY: icbp->icb_xfwoptions |= ICBXOPT_LOOP_ONLY; break; default: icbp->icb_xfwoptions |= ICBXOPT_LOOP_2_PTP; break; } if (IS_2200(isp)) { + /* + * There seems to just be too much breakage here + * with RIO and Fast Posting- it probably actually + * works okay but this driver is messing it up. + * This card is really ancient by now, so let's + * just opt for safety and not use the feature. + */ +#if 0 + if (ISP_FW_NEWER_THAN(isp, 1, 17, 0)) { + icbp->icb_xfwoptions |= ICBXOPT_RIO_16BIT; + icbp->icb_fwoptions &= ~ICBOPT_FAST_POST; + icbp->icb_racctimer = 4; + icbp->icb_idelaytimer = 8; + } else { + icbp->icb_fwoptions |= ICBOPT_FAST_POST; + } +#else + icbp->icb_xfwoptions &= ~ICBXOPT_RIO_16BIT; icbp->icb_fwoptions &= ~ICBOPT_FAST_POST; +#endif } else { /* * QLogic recommends that FAST Posting be turned * off for 23XX cards and instead allow the HBA * to write response queue entries and interrupt * after a delay (ZIO). 
*/ icbp->icb_fwoptions &= ~ICBOPT_FAST_POST; - if ((fcp->isp_xfwoptions & ICBXOPT_TIMER_MASK) == - ICBXOPT_ZIO) { + if ((fcp->isp_xfwoptions & ICBXOPT_TIMER_MASK) == ICBXOPT_ZIO) { icbp->icb_xfwoptions |= ICBXOPT_ZIO; icbp->icb_idelaytimer = 10; } if (isp->isp_confopts & ISP_CFG_ONEGB) { icbp->icb_zfwoptions |= ICBZOPT_RATE_ONEGB; } else if (isp->isp_confopts & ISP_CFG_TWOGB) { icbp->icb_zfwoptions |= ICBZOPT_RATE_TWOGB; } else { icbp->icb_zfwoptions |= ICBZOPT_RATE_AUTO; } if (fcp->isp_zfwoptions & ICBZOPT_50_OHM) { icbp->icb_zfwoptions |= ICBZOPT_50_OHM; } } } /* * For 22XX > 2.1.26 && 23XX, set some options. * XXX: Probably okay for newer 2100 f/w too. */ if (ISP_FW_NEWER_THAN(isp, 2, 26, 0)) { /* * Turn on LIP F8 async event (1) * Turn on generate AE 8013 on all LIP Resets (2) * Disable LIP F7 switching (8) */ - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_SET_FIRMWARE_OPTIONS; + MBSINIT(&mbs, MBOX_SET_FIRMWARE_OPTIONS, MBLOGALL, 0); mbs.param[1] = 0xb; mbs.param[2] = 0; mbs.param[3] = 0; - mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } } icbp->icb_logintime = ICB_LOGIN_TOV; icbp->icb_lunetimeout = ICB_LUN_ENABLE_TOV; - nwwn = ISP_NODEWWN(isp); - pwwn = ISP_PORTWWN(isp); - if (nwwn && pwwn) { + if (fcp->isp_wwnn && fcp->isp_wwpn && (fcp->isp_wwnn >> 60) != 2) { icbp->icb_fwoptions |= ICBOPT_BOTH_WWNS; - MAKE_NODE_NAME_FROM_WWN(icbp->icb_nodename, nwwn); - MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, pwwn); + MAKE_NODE_NAME_FROM_WWN(icbp->icb_nodename, fcp->isp_wwnn); + MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, fcp->isp_wwpn); isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Node 0x%08x%08x Port 0x%08x%08x", - ((uint32_t) (nwwn >> 32)), - ((uint32_t) (nwwn & 0xffffffff)), - ((uint32_t) (pwwn >> 32)), - ((uint32_t) (pwwn & 0xffffffff))); - } else if (pwwn) { + ((uint32_t) (fcp->isp_wwnn >> 32)), + ((uint32_t) (fcp->isp_wwnn)), + ((uint32_t) (fcp->isp_wwpn >> 32)), + ((uint32_t) (fcp->isp_wwpn))); + } else if 
(fcp->isp_wwpn) { icbp->icb_fwoptions &= ~ICBOPT_BOTH_WWNS; - MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, pwwn); + MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, fcp->isp_wwpn); isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Port 0x%08x%08x", - ((uint32_t) (pwwn >> 32)), - ((uint32_t) (pwwn & 0xffffffff))); + ((uint32_t) (fcp->isp_wwpn >> 32)), + ((uint32_t) (fcp->isp_wwpn))); } else { isp_prt(isp, ISP_LOGERR, "No valid WWNs to use"); return; } icbp->icb_rqstqlen = RQUEST_QUEUE_LEN(isp); if (icbp->icb_rqstqlen < 1) { isp_prt(isp, ISP_LOGERR, "bad request queue length"); } icbp->icb_rsltqlen = RESULT_QUEUE_LEN(isp); if (icbp->icb_rsltqlen < 1) { isp_prt(isp, ISP_LOGERR, "bad result queue length"); } icbp->icb_rqstaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_rquest_dma); icbp->icb_respaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_result_dma); + if (FC_SCRATCH_ACQUIRE(isp, 0)) { + isp_prt(isp, ISP_LOGERR, sacq); + return; + } isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init: fwopt 0x%x xfwopt 0x%x zfwopt 0x%x", icbp->icb_fwoptions, icbp->icb_xfwoptions, icbp->icb_zfwoptions); - FC_SCRATCH_ACQUIRE(isp); isp_put_icb(isp, icbp, (isp_icb_t *)fcp->isp_scratch); /* * Init the firmware */ - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_INIT_FIRMWARE; + MBSINIT(&mbs, MBOX_INIT_FIRMWARE, MBLOGALL, 30000000); mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); mbs.logval = MBLOGALL; - mbs.timeout = 30 * 1000000; isp_prt(isp, ISP_LOGDEBUG0, "INIT F/W from %p (%08x%08x)", fcp->isp_scratch, 
(uint32_t) ((uint64_t)fcp->isp_scdma >> 32), (uint32_t) fcp->isp_scdma); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, sizeof (*icbp)); isp_mboxcmd(isp, &mbs); - FC_SCRATCH_RELEASE(isp); + FC_SCRATCH_RELEASE(isp, 0); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_print_bytes(isp, "isp_fibre_init", sizeof (*icbp), icbp); return; } isp->isp_reqidx = 0; isp->isp_reqodx = 0; isp->isp_residx = 0; /* * Whatever happens, we're now committed to being here. */ isp->isp_state = ISP_INITSTATE; } static void isp_fibre_init_2400(ispsoftc_t *isp) { fcparam *fcp; isp_icb_2400_t local, *icbp = &local; mbreg_t mbs; - int ownloopid; - uint64_t nwwn, pwwn; + int chan; - fcp = isp->isp_param; + /* + * Check to see whether all channels have *some* kind of role + */ + for (chan = 0; chan < isp->isp_nchan; chan++) { + fcp = FCPARAM(isp, chan); + if (fcp->role != ISP_ROLE_NONE) { + break; + } + } + if (chan == isp->isp_nchan) { + isp_prt(isp, ISP_LOGDEBUG0, "all %d channels with role 'none'", chan); + isp->isp_state = ISP_INITSTATE; + return; + } /* + * Start with channel 0. 
+ */ + fcp = FCPARAM(isp, 0); + + /* * Turn on LIP F8 async event (1) */ - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_SET_FIRMWARE_OPTIONS; + MBSINIT(&mbs, MBOX_SET_FIRMWARE_OPTIONS, MBLOGALL, 0); mbs.param[1] = 1; - mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } - /* - * XXX: This should be applied to icb- not fwoptions - */ - if (isp->isp_role & ISP_ROLE_TARGET) { - fcp->isp_fwoptions |= ICB2400_OPT1_TGT_ENABLE; + ISP_MEMZERO(icbp, sizeof (*icbp)); + icbp->icb_fwoptions1 = fcp->isp_fwoptions; + if (fcp->role & ISP_ROLE_TARGET) { + icbp->icb_fwoptions1 |= ICB2400_OPT1_TGT_ENABLE; } else { - fcp->isp_fwoptions &= ~ICB2400_OPT1_TGT_ENABLE; + icbp->icb_fwoptions1 &= ~ICB2400_OPT1_TGT_ENABLE; } - if (isp->isp_role & ISP_ROLE_INITIATOR) { - fcp->isp_fwoptions &= ~ICB2400_OPT1_INI_DISABLE; + if (fcp->role & ISP_ROLE_INITIATOR) { + icbp->icb_fwoptions1 &= ~ICB2400_OPT1_INI_DISABLE; } else { - fcp->isp_fwoptions |= ICB2400_OPT1_INI_DISABLE; + icbp->icb_fwoptions1 |= ICB2400_OPT1_INI_DISABLE; } - MEMZERO(icbp, sizeof (*icbp)); icbp->icb_version = ICB_VERSION1; - icbp->icb_maxfrmlen = fcp->isp_maxfrmlen; - if (icbp->icb_maxfrmlen < ICB_MIN_FRMLEN || - icbp->icb_maxfrmlen > ICB_MAX_FRMLEN) { - isp_prt(isp, ISP_LOGERR, - "bad frame length (%d) from NVRAM- using %d", - fcp->isp_maxfrmlen, ICB_DFLT_FRMLEN); + icbp->icb_maxfrmlen = DEFAULT_FRAMESIZE(isp); + if (icbp->icb_maxfrmlen < ICB_MIN_FRMLEN || icbp->icb_maxfrmlen > ICB_MAX_FRMLEN) { + isp_prt(isp, ISP_LOGERR, "bad frame length (%d) from NVRAM- using %d", DEFAULT_FRAMESIZE(isp), ICB_DFLT_FRMLEN); icbp->icb_maxfrmlen = ICB_DFLT_FRMLEN; } - icbp->icb_execthrottle = fcp->isp_execthrottle; + icbp->icb_execthrottle = DEFAULT_EXEC_THROTTLE(isp); if (icbp->icb_execthrottle < 1) { - isp_prt(isp, ISP_LOGERR, - "bad execution throttle of %d- using 16", - fcp->isp_execthrottle); + isp_prt(isp, ISP_LOGERR, "bad execution throttle of %d- using %d", 
DEFAULT_EXEC_THROTTLE(isp), ICB_DFLT_THROTTLE); icbp->icb_execthrottle = ICB_DFLT_THROTTLE; } - if (isp->isp_role & ISP_ROLE_TARGET) { + if (icbp->icb_fwoptions1 & ICB2400_OPT1_TGT_ENABLE) { /* * Get current resource count */ - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_GET_RESOURCE_COUNT; + MBSINIT(&mbs, MBOX_GET_RESOURCE_COUNT, MBLOGALL, 0); mbs.obits = 0x4cf; - mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } icbp->icb_xchgcnt = mbs.param[3]; } - icbp->icb_fwoptions1 = fcp->isp_fwoptions; icbp->icb_hardaddr = fcp->isp_loopid; - ownloopid = (isp->isp_confopts & ISP_CFG_OWNLOOPID) != 0; - if (icbp->icb_hardaddr > 125) { + if (icbp->icb_hardaddr >= LOCAL_LOOP_LIM) { icbp->icb_hardaddr = 0; - ownloopid = 0; } - if (ownloopid) { - icbp->icb_fwoptions1 |= ICB2400_OPT1_HARD_ADDRESS; - } + /* + * Force this on. + */ + icbp->icb_fwoptions1 |= ICB2400_OPT1_HARD_ADDRESS; + icbp->icb_fwoptions2 = fcp->isp_xfwoptions; - switch(isp->isp_confopts & ISP_CFG_PORT_PREF) { + switch (isp->isp_confopts & ISP_CFG_PORT_PREF) { +#if 0 case ISP_CFG_NPORT: + /* + * XXX: This causes the f/w to crash. 
+ */ icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK; icbp->icb_fwoptions2 |= ICB2400_OPT2_PTP_2_LOOP; break; +#endif case ISP_CFG_NPORT_ONLY: icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK; icbp->icb_fwoptions2 |= ICB2400_OPT2_PTP_ONLY; break; case ISP_CFG_LPORT_ONLY: icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK; icbp->icb_fwoptions2 |= ICB2400_OPT2_LOOP_ONLY; break; default: icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK; icbp->icb_fwoptions2 |= ICB2400_OPT2_LOOP_2_PTP; break; } + /* force this on for now */ + icbp->icb_fwoptions2 |= ICB2400_OPT2_ZIO; + switch (icbp->icb_fwoptions2 & ICB2400_OPT2_TIMER_MASK) { case ICB2400_OPT2_ZIO: case ICB2400_OPT2_ZIO1: icbp->icb_idelaytimer = 0; break; case 0: break; default: - isp_prt(isp, ISP_LOGWARN, "bad value %x in fwopt2 timer field", - icbp->icb_fwoptions2 & ICB2400_OPT2_TIMER_MASK); + isp_prt(isp, ISP_LOGWARN, "bad value %x in fwopt2 timer field", icbp->icb_fwoptions2 & ICB2400_OPT2_TIMER_MASK); icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TIMER_MASK; break; } + /* + * We don't support FCTAPE, so clear it. 
+ */ + icbp->icb_fwoptions2 &= ~ICB2400_OPT2_FCTAPE; + icbp->icb_fwoptions3 = fcp->isp_zfwoptions; icbp->icb_fwoptions3 &= ~ICB2400_OPT3_RATE_AUTO; if (isp->isp_confopts & ISP_CFG_ONEGB) { icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_ONEGB; } else if (isp->isp_confopts & ISP_CFG_TWOGB) { icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_TWOGB; } else if (isp->isp_confopts & ISP_CFG_FOURGB) { icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_FOURGB; } else { icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_AUTO; } if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) { icbp->icb_fwoptions3 |= ICB2400_OPT3_SOFTID; } icbp->icb_logintime = ICB_LOGIN_TOV; - nwwn = ISP_NODEWWN(isp); - pwwn = ISP_PORTWWN(isp); - - if (nwwn && pwwn) { + if (fcp->isp_wwnn && fcp->isp_wwpn && (fcp->isp_wwnn >> 60) != 2) { icbp->icb_fwoptions1 |= ICB2400_OPT1_BOTH_WWNS; - MAKE_NODE_NAME_FROM_WWN(icbp->icb_nodename, nwwn); - MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, pwwn); - isp_prt(isp, ISP_LOGDEBUG1, - "Setting ICB Node 0x%08x%08x Port 0x%08x%08x", - ((uint32_t) (nwwn >> 32)), - ((uint32_t) (nwwn & 0xffffffff)), - ((uint32_t) (pwwn >> 32)), - ((uint32_t) (pwwn & 0xffffffff))); - } else if (pwwn) { + MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, fcp->isp_wwpn); + MAKE_NODE_NAME_FROM_WWN(icbp->icb_nodename, fcp->isp_wwnn); + isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Node 0x%08x%08x Port 0x%08x%08x", ((uint32_t) (fcp->isp_wwnn >> 32)), ((uint32_t) (fcp->isp_wwnn)), + ((uint32_t) (fcp->isp_wwpn >> 32)), ((uint32_t) (fcp->isp_wwpn))); + } else if (fcp->isp_wwpn) { icbp->icb_fwoptions1 &= ~ICB2400_OPT1_BOTH_WWNS; - MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, pwwn); - isp_prt(isp, ISP_LOGDEBUG1, - "Setting ICB Port 0x%08x%08x", - ((uint32_t) (pwwn >> 32)), - ((uint32_t) (pwwn & 0xffffffff))); + MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, fcp->isp_wwpn); + isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Node to be same as Port 0x%08x%08x", ((uint32_t) (fcp->isp_wwpn >> 32)), ((uint32_t) (fcp->isp_wwpn))); } else { isp_prt(isp, 
ISP_LOGERR, "No valid WWNs to use"); return; } icbp->icb_retry_count = fcp->isp_retry_count; icbp->icb_rqstqlen = RQUEST_QUEUE_LEN(isp); if (icbp->icb_rqstqlen < 8) { - isp_prt(isp, ISP_LOGERR, "bad request queue length %d", - icbp->icb_rqstqlen); + isp_prt(isp, ISP_LOGERR, "bad request queue length %d", icbp->icb_rqstqlen); return; } icbp->icb_rsltqlen = RESULT_QUEUE_LEN(isp); if (icbp->icb_rsltqlen < 8) { isp_prt(isp, ISP_LOGERR, "bad result queue length %d", icbp->icb_rsltqlen); return; } icbp->icb_rqstaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_rquest_dma); icbp->icb_respaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_result_dma); #ifdef ISP_TARGET_MODE - if (isp->isp_role & ISP_ROLE_TARGET) { - icbp->icb_atioqlen = RESULT_QUEUE_LEN(isp); - if (icbp->icb_atioqlen < 8) { - isp_prt(isp, ISP_LOGERR, "bad ATIO queue length %d", - icbp->icb_atioqlen); - return; - } - icbp->icb_atioqaddr[RQRSP_ADDR0015] = - DMA_WD0(isp->isp_atioq_dma); - icbp->icb_atioqaddr[RQRSP_ADDR1631] = - DMA_WD1(isp->isp_atioq_dma); - icbp->icb_atioqaddr[RQRSP_ADDR3247] = - DMA_WD2(isp->isp_atioq_dma); - icbp->icb_atioqaddr[RQRSP_ADDR4863] = - DMA_WD3(isp->isp_atioq_dma); - isp_prt(isp, ISP_LOGDEBUG0, - "isp_fibre_init_2400: atioq %04x%04x%04x%04x", - DMA_WD3(isp->isp_atioq_dma), DMA_WD2(isp->isp_atioq_dma), - DMA_WD1(isp->isp_atioq_dma), DMA_WD0(isp->isp_atioq_dma)); + /* unconditionally set up the ATIO queue if we support target mode */ + icbp->icb_atioqlen = RESULT_QUEUE_LEN(isp); + if (icbp->icb_atioqlen < 8) { + isp_prt(isp, ISP_LOGERR, "bad ATIO queue length %d", icbp->icb_atioqlen); + return; } + 
icbp->icb_atioqaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_atioq_dma); + icbp->icb_atioqaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_atioq_dma); + icbp->icb_atioqaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_atioq_dma); + icbp->icb_atioqaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_atioq_dma); + isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init_2400: atioq %04x%04x%04x%04x", DMA_WD3(isp->isp_atioq_dma), DMA_WD2(isp->isp_atioq_dma), + DMA_WD1(isp->isp_atioq_dma), DMA_WD0(isp->isp_atioq_dma)); #endif - isp_prt(isp, ISP_LOGDEBUG0, - "isp_fibre_init_2400: fwopt1 0x%x fwopt2 0x%x fwopt3 0x%x", - icbp->icb_fwoptions1, icbp->icb_fwoptions2, icbp->icb_fwoptions3); + isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init_2400: fwopt1 0x%x fwopt2 0x%x fwopt3 0x%x", icbp->icb_fwoptions1, icbp->icb_fwoptions2, icbp->icb_fwoptions3); - isp_prt(isp, ISP_LOGDEBUG0, - "isp_fibre_init_2400: rqst %04x%04x%04x%04x rsp %04x%04x%04x%04x", - DMA_WD3(isp->isp_rquest_dma), DMA_WD2(isp->isp_rquest_dma), - DMA_WD1(isp->isp_rquest_dma), DMA_WD0(isp->isp_rquest_dma), - DMA_WD3(isp->isp_result_dma), DMA_WD2(isp->isp_result_dma), + isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init_2400: rqst %04x%04x%04x%04x rsp %04x%04x%04x%04x", DMA_WD3(isp->isp_rquest_dma), DMA_WD2(isp->isp_rquest_dma), + DMA_WD1(isp->isp_rquest_dma), DMA_WD0(isp->isp_rquest_dma), DMA_WD3(isp->isp_result_dma), DMA_WD2(isp->isp_result_dma), DMA_WD1(isp->isp_result_dma), DMA_WD0(isp->isp_result_dma)); if (isp->isp_dblev & ISP_LOGDEBUG1) { - isp_print_bytes(isp, "isp_fibre_init_2400", sizeof (*icbp), - icbp); + isp_print_bytes(isp, "isp_fibre_init_2400", sizeof (*icbp), icbp); } - FC_SCRATCH_ACQUIRE(isp); + + if (FC_SCRATCH_ACQUIRE(isp, 0)) { + isp_prt(isp, ISP_LOGERR, sacq); + return; + } + ISP_MEMZERO(fcp->isp_scratch, ISP_FC_SCRLEN); isp_put_icb_2400(isp, icbp, fcp->isp_scratch); + /* + * Now fill in information about any additional channels + */ + if (isp->isp_nchan > 1) { + isp_icb_2400_vpinfo_t vpinfo, *vdst; + vp_port_info_t pi, *pdst; + size_t amt = 0; + uint8_t 
*off; + vpinfo.vp_count = isp->isp_nchan - 1; + vpinfo.vp_global_options = 0; + off = fcp->isp_scratch; + off += ICB2400_VPINFO_OFF; + vdst = (isp_icb_2400_vpinfo_t *) off; + isp_put_icb_2400_vpinfo(isp, &vpinfo, vdst); + amt = ICB2400_VPINFO_OFF + sizeof (isp_icb_2400_vpinfo_t); + for (chan = 1; chan < isp->isp_nchan; chan++) { + fcparam *fcp2; + + ISP_MEMZERO(&pi, sizeof (pi)); + fcp2 = FCPARAM(isp, chan); + if (fcp2->role != ISP_ROLE_NONE) { + pi.vp_port_options = ICB2400_VPOPT_ENABLED; + if (fcp2->role & ISP_ROLE_INITIATOR) { + pi.vp_port_options |= ICB2400_VPOPT_INI_ENABLE; + } + if ((fcp2->role & ISP_ROLE_TARGET) == 0) { + pi.vp_port_options |= ICB2400_VPOPT_TGT_DISABLE; + } + MAKE_NODE_NAME_FROM_WWN(pi.vp_port_portname, fcp2->isp_wwpn); + MAKE_NODE_NAME_FROM_WWN(pi.vp_port_nodename, fcp2->isp_wwnn); + } + off = fcp->isp_scratch; + off += ICB2400_VPINFO_PORT_OFF(chan); + pdst = (vp_port_info_t *) off; + isp_put_vp_port_info(isp, &pi, pdst); + amt += ICB2400_VPOPT_WRITE_SIZE; + } + } + /* * Init the firmware */ - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_INIT_FIRMWARE; + MBSINIT(&mbs, 0, MBLOGALL, 30000000); + if (isp->isp_nchan > 1) { + mbs.param[0] = MBOX_INIT_FIRMWARE_MULTI_ID; + } else { + mbs.param[0] = MBOX_INIT_FIRMWARE; + } mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); - mbs.logval = MBLOGALL; - mbs.timeout = 30 * 1000000; - isp_prt(isp, ISP_LOGDEBUG0, "INIT F/W from %04x%04x%04x%04x", - DMA_WD3(fcp->isp_scdma), DMA_WD2(fcp->isp_scdma), - DMA_WD1(fcp->isp_scdma), DMA_WD0(fcp->isp_scdma)); + isp_prt(isp, ISP_LOGDEBUG0, "INIT F/W from %04x%04x%04x%04x", DMA_WD3(fcp->isp_scdma), DMA_WD2(fcp->isp_scdma), DMA_WD1(fcp->isp_scdma), DMA_WD0(fcp->isp_scdma)); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, sizeof (*icbp)); isp_mboxcmd(isp, &mbs); - FC_SCRATCH_RELEASE(isp); + FC_SCRATCH_RELEASE(isp, 0); + if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { 
return; } isp->isp_reqidx = 0; isp->isp_reqodx = 0; isp->isp_residx = 0; /* * Whatever happens, we're now committed to being here. */ isp->isp_state = ISP_INITSTATE; } static void -isp_mark_portdb(ispsoftc_t *isp, int onprobation) +isp_mark_portdb(ispsoftc_t *isp, int chan, int disposition) { - fcparam *fcp = (fcparam *) isp->isp_param; + fcparam *fcp = FCPARAM(isp, chan); int i; + if (chan < 0 || chan >= isp->isp_nchan) { + isp_prt(isp, ISP_LOGWARN, "isp_mark_portdb: bad channel %d", chan); + return; + } for (i = 0; i < MAX_FC_TARG; i++) { - if (onprobation == 0) { - MEMZERO(&fcp->portdb[i], sizeof (fcportdb_t)); + if (fcp->portdb[i].target_mode) { + if (disposition < 0) { + isp_prt(isp, ISP_LOGTINFO, "isp_mark_portdb: Chan %d zeroing handle 0x" "%04x port 0x%06x", chan, + fcp->portdb[i].handle, fcp->portdb[i].portid); + ISP_MEMZERO(&fcp->portdb[i], sizeof (fcportdb_t)); + } + continue; + } + if (disposition == 0) { + ISP_MEMZERO(&fcp->portdb[i], sizeof (fcportdb_t)); } else { switch (fcp->portdb[i].state) { case FC_PORTDB_STATE_CHANGED: case FC_PORTDB_STATE_PENDING_VALID: case FC_PORTDB_STATE_VALID: case FC_PORTDB_STATE_PROBATIONAL: - fcp->portdb[i].state = - FC_PORTDB_STATE_PROBATIONAL; + fcp->portdb[i].state = FC_PORTDB_STATE_PROBATIONAL; break; case FC_PORTDB_STATE_ZOMBIE: break; case FC_PORTDB_STATE_NIL: default: - MEMZERO(&fcp->portdb[i], sizeof (fcportdb_t)); - fcp->portdb[i].state = - FC_PORTDB_STATE_NIL; + ISP_MEMZERO(&fcp->portdb[i], sizeof (fcportdb_t)); + fcp->portdb[i].state = FC_PORTDB_STATE_NIL; break; } } } } /* * Perform an IOCB PLOGI or LOGO via EXECUTE IOCB A64 for 24XX cards * or via FABRIC LOGIN/FABRIC LOGOUT for other cards. 
*/ static int -isp_plogx(ispsoftc_t *isp, uint16_t handle, uint32_t portid, int flags, int gs) +isp_plogx(ispsoftc_t *isp, int chan, uint16_t handle, uint32_t portid, + int flags, int gs) { mbreg_t mbs; uint8_t q[QENTRY_LEN]; isp_plogx_t *plp; + fcparam *fcp; uint8_t *scp; uint32_t sst, parm1; - int rval; + int rval, lev; + const char *msg; + char buf[64]; if (!IS_24XX(isp)) { int action = flags & PLOGX_FLG_CMD_MASK; if (action == PLOGX_FLG_CMD_PLOGI) { return (isp_port_login(isp, handle, portid)); } else if (action == PLOGX_FLG_CMD_LOGO) { return (isp_port_logout(isp, handle, portid)); } else { return (MBOX_INVALID_COMMAND); } } - MEMZERO(q, QENTRY_LEN); + ISP_MEMZERO(q, QENTRY_LEN); plp = (isp_plogx_t *) q; plp->plogx_header.rqs_entry_count = 1; plp->plogx_header.rqs_entry_type = RQSTYPE_LOGIN; plp->plogx_handle = 0xffffffff; plp->plogx_nphdl = handle; + plp->plogx_vphdl = chan; plp->plogx_portlo = portid; plp->plogx_rspsz_porthi = (portid >> 16) & 0xff; plp->plogx_flags = flags; if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "IOCB LOGX", QENTRY_LEN, plp); } if (gs == 0) { - FC_SCRATCH_ACQUIRE(isp); + if (FC_SCRATCH_ACQUIRE(isp, chan)) { + isp_prt(isp, ISP_LOGERR, sacq); + return (-1); + } } - scp = FCPARAM(isp)->isp_scratch; + fcp = FCPARAM(isp, chan); + scp = fcp->isp_scratch; isp_put_plogx(isp, plp, (isp_plogx_t *) scp); - - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_EXEC_COMMAND_IOCB_A64; + MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 500000); mbs.param[1] = QENTRY_LEN; - mbs.param[2] = DMA_WD1(FCPARAM(isp)->isp_scdma); - mbs.param[3] = DMA_WD0(FCPARAM(isp)->isp_scdma); - mbs.param[6] = DMA_WD3(FCPARAM(isp)->isp_scdma); - mbs.param[7] = DMA_WD2(FCPARAM(isp)->isp_scdma); - mbs.timeout = 500000; - mbs.logval = MBLOGALL; + mbs.param[2] = DMA_WD1(fcp->isp_scdma); + mbs.param[3] = DMA_WD0(fcp->isp_scdma); + mbs.param[6] = DMA_WD3(fcp->isp_scdma); + mbs.param[7] = DMA_WD2(fcp->isp_scdma); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, QENTRY_LEN); 
isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { rval = mbs.param[0]; goto out; } MEMORYBARRIER(isp, SYNC_SFORCPU, QENTRY_LEN, QENTRY_LEN); scp += QENTRY_LEN; isp_get_plogx(isp, (isp_plogx_t *) scp, plp); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "IOCB LOGX response", QENTRY_LEN, plp); } if (plp->plogx_status == PLOGX_STATUS_OK) { rval = 0; goto out; } else if (plp->plogx_status != PLOGX_STATUS_IOCBERR) { - isp_prt(isp, ISP_LOGWARN, "status 0x%x on port login IOCB", - plp->plogx_status); + isp_prt(isp, ISP_LOGWARN, + "status 0x%x on port login IOCB chanel %d", + plp->plogx_status, chan); rval = -1; goto out; } sst = plp->plogx_ioparm[0].lo16 | (plp->plogx_ioparm[0].hi16 << 16); parm1 = plp->plogx_ioparm[1].lo16 | (plp->plogx_ioparm[1].hi16 << 16); rval = -1; + lev = ISP_LOGERR; + msg = NULL; switch (sst) { case PLOGX_IOCBERR_NOLINK: - isp_prt(isp, ISP_LOGERR, "PLOGX failed- no link"); + msg = "no link"; break; case PLOGX_IOCBERR_NOIOCB: - isp_prt(isp, ISP_LOGERR, "PLOGX failed- no IOCB buffer"); + msg = "no IOCB buffer"; break; case PLOGX_IOCBERR_NOXGHG: - isp_prt(isp, ISP_LOGERR, - "PLOGX failed- no Exchange Control Block"); + msg = "no Exchange Control Block"; break; case PLOGX_IOCBERR_FAILED: - isp_prt(isp, ISP_LOGERR, - "PLOGX(0x%x) of Port 0x%06x failed: reason 0x%x (last LOGIN" - " state 0x%x)", flags, portid, parm1 & 0xff, - (parm1 >> 8) & 0xff); + ISP_SNPRINTF(buf, sizeof (buf), + "reason 0x%x (last LOGIN state 0x%x)", + parm1 & 0xff, (parm1 >> 8) & 0xff); + msg = buf; break; case PLOGX_IOCBERR_NOFABRIC: - isp_prt(isp, ISP_LOGERR, "PLOGX failed- no fabric"); + msg = "no fabric"; break; case PLOGX_IOCBERR_NOTREADY: - isp_prt(isp, ISP_LOGERR, "PLOGX failed- f/w not ready"); + msg = "firmware not ready"; break; case PLOGX_IOCBERR_NOLOGIN: - isp_prt(isp, ISP_LOGERR, - "PLOGX failed- not logged in (last LOGIN state 0x%x)", + ISP_SNPRINTF(buf, sizeof (buf), "not logged in (last state 0x%x)", parm1); + msg = buf; rval = 
MBOX_NOT_LOGGED_IN; break; case PLOGX_IOCBERR_REJECT: - isp_prt(isp, ISP_LOGERR, "PLOGX failed: LS_RJT = 0x%x", parm1); + ISP_SNPRINTF(buf, sizeof (buf), "LS_RJT = 0x%x", parm1); + msg = buf; break; case PLOGX_IOCBERR_NOPCB: - isp_prt(isp, ISP_LOGERR, "PLOGX failed- no PCB allocated"); + msg = "no PCB allocated"; break; case PLOGX_IOCBERR_EINVAL: - isp_prt(isp, ISP_LOGERR, - "PLOGX failed: invalid parameter at offset 0x%x", parm1); + ISP_SNPRINTF(buf, sizeof (buf), "invalid parameter at offset 0x%x", + parm1); + msg = buf; break; case PLOGX_IOCBERR_PORTUSED: - isp_prt(isp, ISP_LOGDEBUG0, - "portid 0x%x already logged in with N-port handle 0x%x", - portid, parm1); - rval = MBOX_PORT_ID_USED | (handle << 16); + lev = ISP_LOGSANCFG|ISP_LOGDEBUG0; + ISP_SNPRINTF(buf, sizeof (buf), + "already logged in with N-Port handle 0x%x", parm1); + msg = buf; + rval = MBOX_PORT_ID_USED | (parm1 << 16); break; case PLOGX_IOCBERR_HNDLUSED: - isp_prt(isp, ISP_LOGDEBUG0, - "N-port handle 0x%x already used for portid 0x%x", - handle, parm1); + lev = ISP_LOGSANCFG|ISP_LOGDEBUG0; + ISP_SNPRINTF(buf, sizeof (buf), + "handle already used for PortID 0x%06x", parm1); + msg = buf; rval = MBOX_LOOP_ID_USED; break; case PLOGX_IOCBERR_NOHANDLE: - isp_prt(isp, ISP_LOGERR, "PLOGX failed- no handle allocated"); + msg = "no handle allocated"; break; case PLOGX_IOCBERR_NOFLOGI: - isp_prt(isp, ISP_LOGERR, "PLOGX failed- no FLOGI_ACC"); + msg = "no FLOGI_ACC"; break; default: - isp_prt(isp, ISP_LOGERR, "status %x from %x", plp->plogx_status, - flags); - rval = -1; + ISP_SNPRINTF(buf, sizeof (buf), "status %x from %x", + plp->plogx_status, flags); + msg = buf; break; } + if (msg) { + isp_prt(isp, ISP_LOGERR, + "Chan %d PLOGX PortID 0x%06x to N-Port handle 0x%x: %s", + chan, portid, handle, msg); + } out: if (gs == 0) { - FC_SCRATCH_RELEASE(isp); + FC_SCRATCH_RELEASE(isp, chan); } return (rval); } static int isp_port_login(ispsoftc_t *isp, uint16_t handle, uint32_t portid) { mbreg_t mbs; - MEMZERO(&mbs, 
sizeof (mbs)); - mbs.param[0] = MBOX_FABRIC_LOGIN; - if (FCPARAM(isp)->isp_2klogin) { + MBSINIT(&mbs, MBOX_FABRIC_LOGIN, MBLOGNONE, 500000); + if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = handle; mbs.ibits = (1 << 10); } else { mbs.param[1] = handle << 8; } mbs.param[2] = portid >> 16; mbs.param[3] = portid; mbs.logval = MBLOGNONE; mbs.timeout = 500000; isp_mboxcmd(isp, &mbs); switch (mbs.param[0]) { case MBOX_PORT_ID_USED: isp_prt(isp, ISP_LOGDEBUG0, - "isp_plogi_old: portid 0x%06x already logged in as %u", + "isp_port_login: portid 0x%06x already logged in as %u", portid, mbs.param[1]); return (MBOX_PORT_ID_USED | (mbs.param[1] << 16)); case MBOX_LOOP_ID_USED: isp_prt(isp, ISP_LOGDEBUG0, - "isp_plogi_old: handle %u in use for port id 0x%02xXXXX", + "isp_port_login: handle 0x%04x in use for port id 0x%02xXXXX", handle, mbs.param[1] & 0xff); return (MBOX_LOOP_ID_USED); case MBOX_COMMAND_COMPLETE: return (0); case MBOX_COMMAND_ERROR: isp_prt(isp, ISP_LOGINFO, - "isp_plogi_old: error 0x%x in PLOGI to port 0x%06x", + "isp_port_login: error 0x%x in PLOGI to port 0x%06x", mbs.param[1], portid); return (MBOX_COMMAND_ERROR); case MBOX_ALL_IDS_USED: isp_prt(isp, ISP_LOGINFO, - "isp_plogi_old: all IDs used for fabric login"); + "isp_port_login: all IDs used for fabric login"); return (MBOX_ALL_IDS_USED); default: isp_prt(isp, ISP_LOGINFO, - "isp_plogi_old: error 0x%x on port login of 0x%06x@0x%0x", + "isp_port_login: error 0x%x on port login of 0x%06x@0x%0x", mbs.param[0], portid, handle); return (mbs.param[0]); } } static int isp_port_logout(ispsoftc_t *isp, uint16_t handle, uint32_t portid) { mbreg_t mbs; - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_FABRIC_LOGOUT; - if (FCPARAM(isp)->isp_2klogin) { + MBSINIT(&mbs, MBOX_FABRIC_LOGOUT, MBLOGNONE, 500000); + if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = handle; mbs.ibits = (1 << 10); } else { mbs.param[1] = handle << 8; } - mbs.logval = MBLOGNONE; - mbs.timeout = 100000; isp_mboxcmd(isp, &mbs); return (mbs.param[0] == 
MBOX_COMMAND_COMPLETE? 0 : mbs.param[0]); } static int -isp_getpdb(ispsoftc_t *isp, uint16_t id, isp_pdb_t *pdb, int dolock) +isp_getpdb(ispsoftc_t *isp, int chan, uint16_t id, isp_pdb_t *pdb, int dolock) { - fcparam *fcp = (fcparam *) isp->isp_param; + fcparam *fcp = FCPARAM(isp, chan); mbreg_t mbs; union { isp_pdb_21xx_t fred; isp_pdb_24xx_t bill; } un; - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_GET_PORT_DB; + MBSINIT(&mbs, MBOX_GET_PORT_DB, MBLOGALL & ~MBOX_COMMAND_PARAM_ERROR, 250000); if (IS_24XX(isp)) { - mbs.ibits = 0x3ff; + mbs.ibits = (1 << 9)|(1 << 10); mbs.param[1] = id; - } else if (FCPARAM(isp)->isp_2klogin) { + mbs.param[9] = chan; + } else if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = id; - mbs.ibits = (1 << 10); } else { mbs.param[1] = id << 8; } mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); - mbs.timeout = 250000; - mbs.logval = MBLOGALL & ~MBOX_COMMAND_PARAM_ERROR; if (dolock) { - FC_SCRATCH_ACQUIRE(isp); + if (FC_SCRATCH_ACQUIRE(isp, chan)) { + isp_prt(isp, ISP_LOGERR, sacq); + return (-1); + } } MEMORYBARRIER(isp, SYNC_SFORDEV, 0, sizeof (un)); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { if (dolock) { - FC_SCRATCH_RELEASE(isp); + FC_SCRATCH_RELEASE(isp, chan); } - return (-1); + return (mbs.param[0]); } if (IS_24XX(isp)) { isp_get_pdb_24xx(isp, fcp->isp_scratch, &un.bill); pdb->handle = un.bill.pdb_handle; pdb->s3_role = un.bill.pdb_prli_svc3; pdb->portid = BITS2WORD_24XX(un.bill.pdb_portid_bits); - MEMCPY(pdb->portname, un.bill.pdb_portname, 8); - MEMCPY(pdb->nodename, un.bill.pdb_nodename, 8); + ISP_MEMCPY(pdb->portname, un.bill.pdb_portname, 8); + ISP_MEMCPY(pdb->nodename, un.bill.pdb_nodename, 8); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, + "Chan %d Port 0x%06x flags 0x%x curstate %x", + chan, pdb->portid, un.bill.pdb_flags, + un.bill.pdb_curstate); + if (un.bill.pdb_curstate < 
PDB2400_STATE_PLOGI_DONE || + un.bill.pdb_curstate > PDB2400_STATE_LOGGED_IN) { + mbs.param[0] = MBOX_NOT_LOGGED_IN; + if (dolock) { + FC_SCRATCH_RELEASE(isp, chan); + } + return (mbs.param[0]); + } } else { isp_get_pdb_21xx(isp, fcp->isp_scratch, &un.fred); pdb->handle = un.fred.pdb_loopid; pdb->s3_role = un.fred.pdb_prli_svc3; pdb->portid = BITS2WORD(un.fred.pdb_portid_bits); - MEMCPY(pdb->portname, un.fred.pdb_portname, 8); - MEMCPY(pdb->nodename, un.fred.pdb_nodename, 8); + ISP_MEMCPY(pdb->portname, un.fred.pdb_portname, 8); + ISP_MEMCPY(pdb->nodename, un.fred.pdb_nodename, 8); } if (dolock) { - FC_SCRATCH_RELEASE(isp); + FC_SCRATCH_RELEASE(isp, chan); } return (0); } +static void +isp_dump_chip_portdb(ispsoftc_t *isp, int chan, int dolock) +{ + isp_pdb_t pdb; + int lim, loopid; + + if (ISP_CAP_2KLOGIN(isp)) { + lim = NPH_MAX_2K; + } else { + lim = NPH_MAX; + } + for (loopid = 0; loopid != lim; loopid++) { + if (isp_getpdb(isp, chan, loopid, &pdb, dolock)) { + continue; + } + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGINFO, "Chan %d Loopid 0x%04x " + "PortID 0x%06x WWPN 0x%02x%02x%02x%02x%02x%02x%02x%02x", + chan, loopid, pdb.portid, pdb.portname[0], pdb.portname[1], + pdb.portname[2], pdb.portname[3], pdb.portname[4], + pdb.portname[5], pdb.portname[6], pdb.portname[7]); + } +} + static uint64_t -isp_get_portname(ispsoftc_t *isp, int loopid, int nodename) +isp_get_wwn(ispsoftc_t *isp, int chan, int loopid, int nodename) { - uint64_t wwn = (uint64_t) -1; + uint64_t wwn = INI_NONE; + fcparam *fcp = FCPARAM(isp, chan); mbreg_t mbs; - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_GET_PORT_NAME; - if (FCPARAM(isp)->isp_2klogin || IS_24XX(isp)) { + if (fcp->isp_fwstate < FW_READY || + fcp->isp_loopstate < LOOP_PDB_RCVD) { + return (wwn); + } + MBSINIT(&mbs, MBOX_GET_PORT_NAME, MBLOGALL & ~MBOX_COMMAND_PARAM_ERROR, 500000); + if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = loopid; mbs.ibits = (1 << 10); if (nodename) { mbs.param[10] = 1; } + if (ISP_CAP_MULTI_ID(isp)) { + 
mbs.ibits |= (1 << 9); + mbs.param[9] = chan; + } } else { mbs.param[1] = loopid << 8; if (nodename) { mbs.param[1] |= 1; } } - mbs.logval = MBLOGALL & ~MBOX_COMMAND_PARAM_ERROR; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return (wwn); } if (IS_24XX(isp)) { wwn = - (((uint64_t)(mbs.param[2] >> 8)) << 56) | + (((uint64_t)(mbs.param[2] >> 8)) << 56) | (((uint64_t)(mbs.param[2] & 0xff)) << 48) | (((uint64_t)(mbs.param[3] >> 8)) << 40) | (((uint64_t)(mbs.param[3] & 0xff)) << 32) | (((uint64_t)(mbs.param[6] >> 8)) << 24) | (((uint64_t)(mbs.param[6] & 0xff)) << 16) | (((uint64_t)(mbs.param[7] >> 8)) << 8) | (((uint64_t)(mbs.param[7] & 0xff))); } else { wwn = (((uint64_t)(mbs.param[2] & 0xff)) << 56) | (((uint64_t)(mbs.param[2] >> 8)) << 48) | (((uint64_t)(mbs.param[3] & 0xff)) << 40) | (((uint64_t)(mbs.param[3] >> 8)) << 32) | (((uint64_t)(mbs.param[6] & 0xff)) << 24) | (((uint64_t)(mbs.param[6] >> 8)) << 16) | (((uint64_t)(mbs.param[7] & 0xff)) << 8) | (((uint64_t)(mbs.param[7] >> 8))); } return (wwn); } /* * Make sure we have good FC link. */ static int -isp_fclink_test(ispsoftc_t *isp, int usdelay) +isp_fclink_test(ispsoftc_t *isp, int chan, int usdelay) { - static const char *toponames[] = { - "Private Loop", - "FL Port", - "N-Port to N-Port", - "F Port", - "F Port (no FLOGI_ACC response)" - }; mbreg_t mbs; - int count, check_for_fabric; + int count, check_for_fabric, r; uint8_t lwfs; int loopid; fcparam *fcp; fcportdb_t *lp; isp_pdb_t pdb; - fcp = isp->isp_param; + fcp = FCPARAM(isp, chan); - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "FC Link Test Entry"); - ISP_MARK_PORTDB(isp, 1); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Chan %d FC Link Test Entry", chan); + ISP_MARK_PORTDB(isp, chan, 1); /* * Wait up to N microseconds for F/W to go to a ready state. 
*/ lwfs = FW_CONFIG_WAIT; count = 0; while (count < usdelay) { uint64_t enano; uint32_t wrk; NANOTIME_T hra, hrb; GET_NANOTIME(&hra); - isp_fw_state(isp); + isp_fw_state(isp, chan); if (lwfs != fcp->isp_fwstate) { - isp_prt(isp, ISP_LOGCONFIG|ISP_LOGSANCFG, - "Firmware State <%s->%s>", - ispfc_fw_statename((int)lwfs), - ispfc_fw_statename((int)fcp->isp_fwstate)); + isp_prt(isp, ISP_LOGCONFIG|ISP_LOGSANCFG, "Chan %d Firmware State <%s->%s>", chan, isp_fc_fw_statename((int)lwfs), isp_fc_fw_statename((int)fcp->isp_fwstate)); lwfs = fcp->isp_fwstate; } if (fcp->isp_fwstate == FW_READY) { break; } GET_NANOTIME(&hrb); /* * Get the elapsed time in nanoseconds. * Always guaranteed to be non-zero. */ enano = NANOTIME_SUB(&hrb, &hra); - isp_prt(isp, ISP_LOGDEBUG1, - "usec%d: 0x%lx->0x%lx enano 0x%x%08x", - count, (long) GET_NANOSEC(&hra), (long) GET_NANOSEC(&hrb), - (uint32_t)(enano >> 32), (uint32_t)(enano & 0xffffffff)); + isp_prt(isp, ISP_LOGDEBUG1, "usec%d: 0x%lx->0x%lx enano 0x%x%08x", count, (long) GET_NANOSEC(&hra), (long) GET_NANOSEC(&hrb), (uint32_t)(enano >> 32), (uint32_t)(enano)); /* * If the elapsed time is less than 1 millisecond, * delay a period of time up to that millisecond of * waiting. * * This peculiar code is an attempt to try and avoid * invoking uint64_t math support functions for some * platforms where linkage is a problem. */ if (enano < (1000 * 1000)) { count += 1000; enano = (1000 * 1000) - enano; while (enano > (uint64_t) 4000000000U) { - USEC_SLEEP(isp, 4000000); + ISP_SLEEP(isp, 4000000); enano -= (uint64_t) 4000000000U; } wrk = enano; wrk /= 1000; - USEC_SLEEP(isp, wrk); + ISP_SLEEP(isp, wrk); } else { while (enano > (uint64_t) 4000000000U) { count += 4000000; enano -= (uint64_t) 4000000000U; } wrk = enano; count += (wrk / 1000); } } + + /* * If we haven't gone to 'ready' state, return. 
*/ if (fcp->isp_fwstate != FW_READY) { - isp_prt(isp, ISP_LOGSANCFG, - "isp_fclink_test: not at FW_READY state"); + isp_prt(isp, ISP_LOGSANCFG, "%s: chan %d not at FW_READY state", __func__, chan); return (-1); } /* * Get our Loop ID and Port ID. */ - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_GET_LOOP_ID; - mbs.logval = MBLOGALL; + MBSINIT(&mbs, MBOX_GET_LOOP_ID, MBLOGALL, 0); + if (ISP_CAP_MULTI_ID(isp)) { + mbs.param[9] = chan; + mbs.ibits = (1 << 9); + mbs.obits = (1 << 7); + } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return (-1); } - if (FCPARAM(isp)->isp_2klogin) { + if (ISP_CAP_2KLOGIN(isp)) { fcp->isp_loopid = mbs.param[1]; } else { fcp->isp_loopid = mbs.param[1] & 0xff; } if (IS_2100(isp)) { fcp->isp_topo = TOPO_NL_PORT; } else { int topo = (int) mbs.param[6]; if (topo < TOPO_NL_PORT || topo > TOPO_PTP_STUB) { topo = TOPO_PTP_STUB; } fcp->isp_topo = topo; } fcp->isp_portid = mbs.param[2] | (mbs.param[3] << 16); if (IS_2100(isp)) { /* * Don't bother with fabric if we are using really old * 2100 firmware. It's just not worth it. */ if (ISP_FW_NEWER_THAN(isp, 1, 15, 37)) { check_for_fabric = 1; } else { check_for_fabric = 0; } - } else if (fcp->isp_topo == TOPO_FL_PORT || - fcp->isp_topo == TOPO_F_PORT) { + } else if (fcp->isp_topo == TOPO_FL_PORT || fcp->isp_topo == TOPO_F_PORT) { check_for_fabric = 1; } else { check_for_fabric = 0; } - if (IS_24XX(isp)) { + /* + * Check to make sure we got a valid loopid + * The 24XX seems to mess this up for multiple channels. 
+ */ + if (fcp->isp_topo == TOPO_FL_PORT || fcp->isp_topo == TOPO_NL_PORT) { + uint8_t alpa = fcp->isp_portid; + + if (alpa == 0) { + /* "Cannot Happen" */ + isp_prt(isp, ISP_LOGWARN, "Zero AL_PA for Loop Topology?"); + } else { + int i; + for (i = 0; alpa_map[i]; i++) { + if (alpa_map[i] == alpa) { + break; + } + } + if (alpa_map[i] && fcp->isp_loopid != i) { + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Chan %d deriving loopid %d from AL_PA map (AL_PA 0x%x) and ignoring returned value %d (AL_PA 0x%x)", chan, i, alpa_map[i], fcp->isp_loopid, alpa); + fcp->isp_loopid = i; + } + } + } + + + if (IS_24XX(isp)) { /* XXX SHOULDN'T THIS BE FOR 2K F/W? XXX */ loopid = NPH_FL_ID; } else { loopid = FL_ID; } - - if (check_for_fabric && isp_getpdb(isp, loopid, &pdb, 1) == 0) { - int r; + if (check_for_fabric) { + r = isp_getpdb(isp, chan, loopid, &pdb, 1); + if (r && (fcp->isp_topo == TOPO_F_PORT || fcp->isp_topo == TOPO_FL_PORT)) { + isp_prt(isp, ISP_LOGWARN, "fabric topology but cannot get info about fabric controller (0x%x)", r); + fcp->isp_topo = TOPO_PTP_STUB; + } + } else { + r = -1; + } + if (r == 0) { if (IS_2100(isp)) { fcp->isp_topo = TOPO_FL_PORT; } if (pdb.portid == 0) { /* * Crock. */ fcp->isp_topo = TOPO_NL_PORT; goto not_on_fabric; } /* * Save the Fabric controller's port database entry. 
*/ lp = &fcp->portdb[FL_ID]; lp->state = FC_PORTDB_STATE_PENDING_VALID; MAKE_WWN_FROM_NODE_NAME(lp->node_wwn, pdb.nodename); MAKE_WWN_FROM_NODE_NAME(lp->port_wwn, pdb.portname); lp->roles = (pdb.s3_role & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT; lp->portid = pdb.portid; lp->handle = pdb.handle; lp->new_portid = lp->portid; lp->new_roles = lp->roles; if (IS_24XX(isp)) { - r = isp_register_fc4_type_24xx(isp); + fcp->inorder = (mbs.param[7] & ISP24XX_INORDER) != 0; + if (ISP_FW_NEWER_THAN(isp, 4, 0, 27)) { + fcp->npiv_fabric = (mbs.param[7] & ISP24XX_NPIV_SAN) != 0; + if (fcp->npiv_fabric) { + isp_prt(isp, ISP_LOGCONFIG, "fabric supports NP-IV"); + } + } + if (chan) { + fcp->isp_sns_hdl = NPH_SNS_HDLBASE + chan; + r = isp_plogx(isp, chan, fcp->isp_sns_hdl, SNS_PORT_ID, PLOGX_FLG_CMD_PLOGI | PLOGX_FLG_COND_PLOGI | PLOGX_FLG_SKIP_PRLI, 0); + if (r) { + isp_prt(isp, ISP_LOGWARN, "%s: Chan %d cannot log into SNS", __func__, chan); + return (-1); + } + } else { + fcp->isp_sns_hdl = NPH_SNS_ID; + } + r = isp_register_fc4_type_24xx(isp, chan); } else { - r = isp_register_fc4_type(isp); + fcp->isp_sns_hdl = SNS_ID; + r = isp_register_fc4_type(isp, chan); } if (r) { - isp_prt(isp, ISP_LOGSANCFG, - "isp_fclink_test: register fc4 type failed"); + isp_prt(isp, ISP_LOGWARN|ISP_LOGSANCFG, "%s: register fc4 type failed", __func__); return (-1); } } else { not_on_fabric: fcp->portdb[FL_ID].state = FC_PORTDB_STATE_NIL; } fcp->isp_gbspeed = 1; if (IS_23XX(isp) || IS_24XX(isp)) { - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_GET_SET_DATA_RATE; + MBSINIT(&mbs, MBOX_GET_SET_DATA_RATE, MBLOGALL, 3000000); mbs.param[1] = MBGSD_GET_RATE; /* mbs.param[2] undefined if we're just getting rate */ - mbs.logval = MBLOGALL; - mbs.timeout = 3000000; isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { - if (mbs.param[1] == MBGSD_FOURGB) { - isp_prt(isp, ISP_LOGINFO, "4Gb link speed/s"); + if (mbs.param[1] == MBGSD_EIGHTGB) { + isp_prt(isp, ISP_LOGINFO, "Chan %d 8Gb link speed", 
chan); + fcp->isp_gbspeed = 8; + } else if (mbs.param[1] == MBGSD_FOURGB) { + isp_prt(isp, ISP_LOGINFO, "Chan %d 4Gb link speed", chan); fcp->isp_gbspeed = 4; - } if (mbs.param[1] == MBGSD_TWOGB) { - isp_prt(isp, ISP_LOGINFO, "2Gb link speed/s"); + } else if (mbs.param[1] == MBGSD_TWOGB) { + isp_prt(isp, ISP_LOGINFO, "Chan %d 2Gb link speed", chan); fcp->isp_gbspeed = 2; + } else if (mbs.param[1] == MBGSD_ONEGB) { + isp_prt(isp, ISP_LOGINFO, "Chan %d 1Gb link speed", chan); + fcp->isp_gbspeed = 1; } } } /* * Announce ourselves, too. */ - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGCONFIG, topology, fcp->isp_portid, - fcp->isp_loopid, toponames[fcp->isp_topo]); - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGCONFIG, ourwwn, - (uint32_t) (ISP_NODEWWN(isp) >> 32), - (uint32_t) ISP_NODEWWN(isp), - (uint32_t) (ISP_PORTWWN(isp) >> 32), - (uint32_t) ISP_PORTWWN(isp)); - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "FC Link Test Complete"); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGCONFIG, topology, chan, (uint32_t) (fcp->isp_wwpn >> 32), (uint32_t) fcp->isp_wwpn, fcp->isp_portid, fcp->isp_loopid, isp_fc_toponame(fcp)); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Chan %d FC Link Test Complete", chan); return (0); } -static const char * -ispfc_fw_statename(int state) -{ - switch(state) { - case FW_CONFIG_WAIT: return "Config Wait"; - case FW_WAIT_AL_PA: return "Waiting for AL_PA"; - case FW_WAIT_LOGIN: return "Wait Login"; - case FW_READY: return "Ready"; - case FW_LOSS_OF_SYNC: return "Loss Of Sync"; - case FW_ERROR: return "Error"; - case FW_REINIT: return "Re-Init"; - case FW_NON_PART: return "Nonparticipating"; - default: return "?????"; - } -} - /* * Complete the synchronization of our Port Database. * * At this point, we've scanned the local loop (if any) and the fabric * and performed fabric logins on all new devices. * * Our task here is to go through our port database and remove any entities * that are still marked probational (issuing PLOGO for ones which we had * PLOGI'd into) or are dead. 
* * Our task here is to also check policy to decide whether devices which * have *changed* in some way should still be kept active. For example, * if a device has just changed PortID, we can either elect to treat it * as an old device or as a newly arrived device (and notify the outer * layer appropriately). * * We also do initiator map target id assignment here for new initiator * devices and refresh old ones ot make sure that they point to the corret * entities. */ static int -isp_pdb_sync(ispsoftc_t *isp) +isp_pdb_sync(ispsoftc_t *isp, int chan) { - fcparam *fcp = isp->isp_param; + fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; uint16_t dbidx; if (fcp->isp_loopstate == LOOP_READY) { return (0); } /* * Make sure we're okay for doing this right now. */ if (fcp->isp_loopstate != LOOP_PDB_RCVD && fcp->isp_loopstate != LOOP_FSCAN_DONE && fcp->isp_loopstate != LOOP_LSCAN_DONE) { isp_prt(isp, ISP_LOGWARN, "isp_pdb_sync: bad loopstate %d", fcp->isp_loopstate); return (-1); } if (fcp->isp_topo == TOPO_FL_PORT || fcp->isp_topo == TOPO_NL_PORT || fcp->isp_topo == TOPO_N_PORT) { if (fcp->isp_loopstate < LOOP_LSCAN_DONE) { - if (isp_scan_loop(isp) != 0) { + if (isp_scan_loop(isp, chan) != 0) { isp_prt(isp, ISP_LOGWARN, "isp_pdb_sync: isp_scan_loop failed"); return (-1); } } } if (fcp->isp_topo == TOPO_F_PORT || fcp->isp_topo == TOPO_FL_PORT) { if (fcp->isp_loopstate < LOOP_FSCAN_DONE) { - if (isp_scan_fabric(isp) != 0) { + if (isp_scan_fabric(isp, chan) != 0) { isp_prt(isp, ISP_LOGWARN, "isp_pdb_sync: isp_scan_fabric failed"); return (-1); } } } - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Synchronizing PDBs"); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, + "Chan %d Synchronizing PDBs", chan); fcp->isp_loopstate = LOOP_SYNCING_PDB; for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { lp = &fcp->portdb[dbidx]; - if (lp->state == FC_PORTDB_STATE_NIL) { + if (lp->state == FC_PORTDB_STATE_NIL || lp->target_mode) { continue; } if (lp->state == FC_PORTDB_STATE_VALID) { if (dbidx != 
FL_ID) { isp_prt(isp, ISP_LOGERR, "portdb idx %d already valid", dbidx); } continue; } switch (lp->state) { case FC_PORTDB_STATE_PROBATIONAL: case FC_PORTDB_STATE_DEAD: /* - * It's up to the outer layers to clear isp_ini_map. + * It's up to the outer layers to clear isp_dev_map. */ lp->state = FC_PORTDB_STATE_NIL; - isp_async(isp, ISPASYNC_DEV_GONE, lp); + isp_async(isp, ISPASYNC_DEV_GONE, chan, lp); if (lp->autologin == 0) { - (void) isp_plogx(isp, lp->handle, lp->portid, + (void) isp_plogx(isp, chan, lp->handle, + lp->portid, PLOGX_FLG_CMD_LOGO | PLOGX_FLG_IMPLICIT | PLOGX_FLG_FREE_NPHDL, 0); } else { lp->autologin = 0; } lp->new_roles = 0; lp->new_portid = 0; /* * Note that we might come out of this with our state * set to FC_PORTDB_STATE_ZOMBIE. */ break; case FC_PORTDB_STATE_NEW: /* * It's up to the outer layers to assign a virtual - * target id in isp_ini_map (if any). + * target id in isp_dev_map (if any). */ lp->portid = lp->new_portid; lp->roles = lp->new_roles; lp->state = FC_PORTDB_STATE_VALID; - isp_async(isp, ISPASYNC_DEV_ARRIVED, lp); + isp_async(isp, ISPASYNC_DEV_ARRIVED, chan, lp); lp->new_roles = 0; lp->new_portid = 0; lp->reserved = 0; lp->new_reserved = 0; break; case FC_PORTDB_STATE_CHANGED: /* * XXXX FIX THIS */ lp->state = FC_PORTDB_STATE_VALID; - isp_async(isp, ISPASYNC_DEV_CHANGED, lp); + isp_async(isp, ISPASYNC_DEV_CHANGED, chan, lp); lp->new_roles = 0; lp->new_portid = 0; lp->reserved = 0; lp->new_reserved = 0; break; case FC_PORTDB_STATE_PENDING_VALID: lp->portid = lp->new_portid; lp->roles = lp->new_roles; - if (lp->ini_map_idx) { - int t = lp->ini_map_idx - 1; - fcp->isp_ini_map[t] = dbidx + 1; + if (lp->dev_map_idx) { + int t = lp->dev_map_idx - 1; + fcp->isp_dev_map[t] = dbidx + 1; } lp->state = FC_PORTDB_STATE_VALID; - isp_async(isp, ISPASYNC_DEV_STAYED, lp); + isp_async(isp, ISPASYNC_DEV_STAYED, chan, lp); if (dbidx != FL_ID) { lp->new_roles = 0; lp->new_portid = 0; } lp->reserved = 0; lp->new_reserved = 0; break; case 
FC_PORTDB_STATE_ZOMBIE: break; default: isp_prt(isp, ISP_LOGWARN, "isp_scan_loop: state %d for idx %d", lp->state, dbidx); - isp_dump_portdb(isp); + isp_dump_portdb(isp, chan); } } /* * If we get here, we've for sure seen not only a valid loop * but know what is or isn't on it, so mark this for usage * in isp_start. */ fcp->loop_seen_once = 1; fcp->isp_loopstate = LOOP_READY; return (0); } /* * Scan local loop for devices. */ static int -isp_scan_loop(ispsoftc_t *isp) +isp_scan_loop(ispsoftc_t *isp, int chan) { fcportdb_t *lp, tmp; - fcparam *fcp = isp->isp_param; + fcparam *fcp = FCPARAM(isp, chan); int i; isp_pdb_t pdb; uint16_t handle, lim = 0; if (fcp->isp_fwstate < FW_READY || fcp->isp_loopstate < LOOP_PDB_RCVD) { return (-1); } if (fcp->isp_loopstate > LOOP_SCANNING_LOOP) { return (0); } /* * Check our connection topology. * * If we're a public or private loop, we scan 0..125 as handle values. - * The firmware has (typically) peformed a PLOGI for us. + * The firmware has (typically) peformed a PLOGI for us. We skip this + * step if we're a ISP_24XX in NP-IV mode. * * If we're a N-port connection, we treat this is a short loop (0..1). - * - * If we're in target mode, we can all possible handles to see who - * might have logged into us. 
*/ switch (fcp->isp_topo) { case TOPO_NL_PORT: + lim = LOCAL_LOOP_LIM; + break; case TOPO_FL_PORT: + if (IS_24XX(isp) && isp->isp_nchan > 1) { + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, + "Chan %d Skipping Local Loop Scan", chan); + fcp->isp_loopstate = LOOP_LSCAN_DONE; + return (0); + } lim = LOCAL_LOOP_LIM; break; case TOPO_N_PORT: lim = 2; break; default: - isp_prt(isp, ISP_LOGDEBUG0, "no loop topology to scan"); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, + "Chan %d no loop topology to scan", chan); fcp->isp_loopstate = LOOP_LSCAN_DONE; return (0); } fcp->isp_loopstate = LOOP_SCANNING_LOOP; - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "FC scan loop 0..%d", lim-1); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, + "Chan %d FC scan loop 0..%d", chan, lim-1); /* * Run through the list and get the port database info for each one. */ for (handle = 0; handle < lim; handle++) { + int r; /* - * But don't even try for ourselves... - */ - if (handle == fcp->isp_loopid) { + * Don't scan "special" ids. + */ + if (handle >= FL_ID && handle <= SNS_ID) { continue; } - + if (ISP_CAP_2KLOGIN(isp)) { + if (handle >= NPH_RESERVED && handle <= NPH_FL_ID) { + continue; + } + } /* * In older cards with older f/w GET_PORT_DATABASE has been * known to hang. This trick gets around that problem. */ if (IS_2100(isp) || IS_2200(isp)) { - uint64_t node_wwn = isp_get_portname(isp, handle, 1); + uint64_t node_wwn = isp_get_wwn(isp, chan, handle, 1); if (fcp->isp_loopstate < LOOP_SCANNING_LOOP) { + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, + "Chan %d FC scan loop DONE (bad)", chan); return (-1); } - if (node_wwn == 0) { + if (node_wwn == INI_NONE) { continue; } } /* * Get the port database entity for this index. 
*/ - if (isp_getpdb(isp, handle, &pdb, 1) != 0) { + r = isp_getpdb(isp, chan, handle, &pdb, 1); + if (r != 0) { + isp_prt(isp, ISP_LOGDEBUG1, + "Chan %d FC scan loop handle %d returned %x", + chan, handle, r); if (fcp->isp_loopstate < LOOP_SCANNING_LOOP) { - ISP_MARK_PORTDB(isp, 1); + ISP_MARK_PORTDB(isp, chan, 1); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, + "Chan %d FC scan loop DONE (bad)", chan); return (-1); } continue; } if (fcp->isp_loopstate < LOOP_SCANNING_LOOP) { - ISP_MARK_PORTDB(isp, 1); + ISP_MARK_PORTDB(isp, chan, 1); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, + "Chan %d FC scan loop DONE (bad)", chan); return (-1); } /* * On *very* old 2100 firmware we would end up sometimes * with the firmware returning the port database entry * for something else. We used to restart this, but * now we just punt. */ if (IS_2100(isp) && pdb.handle != handle) { isp_prt(isp, ISP_LOGWARN, - "giving up on synchronizing the port database"); - ISP_MARK_PORTDB(isp, 1); + "Chan %d cannot synchronize port database", chan); + ISP_MARK_PORTDB(isp, chan, 1); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, + "Chan %d FC scan loop DONE (bad)", chan); return (-1); } /* * Save the pertinent info locally. */ MAKE_WWN_FROM_NODE_NAME(tmp.node_wwn, pdb.nodename); MAKE_WWN_FROM_NODE_NAME(tmp.port_wwn, pdb.portname); tmp.roles = (pdb.s3_role & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT; tmp.portid = pdb.portid; tmp.handle = pdb.handle; /* * Check to make sure it's still a valid entry. The 24XX seems * to return a portid but not a WWPN/WWNN or role for devices * which shift on a loop. 
*/ if (tmp.node_wwn == 0 || tmp.port_wwn == 0 || tmp.portid == 0) { int a, b, c; a = (tmp.node_wwn == 0); b = (tmp.port_wwn == 0); c = (tmp.portid == 0); + if (a == 0 && b == 0) { + tmp.node_wwn = + isp_get_wwn(isp, chan, handle, 1); + tmp.port_wwn = + isp_get_wwn(isp, chan, handle, 0); + if (tmp.node_wwn && tmp.port_wwn) { + isp_prt(isp, ISP_LOGINFO, "DODGED!"); + goto cont; + } + } isp_prt(isp, ISP_LOGWARN, - "bad pdb (%1d%1d%1d) @ handle 0x%x", a, b, c, - handle); - isp_dump_portdb(isp); + "Chan %d bad pdb (%1d%1d%1d) @ handle 0x%x", chan, + a, b, c, handle); + isp_dump_portdb(isp, chan); continue; } + cont: /* * Now search the entire port database * for the same Port and Node WWN. */ for (i = 0; i < MAX_FC_TARG; i++) { lp = &fcp->portdb[i]; - if (lp->state == FC_PORTDB_STATE_NIL) { + + if (lp->state == FC_PORTDB_STATE_NIL || + lp->target_mode) { continue; } if (lp->node_wwn != tmp.node_wwn) { continue; } if (lp->port_wwn != tmp.port_wwn) { continue; } /* * Okay- we've found a non-nil entry that matches. * Check to make sure it's probational or a zombie. */ if (lp->state != FC_PORTDB_STATE_PROBATIONAL && lp->state != FC_PORTDB_STATE_ZOMBIE) { isp_prt(isp, ISP_LOGERR, - "[%d] not probational/zombie (0x%x)", - i, lp->state); - isp_dump_portdb(isp); - ISP_MARK_PORTDB(isp, 1); + "Chan %d [%d] not probational/zombie (0x%x)", + chan, i, lp->state); + isp_dump_portdb(isp, chan); + ISP_MARK_PORTDB(isp, chan, 1); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, + "Chan %d FC scan loop DONE (bad)", chan); return (-1); } /* * Mark the device as something the f/w logs into * automatically. */ lp->autologin = 1; /* * Check to make see if really still the same * device. If it is, we mark it pending valid. 
*/ if (lp->portid == tmp.portid && lp->handle == tmp.handle && lp->roles == tmp.roles) { lp->new_portid = tmp.portid; lp->new_roles = tmp.roles; lp->state = FC_PORTDB_STATE_PENDING_VALID; - isp_prt(isp, ISP_LOGSANCFG, - "Loop Port 0x%02x@0x%x Pending Valid", - tmp.portid, tmp.handle); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, + "Chan %d Loop Port 0x%06x@0x%04x Pending " + "Valid", chan, tmp.portid, tmp.handle); break; } - + /* * We can wipe out the old handle value * here because it's no longer valid. */ lp->handle = tmp.handle; /* * Claim that this has changed and let somebody else * decide what to do. */ - isp_prt(isp, ISP_LOGSANCFG, - "Loop Port 0x%02x@0x%x changed", - tmp.portid, tmp.handle); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, + "Chan %d Loop Port 0x%06x@0x%04x changed", + chan, tmp.portid, tmp.handle); lp->state = FC_PORTDB_STATE_CHANGED; lp->new_portid = tmp.portid; lp->new_roles = tmp.roles; break; } /* * Did we find and update an old entry? */ if (i < MAX_FC_TARG) { continue; } /* * Ah. A new device entry. Find an empty slot * for it and save info for later disposition. 
*/ for (i = 0; i < MAX_FC_TARG; i++) { + if (fcp->portdb[i].target_mode) { + continue; + } if (fcp->portdb[i].state == FC_PORTDB_STATE_NIL) { break; } } if (i == MAX_FC_TARG) { - isp_prt(isp, ISP_LOGERR, "out of portdb entries"); + isp_prt(isp, ISP_LOGERR, + "Chan %d out of portdb entries", chan); continue; } lp = &fcp->portdb[i]; - MEMZERO(lp, sizeof (fcportdb_t)); + ISP_MEMZERO(lp, sizeof (fcportdb_t)); lp->autologin = 1; lp->state = FC_PORTDB_STATE_NEW; lp->new_portid = tmp.portid; lp->new_roles = tmp.roles; lp->handle = tmp.handle; lp->port_wwn = tmp.port_wwn; lp->node_wwn = tmp.node_wwn; - isp_prt(isp, ISP_LOGSANCFG, - "Loop Port 0x%02x@0x%x is New Entry", - tmp.portid, tmp.handle); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, + "Chan %d Loop Port 0x%06x@0x%04x is New Entry", + chan, tmp.portid, tmp.handle); } fcp->isp_loopstate = LOOP_LSCAN_DONE; + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, + "Chan %d FC scan loop DONE", chan); return (0); } /* * Scan the fabric for devices and add them to our port database. * * Use the GID_FT command to get all Port IDs for FC4 SCSI devices it knows. * * For 2100-23XX cards, we can use the SNS mailbox command to pass simple * name server commands to the switch management server via the QLogic f/w. * * For the 24XX card, we have to use CT-Pass through run via the Execute IOCB * mailbox command. * * The net result is to leave the list of Port IDs setting untranslated in * offset IGPOFF of the FC scratch area, whereupon we'll canonicalize it to * host order at OGPOFF. 
*/ /* - * Take less than half of our scratch area to store Port IDs + * Take less than half of our scratch area to store Port IDs */ -#define GIDLEN ((ISP2100_SCRLEN >> 1) - 16 - SNS_GID_FT_REQ_SIZE) +#define GIDLEN ((ISP_FC_SCRLEN >> 1) - 16 - SNS_GID_FT_REQ_SIZE) #define NGENT ((GIDLEN - 16) >> 2) #define IGPOFF (2 * QENTRY_LEN) -#define OGPOFF (ISP2100_SCRLEN >> 1) -#define ZTXOFF (ISP2100_SCRLEN - (1 * QENTRY_LEN)) -#define CTXOFF (ISP2100_SCRLEN - (2 * QENTRY_LEN)) -#define XTXOFF (ISP2100_SCRLEN - (3 * QENTRY_LEN)) +#define OGPOFF (ISP_FC_SCRLEN >> 1) +#define ZTXOFF (ISP_FC_SCRLEN - (1 * QENTRY_LEN)) +#define CTXOFF (ISP_FC_SCRLEN - (2 * QENTRY_LEN)) +#define XTXOFF (ISP_FC_SCRLEN - (3 * QENTRY_LEN)) static int -isp_gid_ft_sns(ispsoftc_t *isp) +isp_gid_ft_sns(ispsoftc_t *isp, int chan) { union { sns_gid_ft_req_t _x; uint8_t _y[SNS_GID_FT_REQ_SIZE]; } un; - fcparam *fcp = FCPARAM(isp); + fcparam *fcp = FCPARAM(isp, chan); sns_gid_ft_req_t *rq = &un._x; mbreg_t mbs; - isp_prt(isp, ISP_LOGDEBUG0, "scanning fabric (GID_FT) via SNS"); + isp_prt(isp, ISP_LOGDEBUG0, + "Chan %d scanning fabric (GID_FT) via SNS", chan); - MEMZERO(rq, SNS_GID_FT_REQ_SIZE); + ISP_MEMZERO(rq, SNS_GID_FT_REQ_SIZE); rq->snscb_rblen = GIDLEN >> 1; rq->snscb_addr[RQRSP_ADDR0015] = DMA_WD0(fcp->isp_scdma + IGPOFF); rq->snscb_addr[RQRSP_ADDR1631] = DMA_WD1(fcp->isp_scdma + IGPOFF); rq->snscb_addr[RQRSP_ADDR3247] = DMA_WD2(fcp->isp_scdma + IGPOFF); rq->snscb_addr[RQRSP_ADDR4863] = DMA_WD3(fcp->isp_scdma + IGPOFF); rq->snscb_sblen = 6; rq->snscb_cmd = SNS_GID_FT; rq->snscb_mword_div_2 = NGENT; rq->snscb_fc4_type = FC4_SCSI; isp_put_gid_ft_request(isp, rq, fcp->isp_scratch); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, SNS_GID_FT_REQ_SIZE); - MEMZERO(&mbs, sizeof (mbs)); + MBSINIT(&mbs, MBOX_SEND_SNS, MBLOGALL, 10000000); mbs.param[0] = MBOX_SEND_SNS; mbs.param[1] = SNS_GID_FT_REQ_SIZE >> 1; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = 
DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); - mbs.logval = MBLOGALL; - mbs.timeout = 10000000; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { if (mbs.param[0] == MBOX_INVALID_COMMAND) { return (1); } else { return (-1); } } return (0); } static int -isp_gid_ft_ct_passthru(ispsoftc_t *isp) +isp_gid_ft_ct_passthru(ispsoftc_t *isp, int chan) { mbreg_t mbs; - fcparam *fcp = FCPARAM(isp); + fcparam *fcp = FCPARAM(isp, chan); union { isp_ct_pt_t plocal; ct_hdr_t clocal; uint8_t q[QENTRY_LEN]; } un; isp_ct_pt_t *pt; ct_hdr_t *ct; uint32_t *rp; uint8_t *scp = fcp->isp_scratch; - isp_prt(isp, ISP_LOGDEBUG0, "scanning fabric (GID_FT) via CT"); + isp_prt(isp, ISP_LOGDEBUG0, + "Chan %d scanning fabric (GID_FT) via CT", chan); if (!IS_24XX(isp)) { return (1); } /* * Build a Passthrough IOCB in memory. */ pt = &un.plocal; - MEMZERO(un.q, QENTRY_LEN); + ISP_MEMZERO(un.q, QENTRY_LEN); pt->ctp_header.rqs_entry_count = 1; pt->ctp_header.rqs_entry_type = RQSTYPE_CT_PASSTHRU; pt->ctp_handle = 0xffffffff; - pt->ctp_nphdl = NPH_SNS_ID; + pt->ctp_nphdl = fcp->isp_sns_hdl; pt->ctp_cmd_cnt = 1; + pt->ctp_vpidx = ISP_GET_VPIDX(isp, chan); pt->ctp_time = 30; pt->ctp_rsp_cnt = 1; pt->ctp_rsp_bcnt = GIDLEN; pt->ctp_cmd_bcnt = sizeof (*ct) + sizeof (uint32_t); pt->ctp_dataseg[0].ds_base = DMA_LO32(fcp->isp_scdma+XTXOFF); pt->ctp_dataseg[0].ds_basehi = DMA_HI32(fcp->isp_scdma+XTXOFF); pt->ctp_dataseg[0].ds_count = sizeof (*ct) + sizeof (uint32_t); pt->ctp_dataseg[1].ds_base = DMA_LO32(fcp->isp_scdma+IGPOFF); pt->ctp_dataseg[1].ds_basehi = DMA_HI32(fcp->isp_scdma+IGPOFF); pt->ctp_dataseg[1].ds_count = GIDLEN; if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "ct IOCB", QENTRY_LEN, pt); } isp_put_ct_pt(isp, pt, (isp_ct_pt_t *) &scp[CTXOFF]); /* * Build the CT header and command in memory. * * Note that the CT header has to end up as Big Endian format in memory. 
*/ ct = &un.clocal; - MEMZERO(ct, sizeof (*ct)); + ISP_MEMZERO(ct, sizeof (*ct)); ct->ct_revision = CT_REVISION; ct->ct_fcs_type = CT_FC_TYPE_FC; ct->ct_fcs_subtype = CT_FC_SUBTYPE_NS; ct->ct_cmd_resp = SNS_GID_FT; ct->ct_bcnt_resid = (GIDLEN - 16) >> 2; isp_put_ct_hdr(isp, ct, (ct_hdr_t *) &scp[XTXOFF]); rp = (uint32_t *) &scp[XTXOFF+sizeof (*ct)]; ISP_IOZPUT_32(isp, FC4_SCSI, rp); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "CT HDR + payload after put", sizeof (*ct) + sizeof (uint32_t), &scp[XTXOFF]); } - MEMZERO(&scp[ZTXOFF], QENTRY_LEN); - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_EXEC_COMMAND_IOCB_A64; + ISP_MEMZERO(&scp[ZTXOFF], QENTRY_LEN); + MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 500000); mbs.param[1] = QENTRY_LEN; mbs.param[2] = DMA_WD1(fcp->isp_scdma + CTXOFF); mbs.param[3] = DMA_WD0(fcp->isp_scdma + CTXOFF); mbs.param[6] = DMA_WD3(fcp->isp_scdma + CTXOFF); mbs.param[7] = DMA_WD2(fcp->isp_scdma + CTXOFF); - mbs.timeout = 500000; - mbs.logval = MBLOGALL; MEMORYBARRIER(isp, SYNC_SFORDEV, XTXOFF, 2 * QENTRY_LEN); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return (-1); } MEMORYBARRIER(isp, SYNC_SFORCPU, ZTXOFF, QENTRY_LEN); pt = &un.plocal; isp_get_ct_pt(isp, (isp_ct_pt_t *) &scp[ZTXOFF], pt); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "IOCB response", QENTRY_LEN, pt); } if (pt->ctp_status && pt->ctp_status != RQCS_DATA_UNDERRUN) { - isp_prt(isp, ISP_LOGWARN, "CT Passthrough returned 0x%x", - pt->ctp_status); + isp_prt(isp, ISP_LOGWARN, + "Chan %d ISP GID FT CT Passthrough returned 0x%x", + chan, pt->ctp_status); return (-1); } MEMORYBARRIER(isp, SYNC_SFORCPU, IGPOFF, GIDLEN + 16); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "CT response", GIDLEN+16, &scp[IGPOFF]); } return (0); } static int -isp_scan_fabric(ispsoftc_t *isp) +isp_scan_fabric(ispsoftc_t *isp, int chan) { - fcparam *fcp = FCPARAM(isp); + fcparam *fcp = FCPARAM(isp, chan); uint32_t portid; - uint16_t 
handle, oldhandle; + uint16_t handle, oldhandle, loopid; + isp_pdb_t pdb; int portidx, portlim, r; sns_gid_ft_rsp_t *rs0, *rs1; - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "FC Scan Fabric"); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, + "Chan %d FC Scan Fabric", chan); if (fcp->isp_fwstate != FW_READY || fcp->isp_loopstate < LOOP_LSCAN_DONE) { return (-1); } if (fcp->isp_loopstate > LOOP_SCANNING_FABRIC) { return (0); } if (fcp->isp_topo != TOPO_FL_PORT && fcp->isp_topo != TOPO_F_PORT) { fcp->isp_loopstate = LOOP_FSCAN_DONE; isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "FC Scan Fabric Done (no fabric)"); + "Chan %d FC Scan Fabric Done (no fabric)", chan); return (0); } - FC_SCRATCH_ACQUIRE(isp); fcp->isp_loopstate = LOOP_SCANNING_FABRIC; + if (FC_SCRATCH_ACQUIRE(isp, chan)) { + isp_prt(isp, ISP_LOGERR, sacq); + ISP_MARK_PORTDB(isp, chan, 1); + return (-1); + } + if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) { + FC_SCRATCH_RELEASE(isp, chan); + ISP_MARK_PORTDB(isp, chan, 1); + return (-1); + } + /* + * Make sure we still are logged into the fabric controller. + */ + if (IS_24XX(isp)) { /* XXX SHOULDN'T THIS BE TRUE FOR 2K F/W? 
XXX */ + loopid = NPH_FL_ID; + } else { + loopid = FL_ID; + } + r = isp_getpdb(isp, chan, loopid, &pdb, 0); + if (r == MBOX_NOT_LOGGED_IN) { + isp_dump_chip_portdb(isp, chan, 0); + } + if (r) { + fcp->isp_loopstate = LOOP_PDB_RCVD; + FC_SCRATCH_RELEASE(isp, chan); + ISP_MARK_PORTDB(isp, chan, 1); + return (-1); + } + if (IS_24XX(isp)) { - r = isp_gid_ft_ct_passthru(isp); + r = isp_gid_ft_ct_passthru(isp, chan); } else { - r = isp_gid_ft_sns(isp); + r = isp_gid_ft_sns(isp, chan); } + if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) { + FC_SCRATCH_RELEASE(isp, chan); + ISP_MARK_PORTDB(isp, chan, 1); + return (-1); + } + if (r > 0) { fcp->isp_loopstate = LOOP_FSCAN_DONE; - FC_SCRATCH_RELEASE(isp); + FC_SCRATCH_RELEASE(isp, chan); return (0); } else if (r < 0) { fcp->isp_loopstate = LOOP_PDB_RCVD; /* try again */ - FC_SCRATCH_RELEASE(isp); + FC_SCRATCH_RELEASE(isp, chan); return (0); } - if (fcp->isp_loopstate != LOOP_SCANNING_FABRIC) { - FC_SCRATCH_RELEASE(isp); - return (-1); - } MEMORYBARRIER(isp, SYNC_SFORCPU, IGPOFF, GIDLEN); rs0 = (sns_gid_ft_rsp_t *) ((uint8_t *)fcp->isp_scratch+IGPOFF); rs1 = (sns_gid_ft_rsp_t *) ((uint8_t *)fcp->isp_scratch+OGPOFF); isp_get_gid_ft_response(isp, rs0, rs1, NGENT); + if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) { + FC_SCRATCH_RELEASE(isp, chan); + ISP_MARK_PORTDB(isp, chan, 1); + return (-1); + } if (rs1->snscb_cthdr.ct_cmd_resp != LS_ACC) { int level; if (rs1->snscb_cthdr.ct_reason == 9 && rs1->snscb_cthdr.ct_explanation == 7) { level = ISP_LOGSANCFG|ISP_LOGDEBUG0; } else { level = ISP_LOGWARN; } - isp_prt(isp, level, "Fabric Nameserver rejected GID_FT " - "(Reason=0x%x Expl=0x%x)", rs1->snscb_cthdr.ct_reason, + isp_prt(isp, level, "Chan %d Fabric Nameserver rejected GID_FT" + " (Reason=0x%x Expl=0x%x)", chan, + rs1->snscb_cthdr.ct_reason, rs1->snscb_cthdr.ct_explanation); - FC_SCRATCH_RELEASE(isp); + FC_SCRATCH_RELEASE(isp, chan); fcp->isp_loopstate = LOOP_FSCAN_DONE; return (0); } /* * If we get this far, we certainly still 
have the fabric controller. */ fcp->portdb[FL_ID].state = FC_PORTDB_STATE_PENDING_VALID; /* * Prime the handle we will start using. */ - oldhandle = NIL_HANDLE; + oldhandle = FCPARAM(isp, 0)->isp_lasthdl; /* - * Okay, we now have a list of Port IDs for all FC4 SCSI devices - * that the Fabric Name server knows about. Go through the list - * and remove duplicate port ids. + * Go through the list and remove duplicate port ids. */ portlim = 0; portidx = 0; for (portidx = 0; portidx < NGENT-1; portidx++) { if (rs1->snscb_ports[portidx].control & 0x80) { break; } } /* * If we're not at the last entry, our list wasn't big enough. */ if ((rs1->snscb_ports[portidx].control & 0x80) == 0) { isp_prt(isp, ISP_LOGWARN, - "fabric too big for scratch area: increase ISP2100_SCRLEN"); + "fabric too big for scratch area: increase ISP_FC_SCRLEN"); } portlim = portidx + 1; isp_prt(isp, ISP_LOGSANCFG, - "got %d ports back from name server", portlim); + "Chan %d got %d ports back from name server", chan, portlim); for (portidx = 0; portidx < portlim; portidx++) { int npidx; portid = ((rs1->snscb_ports[portidx].portid[0]) << 16) | ((rs1->snscb_ports[portidx].portid[1]) << 8) | ((rs1->snscb_ports[portidx].portid[2])); for (npidx = portidx + 1; npidx < portlim; npidx++) { uint32_t new_portid = ((rs1->snscb_ports[npidx].portid[0]) << 16) | ((rs1->snscb_ports[npidx].portid[1]) << 8) | ((rs1->snscb_ports[npidx].portid[2])); if (new_portid == portid) { break; } } if (npidx < portlim) { rs1->snscb_ports[npidx].portid[0] = 0; rs1->snscb_ports[npidx].portid[1] = 0; rs1->snscb_ports[npidx].portid[2] = 0; isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "removing duplicate PortID 0x%x entry from list", - portid); + "Chan %d removing duplicate PortID 0x%06x" + " entry from list", chan, portid); } } /* - * Okay, we now have a list of Port IDs for all FC4 SCSI devices + * We now have a list of Port IDs for all FC4 SCSI devices * that the Fabric Name server knows about. 
* * For each entry on this list go through our port database looking * for probational entries- if we find one, then an old entry is - * is maybe still this one. We get some information to find out. + * maybe still this one. We get some information to find out. * * Otherwise, it's a new fabric device, and we log into it * (unconditionally). After searching the entire database * again to make sure that we never ever ever ever have more * than one entry that has the same PortID or the same * WWNN/WWPN duple, we enter the device into our database. */ for (portidx = 0; portidx < portlim; portidx++) { fcportdb_t *lp; - isp_pdb_t pdb; uint64_t wwnn, wwpn; int dbidx, nr; portid = ((rs1->snscb_ports[portidx].portid[0]) << 16) | ((rs1->snscb_ports[portidx].portid[1]) << 8) | ((rs1->snscb_ports[portidx].portid[2])); if (portid == 0) { isp_prt(isp, ISP_LOGSANCFG, - "skipping null PortID at idx %d", portidx); + "Chan %d skipping null PortID at idx %d", + chan, portidx); continue; } /* - * Skip ourselves... + * Skip ourselves here and on other channels. If we're + * multi-id, we can't check the portids in other FCPARAM + * arenas because the resolutions here aren't synchronized. + * The best way to do this is to exclude looking at portids + * that have the same domain and area code as our own + * portid. */ - if (portid == fcp->isp_portid) { + if (ISP_CAP_MULTI_ID(isp)) { + if ((portid >> 8) == (fcp->isp_portid >> 8)) { + isp_prt(isp, ISP_LOGSANCFG, + "Chan %d skip PortID 0x%06x", + chan, portid); + continue; + } + } else if (portid == fcp->isp_portid) { isp_prt(isp, ISP_LOGSANCFG, - "skip ourselves @ PortID 0x%06x", portid); + "Chan %d skip ourselves on @ PortID 0x%06x", + chan, portid); continue; } + isp_prt(isp, ISP_LOGSANCFG, - "Checking Fabric Port 0x%06x", portid); + "Chan %d Checking Fabric Port 0x%06x", chan, portid); /* * We now search our Port Database for any * probational entries with this PortID. 
We don't * look for zombies here- only probational * entries (we've already logged out of zombies). */ for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { lp = &fcp->portdb[dbidx]; - if (lp->state != FC_PORTDB_STATE_PROBATIONAL) { + if (lp->state != FC_PORTDB_STATE_PROBATIONAL || + lp->target_mode) { continue; } if (lp->portid == portid) { break; } } /* * We found a probational entry with this Port ID. */ if (dbidx < MAX_FC_TARG) { int handle_changed = 0; lp = &fcp->portdb[dbidx]; /* * See if we're still logged into it. * * If we aren't, mark it as a dead device and * leave the new portid in the database entry * for somebody further along to decide what to * do (policy choice). * * If we are, check to see if it's the same * device still (it should be). If for some * reason it isn't, mark it as a changed device * and leave the new portid and role in the * database entry for somebody further along to * decide what to do (policy choice). * */ - r = isp_getpdb(isp, lp->handle, &pdb, 0); + r = isp_getpdb(isp, chan, lp->handle, &pdb, 0); if (fcp->isp_loopstate != LOOP_SCANNING_FABRIC) { - FC_SCRATCH_RELEASE(isp); - ISP_MARK_PORTDB(isp, 1); + FC_SCRATCH_RELEASE(isp, chan); + ISP_MARK_PORTDB(isp, chan, 1); return (-1); } if (r != 0) { lp->new_portid = portid; lp->state = FC_PORTDB_STATE_DEAD; isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "Fabric Port 0x%06x considered dead", - portid); + "Chan %d Fabric Port 0x%06x is dead", + chan, portid); continue; } /* * Check to make sure that handle, portid, WWPN and * WWNN agree. If they don't, then the association * between this PortID and the stated handle has been * broken by the firmware. 
*/ MAKE_WWN_FROM_NODE_NAME(wwnn, pdb.nodename); MAKE_WWN_FROM_NODE_NAME(wwpn, pdb.portname); if (pdb.handle != lp->handle || pdb.portid != portid || wwpn != lp->port_wwn || wwnn != lp->node_wwn) { isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - fconf, dbidx, pdb.handle, pdb.portid, + fconf, chan, dbidx, pdb.handle, pdb.portid, (uint32_t) (wwnn >> 32), (uint32_t) wwnn, (uint32_t) (wwpn >> 32), (uint32_t) wwpn, lp->handle, portid, (uint32_t) (lp->node_wwn >> 32), (uint32_t) lp->node_wwn, (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn); /* * Try to re-login to this device using a * new handle. If that fails, mark it dead. - * + * * isp_login_device will check for handle and * portid consistency after re-login. - * + * */ - if (isp_login_device(isp, portid, &pdb, + if (isp_login_device(isp, chan, portid, &pdb, &oldhandle)) { lp->new_portid = portid; lp->state = FC_PORTDB_STATE_DEAD; if (fcp->isp_loopstate != LOOP_SCANNING_FABRIC) { - FC_SCRATCH_RELEASE(isp); - ISP_MARK_PORTDB(isp, 1); + FC_SCRATCH_RELEASE(isp, chan); + ISP_MARK_PORTDB(isp, chan, 1); return (-1); } continue; } + if (fcp->isp_loopstate != + LOOP_SCANNING_FABRIC) { + FC_SCRATCH_RELEASE(isp, chan); + ISP_MARK_PORTDB(isp, chan, 1); + return (-1); + } + FCPARAM(isp, 0)->isp_lasthdl = oldhandle; MAKE_WWN_FROM_NODE_NAME(wwnn, pdb.nodename); MAKE_WWN_FROM_NODE_NAME(wwpn, pdb.portname); if (wwpn != lp->port_wwn || wwnn != lp->node_wwn) { isp_prt(isp, ISP_LOGWARN, "changed WWN" " after relogin"); lp->new_portid = portid; lp->state = FC_PORTDB_STATE_DEAD; continue; } lp->handle = pdb.handle; handle_changed++; } nr = (pdb.s3_role & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT; /* * Check to see whether the portid and roles have * stayed the same. If they have stayed the same, * we believe that this is the same device and it * hasn't become disconnected and reconnected, so * mark it as pending valid. 
* * If they aren't the same, mark the device as a * changed device and save the new port id and role * and let somebody else decide. */ lp->new_portid = portid; lp->new_roles = nr; if (pdb.portid != lp->portid || nr != lp->roles || handle_changed) { isp_prt(isp, ISP_LOGSANCFG, - "Fabric Port 0x%06x changed", portid); + "Chan %d Fabric Port 0x%06x changed", + chan, portid); lp->state = FC_PORTDB_STATE_CHANGED; } else { isp_prt(isp, ISP_LOGSANCFG, - "Fabric Port 0x%06x Now Pending Valid", - portid); + "Chan %d Fabric Port 0x%06x " + "Now Pending Valid", chan, portid); lp->state = FC_PORTDB_STATE_PENDING_VALID; } continue; } /* * Ah- a new entry. Search the database again for all non-NIL * entries to make sure we never ever make a new database entry * with the same port id. While we're at it, mark where the * last free entry was. */ - + dbidx = MAX_FC_TARG; for (lp = fcp->portdb; lp < &fcp->portdb[MAX_FC_TARG]; lp++) { if (lp >= &fcp->portdb[FL_ID] && lp <= &fcp->portdb[SNS_ID]) { continue; } + /* + * Skip any target mode entries. + */ + if (lp->target_mode) { + continue; + } if (lp->state == FC_PORTDB_STATE_NIL) { if (dbidx == MAX_FC_TARG) { dbidx = lp - fcp->portdb; } continue; } if (lp->state == FC_PORTDB_STATE_ZOMBIE) { continue; } if (lp->portid == portid) { break; } } if (lp < &fcp->portdb[MAX_FC_TARG]) { - isp_prt(isp, ISP_LOGWARN, - "PortID 0x%06x already at %d handle %d state %d", - portid, dbidx, lp->handle, lp->state); + isp_prt(isp, ISP_LOGWARN, "Chan %d PortID 0x%06x " + "already at %d handle %d state %d", + chan, portid, dbidx, lp->handle, lp->state); continue; } /* * We should have the index of the first free entry seen. */ if (dbidx == MAX_FC_TARG) { isp_prt(isp, ISP_LOGERR, "port database too small to login PortID 0x%06x" "- increase MAX_FC_TARG", portid); continue; } /* * Otherwise, point to our new home. */ lp = &fcp->portdb[dbidx]; /* * Try to see if we are logged into this device, * and maybe log into it. 
* * isp_login_device will check for handle and * portid consistency after login. */ - if (isp_login_device(isp, portid, &pdb, &oldhandle)) { + if (isp_login_device(isp, chan, portid, &pdb, &oldhandle)) { if (fcp->isp_loopstate != LOOP_SCANNING_FABRIC) { - FC_SCRATCH_RELEASE(isp); - ISP_MARK_PORTDB(isp, 1); + FC_SCRATCH_RELEASE(isp, chan); + ISP_MARK_PORTDB(isp, chan, 1); return (-1); } continue; } + if (fcp->isp_loopstate != LOOP_SCANNING_FABRIC) { + FC_SCRATCH_RELEASE(isp, chan); + ISP_MARK_PORTDB(isp, chan, 1); + return (-1); + } + FCPARAM(isp, 0)->isp_lasthdl = oldhandle; handle = pdb.handle; MAKE_WWN_FROM_NODE_NAME(wwnn, pdb.nodename); MAKE_WWN_FROM_NODE_NAME(wwpn, pdb.portname); nr = (pdb.s3_role & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT; /* * And go through the database *one* more time to make sure * that we do not make more than one entry that has the same * WWNN/WWPN duple */ for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { if (dbidx >= FL_ID && dbidx <= SNS_ID) { continue; } - if (fcp->portdb[dbidx].state == FC_PORTDB_STATE_NIL) { + if (fcp->portdb[dbidx].target_mode) { continue; } if (fcp->portdb[dbidx].node_wwn == wwnn && fcp->portdb[dbidx].port_wwn == wwpn) { break; } } if (dbidx == MAX_FC_TARG) { - MEMZERO(lp, sizeof (fcportdb_t)); + ISP_MEMZERO(lp, sizeof (fcportdb_t)); lp->handle = handle; lp->node_wwn = wwnn; lp->port_wwn = wwpn; lp->new_portid = portid; lp->new_roles = nr; lp->state = FC_PORTDB_STATE_NEW; isp_prt(isp, ISP_LOGSANCFG, - "Fabric Port 0x%06x is New Entry", portid); + "Chan %d Fabric Port 0x%06x is a New Entry", + chan, portid); continue; } if (fcp->portdb[dbidx].state != FC_PORTDB_STATE_ZOMBIE) { isp_prt(isp, ISP_LOGWARN, - "PortID 0x%x 0x%08x%08x/0x%08x%08x %ld already at " - "idx %d, state 0x%x", portid, + "Chan %d PortID 0x%x 0x%08x%08x/0x%08x%08x %ld " + "already at idx %d, state 0x%x", chan, portid, (uint32_t) (wwnn >> 32), (uint32_t) wwnn, (uint32_t) (wwpn >> 32), (uint32_t) wwpn, (long) (lp - fcp->portdb), dbidx, 
fcp->portdb[dbidx].state); continue; } /* * We found a zombie entry that matches us. * Revive it. We know that WWN and WWPN * are the same. For fabric devices, we * don't care that handle is different * as we assign that. If role or portid * are different, it maybe a changed device. */ lp = &fcp->portdb[dbidx]; lp->handle = handle; lp->new_portid = portid; lp->new_roles = nr; if (lp->portid != portid || lp->roles != nr) { isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "Zombie Fabric Port 0x%06x Now Changed", portid); + "Chan %d Zombie Fabric Port 0x%06x Now Changed", + chan, portid); lp->state = FC_PORTDB_STATE_CHANGED; } else { isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "Zombie Fabric Port 0x%06x Now Pending Valid", - portid); + "Chan %d Zombie Fabric Port 0x%06x " + "Now Pending Valid", chan, portid); lp->state = FC_PORTDB_STATE_PENDING_VALID; } } - FC_SCRATCH_RELEASE(isp); + FC_SCRATCH_RELEASE(isp, chan); if (fcp->isp_loopstate != LOOP_SCANNING_FABRIC) { - ISP_MARK_PORTDB(isp, 1); + ISP_MARK_PORTDB(isp, chan, 1); return (-1); } fcp->isp_loopstate = LOOP_FSCAN_DONE; - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "FC Scan Fabric Done"); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, + "Chan %d FC Scan Fabric Done", chan); return (0); } /* * Find an unused handle and try and use to login to a port. */ static int -isp_login_device(ispsoftc_t *isp, uint32_t portid, isp_pdb_t *p, uint16_t *ohp) +isp_login_device(ispsoftc_t *isp, int chan, uint32_t portid, isp_pdb_t *p, + uint16_t *ohp) { int lim, i, r; uint16_t handle; - if (FCPARAM(isp)->isp_2klogin) { + if (ISP_CAP_2KLOGIN(isp)) { lim = NPH_MAX_2K; } else { lim = NPH_MAX; } - handle = isp_nxt_handle(isp, *ohp); + handle = isp_nxt_handle(isp, chan, *ohp); for (i = 0; i < lim; i++) { /* * See if we're still logged into something with * this handle and that something agrees with this * port id. 
*/ - r = isp_getpdb(isp, handle, p, 0); + r = isp_getpdb(isp, chan, handle, p, 0); if (r == 0 && p->portid != portid) { - (void) isp_plogx(isp, handle, portid, + (void) isp_plogx(isp, chan, handle, portid, PLOGX_FLG_CMD_LOGO | PLOGX_FLG_IMPLICIT, 1); } else if (r == 0) { break; } - if (FCPARAM(isp)->isp_loopstate != LOOP_SCANNING_FABRIC) { + if (FCPARAM(isp, chan)->isp_loopstate != LOOP_SCANNING_FABRIC) { return (-1); } /* * Now try and log into the device */ - r = isp_plogx(isp, handle, portid, PLOGX_FLG_CMD_PLOGI, 1); - if (FCPARAM(isp)->isp_loopstate != LOOP_SCANNING_FABRIC) { + r = isp_plogx(isp, chan, handle, portid, + PLOGX_FLG_CMD_PLOGI, 1); + if (FCPARAM(isp, chan)->isp_loopstate != LOOP_SCANNING_FABRIC) { return (-1); } if (r == 0) { *ohp = handle; break; } else if ((r & 0xffff) == MBOX_PORT_ID_USED) { handle = r >> 16; break; } else if (r != MBOX_LOOP_ID_USED) { i = lim; break; + } else if (r == MBOX_TIMEOUT) { + return (-1); } else { *ohp = handle; - handle = isp_nxt_handle(isp, *ohp); + handle = isp_nxt_handle(isp, chan, *ohp); } } if (i == lim) { - isp_prt(isp, ISP_LOGWARN, "PLOGI 0x%06x failed", portid); + isp_prt(isp, ISP_LOGWARN, "Chan %d PLOGI 0x%06x failed", + chan, portid); return (-1); } /* * If we successfully logged into it, get the PDB for it * so we can crosscheck that it is still what we think it * is and that we also have the role it plays */ - r = isp_getpdb(isp, handle, p, 0); - if (FCPARAM(isp)->isp_loopstate != LOOP_SCANNING_FABRIC) { + r = isp_getpdb(isp, chan, handle, p, 0); + if (FCPARAM(isp, chan)->isp_loopstate != LOOP_SCANNING_FABRIC) { return (-1); } if (r != 0) { - isp_prt(isp, ISP_LOGERR, "new device 0x%06x@0x%x disappeared", - portid, handle); + isp_prt(isp, ISP_LOGERR, + "Chan %d new device 0x%06x@0x%x disappeared", + chan, portid, handle); return (-1); } if (p->handle != handle || p->portid != portid) { isp_prt(isp, ISP_LOGERR, - "new device 0x%06x@0x%x changed (0x%06x@0x%0x)", - portid, handle, p->portid, p->handle); + 
"Chan %d new device 0x%06x@0x%x changed (0x%06x@0x%0x)", + chan, portid, handle, p->portid, p->handle); return (-1); } return (0); } static int -isp_register_fc4_type(ispsoftc_t *isp) +isp_register_fc4_type(ispsoftc_t *isp, int chan) { - fcparam *fcp = isp->isp_param; + fcparam *fcp = FCPARAM(isp, chan); uint8_t local[SNS_RFT_ID_REQ_SIZE]; sns_screq_t *reqp = (sns_screq_t *) local; mbreg_t mbs; - MEMZERO((void *) reqp, SNS_RFT_ID_REQ_SIZE); + ISP_MEMZERO((void *) reqp, SNS_RFT_ID_REQ_SIZE); reqp->snscb_rblen = SNS_RFT_ID_RESP_SIZE >> 1; reqp->snscb_addr[RQRSP_ADDR0015] = DMA_WD0(fcp->isp_scdma + 0x100); reqp->snscb_addr[RQRSP_ADDR1631] = DMA_WD1(fcp->isp_scdma + 0x100); reqp->snscb_addr[RQRSP_ADDR3247] = DMA_WD2(fcp->isp_scdma + 0x100); reqp->snscb_addr[RQRSP_ADDR4863] = DMA_WD3(fcp->isp_scdma + 0x100); reqp->snscb_sblen = 22; reqp->snscb_data[0] = SNS_RFT_ID; reqp->snscb_data[4] = fcp->isp_portid & 0xffff; reqp->snscb_data[5] = (fcp->isp_portid >> 16) & 0xff; reqp->snscb_data[6] = (1 << FC4_SCSI); - FC_SCRATCH_ACQUIRE(isp); + if (FC_SCRATCH_ACQUIRE(isp, chan)) { + isp_prt(isp, ISP_LOGERR, sacq); + return (-1); + } isp_put_sns_request(isp, reqp, (sns_screq_t *) fcp->isp_scratch); - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_SEND_SNS; + MBSINIT(&mbs, MBOX_SEND_SNS, MBLOGALL, 1000000); mbs.param[1] = SNS_RFT_ID_REQ_SIZE >> 1; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); - mbs.logval = MBLOGALL; - mbs.timeout = 10000000; MEMORYBARRIER(isp, SYNC_SFORDEV, 0, SNS_RFT_ID_REQ_SIZE); isp_mboxcmd(isp, &mbs); - FC_SCRATCH_RELEASE(isp); + FC_SCRATCH_RELEASE(isp, chan); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { return (0); } else { return (-1); } } static int -isp_register_fc4_type_24xx(ispsoftc_t *isp) +isp_register_fc4_type_24xx(ispsoftc_t *isp, int chan) { mbreg_t mbs; - fcparam *fcp = FCPARAM(isp); + fcparam *fcp = FCPARAM(isp, chan); union { 
isp_ct_pt_t plocal; rft_id_t clocal; uint8_t q[QENTRY_LEN]; } un; isp_ct_pt_t *pt; ct_hdr_t *ct; rft_id_t *rp; uint8_t *scp = fcp->isp_scratch; - FC_SCRATCH_ACQUIRE(isp); + if (FC_SCRATCH_ACQUIRE(isp, chan)) { + isp_prt(isp, ISP_LOGERR, sacq); + return (-1); + } + /* * Build a Passthrough IOCB in memory. */ - MEMZERO(un.q, QENTRY_LEN); + ISP_MEMZERO(un.q, QENTRY_LEN); pt = &un.plocal; pt->ctp_header.rqs_entry_count = 1; pt->ctp_header.rqs_entry_type = RQSTYPE_CT_PASSTHRU; pt->ctp_handle = 0xffffffff; - pt->ctp_nphdl = NPH_SNS_ID; + pt->ctp_nphdl = fcp->isp_sns_hdl; pt->ctp_cmd_cnt = 1; + pt->ctp_vpidx = ISP_GET_VPIDX(isp, chan); pt->ctp_time = 1; pt->ctp_rsp_cnt = 1; pt->ctp_rsp_bcnt = sizeof (ct_hdr_t); pt->ctp_cmd_bcnt = sizeof (rft_id_t); pt->ctp_dataseg[0].ds_base = DMA_LO32(fcp->isp_scdma+XTXOFF); pt->ctp_dataseg[0].ds_basehi = DMA_HI32(fcp->isp_scdma+XTXOFF); pt->ctp_dataseg[0].ds_count = sizeof (rft_id_t); pt->ctp_dataseg[1].ds_base = DMA_LO32(fcp->isp_scdma+IGPOFF); pt->ctp_dataseg[1].ds_basehi = DMA_HI32(fcp->isp_scdma+IGPOFF); pt->ctp_dataseg[1].ds_count = sizeof (ct_hdr_t); isp_put_ct_pt(isp, pt, (isp_ct_pt_t *) &scp[CTXOFF]); + if (isp->isp_dblev & ISP_LOGDEBUG1) { + isp_print_bytes(isp, "IOCB CT Request", QENTRY_LEN, pt); + } /* * Build the CT header and command in memory. * * Note that the CT header has to end up as Big Endian format in memory. 
*/ - MEMZERO(&un.clocal, sizeof (un.clocal)); + ISP_MEMZERO(&un.clocal, sizeof (un.clocal)); ct = &un.clocal.rftid_hdr; ct->ct_revision = CT_REVISION; ct->ct_fcs_type = CT_FC_TYPE_FC; ct->ct_fcs_subtype = CT_FC_SUBTYPE_NS; ct->ct_cmd_resp = SNS_RFT_ID; ct->ct_bcnt_resid = (sizeof (rft_id_t) - sizeof (ct_hdr_t)) >> 2; rp = &un.clocal; rp->rftid_portid[0] = fcp->isp_portid >> 16; rp->rftid_portid[1] = fcp->isp_portid >> 8; rp->rftid_portid[2] = fcp->isp_portid; rp->rftid_fc4types[FC4_SCSI >> 5] = 1 << (FC4_SCSI & 0x1f); isp_put_rft_id(isp, rp, (rft_id_t *) &scp[XTXOFF]); + if (isp->isp_dblev & ISP_LOGDEBUG1) { + isp_print_bytes(isp, "CT Header", QENTRY_LEN, &scp[XTXOFF]); + } - MEMZERO(&scp[ZTXOFF], sizeof (ct_hdr_t)); + ISP_MEMZERO(&scp[ZTXOFF], sizeof (ct_hdr_t)); - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_EXEC_COMMAND_IOCB_A64; + MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 1000000); mbs.param[1] = QENTRY_LEN; mbs.param[2] = DMA_WD1(fcp->isp_scdma + CTXOFF); mbs.param[3] = DMA_WD0(fcp->isp_scdma + CTXOFF); mbs.param[6] = DMA_WD3(fcp->isp_scdma + CTXOFF); mbs.param[7] = DMA_WD2(fcp->isp_scdma + CTXOFF); - mbs.timeout = 500000; - mbs.logval = MBLOGALL; MEMORYBARRIER(isp, SYNC_SFORDEV, XTXOFF, 2 * QENTRY_LEN); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { - FC_SCRATCH_RELEASE(isp); + FC_SCRATCH_RELEASE(isp, chan); return (-1); } MEMORYBARRIER(isp, SYNC_SFORCPU, ZTXOFF, QENTRY_LEN); pt = &un.plocal; isp_get_ct_pt(isp, (isp_ct_pt_t *) &scp[ZTXOFF], pt); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "IOCB response", QENTRY_LEN, pt); } if (pt->ctp_status) { - FC_SCRATCH_RELEASE(isp); - isp_prt(isp, ISP_LOGWARN, "CT Passthrough returned 0x%x", - pt->ctp_status); - return (-1); + FC_SCRATCH_RELEASE(isp, chan); + isp_prt(isp, ISP_LOGWARN, + "Chan %d Register FC4 Type CT Passthrough returned 0x%x", + chan, pt->ctp_status); + return (1); } isp_get_ct_hdr(isp, (ct_hdr_t *) &scp[IGPOFF], ct); - FC_SCRATCH_RELEASE(isp); + 
FC_SCRATCH_RELEASE(isp, chan); if (ct->ct_cmd_resp == LS_RJT) { isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "Register FC4 Type rejected"); + "Chan %d Register FC4 Type rejected", chan); return (-1); } else if (ct->ct_cmd_resp == LS_ACC) { isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "Register FC4 Type accepted"); - return(0); + "Chan %d Register FC4 Type accepted", chan); + return (0); } else { isp_prt(isp, ISP_LOGWARN, - "Register FC4 Type: 0x%x", ct->ct_cmd_resp); + "Chan %d Register FC4 Type: 0x%x", + chan, ct->ct_cmd_resp); return (-1); } } static uint16_t -isp_nxt_handle(ispsoftc_t *isp, uint16_t handle) +isp_nxt_handle(ispsoftc_t *isp, int chan, uint16_t handle) { int i; if (handle == NIL_HANDLE) { - if (FCPARAM(isp)->isp_topo == TOPO_F_PORT) { + if (FCPARAM(isp, chan)->isp_topo == TOPO_F_PORT) { handle = 0; } else { handle = SNS_ID+1; } } else { handle += 1; if (handle >= FL_ID && handle <= SNS_ID) { handle = SNS_ID+1; } if (handle >= NPH_RESERVED && handle <= NPH_FL_ID) { handle = NPH_FL_ID+1; } - if (FCPARAM(isp)->isp_2klogin) { + if (ISP_CAP_2KLOGIN(isp)) { if (handle == NPH_MAX_2K) { handle = 0; } } else { if (handle == NPH_MAX) { handle = 0; } } } - if (handle == FCPARAM(isp)->isp_loopid) { - return (isp_nxt_handle(isp, handle)); + if (handle == FCPARAM(isp, chan)->isp_loopid) { + return (isp_nxt_handle(isp, chan, handle)); } for (i = 0; i < MAX_FC_TARG; i++) { - if (FCPARAM(isp)->portdb[i].state == FC_PORTDB_STATE_NIL) { + if (FCPARAM(isp, chan)->portdb[i].state == + FC_PORTDB_STATE_NIL) { continue; } - if (FCPARAM(isp)->portdb[i].handle == handle) { - return (isp_nxt_handle(isp, handle)); + if (FCPARAM(isp, chan)->portdb[i].handle == handle) { + return (isp_nxt_handle(isp, chan, handle)); } } return (handle); } /* * Start a command. Locking is assumed done in the caller. 
*/ int isp_start(XS_T *xs) { ispsoftc_t *isp; - uint32_t nxti, optr, handle; + uint32_t handle; uint8_t local[QENTRY_LEN]; - ispreq_t *reqp, *qep; - void *cdbp; + ispreq_t *reqp; + void *cdbp, *qep; uint16_t *tptr; - int target, i, hdlidx = 0; + int target, dmaresult, hdlidx = 0; XS_INITERR(xs); isp = XS_ISP(xs); /* - * Check to make sure we're supporting initiator role. - */ - if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) { - XS_SETERR(xs, HBA_SELTIMEOUT); - return (CMD_COMPLETE); - } - - /* * Now make sure we're running. */ if (isp->isp_state != ISP_RUNSTATE) { isp_prt(isp, ISP_LOGERR, "Adapter not at RUNSTATE"); XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } /* * Check command CDB length, etc.. We really are limited to 16 bytes * for Fibre Channel, but can do up to 44 bytes in parallel SCSI, * but probably only if we're running fairly new firmware (we'll * let the old f/w choke on an extended command queue entry). */ if (XS_CDBLEN(xs) > (IS_FC(isp)? 16 : 44) || XS_CDBLEN(xs) == 0) { - isp_prt(isp, ISP_LOGERR, - "unsupported cdb length (%d, CDB[0]=0x%x)", - XS_CDBLEN(xs), XS_CDBP(xs)[0] & 0xff); + isp_prt(isp, ISP_LOGERR, "unsupported cdb length (%d, CDB[0]=0x%x)", XS_CDBLEN(xs), XS_CDBP(xs)[0] & 0xff); XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } /* * Translate the target to device handle as appropriate, checking * for correct device state as well. */ target = XS_TGT(xs); if (IS_FC(isp)) { - fcparam *fcp = isp->isp_param; + fcparam *fcp = FCPARAM(isp, XS_CHANNEL(xs)); + if ((fcp->role & ISP_ROLE_INITIATOR) == 0) { + XS_SETERR(xs, HBA_SELTIMEOUT); + return (CMD_COMPLETE); + } + /* * Try again later. 
*/ - if (fcp->isp_fwstate != FW_READY || - fcp->isp_loopstate != LOOP_READY) { + if (fcp->isp_fwstate != FW_READY || fcp->isp_loopstate != LOOP_READY) { return (CMD_RQLATER); } if (XS_TGT(xs) >= MAX_FC_TARG) { XS_SETERR(xs, HBA_SELTIMEOUT); return (CMD_COMPLETE); } - hdlidx = fcp->isp_ini_map[XS_TGT(xs)] - 1; - isp_prt(isp, ISP_LOGDEBUG1, "XS_TGT(xs)=%d- hdlidx value %d", - XS_TGT(xs), hdlidx); + hdlidx = fcp->isp_dev_map[XS_TGT(xs)] - 1; + isp_prt(isp, ISP_LOGDEBUG2, "XS_TGT(xs)=%d- hdlidx value %d", XS_TGT(xs), hdlidx); if (hdlidx < 0 || hdlidx >= MAX_FC_TARG) { XS_SETERR(xs, HBA_SELTIMEOUT); return (CMD_COMPLETE); } if (fcp->portdb[hdlidx].state == FC_PORTDB_STATE_ZOMBIE) { return (CMD_RQLATER); } if (fcp->portdb[hdlidx].state != FC_PORTDB_STATE_VALID) { XS_SETERR(xs, HBA_SELTIMEOUT); return (CMD_COMPLETE); } target = fcp->portdb[hdlidx].handle; + fcp->portdb[hdlidx].dirty = 1; + } else { + sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); + if ((sdp->role & ISP_ROLE_INITIATOR) == 0) { + XS_SETERR(xs, HBA_SELTIMEOUT); + return (CMD_COMPLETE); + } + if (sdp->update) { + isp_spi_update(isp, XS_CHANNEL(xs)); + } } - /* - * Next check to see if any HBA or Device parameters need to be updated. - */ - if (isp->isp_update != 0) { - isp_update(isp); - } - start_again: - if (isp_getrqentry(isp, &nxti, &optr, (void *)&qep)) { + qep = isp_getrqentry(isp); + if (qep == NULL) { isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow"); XS_SETERR(xs, HBA_BOTCH); return (CMD_EAGAIN); } + XS_SETERR(xs, HBA_NOERROR); /* * Now see if we need to synchronize the ISP with respect to anything. * We do dual duty here (cough) for synchronizing for busses other * than which we got here to send a command to. 
*/ reqp = (ispreq_t *) local; - if (isp->isp_sendmarker) { + ISP_MEMZERO(local, QENTRY_LEN); + if (ISP_TST_SENDMARKER(isp, XS_CHANNEL(xs))) { if (IS_24XX(isp)) { - isp_marker_24xx_t *m = (isp_marker_24xx_t *) qep; - MEMZERO(m, QENTRY_LEN); + isp_marker_24xx_t *m = (isp_marker_24xx_t *) reqp; m->mrk_header.rqs_entry_count = 1; m->mrk_header.rqs_entry_type = RQSTYPE_MARKER; m->mrk_modifier = SYNC_ALL; - isp_put_marker_24xx(isp, m, (isp_marker_24xx_t *)qep); - ISP_ADD_REQUEST(isp, nxti); - isp->isp_sendmarker = 0; - goto start_again; + isp_put_marker_24xx(isp, m, qep); } else { - for (i = 0; i < (IS_DUALBUS(isp)? 2: 1); i++) { - isp_marker_t *m = (isp_marker_t *) qep; - if ((isp->isp_sendmarker & (1 << i)) == 0) { - continue; - } - MEMZERO(m, QENTRY_LEN); - m->mrk_header.rqs_entry_count = 1; - m->mrk_header.rqs_entry_type = RQSTYPE_MARKER; - m->mrk_target = (i << 7); /* bus # */ - m->mrk_modifier = SYNC_ALL; - isp_put_marker(isp, m, (isp_marker_t *) qep); - ISP_ADD_REQUEST(isp, nxti); - isp->isp_sendmarker &= ~(1 << i); - goto start_again; - } + isp_marker_t *m = (isp_marker_t *) reqp; + m->mrk_header.rqs_entry_count = 1; + m->mrk_header.rqs_entry_type = RQSTYPE_MARKER; + m->mrk_target = (XS_CHANNEL(xs) << 7); /* bus # */ + m->mrk_modifier = SYNC_ALL; + isp_put_marker(isp, m, qep); } + ISP_SYNC_REQUEST(isp); + ISP_SET_SENDMARKER(isp, XS_CHANNEL(xs), 0); + goto start_again; } - MEMZERO((void *)reqp, QENTRY_LEN); reqp->req_header.rqs_entry_count = 1; if (IS_24XX(isp)) { reqp->req_header.rqs_entry_type = RQSTYPE_T7RQS; } else if (IS_FC(isp)) { reqp->req_header.rqs_entry_type = RQSTYPE_T2RQS; } else { - if (XS_CDBLEN(xs) > 12) + if (XS_CDBLEN(xs) > 12) { reqp->req_header.rqs_entry_type = RQSTYPE_CMDONLY; - else + } else { reqp->req_header.rqs_entry_type = RQSTYPE_REQUEST; + } } /* reqp->req_header.rqs_flags = 0; */ /* reqp->req_header.rqs_seqno = 0; */ if (IS_24XX(isp)) { int ttype; if (XS_TAG_P(xs)) { ttype = XS_TAG_TYPE(xs); } else { if (XS_CDBP(xs)[0] == 0x3) { ttype = 
REQFLAG_HTAG; } else { ttype = REQFLAG_STAG; } } if (ttype == REQFLAG_OTAG) { ttype = FCP_CMND_TASK_ATTR_ORDERED; } else if (ttype == REQFLAG_HTAG) { ttype = FCP_CMND_TASK_ATTR_HEAD; } else { ttype = FCP_CMND_TASK_ATTR_SIMPLE; } ((ispreqt7_t *)reqp)->req_task_attribute = ttype; } else if (IS_FC(isp)) { /* * See comment in isp_intr */ - /* XS_RESID(xs) = 0; */ + /* XS_SET_RESID(xs, 0); */ /* * Fibre Channel always requires some kind of tag. * The Qlogic drivers seem be happy not to use a tag, * but this breaks for some devices (IBM drives). */ if (XS_TAG_P(xs)) { ((ispreqt2_t *)reqp)->req_flags = XS_TAG_TYPE(xs); } else { /* * If we don't know what tag to use, use HEAD OF QUEUE * for Request Sense or Simple. */ if (XS_CDBP(xs)[0] == 0x3) /* REQUEST SENSE */ ((ispreqt2_t *)reqp)->req_flags = REQFLAG_HTAG; else ((ispreqt2_t *)reqp)->req_flags = REQFLAG_STAG; } } else { - sdparam *sdp = (sdparam *)isp->isp_param; - sdp += XS_CHANNEL(xs); - if ((sdp->isp_devparam[target].actv_flags & DPARM_TQING) && - XS_TAG_P(xs)) { + sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); + if ((sdp->isp_devparam[target].actv_flags & DPARM_TQING) && XS_TAG_P(xs)) { reqp->req_flags = XS_TAG_TYPE(xs); } } cdbp = reqp->req_cdb; tptr = &reqp->req_time; if (IS_SCSI(isp)) { reqp->req_target = target | (XS_CHANNEL(xs) << 7); reqp->req_lun_trn = XS_LUN(xs); reqp->req_cdblen = XS_CDBLEN(xs); } else if (IS_24XX(isp)) { fcportdb_t *lp; - lp = &FCPARAM(isp)->portdb[hdlidx]; + lp = &FCPARAM(isp, XS_CHANNEL(xs))->portdb[hdlidx]; ((ispreqt7_t *)reqp)->req_nphdl = target; ((ispreqt7_t *)reqp)->req_tidlo = lp->portid; ((ispreqt7_t *)reqp)->req_tidhi = lp->portid >> 16; + ((ispreqt7_t *)reqp)->req_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(xs)); if (XS_LUN(xs) > 256) { ((ispreqt7_t *)reqp)->req_lun[0] = XS_LUN(xs) >> 8; ((ispreqt7_t *)reqp)->req_lun[0] |= 0x40; } ((ispreqt7_t *)reqp)->req_lun[1] = XS_LUN(xs); cdbp = ((ispreqt7_t *)reqp)->req_cdb; tptr = &((ispreqt7_t *)reqp)->req_time; - } else if 
(FCPARAM(isp)->isp_2klogin) { + } else if (ISP_CAP_2KLOGIN(isp)) { ((ispreqt2e_t *)reqp)->req_target = target; ((ispreqt2e_t *)reqp)->req_scclun = XS_LUN(xs); - } else if (FCPARAM(isp)->isp_sccfw) { + } else if (ISP_CAP_SCCFW(isp)) { ((ispreqt2_t *)reqp)->req_target = target; ((ispreqt2_t *)reqp)->req_scclun = XS_LUN(xs); } else { ((ispreqt2_t *)reqp)->req_target = target; ((ispreqt2_t *)reqp)->req_lun_trn = XS_LUN(xs); } - MEMCPY(cdbp, XS_CDBP(xs), XS_CDBLEN(xs)); + ISP_MEMCPY(cdbp, XS_CDBP(xs), XS_CDBLEN(xs)); *tptr = XS_TIME(xs) / 1000; if (*tptr == 0 && XS_TIME(xs)) { *tptr = 1; } if (IS_24XX(isp) && *tptr > 0x1999) { *tptr = 0x1999; } if (isp_save_xs(isp, xs, &handle)) { isp_prt(isp, ISP_LOGDEBUG0, "out of xflist pointers"); XS_SETERR(xs, HBA_BOTCH); return (CMD_EAGAIN); } /* Whew. Thankfully the same for type 7 requests */ reqp->req_handle = handle; /* - * Set up DMA and/or do any bus swizzling of the request entry + * Set up DMA and/or do any platform dependent swizzling of the request entry * so that the Qlogic F/W understands what is being asked of it. + * + * The callee is responsible for adding all requests at this point. */ - i = ISP_DMASETUP(isp, xs, reqp, &nxti, optr); - if (i != CMD_QUEUED) { + dmaresult = ISP_DMASETUP(isp, xs, reqp); + if (dmaresult != CMD_QUEUED) { isp_destroy_handle(isp, handle); /* * dmasetup sets actual error in packet, and * return what we were given to return. */ - return (i); + return (dmaresult); } - XS_SETERR(xs, HBA_NOERROR); - isp_prt(isp, ISP_LOGDEBUG0, - "START cmd for %d.%d.%d cmd 0x%x datalen %ld", - XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), XS_CDBP(xs)[0], - (long) XS_XFRLEN(xs)); - ISP_ADD_REQUEST(isp, nxti); + isp_prt(isp, ISP_LOGDEBUG0, "START cmd for %d.%d.%d cmd 0x%x datalen %ld", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), XS_CDBP(xs)[0], (long) XS_XFRLEN(xs)); isp->isp_nactive++; return (CMD_QUEUED); } /* * isp control * Locks (ints blocked) assumed held. 
*/ int -isp_control(ispsoftc_t *isp, ispctl_t ctl, void *arg) +isp_control(ispsoftc_t *isp, ispctl_t ctl, ...) { XS_T *xs; - mbreg_t mbs; - int bus, tgt; + mbreg_t *mbr, mbs; + int chan, tgt; uint32_t handle; + va_list ap; - MEMZERO(&mbs, sizeof (mbs)); - switch (ctl) { - default: - isp_prt(isp, ISP_LOGERR, "Unknown Control Opcode 0x%x", ctl); - break; - case ISPCTL_RESET_BUS: /* * Issue a bus reset. */ if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGWARN, "RESET BUS NOT IMPLEMENTED"); break; } else if (IS_FC(isp)) { mbs.param[1] = 10; - bus = 0; + chan = 0; } else { - mbs.param[1] = SDPARAM(isp)->isp_bus_reset_delay; + va_start(ap, ctl); + chan = va_arg(ap, int); + va_end(ap); + mbs.param[1] = SDPARAM(isp, chan)->isp_bus_reset_delay; if (mbs.param[1] < 2) { mbs.param[1] = 2; } - bus = *((int *) arg); - if (IS_DUALBUS(isp)) { - mbs.param[2] = bus; - } + mbs.param[2] = chan; } - mbs.param[0] = MBOX_BUS_RESET; - isp->isp_sendmarker |= (1 << bus); - mbs.logval = MBLOGALL; + MBSINIT(&mbs, MBOX_BUS_RESET, MBLOGALL, 0); + ISP_SET_SENDMARKER(isp, chan, 1); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { break; } isp_prt(isp, ISP_LOGINFO, - "driver initiated bus reset of bus %d", bus); + "driver initiated bus reset of bus %d", chan); return (0); case ISPCTL_RESET_DEV: - tgt = (*((int *) arg)) & 0xffff; + va_start(ap, ctl); + chan = va_arg(ap, int); + tgt = va_arg(ap, int); + va_end(ap); if (IS_24XX(isp)) { - isp_prt(isp, ISP_LOGWARN, "RESET DEV NOT IMPLEMENTED"); + uint8_t local[QENTRY_LEN]; + isp24xx_tmf_t *tmf; + isp24xx_statusreq_t *sp; + fcparam *fcp = FCPARAM(isp, chan); + fcportdb_t *lp; + int hdlidx; + + hdlidx = fcp->isp_dev_map[tgt] - 1; + if (hdlidx < 0 || hdlidx >= MAX_FC_TARG) { + isp_prt(isp, ISP_LOGWARN, + "Chan %d bad handle %d trying to reset" + "target %d", chan, hdlidx, tgt); + break; + } + lp = &fcp->portdb[hdlidx]; + if (lp->state != FC_PORTDB_STATE_VALID) { + isp_prt(isp, ISP_LOGWARN, + "Chan %d handle %d for abort of target %d " + "no 
longer valid", chan, + hdlidx, tgt); + break; + } + + tmf = (isp24xx_tmf_t *) local; + ISP_MEMZERO(tmf, QENTRY_LEN); + tmf->tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT; + tmf->tmf_header.rqs_entry_count = 1; + tmf->tmf_nphdl = lp->handle; + tmf->tmf_delay = 2; + tmf->tmf_timeout = 2; + tmf->tmf_flags = ISP24XX_TMF_TARGET_RESET; + tmf->tmf_tidlo = lp->portid; + tmf->tmf_tidhi = lp->portid >> 16; + tmf->tmf_vpidx = ISP_GET_VPIDX(isp, chan); + isp_prt(isp, ISP_LOGALL, "Chan %d Reset N-Port Handle 0x%04x @ Port 0x%06x", chan, lp->handle, lp->portid); + MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 5000000); + mbs.param[1] = QENTRY_LEN; + mbs.param[2] = DMA_WD1(fcp->isp_scdma); + mbs.param[3] = DMA_WD0(fcp->isp_scdma); + mbs.param[6] = DMA_WD3(fcp->isp_scdma); + mbs.param[7] = DMA_WD2(fcp->isp_scdma); + + if (FC_SCRATCH_ACQUIRE(isp, chan)) { + isp_prt(isp, ISP_LOGERR, sacq); + break; + } + isp_put_24xx_tmf(isp, tmf, fcp->isp_scratch); + MEMORYBARRIER(isp, SYNC_SFORDEV, 0, QENTRY_LEN); + fcp->sendmarker = 1; + isp_mboxcmd(isp, &mbs); + if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { + FC_SCRATCH_RELEASE(isp, chan); + break; + } + MEMORYBARRIER(isp, SYNC_SFORCPU, QENTRY_LEN, + QENTRY_LEN); + sp = (isp24xx_statusreq_t *) local; + isp_get_24xx_response(isp, + &((isp24xx_statusreq_t *)fcp->isp_scratch)[1], sp); + FC_SCRATCH_RELEASE(isp, chan); + if (sp->req_completion_status == 0) { + return (0); + } + isp_prt(isp, ISP_LOGWARN, + "Chan %d reset of target %d returned 0x%x", + chan, tgt, sp->req_completion_status); break; } else if (IS_FC(isp)) { - if (FCPARAM(isp)->isp_2klogin) { + if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = tgt; mbs.ibits = (1 << 10); } else { mbs.param[1] = (tgt << 8); } - bus = 0; } else { - bus = (*((int *) arg)) >> 16; - mbs.param[1] = (bus << 15) | (tgt << 8); + mbs.param[1] = (chan << 15) | (tgt << 8); } - mbs.param[0] = MBOX_ABORT_TARGET; + MBSINIT(&mbs, MBOX_ABORT_TARGET, MBLOGALL, 0); mbs.param[2] = 3; /* 'delay', in seconds */ - mbs.logval = 
MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { break; } isp_prt(isp, ISP_LOGINFO, - "Target %d on Bus %d Reset Succeeded", tgt, bus); - isp->isp_sendmarker |= (1 << bus); + "Target %d on Bus %d Reset Succeeded", tgt, chan); + ISP_SET_SENDMARKER(isp, chan, 1); return (0); case ISPCTL_ABORT_CMD: - xs = (XS_T *) arg; + va_start(ap, ctl); + xs = va_arg(ap, XS_T *); + va_end(ap); + tgt = XS_TGT(xs); + chan = XS_CHANNEL(xs); handle = isp_find_handle(isp, xs); if (handle == 0) { isp_prt(isp, ISP_LOGWARN, "cannot find handle for command to abort"); break; } if (IS_24XX(isp)) { - isp_prt(isp, ISP_LOGWARN, "ABORT CMD NOT IMPLEMENTED"); + isp24xx_abrt_t local, *ab = &local, *ab2; + fcparam *fcp; + fcportdb_t *lp; + int hdlidx; + + fcp = FCPARAM(isp, chan); + hdlidx = fcp->isp_dev_map[tgt] - 1; + if (hdlidx < 0 || hdlidx >= MAX_FC_TARG) { + isp_prt(isp, ISP_LOGWARN, + "Chan %d bad handle %d trying to abort" + "target %d", chan, hdlidx, tgt); + break; + } + lp = &fcp->portdb[hdlidx]; + if (lp->state != FC_PORTDB_STATE_VALID) { + isp_prt(isp, ISP_LOGWARN, + "Chan %d handle %d for abort of target %d " + "no longer valid", chan, hdlidx, tgt); + break; + } + isp_prt(isp, ISP_LOGALL, + "Chan %d Abort Cmd for N-Port 0x%04x @ Port " + "0x%06x %p", chan, lp->handle, lp->portid, xs); + ISP_MEMZERO(ab, QENTRY_LEN); + ab->abrt_header.rqs_entry_type = RQSTYPE_ABORT_IO; + ab->abrt_header.rqs_entry_count = 1; + ab->abrt_handle = lp->handle; + ab->abrt_cmd_handle = handle; + ab->abrt_tidlo = lp->portid; + ab->abrt_tidhi = lp->portid >> 16; + ab->abrt_vpidx = ISP_GET_VPIDX(isp, chan); + + ISP_MEMZERO(&mbs, sizeof (mbs)); + MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 5000000); + mbs.param[1] = QENTRY_LEN; + mbs.param[2] = DMA_WD1(fcp->isp_scdma); + mbs.param[3] = DMA_WD0(fcp->isp_scdma); + mbs.param[6] = DMA_WD3(fcp->isp_scdma); + mbs.param[7] = DMA_WD2(fcp->isp_scdma); + + if (FC_SCRATCH_ACQUIRE(isp, chan)) { + isp_prt(isp, ISP_LOGERR, sacq); + break; + 
} + isp_put_24xx_abrt(isp, ab, fcp->isp_scratch); + ab2 = (isp24xx_abrt_t *) + &((uint8_t *)fcp->isp_scratch)[QENTRY_LEN]; + ab2->abrt_nphdl = 0xdeaf; + MEMORYBARRIER(isp, SYNC_SFORDEV, 0, 2 * QENTRY_LEN); + isp_mboxcmd(isp, &mbs); + if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { + FC_SCRATCH_RELEASE(isp, chan); + break; + } + MEMORYBARRIER(isp, SYNC_SFORCPU, QENTRY_LEN, + QENTRY_LEN); + isp_get_24xx_abrt(isp, ab2, ab); + FC_SCRATCH_RELEASE(isp, chan); + if (ab->abrt_nphdl == ISP24XX_ABRT_OKAY) { + return (0); + } + isp_prt(isp, ISP_LOGWARN, + "Chan %d handle %d abort returned 0x%x", chan, + hdlidx, ab->abrt_nphdl); break; } else if (IS_FC(isp)) { - if (FCPARAM(isp)->isp_sccfw) { - if (FCPARAM(isp)->isp_2klogin) { + if (ISP_CAP_SCCFW(isp)) { + if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = tgt; } else { mbs.param[1] = tgt << 8; } mbs.param[6] = XS_LUN(xs); } else { mbs.param[1] = tgt << 8 | XS_LUN(xs); } } else { - bus = XS_CHANNEL(xs); - mbs.param[1] = (bus << 15) | (tgt << 8) | XS_LUN(xs); + mbs.param[1] = (chan << 15) | (tgt << 8) | XS_LUN(xs); } - mbs.param[0] = MBOX_ABORT; + MBSINIT(&mbs, MBOX_ABORT, MBLOGALL & ~MBOX_COMMAND_ERROR, 0); mbs.param[2] = handle; - mbs.logval = MBLOGALL & ~MBOX_COMMAND_ERROR; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { break; } return (0); case ISPCTL_UPDATE_PARAMS: - isp_update(isp); + va_start(ap, ctl); + chan = va_arg(ap, int); + va_end(ap); + isp_spi_update(isp, chan); return (0); case ISPCTL_FCLINK_TEST: if (IS_FC(isp)) { - int usdelay = *((int *) arg); + int usdelay; + va_start(ap, ctl); + chan = va_arg(ap, int); + usdelay = va_arg(ap, int); + va_end(ap); if (usdelay == 0) { usdelay = 250000; } - return (isp_fclink_test(isp, usdelay)); + return (isp_fclink_test(isp, chan, usdelay)); } break; case ISPCTL_SCAN_FABRIC: if (IS_FC(isp)) { - return (isp_scan_fabric(isp)); + va_start(ap, ctl); + chan = va_arg(ap, int); + va_end(ap); + return (isp_scan_fabric(isp, chan)); } break; case ISPCTL_SCAN_LOOP: if 
(IS_FC(isp)) { - return (isp_scan_loop(isp)); + va_start(ap, ctl); + chan = va_arg(ap, int); + va_end(ap); + return (isp_scan_loop(isp, chan)); } break; case ISPCTL_PDB_SYNC: if (IS_FC(isp)) { - return (isp_pdb_sync(isp)); + va_start(ap, ctl); + chan = va_arg(ap, int); + va_end(ap); + return (isp_pdb_sync(isp, chan)); } break; case ISPCTL_SEND_LIP: if (IS_FC(isp) && !IS_24XX(isp)) { - mbs.param[0] = MBOX_INIT_LIP; - if (FCPARAM(isp)->isp_2klogin) { + MBSINIT(&mbs, MBOX_INIT_LIP, MBLOGALL, 0); + if (ISP_CAP_2KLOGIN(isp)) { mbs.ibits = (1 << 10); } - mbs.logval = MBLOGALL; isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { return (0); } } break; case ISPCTL_GET_PDB: - if (IS_FC(isp) && arg) { - int id = *((int *)arg); - isp_pdb_t *pdb = arg; - return (isp_getpdb(isp, id, pdb, 1)); + if (IS_FC(isp)) { + isp_pdb_t *pdb; + va_start(ap, ctl); + chan = va_arg(ap, int); + tgt = va_arg(ap, int); + pdb = va_arg(ap, isp_pdb_t *); + va_end(ap); + return (isp_getpdb(isp, chan, tgt, pdb, 1)); } break; - case ISPCTL_GET_PORTNAME: + case ISPCTL_GET_NAMES: { - uint64_t *wwnp = arg; - int loopid = *wwnp; - *wwnp = isp_get_portname(isp, loopid, 0); - if (*wwnp == (uint64_t) -1) { + uint64_t *wwnn, *wwnp; + va_start(ap, ctl); + chan = va_arg(ap, int); + tgt = va_arg(ap, int); + wwnn = va_arg(ap, uint64_t *); + wwnp = va_arg(ap, uint64_t *); + va_end(ap); + if (wwnn == NULL && wwnp == NULL) { break; - } else { - return (0); } + if (wwnn) { + *wwnn = isp_get_wwn(isp, chan, tgt, 1); + if (*wwnn == INI_NONE) { + break; + } + } + if (wwnp) { + *wwnp = isp_get_wwn(isp, chan, tgt, 0); + if (*wwnp == INI_NONE) { + break; + } + } + return (0); } case ISPCTL_RUN_MBOXCMD: - - isp_mboxcmd(isp, arg); - return(0); - + { + va_start(ap, ctl); + mbr = va_arg(ap, mbreg_t *); + va_end(ap); + isp_mboxcmd(isp, mbr); + return (0); + } case ISPCTL_PLOGX: { - isp_plcmd_t *p = arg; + isp_plcmd_t *p; int r; - if ((p->flags & PLOGX_FLG_CMD_MASK) != PLOGX_FLG_CMD_PLOGI || - (p->handle != 
NIL_HANDLE)) { - return (isp_plogx(isp, p->handle, p->portid, - p->flags, 0)); + va_start(ap, ctl); + p = va_arg(ap, isp_plcmd_t *); + va_end(ap); + + if ((p->flags & PLOGX_FLG_CMD_MASK) != PLOGX_FLG_CMD_PLOGI || (p->handle != NIL_HANDLE)) { + return (isp_plogx(isp, p->channel, p->handle, p->portid, p->flags, 0)); } do { - p->handle = isp_nxt_handle(isp, p->handle); - r = isp_plogx(isp, p->handle, p->portid, p->flags, 0); + p->handle = isp_nxt_handle(isp, p->channel, p->handle); + r = isp_plogx(isp, p->channel, p->handle, p->portid, p->flags, 0); if ((r & 0xffff) == MBOX_PORT_ID_USED) { p->handle = r >> 16; r = 0; break; } } while ((r & 0xffff) == MBOX_LOOP_ID_USED); return (r); } -#ifdef ISP_TARGET_MODE - case ISPCTL_TOGGLE_TMODE: - { + default: + isp_prt(isp, ISP_LOGERR, "Unknown Control Opcode 0x%x", ctl); + break; - /* - * We don't check/set against role here- that's the - * responsibility for the outer layer to coordinate. - */ - if (IS_SCSI(isp)) { - int param = *(int *)arg; - mbs.param[0] = MBOX_ENABLE_TARGET_MODE; - mbs.param[1] = param & 0xffff; - mbs.param[2] = param >> 16; - mbs.logval = MBLOGALL; - isp_mboxcmd(isp, &mbs); - if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { - break; - } - } - return (0); } -#endif - } return (-1); } /* * Interrupt Service Routine(s). * * External (OS) framework has done the appropriate locking, * and the locking will be held throughout this function. */ /* * Limit our stack depth by sticking with the max likely number * of completions on a request queue at any one time. */ #ifndef MAX_REQUESTQ_COMPLETIONS #define MAX_REQUESTQ_COMPLETIONS 32 #endif void isp_intr(ispsoftc_t *isp, uint32_t isr, uint16_t sema, uint16_t mbox) { XS_T *complist[MAX_REQUESTQ_COMPLETIONS], *xs; uint32_t iptr, optr, junk; int i, nlooked = 0, ndone = 0; again: optr = isp->isp_residx; /* * Is this a mailbox related interrupt? * The mailbox semaphore will be nonzero if so. 
*/ if (sema) { + fmbox: if (mbox & 0x4000) { isp->isp_intmboxc++; if (isp->isp_mboxbsy) { int obits = isp->isp_obits; isp->isp_mboxtmp[0] = mbox; for (i = 1; i < MAX_MAILBOX(isp); i++) { if ((obits & (1 << i)) == 0) { continue; } - isp->isp_mboxtmp[i] = - ISP_READ(isp, MBOX_OFF(i)); + isp->isp_mboxtmp[i] = ISP_READ(isp, MBOX_OFF(i)); } if (isp->isp_mbxwrk0) { if (isp_mbox_continue(isp) == 0) { return; } } MBOX_NOTIFY_COMPLETE(isp); } else { - isp_prt(isp, ISP_LOGWARN, - "mailbox cmd (0x%x) with no waiters", mbox); + isp_prt(isp, ISP_LOGWARN, "mailbox cmd (0x%x) with no waiters", mbox); } } else if (isp_parse_async(isp, mbox) < 0) { return; } - if ((IS_FC(isp) && mbox != ASYNC_RIO_RESP) || - isp->isp_state != ISP_RUNSTATE) { + if ((IS_FC(isp) && mbox != ASYNC_RIO_RESP) || isp->isp_state != ISP_RUNSTATE) { goto out; } } /* * We can't be getting this now. */ if (isp->isp_state != ISP_RUNSTATE) { - isp_prt(isp, ISP_LOGINFO, - "interrupt (ISR=%x SEMA=%x) when not ready", isr, sema); /* + * This seems to happen to 23XX and 24XX cards- don't know why. + */ + if (isp->isp_mboxbsy && isp->isp_lastmbxcmd == MBOX_ABOUT_FIRMWARE) { + goto fmbox; + } + isp_prt(isp, ISP_LOGINFO, "interrupt (ISR=%x SEMA=%x) when not ready", isr, sema); + /* * Thank you very much! *Burrrp*! */ - ISP_WRITE(isp, isp->isp_respoutrp, - ISP_READ(isp, isp->isp_respinrp)); + ISP_WRITE(isp, isp->isp_respoutrp, ISP_READ(isp, isp->isp_respinrp)); if (IS_24XX(isp)) { ISP_DISABLE_INTS(isp); } goto out; } #ifdef ISP_TARGET_MODE /* * Check for ATIO Queue entries. 
*/ - if (isp->isp_rspbsy == 0 && (isp->isp_role & ISP_ROLE_TARGET) && - IS_24XX(isp)) { - iptr = ISP_READ(isp, isp->isp_atioinrp); - optr = ISP_READ(isp, isp->isp_atiooutrp); + if (IS_24XX(isp)) { + iptr = ISP_READ(isp, BIU2400_ATIO_RSPINP); + optr = ISP_READ(isp, BIU2400_ATIO_RSPOUTP); - isp->isp_rspbsy = 1; while (optr != iptr) { uint8_t qe[QENTRY_LEN]; isphdr_t *hp; uint32_t oop; void *addr; oop = optr; MEMORYBARRIER(isp, SYNC_ATIOQ, oop, QENTRY_LEN); addr = ISP_QUEUE_ENTRY(isp->isp_atioq, oop); isp_get_hdr(isp, addr, (isphdr_t *)qe); hp = (isphdr_t *)qe; switch (hp->rqs_entry_type) { case RQSTYPE_NOTIFY: case RQSTYPE_ATIO: (void) isp_target_notify(isp, addr, &oop); break; default: - isp_print_qentry(isp, "?ATIOQ entry?", - oop, addr); + isp_print_qentry(isp, "?ATIOQ entry?", oop, addr); break; } optr = ISP_NXT_QENTRY(oop, RESULT_QUEUE_LEN(isp)); - ISP_WRITE(isp, isp->isp_atiooutrp, optr); + ISP_WRITE(isp, BIU2400_ATIO_RSPOUTP, optr); } - isp->isp_rspbsy = 0; optr = isp->isp_residx; } #endif /* * Get the current Response Queue Out Pointer. * * If we're a 2300 or 2400, we can ask what hardware what it thinks. */ if (IS_23XX(isp) || IS_24XX(isp)) { optr = ISP_READ(isp, isp->isp_respoutrp); /* * Debug: to be taken out eventually */ if (isp->isp_residx != optr) { - isp_prt(isp, ISP_LOGINFO, - "isp_intr: hard optr=%x, soft optr %x", - optr, isp->isp_residx); + isp_prt(isp, ISP_LOGINFO, "isp_intr: hard optr=%x, soft optr %x", optr, isp->isp_residx); isp->isp_residx = optr; } } else { optr = isp->isp_residx; } /* * You *must* read the Response Queue In Pointer * prior to clearing the RISC interrupt. * * Debounce the 2300 if revision less than 2. 
*/ if (IS_2100(isp) || (IS_2300(isp) && isp->isp_revision < 2)) { i = 0; do { iptr = ISP_READ(isp, isp->isp_respinrp); junk = ISP_READ(isp, isp->isp_respinrp); } while (junk != iptr && ++i < 1000); if (iptr != junk) { - isp_prt(isp, ISP_LOGWARN, - "Response Queue Out Pointer Unstable (%x, %x)", - iptr, junk); + isp_prt(isp, ISP_LOGWARN, "Response Queue Out Pointer Unstable (%x, %x)", iptr, junk); goto out; } } else { iptr = ISP_READ(isp, isp->isp_respinrp); } isp->isp_resodx = iptr; if (optr == iptr && sema == 0) { /* * There are a lot of these- reasons unknown- mostly on * faster Alpha machines. * * I tried delaying after writing HCCR_CMD_CLEAR_RISC_INT to * make sure the old interrupt went away (to avoid 'ringing' * effects), but that didn't stop this from occurring. */ if (IS_24XX(isp)) { junk = 0; } else if (IS_23XX(isp)) { - USEC_DELAY(100); + ISP_DELAY(100); iptr = ISP_READ(isp, isp->isp_respinrp); junk = ISP_READ(isp, BIU_R2HSTSLO); } else { junk = ISP_READ(isp, BIU_ISR); } if (optr == iptr) { if (IS_23XX(isp) || IS_24XX(isp)) { ; } else { sema = ISP_READ(isp, BIU_SEMA); mbox = ISP_READ(isp, OUTMAILBOX0); if ((sema & 0x3) && (mbox & 0x8000)) { goto again; } } isp->isp_intbogus++; - isp_prt(isp, ISP_LOGDEBUG1, - "bogus intr- isr %x (%x) iptr %x optr %x", - isr, junk, iptr, optr); + isp_prt(isp, ISP_LOGDEBUG1, "bogus intr- isr %x (%x) iptr %x optr %x", isr, junk, iptr, optr); } } isp->isp_resodx = iptr; - - if (isp->isp_rspbsy) { - goto out; - } - isp->isp_rspbsy = 1; while (optr != iptr) { uint8_t qe[QENTRY_LEN]; ispstatusreq_t *sp = (ispstatusreq_t *) qe; isphdr_t *hp; int buddaboom, etype, scsi_status, completion_status; int req_status_flags, req_state_flags; uint8_t *snsp, *resp; uint32_t rlen, slen; long resid; uint16_t oop; hp = (isphdr_t *) ISP_QUEUE_ENTRY(isp->isp_result, optr); oop = optr; optr = ISP_NXT_QENTRY(optr, RESULT_QUEUE_LEN(isp)); nlooked++; read_again: buddaboom = req_status_flags = req_state_flags = 0; resid = 0L; /* * Synchronize our view 
of this response queue entry. */ MEMORYBARRIER(isp, SYNC_RESULT, oop, QENTRY_LEN); isp_get_hdr(isp, hp, &sp->req_header); etype = sp->req_header.rqs_entry_type; if (IS_24XX(isp) && etype == RQSTYPE_RESPONSE) { isp24xx_statusreq_t *sp2 = (isp24xx_statusreq_t *)qe; - isp_get_24xx_response(isp, - (isp24xx_statusreq_t *)hp, sp2); + isp_get_24xx_response(isp, (isp24xx_statusreq_t *)hp, sp2); if (isp->isp_dblev & ISP_LOGDEBUG1) { - isp_print_bytes(isp, - "Response Queue Entry", QENTRY_LEN, sp2); + isp_print_bytes(isp, "Response Queue Entry", QENTRY_LEN, sp2); } scsi_status = sp2->req_scsi_status; completion_status = sp2->req_completion_status; req_state_flags = 0; resid = sp2->req_resid; } else if (etype == RQSTYPE_RESPONSE) { isp_get_response(isp, (ispstatusreq_t *) hp, sp); if (isp->isp_dblev & ISP_LOGDEBUG1) { - isp_print_bytes(isp, - "Response Queue Entry", QENTRY_LEN, sp); + isp_print_bytes(isp, "Response Queue Entry", QENTRY_LEN, sp); } scsi_status = sp->req_scsi_status; completion_status = sp->req_completion_status; req_status_flags = sp->req_status_flags; req_state_flags = sp->req_state_flags; resid = sp->req_resid; } else if (etype == RQSTYPE_RIO2) { isp_rio2_t *rio = (isp_rio2_t *)qe; isp_get_rio2(isp, (isp_rio2_t *) hp, rio); if (isp->isp_dblev & ISP_LOGDEBUG1) { - isp_print_bytes(isp, - "Response Queue Entry", QENTRY_LEN, rio); + isp_print_bytes(isp, "Response Queue Entry", QENTRY_LEN, rio); } for (i = 0; i < rio->req_header.rqs_seqno; i++) { isp_fastpost_complete(isp, rio->req_handles[i]); } if (isp->isp_fpcchiwater < rio->req_header.rqs_seqno) { - isp->isp_fpcchiwater = - rio->req_header.rqs_seqno; + isp->isp_fpcchiwater = rio->req_header.rqs_seqno; } - MEMZERO(hp, QENTRY_LEN); /* PERF */ + ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ continue; } else { /* * Somebody reachable via isp_handle_other_response * may have updated the response queue pointers for * us, so we reload our goal index. 
*/ int r; - r = isp_handle_other_response(isp, etype, hp, &optr); + uint32_t tsto = oop; + r = isp_handle_other_response(isp, etype, hp, &tsto); if (r < 0) { goto read_again; } + /* + * If somebody updated the output pointer, then reset + * optr to be one more than the updated amount. + */ + while (tsto != oop) { + optr = ISP_NXT_QENTRY(tsto, + RESULT_QUEUE_LEN(isp)); + } if (r > 0) { - iptr = isp->isp_resodx; - MEMZERO(hp, QENTRY_LEN); /* PERF */ + ISP_WRITE(isp, isp->isp_respoutrp, optr); + ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ continue; } /* * After this point, we'll just look at the header as * we don't know how to deal with the rest of the * response. */ /* * It really has to be a bounced request just copied * from the request queue to the response queue. If * not, something bad has happened. */ if (etype != RQSTYPE_REQUEST) { isp_prt(isp, ISP_LOGERR, notresp, etype, oop, optr, nlooked); isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, sp); - MEMZERO(hp, QENTRY_LEN); /* PERF */ + ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ continue; } buddaboom = 1; scsi_status = sp->req_scsi_status; completion_status = sp->req_completion_status; req_status_flags = sp->req_status_flags; req_state_flags = sp->req_state_flags; resid = sp->req_resid; } if (sp->req_header.rqs_flags & RQSFLAG_MASK) { if (sp->req_header.rqs_flags & RQSFLAG_CONTINUATION) { - isp_prt(isp, ISP_LOGWARN, - "continuation segment"); + isp_print_bytes(isp, "unexpected continuation segment", QENTRY_LEN, sp); ISP_WRITE(isp, isp->isp_respoutrp, optr); continue; } if (sp->req_header.rqs_flags & RQSFLAG_FULL) { - isp_prt(isp, ISP_LOGDEBUG1, - "internal queues full"); + isp_prt(isp, ISP_LOGDEBUG0, "internal queues full"); /* * We'll synthesize a QUEUE FULL message below. 
*/ } if (sp->req_header.rqs_flags & RQSFLAG_BADHEADER) { - isp_print_bytes(isp, "bad header flag", - QENTRY_LEN, sp); + isp_print_bytes(isp, "bad header flag", QENTRY_LEN, sp); buddaboom++; } if (sp->req_header.rqs_flags & RQSFLAG_BADPACKET) { - isp_print_bytes(isp, "bad request packet", - QENTRY_LEN, sp); + isp_print_bytes(isp, "bad request packet", QENTRY_LEN, sp); buddaboom++; } + if (sp->req_header.rqs_flags & RQSFLAG_BADCOUNT) { + isp_print_bytes(isp, "invalid entry count", QENTRY_LEN, sp); + buddaboom++; + } + if (sp->req_header.rqs_flags & RQSFLAG_BADORDER) { + isp_print_bytes(isp, "invalid IOCB ordering", QENTRY_LEN, sp); + ISP_WRITE(isp, isp->isp_respoutrp, optr); + continue; + } } - if (sp->req_handle > isp->isp_maxcmds || sp->req_handle < 1) { - isp_prt(isp, ISP_LOGERR, - "bad request handle %d (type 0x%x)", - sp->req_handle, etype); - MEMZERO(hp, QENTRY_LEN); /* PERF */ + if ((sp->req_handle != ISP_SPCL_HANDLE) && (sp->req_handle > isp->isp_maxcmds || sp->req_handle < 1)) { + isp_prt(isp, ISP_LOGERR, "bad request handle %d (type 0x%x)", sp->req_handle, etype); + ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ ISP_WRITE(isp, isp->isp_respoutrp, optr); continue; } xs = isp_find_xs(isp, sp->req_handle); if (xs == NULL) { uint8_t ts = completion_status & 0xff; /* * Only whine if this isn't the expected fallout of - * aborting the command. + * aborting the command or resetting the target. 
*/ if (etype != RQSTYPE_RESPONSE) { - isp_prt(isp, ISP_LOGERR, - "cannot find handle 0x%x (type 0x%x)", - sp->req_handle, etype); - } else if (ts != RQCS_ABORTED) { - isp_prt(isp, ISP_LOGERR, - "cannot find handle 0x%x (status 0x%x)", - sp->req_handle, ts); + isp_prt(isp, ISP_LOGERR, "cannot find handle 0x%x (type 0x%x)", sp->req_handle, etype); + } else if (ts != RQCS_ABORTED && ts != RQCS_RESET_OCCURRED && sp->req_handle != ISP_SPCL_HANDLE) { + isp_prt(isp, ISP_LOGERR, "cannot find handle 0x%x (status 0x%x)", sp->req_handle, ts); } - MEMZERO(hp, QENTRY_LEN); /* PERF */ + ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ ISP_WRITE(isp, isp->isp_respoutrp, optr); continue; } isp_destroy_handle(isp, sp->req_handle); if (req_status_flags & RQSTF_BUS_RESET) { XS_SETERR(xs, HBA_BUSRESET); - isp->isp_sendmarker |= (1 << XS_CHANNEL(xs)); + ISP_SET_SENDMARKER(isp, XS_CHANNEL(xs), 1); } if (buddaboom) { XS_SETERR(xs, HBA_BOTCH); } resp = NULL; rlen = 0; snsp = NULL; slen = 0; if (IS_24XX(isp) && (scsi_status & (RQCS_RV|RQCS_SV)) != 0) { resp = ((isp24xx_statusreq_t *)sp)->req_rsp_sense; rlen = ((isp24xx_statusreq_t *)sp)->req_response_len; } else if (IS_FC(isp) && (scsi_status & RQCS_RV) != 0) { resp = sp->req_response; rlen = sp->req_response_len; } if (IS_FC(isp) && (scsi_status & RQCS_SV) != 0) { /* * Fibre Channel F/W doesn't say we got status * if there's Sense Data instead. I guess they * think it goes w/o saying. 
*/ req_state_flags |= RQSF_GOT_STATUS|RQSF_GOT_SENSE; if (IS_24XX(isp)) { - snsp = - ((isp24xx_statusreq_t *)sp)->req_rsp_sense; + snsp = ((isp24xx_statusreq_t *)sp)->req_rsp_sense; snsp += rlen; - slen = - ((isp24xx_statusreq_t *)sp)->req_sense_len; + slen = ((isp24xx_statusreq_t *)sp)->req_sense_len; } else { snsp = sp->req_sense_data; slen = sp->req_sense_len; } } else if (IS_SCSI(isp) && (req_state_flags & RQSF_GOT_SENSE)) { snsp = sp->req_sense_data; slen = sp->req_sense_len; } if (req_state_flags & RQSF_GOT_STATUS) { *XS_STSP(xs) = scsi_status & 0xff; } switch (etype) { case RQSTYPE_RESPONSE: - XS_SET_STATE_STAT(isp, xs, sp); - if (resp && rlen >= 4 && - resp[FCP_RSPNS_CODE_OFFSET] != 0) { - isp_prt(isp, ISP_LOGWARN, - "%d.%d.%d FCP RESPONSE: 0x%x", - XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), - resp[FCP_RSPNS_CODE_OFFSET]); - XS_SETERR(xs, HBA_BOTCH); + if (resp && rlen >= 4 && resp[FCP_RSPNS_CODE_OFFSET] != 0) { + const char *ptr; + char lb[64]; + const char *rnames[6] = { + "Task Management Function Done", + "Data Length Differs From Burst Length", + "Invalid FCP Cmnd", + "FCP DATA RO mismatch with FCP DATA_XFR_RDY RO", + "Task Management Function Rejected", + "Task Management Function Failed", + }; + if (resp[FCP_RSPNS_CODE_OFFSET] > 5) { + ISP_SNPRINTF(lb, sizeof lb, "Unknown FCP Response Code 0x%x", resp[FCP_RSPNS_CODE_OFFSET]); + ptr = lb; + } else { + ptr = rnames[resp[FCP_RSPNS_CODE_OFFSET]]; + } + isp_prt(isp, ISP_LOGWARN, "%d.%d.%d FCP RESPONSE, LENGTH %u: %s CDB0=0x%02x", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), rlen, ptr, XS_CDBP(xs)[0] & 0xff); + if (resp[FCP_RSPNS_CODE_OFFSET] != 0) { + XS_SETERR(xs, HBA_BOTCH); + } } if (IS_24XX(isp)) { - isp_parse_status_24xx(isp, - (isp24xx_statusreq_t *)sp, xs, &resid); + isp_parse_status_24xx(isp, (isp24xx_statusreq_t *)sp, xs, &resid); } else { isp_parse_status(isp, (void *)sp, xs, &resid); } - if ((XS_NOERR(xs) || XS_ERR(xs) == HBA_NOERROR) && - (*XS_STSP(xs) == SCSI_BUSY)) { + if ((XS_NOERR(xs) || 
XS_ERR(xs) == HBA_NOERROR) && (*XS_STSP(xs) == SCSI_BUSY)) { XS_SETERR(xs, HBA_TGTBSY); } if (IS_SCSI(isp)) { - XS_RESID(xs) = resid; + XS_SET_RESID(xs, resid); /* * A new synchronous rate was negotiated for * this target. Mark state such that we'll go * look up that which has changed later. */ if (req_status_flags & RQSTF_NEGOTIATION) { int t = XS_TGT(xs); - sdparam *sdp = isp->isp_param; - sdp += XS_CHANNEL(xs); + sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); sdp->isp_devparam[t].dev_refresh = 1; - isp->isp_update |= - (1 << XS_CHANNEL(xs)); + sdp->update = 1; } } else { if (req_status_flags & RQSF_XFER_COMPLETE) { - XS_RESID(xs) = 0; + XS_SET_RESID(xs, 0); } else if (scsi_status & RQCS_RESID) { - XS_RESID(xs) = resid; + XS_SET_RESID(xs, resid); } else { - XS_RESID(xs) = 0; + XS_SET_RESID(xs, 0); } } if (snsp && slen) { XS_SAVE_SENSE(xs, snsp, slen); + } else if ((req_status_flags & RQSF_GOT_STATUS) && (scsi_status & 0xff) == SCSI_CHECK && IS_FC(isp)) { + isp_prt(isp, ISP_LOGWARN, "CHECK CONDITION w/o sense data for CDB=0x%x", XS_CDBP(xs)[0] & 0xff); + isp_print_bytes(isp, "CC with no Sense", QENTRY_LEN, qe); } - isp_prt(isp, ISP_LOGDEBUG2, - "asked for %ld got raw resid %ld settled for %ld", - (long) XS_XFRLEN(xs), resid, (long) XS_RESID(xs)); + isp_prt(isp, ISP_LOGDEBUG2, "asked for %ld got raw resid %ld settled for %ld", (long) XS_XFRLEN(xs), resid, (long) XS_GET_RESID(xs)); break; case RQSTYPE_REQUEST: case RQSTYPE_A64: case RQSTYPE_T2RQS: case RQSTYPE_T3RQS: case RQSTYPE_T7RQS: - if (sp->req_header.rqs_flags & RQSFLAG_FULL) { + if (!IS_24XX(isp) && (sp->req_header.rqs_flags & RQSFLAG_FULL)) { /* * Force Queue Full status. */ *XS_STSP(xs) = SCSI_QFULL; XS_SETERR(xs, HBA_NOERROR); } else if (XS_NOERR(xs)) { - /* - * ???? 
- */ XS_SETERR(xs, HBA_BOTCH); - isp_prt(isp, ISP_LOGDEBUG0, - "Request Queue Entry bounced back"); - if ((isp->isp_dblev & ISP_LOGDEBUG1) == 0) { - isp_print_bytes(isp, "Bounced Request", - QENTRY_LEN, qe); - } } - XS_RESID(xs) = XS_XFRLEN(xs); + XS_SET_RESID(xs, XS_XFRLEN(xs)); break; default: - isp_print_bytes(isp, "Unhandled Response Type", - QENTRY_LEN, qe); + isp_print_bytes(isp, "Unhandled Response Type", QENTRY_LEN, qe); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } break; } /* * Free any DMA resources. As a side effect, this may * also do any cache flushing necessary for data coherence. */ if (XS_XFRLEN(xs)) { ISP_DMAFREE(isp, xs, sp->req_handle); } - if (((isp->isp_dblev & (ISP_LOGDEBUG2|ISP_LOGDEBUG3))) || + if (((isp->isp_dblev & (ISP_LOGDEBUG1|ISP_LOGDEBUG2|ISP_LOGDEBUG3))) || ((isp->isp_dblev & ISP_LOGDEBUG0) && ((!XS_NOERR(xs)) || (*XS_STSP(xs) != SCSI_GOOD)))) { char skey; if (req_state_flags & RQSF_GOT_SENSE) { skey = XS_SNSKEY(xs) & 0xf; if (skey < 10) skey += '0'; else skey += 'a' - 10; } else if (*XS_STSP(xs) == SCSI_CHECK) { skey = '?'; } else { skey = '.'; } isp_prt(isp, ISP_LOGALL, finmsg, XS_CHANNEL(xs), - XS_TGT(xs), XS_LUN(xs), XS_XFRLEN(xs), XS_RESID(xs), + XS_TGT(xs), XS_LUN(xs), XS_XFRLEN(xs), (long) XS_GET_RESID(xs), *XS_STSP(xs), skey, XS_ERR(xs)); } - if (isp->isp_nactive > 0) + if (isp->isp_nactive > 0) { isp->isp_nactive--; + } complist[ndone++] = xs; /* defer completion call until later */ - MEMZERO(hp, QENTRY_LEN); /* PERF */ + ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ if (ndone == MAX_REQUESTQ_COMPLETIONS) { break; } } /* * If we looked at any commands, then it's valid to find out * what the outpointer is. It also is a trigger to update the * ISP's notion of what we've seen so far. */ if (nlooked) { ISP_WRITE(isp, isp->isp_respoutrp, optr); /* * While we're at it, read the requst queue out pointer. 
*/ isp->isp_reqodx = ISP_READ(isp, isp->isp_rqstoutrp); if (isp->isp_rscchiwater < ndone) { isp->isp_rscchiwater = ndone; } } out: if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT); ISP_WRITE(isp, BIU_SEMA, 0); } isp->isp_residx = optr; - isp->isp_rspbsy = 0; for (i = 0; i < ndone; i++) { xs = complist[i]; if (xs) { isp->isp_rsltccmplt++; isp_done(xs); } } } /* * Support routines. */ +#define GET_24XX_BUS(isp, chan, msg) \ + if (IS_24XX(isp)) { \ + chan = ISP_READ(isp, OUTMAILBOX3) & 0xff; \ + if (chan >= isp->isp_nchan) { \ + isp_prt(isp, ISP_LOGERR, "bogus channel %u for %s at line %d", chan, msg, __LINE__); \ + break; \ + } \ + } + static int isp_parse_async(ispsoftc_t *isp, uint16_t mbox) { int rval = 0; - int bus; + int pattern = 0; + uint16_t chan; if (IS_DUALBUS(isp)) { - bus = ISP_READ(isp, OUTMAILBOX6); + chan = ISP_READ(isp, OUTMAILBOX6); } else { - bus = 0; + chan = 0; } isp_prt(isp, ISP_LOGDEBUG2, "Async Mbox 0x%x", mbox); switch (mbox) { case ASYNC_BUS_RESET: - isp->isp_sendmarker |= (1 << bus); + if (IS_FC(isp)) { + isp_prt(isp, ISP_LOGWARN, + "ILLEGAL ASYNC_BUS_RESET for FC card"); + break; + } + ISP_SET_SENDMARKER(isp, chan, 1); #ifdef ISP_TARGET_MODE - if (isp_target_async(isp, bus, mbox)) { + if (isp_target_async(isp, chan, mbox)) { rval = -1; } #endif - isp_async(isp, ISPASYNC_BUS_RESET, &bus); + isp_async(isp, ISPASYNC_BUS_RESET, chan); break; case ASYNC_SYSTEM_ERROR: + isp->isp_dead = 1; isp->isp_state = ISP_CRASHED; if (IS_FC(isp)) { - FCPARAM(isp)->isp_loopstate = LOOP_NIL; - FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; + FCPARAM(isp, chan)->isp_loopstate = LOOP_NIL; + FCPARAM(isp, chan)->isp_fwstate = FW_CONFIG_WAIT; } /* * Were we waiting for a mailbox command to complete? * If so, it's dead, so wake up the waiter. 
*/ if (isp->isp_mboxbsy) { isp->isp_obits = 1; isp->isp_mboxtmp[0] = MBOX_HOST_INTERFACE_ERROR; MBOX_NOTIFY_COMPLETE(isp); } /* * It's up to the handler for isp_async to reinit stuff and * restart the firmware */ - isp_async(isp, ISPASYNC_FW_CRASH, NULL); + isp_async(isp, ISPASYNC_FW_CRASH); rval = -1; break; case ASYNC_RQS_XFER_ERR: isp_prt(isp, ISP_LOGERR, "Request Queue Transfer Error"); break; case ASYNC_RSP_XFER_ERR: isp_prt(isp, ISP_LOGERR, "Response Queue Transfer Error"); break; case ASYNC_QWAKEUP: +#ifdef ISP_TARGET_MODE + if (IS_24XX(isp)) { + isp_prt(isp, ISP_LOGERR, "ATIO Queue Transfer Error"); + break; + } +#endif + if (IS_FC(isp)) { + isp_prt(isp, ISP_LOGWARN, + "ILLEGAL ASYNC_QWAKEUP for FC card"); + break; + } /* * We've just been notified that the Queue has woken up. * We don't need to be chatty about this- just unlatch things * and move on. */ mbox = ISP_READ(isp, isp->isp_rqstoutrp); break; case ASYNC_TIMEOUT_RESET: + if (IS_FC(isp)) { + isp_prt(isp, ISP_LOGWARN, + "ILLEGAL ASYNC_TIMEOUT_RESET for FC card"); + break; + } isp_prt(isp, ISP_LOGWARN, - "timeout initiated SCSI bus reset of bus %d", bus); - isp->isp_sendmarker |= (1 << bus); + "timeout initiated SCSI bus reset of chan %d", chan); + ISP_SET_SENDMARKER(isp, chan, 1); #ifdef ISP_TARGET_MODE - if (isp_target_async(isp, bus, mbox)) { + if (isp_target_async(isp, chan, mbox)) { rval = -1; } #endif break; case ASYNC_DEVICE_RESET: - isp_prt(isp, ISP_LOGINFO, "device reset on bus %d", bus); - isp->isp_sendmarker |= (1 << bus); + if (IS_FC(isp)) { + isp_prt(isp, ISP_LOGWARN, + "ILLEGAL DEVICE_RESET for FC card"); + break; + } + isp_prt(isp, ISP_LOGINFO, "device reset on chan %d", chan); + ISP_SET_SENDMARKER(isp, chan, 1); #ifdef ISP_TARGET_MODE - if (isp_target_async(isp, bus, mbox)) { + if (isp_target_async(isp, chan, mbox)) { rval = -1; } #endif break; case ASYNC_EXTMSG_UNDERRUN: + if (IS_FC(isp)) { + isp_prt(isp, ISP_LOGWARN, + "ILLEGAL ASYNC_EXTMSG_UNDERRUN for FC card"); + break; + } 
isp_prt(isp, ISP_LOGWARN, "extended message underrun"); break; case ASYNC_SCAM_INT: + if (IS_FC(isp)) { + isp_prt(isp, ISP_LOGWARN, + "ILLEGAL ASYNC_SCAM_INT for FC card"); + break; + } isp_prt(isp, ISP_LOGINFO, "SCAM interrupt"); break; case ASYNC_HUNG_SCSI: + if (IS_FC(isp)) { + isp_prt(isp, ISP_LOGWARN, + "ILLEGAL ASYNC_HUNG_SCSI for FC card"); + break; + } isp_prt(isp, ISP_LOGERR, "stalled SCSI Bus after DATA Overrun"); /* XXX: Need to issue SCSI reset at this point */ break; case ASYNC_KILLED_BUS: + if (IS_FC(isp)) { + isp_prt(isp, ISP_LOGWARN, + "ILLEGAL ASYNC_KILLED_BUS for FC card"); + break; + } isp_prt(isp, ISP_LOGERR, "SCSI Bus reset after DATA Overrun"); break; case ASYNC_BUS_TRANSIT: + if (IS_FC(isp)) { + isp_prt(isp, ISP_LOGWARN, + "ILLEGAL ASYNC_BUS_TRANSIT for FC card"); + break; + } mbox = ISP_READ(isp, OUTMAILBOX2); switch (mbox & 0x1c00) { case SXP_PINS_LVD_MODE: isp_prt(isp, ISP_LOGINFO, "Transition to LVD mode"); - SDPARAM(isp)->isp_diffmode = 0; - SDPARAM(isp)->isp_ultramode = 0; - SDPARAM(isp)->isp_lvdmode = 1; + SDPARAM(isp, chan)->isp_diffmode = 0; + SDPARAM(isp, chan)->isp_ultramode = 0; + SDPARAM(isp, chan)->isp_lvdmode = 1; break; case SXP_PINS_HVD_MODE: isp_prt(isp, ISP_LOGINFO, "Transition to Differential mode"); - SDPARAM(isp)->isp_diffmode = 1; - SDPARAM(isp)->isp_ultramode = 0; - SDPARAM(isp)->isp_lvdmode = 0; + SDPARAM(isp, chan)->isp_diffmode = 1; + SDPARAM(isp, chan)->isp_ultramode = 0; + SDPARAM(isp, chan)->isp_lvdmode = 0; break; case SXP_PINS_SE_MODE: isp_prt(isp, ISP_LOGINFO, "Transition to Single Ended mode"); - SDPARAM(isp)->isp_diffmode = 0; - SDPARAM(isp)->isp_ultramode = 1; - SDPARAM(isp)->isp_lvdmode = 0; + SDPARAM(isp, chan)->isp_diffmode = 0; + SDPARAM(isp, chan)->isp_ultramode = 1; + SDPARAM(isp, chan)->isp_lvdmode = 0; break; default: isp_prt(isp, ISP_LOGWARN, "Transition to Unknown Mode 0x%x", mbox); break; } /* * XXX: Set up to renegotiate again! */ /* Can only be for a 1080... 
*/ - isp->isp_sendmarker |= (1 << bus); + ISP_SET_SENDMARKER(isp, chan, 1); break; - /* - * We can use bus, which will always be zero for FC cards, - * as a mailbox pattern accumulator to be checked below. - */ case ASYNC_RIO5: - bus = 0x1ce; /* outgoing mailbox regs 1-3, 6-7 */ + pattern = 0xce; /* outgoing mailbox regs 1-3, 6-7 */ break; case ASYNC_RIO4: - bus = 0x14e; /* outgoing mailbox regs 1-3, 6 */ + pattern = 0x4e; /* outgoing mailbox regs 1-3, 6 */ break; case ASYNC_RIO3: - bus = 0x10e; /* outgoing mailbox regs 1-3 */ + pattern = 0x0e; /* outgoing mailbox regs 1-3 */ break; case ASYNC_RIO2: - bus = 0x106; /* outgoing mailbox regs 1-2 */ + pattern = 0x06; /* outgoing mailbox regs 1-2 */ break; case ASYNC_RIO1: case ASYNC_CMD_CMPLT: - bus = 0x102; /* outgoing mailbox regs 1 */ + pattern = 0x02; /* outgoing mailbox regs 1 */ break; case ASYNC_RIO_RESP: return (rval); case ASYNC_CTIO_DONE: { #ifdef ISP_TARGET_MODE - int handle = - (ISP_READ(isp, OUTMAILBOX2) << 16) | + int handle; + if (IS_SCSI(isp) || IS_24XX(isp)) { + isp_prt(isp, ISP_LOGWARN, + "bad ASYNC_CTIO_DONE for %s cards", + IS_SCSI(isp)? "SCSI" : "24XX"); + break; + } + handle = + (ISP_READ(isp, OUTMAILBOX2) << 16) | (ISP_READ(isp, OUTMAILBOX1)); if (isp_target_async(isp, handle, mbox)) { rval = -1; } else { /* count it as a fast posting intr */ isp->isp_fphccmplt++; } #else + if (IS_SCSI(isp) || IS_24XX(isp)) { + isp_prt(isp, ISP_LOGWARN, + "bad ASYNC_CTIO_DONE for %s cards", + IS_SCSI(isp)? 
"SCSI" : "24XX"); + break; + } isp_prt(isp, ISP_LOGINFO, "Fast Posting CTIO done"); isp->isp_fphccmplt++; /* count it as a fast posting intr */ #endif break; } case ASYNC_LIP_ERROR: case ASYNC_LIP_F8: case ASYNC_LIP_OCCURRED: - FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; - FCPARAM(isp)->isp_loopstate = LOOP_LIP_RCVD; - isp->isp_sendmarker = 1; - ISP_MARK_PORTDB(isp, 1); - isp_async(isp, ISPASYNC_LIP, NULL); -#ifdef ISP_TARGET_MODE - if (isp_target_async(isp, bus, mbox)) { - rval = -1; + case ASYNC_PTPMODE: + if (IS_SCSI(isp)) { + isp_prt(isp, ISP_LOGWARN, + "bad LIP event for SCSI cards"); + break; } -#endif /* - * We've had problems with data corruption occuring on - * commands that complete (with no apparent error) after - * we receive a LIP. This has been observed mostly on - * Local Loop topologies. To be safe, let's just mark - * all active commands as dead. + * These are broadcast events that have to be sent across + * all active channels. */ - if (FCPARAM(isp)->isp_topo == TOPO_NL_PORT || - FCPARAM(isp)->isp_topo == TOPO_FL_PORT) { - int i, j; - for (i = j = 0; i < isp->isp_maxcmds; i++) { - XS_T *xs; - xs = isp->isp_xflist[i]; - if (xs != NULL) { + for (chan = 0; chan < isp->isp_nchan; chan++) { + fcparam *fcp = FCPARAM(isp, chan); + int topo = fcp->isp_topo; + + if (fcp->role == ISP_ROLE_NONE) { + continue; + } + + fcp->isp_fwstate = FW_CONFIG_WAIT; + fcp->isp_loopstate = LOOP_LIP_RCVD; + ISP_SET_SENDMARKER(isp, chan, 1); + ISP_MARK_PORTDB(isp, chan, 1); + isp_async(isp, ISPASYNC_LIP, chan); +#ifdef ISP_TARGET_MODE + if (isp_target_async(isp, chan, mbox)) { + rval = -1; + } +#endif + /* + * We've had problems with data corruption occuring on + * commands that complete (with no apparent error) after + * we receive a LIP. This has been observed mostly on + * Local Loop topologies. To be safe, let's just mark + * all active commands as dead. 
+ */ + if (topo == TOPO_NL_PORT || topo == TOPO_FL_PORT) { + int i, j; + for (i = j = 0; i < isp->isp_maxcmds; i++) { + XS_T *xs; + xs = isp->isp_xflist[i]; + if (xs == NULL) { + continue; + } + if (XS_CHANNEL(xs) != chan) { + continue; + } j++; XS_SETERR(xs, HBA_BUSRESET); } + if (j) { + isp_prt(isp, ISP_LOGERR, lipd, chan, j); + } } - if (j) { - isp_prt(isp, ISP_LOGERR, - "LIP destroyed %d active commands", j); - } } break; case ASYNC_LOOP_UP: - isp->isp_sendmarker = 1; - FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; - FCPARAM(isp)->isp_loopstate = LOOP_LIP_RCVD; - ISP_MARK_PORTDB(isp, 1); - isp_async(isp, ISPASYNC_LOOP_UP, NULL); -#ifdef ISP_TARGET_MODE - if (isp_target_async(isp, bus, mbox)) { - rval = -1; + if (IS_SCSI(isp)) { + isp_prt(isp, ISP_LOGWARN, + "bad LOOP UP event for SCSI cards"); + break; } + /* + * This is a broadcast event that has to be sent across + * all active channels. + */ + for (chan = 0; chan < isp->isp_nchan; chan++) { + fcparam *fcp = FCPARAM(isp, chan); + + if (fcp->role == ISP_ROLE_NONE) { + continue; + } + + ISP_SET_SENDMARKER(isp, chan, 1); + + fcp->isp_fwstate = FW_CONFIG_WAIT; + fcp->isp_loopstate = LOOP_LIP_RCVD; + ISP_MARK_PORTDB(isp, chan, 1); + isp_async(isp, ISPASYNC_LOOP_UP, chan); +#ifdef ISP_TARGET_MODE + if (isp_target_async(isp, chan, mbox)) { + rval = -1; + } #endif + } break; case ASYNC_LOOP_DOWN: - isp->isp_sendmarker = 1; - FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; - FCPARAM(isp)->isp_loopstate = LOOP_NIL; - ISP_MARK_PORTDB(isp, 1); - isp_async(isp, ISPASYNC_LOOP_DOWN, NULL); -#ifdef ISP_TARGET_MODE - if (isp_target_async(isp, bus, mbox)) { - rval = -1; + if (IS_SCSI(isp)) { + isp_prt(isp, ISP_LOGWARN, + "bad LOOP DOWN event for SCSI cards"); + break; } + /* + * This is a broadcast event that has to be sent across + * all active channels. 
+ */ + for (chan = 0; chan < isp->isp_nchan; chan++) { + fcparam *fcp = FCPARAM(isp, chan); + + if (fcp->role == ISP_ROLE_NONE) { + continue; + } + + ISP_SET_SENDMARKER(isp, chan, 1); + fcp->isp_fwstate = FW_CONFIG_WAIT; + fcp->isp_loopstate = LOOP_NIL; + ISP_MARK_PORTDB(isp, chan, 1); + isp_async(isp, ISPASYNC_LOOP_DOWN, chan); +#ifdef ISP_TARGET_MODE + if (isp_target_async(isp, chan, mbox)) { + rval = -1; + } #endif + } break; case ASYNC_LOOP_RESET: - isp->isp_sendmarker = 1; - FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; - FCPARAM(isp)->isp_loopstate = LOOP_NIL; - ISP_MARK_PORTDB(isp, 1); - isp_async(isp, ISPASYNC_LOOP_RESET, NULL); -#ifdef ISP_TARGET_MODE - if (isp_target_async(isp, bus, mbox)) { - rval = -1; + if (IS_SCSI(isp)) { + isp_prt(isp, ISP_LOGWARN, + "bad LIP RESET event for SCSI cards"); + break; } + /* + * This is a broadcast event that has to be sent across + * all active channels. + */ + for (chan = 0; chan < isp->isp_nchan; chan++) { + fcparam *fcp = FCPARAM(isp, chan); + + if (fcp->role == ISP_ROLE_NONE) { + continue; + } + + ISP_SET_SENDMARKER(isp, chan, 1); + fcp->isp_fwstate = FW_CONFIG_WAIT; + fcp->isp_loopstate = LOOP_NIL; + ISP_MARK_PORTDB(isp, chan, 1); + isp_async(isp, ISPASYNC_LOOP_RESET, chan); +#ifdef ISP_TARGET_MODE + if (isp_target_async(isp, chan, mbox)) { + rval = -1; + } #endif + } break; case ASYNC_PDB_CHANGED: - isp->isp_sendmarker = 1; - FCPARAM(isp)->isp_loopstate = LOOP_PDB_RCVD; - ISP_MARK_PORTDB(isp, 1); - isp_async(isp, ISPASYNC_CHANGE_NOTIFY, ISPASYNC_CHANGE_PDB); - break; + { + int nphdl, nlstate, reason; + if (IS_SCSI(isp)) { + isp_prt(isp, ISP_LOGWARN, + "bad PDB CHANGED event for SCSI cards"); + break; + } + /* + * We *should* get a channel out of the 24XX, but we don't seem + * to get more than a PDB CHANGED on channel 0, so turn it into + * a broadcast event. 
+ */ + if (IS_24XX(isp)) { + nphdl = ISP_READ(isp, OUTMAILBOX1); + nlstate = ISP_READ(isp, OUTMAILBOX2); + reason = ISP_READ(isp, OUTMAILBOX3) >> 8; + } else { + nphdl = NIL_HANDLE; + nlstate = reason = 0; + } + for (chan = 0; chan < isp->isp_nchan; chan++) { + fcparam *fcp = FCPARAM(isp, chan); + if (fcp->role == ISP_ROLE_NONE) { + continue; + } + ISP_SET_SENDMARKER(isp, chan, 1); + fcp->isp_loopstate = LOOP_PDB_RCVD; + ISP_MARK_PORTDB(isp, chan, 1); + isp_async(isp, ISPASYNC_CHANGE_NOTIFY, chan, + ISPASYNC_CHANGE_PDB, nphdl, nlstate, reason); + } + break; + } case ASYNC_CHANGE_NOTIFY: - if (FCPARAM(isp)->isp_topo == TOPO_F_PORT) { - FCPARAM(isp)->isp_loopstate = LOOP_LSCAN_DONE; + { + int lochan, hichan; + + if (IS_SCSI(isp)) { + isp_prt(isp, ISP_LOGWARN, + "bad CHANGE NOTIFY event for SCSI cards"); + break; + } + if (ISP_FW_NEWER_THAN(isp, 4, 0, 25) && ISP_CAP_MULTI_ID(isp)) { + GET_24XX_BUS(isp, chan, "ASYNC_CHANGE_NOTIFY"); + lochan = chan; + hichan = chan + 1; } else { - FCPARAM(isp)->isp_loopstate = LOOP_PDB_RCVD; + lochan = 0; + hichan = isp->isp_nchan; } - ISP_MARK_PORTDB(isp, 1); - isp_async(isp, ISPASYNC_CHANGE_NOTIFY, ISPASYNC_CHANGE_SNS); - break; + for (chan = lochan; chan < hichan; chan++) { + fcparam *fcp = FCPARAM(isp, chan); - case ASYNC_PTPMODE: - ISP_MARK_PORTDB(isp, 1); - isp->isp_sendmarker = 1; - FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; - FCPARAM(isp)->isp_loopstate = LOOP_LIP_RCVD; - isp_async(isp, ISPASYNC_CHANGE_NOTIFY, ISPASYNC_CHANGE_OTHER); -#ifdef ISP_TARGET_MODE - if (isp_target_async(isp, bus, mbox)) { - rval = -1; + if (fcp->role == ISP_ROLE_NONE) { + continue; + } + + if (fcp->isp_topo == TOPO_F_PORT) { + fcp->isp_loopstate = LOOP_LSCAN_DONE; + } else { + fcp->isp_loopstate = LOOP_PDB_RCVD; + } + ISP_MARK_PORTDB(isp, chan, 1); + isp_async(isp, ISPASYNC_CHANGE_NOTIFY, chan, + ISPASYNC_CHANGE_SNS); } -#endif - isp_prt(isp, ISP_LOGINFO, "Point-to-Point mode"); break; + } case ASYNC_CONNMODE: + /* + * This only applies to 2100 amd 
2200 cards + */ + if (!IS_2200(isp) && !IS_2100(isp)) { + isp_prt(isp, ISP_LOGWARN, + "bad card for ASYNC_CONNMODE event"); + break; + } + chan = 0; mbox = ISP_READ(isp, OUTMAILBOX1); - ISP_MARK_PORTDB(isp, 1); + ISP_MARK_PORTDB(isp, chan, 1); switch (mbox) { case ISP_CONN_LOOP: isp_prt(isp, ISP_LOGINFO, "Point-to-Point -> Loop mode"); break; case ISP_CONN_PTP: isp_prt(isp, ISP_LOGINFO, "Loop -> Point-to-Point mode"); break; case ISP_CONN_BADLIP: isp_prt(isp, ISP_LOGWARN, "Point-to-Point -> Loop mode (BAD LIP)"); break; case ISP_CONN_FATAL: + isp->isp_dead = 1; + isp->isp_state = ISP_CRASHED; isp_prt(isp, ISP_LOGERR, "FATAL CONNECTION ERROR"); - isp_async(isp, ISPASYNC_FW_CRASH, NULL); + isp_async(isp, ISPASYNC_FW_CRASH); return (-1); case ISP_CONN_LOOPBACK: isp_prt(isp, ISP_LOGWARN, "Looped Back in Point-to-Point mode"); break; default: isp_prt(isp, ISP_LOGWARN, "Unknown connection mode (0x%x)", mbox); break; } - isp_async(isp, ISPASYNC_CHANGE_NOTIFY, ISPASYNC_CHANGE_OTHER); - isp->isp_sendmarker = 1; - FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; - FCPARAM(isp)->isp_loopstate = LOOP_LIP_RCVD; + isp_async(isp, ISPASYNC_CHANGE_NOTIFY, chan, + ISPASYNC_CHANGE_OTHER); + FCPARAM(isp, chan)->sendmarker = 1; + FCPARAM(isp, chan)->isp_fwstate = FW_CONFIG_WAIT; + FCPARAM(isp, chan)->isp_loopstate = LOOP_LIP_RCVD; break; + case ASYNC_RCV_ERR: + if (IS_24XX(isp)) { + isp_prt(isp, ISP_LOGWARN, "Receive Error"); + } else { + isp_prt(isp, ISP_LOGWARN, + "Unknown Async Code 0x%x", mbox); + } + break; case ASYNC_RJT_SENT: /* same as ASYNC_QFULL_SENT */ if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGTDEBUG0, "LS_RJT sent"); break; } else if (IS_2200(isp)) { isp_prt(isp, ISP_LOGTDEBUG0, "QFULL sent"); break; } /* FALLTHROUGH */ default: isp_prt(isp, ISP_LOGWARN, "Unknown Async Code 0x%x", mbox); break; } - if (bus & 0x100) { + if (pattern) { int i, nh; uint16_t handles[16]; for (nh = 0, i = 1; i < MAX_MAILBOX(isp); i++) { - if ((bus & (1 << i)) == 0) { + if ((pattern & (1 << i)) == 0) { 
continue; } handles[nh++] = ISP_READ(isp, MBOX_OFF(i)); } for (i = 0; i < nh; i++) { isp_fastpost_complete(isp, handles[i]); isp_prt(isp, ISP_LOGDEBUG3, "fast post completion of %u", handles[i]); } if (isp->isp_fpcchiwater < nh) { isp->isp_fpcchiwater = nh; } } else { isp->isp_intoasync++; } return (rval); } /* * Handle other response entries. A pointer to the request queue output * index is here in case we want to eat several entries at once, although * this is not used currently. */ static int isp_handle_other_response(ispsoftc_t *isp, int type, isphdr_t *hp, uint32_t *optrp) { switch (type) { case RQSTYPE_STATUS_CONT: isp_prt(isp, ISP_LOGDEBUG0, "Ignored Continuation Response"); return (1); case RQSTYPE_MARKER: isp_prt(isp, ISP_LOGDEBUG0, "Marker Response"); return (1); case RQSTYPE_ATIO: case RQSTYPE_CTIO: case RQSTYPE_ENABLE_LUN: case RQSTYPE_MODIFY_LUN: case RQSTYPE_NOTIFY: case RQSTYPE_NOTIFY_ACK: case RQSTYPE_CTIO1: case RQSTYPE_ATIO2: case RQSTYPE_CTIO2: case RQSTYPE_CTIO3: case RQSTYPE_CTIO7: case RQSTYPE_ABTS_RCVD: case RQSTYPE_ABTS_RSP: isp->isp_rsltccmplt++; /* count as a response completion */ #ifdef ISP_TARGET_MODE if (isp_target_notify(isp, (ispstatusreq_t *) hp, optrp)) { return (1); } #endif /* FALLTHROUGH */ + case RQSTYPE_RPT_ID_ACQ: + if (IS_24XX(isp)) { + isp_ridacq_t rid; + isp_get_ridacq(isp, (isp_ridacq_t *)hp, &rid); + if (rid.ridacq_format == 0) { + } + return (1); + } + /* FALLTHROUGH */ case RQSTYPE_REQUEST: default: - USEC_DELAY(100); + ISP_DELAY(100); if (type != isp_get_response_type(isp, hp)) { /* * This is questionable- we're just papering over * something we've seen on SMP linux in target * mode- we don't really know what's happening * here that causes us to think we've gotten * an entry, but that either the entry isn't * filled out yet or our CPU read data is stale. 
*/ isp_prt(isp, ISP_LOGINFO, "unstable type in response queue"); return (-1); } isp_prt(isp, ISP_LOGWARN, "Unhandled Response Type 0x%x", isp_get_response_type(isp, hp)); - if (isp_async(isp, ISPASYNC_UNHANDLED_RESPONSE, hp)) { - return (1); - } return (0); } } static void isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp) { switch (sp->req_completion_status & 0xff) { case RQCS_COMPLETE: if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; case RQCS_INCOMPLETE: if ((sp->req_state_flags & RQSF_GOT_TARGET) == 0) { isp_prt(isp, ISP_LOGDEBUG1, "Selection Timeout for %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_SELTIMEOUT); *rp = XS_XFRLEN(xs); } return; } isp_prt(isp, ISP_LOGERR, "command incomplete for %d.%d.%d, state 0x%x", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), sp->req_state_flags); break; case RQCS_DMA_ERROR: isp_prt(isp, ISP_LOGERR, "DMA error for command on %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); *rp = XS_XFRLEN(xs); break; case RQCS_TRANSPORT_ERROR: { char buf[172]; - SNPRINTF(buf, sizeof (buf), "states=>"); + ISP_SNPRINTF(buf, sizeof (buf), "states=>"); if (sp->req_state_flags & RQSF_GOT_BUS) { - SNPRINTF(buf, sizeof (buf), "%s GOT_BUS", buf); + ISP_SNPRINTF(buf, sizeof (buf), "%s GOT_BUS", buf); } if (sp->req_state_flags & RQSF_GOT_TARGET) { - SNPRINTF(buf, sizeof (buf), "%s GOT_TGT", buf); + ISP_SNPRINTF(buf, sizeof (buf), "%s GOT_TGT", buf); } if (sp->req_state_flags & RQSF_SENT_CDB) { - SNPRINTF(buf, sizeof (buf), "%s SENT_CDB", buf); + ISP_SNPRINTF(buf, sizeof (buf), "%s SENT_CDB", buf); } if (sp->req_state_flags & RQSF_XFRD_DATA) { - SNPRINTF(buf, sizeof (buf), "%s XFRD_DATA", buf); + ISP_SNPRINTF(buf, sizeof (buf), "%s XFRD_DATA", buf); } if (sp->req_state_flags & RQSF_GOT_STATUS) { - SNPRINTF(buf, sizeof (buf), "%s GOT_STS", buf); + ISP_SNPRINTF(buf, sizeof (buf), "%s GOT_STS", buf); } if (sp->req_state_flags & RQSF_GOT_SENSE) { - SNPRINTF(buf, sizeof (buf), "%s 
GOT_SNS", buf); + ISP_SNPRINTF(buf, sizeof (buf), "%s GOT_SNS", buf); } if (sp->req_state_flags & RQSF_XFER_COMPLETE) { - SNPRINTF(buf, sizeof (buf), "%s XFR_CMPLT", buf); + ISP_SNPRINTF(buf, sizeof (buf), "%s XFR_CMPLT", buf); } - SNPRINTF(buf, sizeof (buf), "%s\nstatus=>", buf); + ISP_SNPRINTF(buf, sizeof (buf), "%s\nstatus=>", buf); if (sp->req_status_flags & RQSTF_DISCONNECT) { - SNPRINTF(buf, sizeof (buf), "%s Disconnect", buf); + ISP_SNPRINTF(buf, sizeof (buf), "%s Disconnect", buf); } if (sp->req_status_flags & RQSTF_SYNCHRONOUS) { - SNPRINTF(buf, sizeof (buf), "%s Sync_xfr", buf); + ISP_SNPRINTF(buf, sizeof (buf), "%s Sync_xfr", buf); } if (sp->req_status_flags & RQSTF_PARITY_ERROR) { - SNPRINTF(buf, sizeof (buf), "%s Parity", buf); + ISP_SNPRINTF(buf, sizeof (buf), "%s Parity", buf); } if (sp->req_status_flags & RQSTF_BUS_RESET) { - SNPRINTF(buf, sizeof (buf), "%s Bus_Reset", buf); + ISP_SNPRINTF(buf, sizeof (buf), "%s Bus_Reset", buf); } if (sp->req_status_flags & RQSTF_DEVICE_RESET) { - SNPRINTF(buf, sizeof (buf), "%s Device_Reset", buf); + ISP_SNPRINTF(buf, sizeof (buf), "%s Device_Reset", buf); } if (sp->req_status_flags & RQSTF_ABORTED) { - SNPRINTF(buf, sizeof (buf), "%s Aborted", buf); + ISP_SNPRINTF(buf, sizeof (buf), "%s Aborted", buf); } if (sp->req_status_flags & RQSTF_TIMEOUT) { - SNPRINTF(buf, sizeof (buf), "%s Timeout", buf); + ISP_SNPRINTF(buf, sizeof (buf), "%s Timeout", buf); } if (sp->req_status_flags & RQSTF_NEGOTIATION) { - SNPRINTF(buf, sizeof (buf), "%s Negotiation", buf); + ISP_SNPRINTF(buf, sizeof (buf), "%s Negotiation", buf); } isp_prt(isp, ISP_LOGERR, "%s", buf); isp_prt(isp, ISP_LOGERR, "transport error for %d.%d.%d:\n%s", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), buf); *rp = XS_XFRLEN(xs); break; } case RQCS_RESET_OCCURRED: + { + int chan; isp_prt(isp, ISP_LOGWARN, "bus reset destroyed command for %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); - isp->isp_sendmarker |= (1 << XS_CHANNEL(xs)); + for (chan = 0; chan < 
isp->isp_nchan; chan++) { + FCPARAM(isp, chan)->sendmarker = 1; + } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BUSRESET); } *rp = XS_XFRLEN(xs); return; - + } case RQCS_ABORTED: isp_prt(isp, ISP_LOGERR, "command aborted for %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); - isp->isp_sendmarker |= (1 << XS_CHANNEL(xs)); + ISP_SET_SENDMARKER(isp, XS_CHANNEL(xs), 1); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ABORTED); } return; case RQCS_TIMEOUT: isp_prt(isp, ISP_LOGWARN, "command timed out for %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); /* * XXX: Check to see if we logged out of the device. */ if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_CMDTIMEOUT); } return; case RQCS_DATA_OVERRUN: - XS_RESID(xs) = sp->req_resid; - isp_prt(isp, ISP_LOGERR, "data overrun for command on %d.%d.%d", - XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); + XS_SET_RESID(xs, sp->req_resid); + isp_prt(isp, ISP_LOGERR, "data overrun (%ld) for command on %d.%d.%d", + (long) XS_GET_RESID(xs), XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_DATAOVR); } return; case RQCS_COMMAND_OVERRUN: isp_prt(isp, ISP_LOGERR, "command overrun for command on %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_STATUS_OVERRUN: isp_prt(isp, ISP_LOGERR, "status overrun for command on %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_BAD_MESSAGE: isp_prt(isp, ISP_LOGERR, "msg not COMMAND COMPLETE after status %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_NO_MESSAGE_OUT: isp_prt(isp, ISP_LOGERR, "No MESSAGE OUT phase after selection on %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_EXT_ID_FAILED: isp_prt(isp, ISP_LOGERR, "EXTENDED IDENTIFY failed %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_IDE_MSG_FAILED: isp_prt(isp, ISP_LOGERR, "INITIATOR DETECTED ERROR rejected by %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_ABORT_MSG_FAILED: isp_prt(isp, ISP_LOGERR, "ABORT OPERATION 
rejected by %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_REJECT_MSG_FAILED: isp_prt(isp, ISP_LOGERR, "MESSAGE REJECT rejected by %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_NOP_MSG_FAILED: isp_prt(isp, ISP_LOGERR, "NOP rejected by %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_PARITY_ERROR_MSG_FAILED: isp_prt(isp, ISP_LOGERR, "MESSAGE PARITY ERROR rejected by %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_DEVICE_RESET_MSG_FAILED: isp_prt(isp, ISP_LOGWARN, "BUS DEVICE RESET rejected by %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_ID_MSG_FAILED: isp_prt(isp, ISP_LOGERR, "IDENTIFY rejected by %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_UNEXP_BUS_FREE: isp_prt(isp, ISP_LOGERR, "%d.%d.%d had an unexpected bus free", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_DATA_UNDERRUN: { if (IS_FC(isp)) { int ru_marked = (sp->req_scsi_status & RQCS_RU) != 0; if (!ru_marked || sp->req_resid > XS_XFRLEN(xs)) { isp_prt(isp, ISP_LOGWARN, bun, XS_TGT(xs), XS_LUN(xs), XS_XFRLEN(xs), sp->req_resid, (ru_marked)? 
"marked" : "not marked"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } return; } } - XS_RESID(xs) = sp->req_resid; + XS_SET_RESID(xs, sp->req_resid); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; } case RQCS_XACT_ERR1: isp_prt(isp, ISP_LOGERR, xact1, XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_XACT_ERR2: isp_prt(isp, ISP_LOGERR, xact2, XS_LUN(xs), XS_TGT(xs), XS_CHANNEL(xs)); break; case RQCS_XACT_ERR3: isp_prt(isp, ISP_LOGERR, xact3, XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_BAD_ENTRY: isp_prt(isp, ISP_LOGERR, "Invalid IOCB entry type detected"); break; case RQCS_QUEUE_FULL: isp_prt(isp, ISP_LOGDEBUG0, "internal queues full for %d.%d.%d status 0x%x", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), *XS_STSP(xs)); /* * If QFULL or some other status byte is set, then this * isn't an error, per se. * * Unfortunately, some QLogic f/w writers have, in * some cases, ommitted to *set* status to QFULL. * if (*XS_STSP(xs) != SCSI_GOOD && XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); return; } * * */ *XS_STSP(xs) = SCSI_QFULL; XS_SETERR(xs, HBA_NOERROR); return; case RQCS_PHASE_SKIPPED: isp_prt(isp, ISP_LOGERR, pskip, XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_ARQS_FAILED: isp_prt(isp, ISP_LOGERR, "Auto Request Sense failed for %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ARQFAIL); } return; case RQCS_WIDE_FAILED: isp_prt(isp, ISP_LOGERR, "Wide Negotiation failed for %d.%d.%d", XS_TGT(xs), XS_LUN(xs), XS_CHANNEL(xs)); if (IS_SCSI(isp)) { - sdparam *sdp = isp->isp_param; - sdp += XS_CHANNEL(xs); + sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); sdp->isp_devparam[XS_TGT(xs)].goal_flags &= ~DPARM_WIDE; sdp->isp_devparam[XS_TGT(xs)].dev_update = 1; - isp->isp_update |= (1 << XS_CHANNEL(xs)); + sdp->update = 1; } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; case RQCS_SYNCXFER_FAILED: isp_prt(isp, ISP_LOGERR, "SDTR Message failed for target %d.%d.%d", XS_TGT(xs), XS_LUN(xs), 
XS_CHANNEL(xs)); if (IS_SCSI(isp)) { - sdparam *sdp = isp->isp_param; + sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); sdp += XS_CHANNEL(xs); sdp->isp_devparam[XS_TGT(xs)].goal_flags &= ~DPARM_SYNC; sdp->isp_devparam[XS_TGT(xs)].dev_update = 1; - isp->isp_update |= (1 << XS_CHANNEL(xs)); + sdp->update = 1; } break; case RQCS_LVD_BUSERR: isp_prt(isp, ISP_LOGERR, "Bad LVD condition while talking to %d.%d.%d", XS_TGT(xs), XS_LUN(xs), XS_CHANNEL(xs)); break; case RQCS_PORT_UNAVAILABLE: /* * No such port on the loop. Moral equivalent of SELTIMEO */ case RQCS_PORT_LOGGED_OUT: { const char *reason; uint8_t sts = sp->req_completion_status & 0xff; /* * It was there (maybe)- treat as a selection timeout. */ if (sts == RQCS_PORT_UNAVAILABLE) { reason = "unavailable"; } else { reason = "logout"; } isp_prt(isp, ISP_LOGINFO, "port %s for target %d", reason, XS_TGT(xs)); /* * If we're on a local loop, force a LIP (which is overkill) * to force a re-login of this unit. If we're on fabric, * then we'll have to log in again as a matter of course. 
*/ - if (FCPARAM(isp)->isp_topo == TOPO_NL_PORT || - FCPARAM(isp)->isp_topo == TOPO_FL_PORT) { + if (FCPARAM(isp, 0)->isp_topo == TOPO_NL_PORT || + FCPARAM(isp, 0)->isp_topo == TOPO_FL_PORT) { mbreg_t mbs; - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_INIT_LIP; - if (FCPARAM(isp)->isp_2klogin) { + MBSINIT(&mbs, MBOX_INIT_LIP, MBLOGALL, 0); + if (ISP_CAP_2KLOGIN(isp)) { mbs.ibits = (1 << 10); } - mbs.logval = MBLOGALL; isp_mboxcmd_qnw(isp, &mbs, 1); } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_SELTIMEOUT); } return; } case RQCS_PORT_CHANGED: isp_prt(isp, ISP_LOGWARN, "port changed for target %d", XS_TGT(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_SELTIMEOUT); } return; case RQCS_PORT_BUSY: isp_prt(isp, ISP_LOGWARN, "port busy for target %d", XS_TGT(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_TGTBSY); } return; default: isp_prt(isp, ISP_LOGERR, "Unknown Completion Status 0x%x", sp->req_completion_status); break; } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } } static void isp_parse_status_24xx(ispsoftc_t *isp, isp24xx_statusreq_t *sp, XS_T *xs, long *rp) { int ru_marked, sv_marked; + int chan = XS_CHANNEL(xs); + switch (sp->req_completion_status) { case RQCS_COMPLETE: if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; case RQCS_DMA_ERROR: isp_prt(isp, ISP_LOGERR, "DMA error for command on %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_TRANSPORT_ERROR: isp_prt(isp, ISP_LOGERR, "transport error for %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); break; case RQCS_RESET_OCCURRED: isp_prt(isp, ISP_LOGWARN, - "bus reset destroyed command for %d.%d.%d", + "reset destroyed command for %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); - isp->isp_sendmarker |= (1 << XS_CHANNEL(xs)); + FCPARAM(isp, chan)->sendmarker = 1; if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BUSRESET); } return; case RQCS_ABORTED: isp_prt(isp, ISP_LOGERR, "command aborted for %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); - isp->isp_sendmarker |= (1 << 
XS_CHANNEL(xs)); + FCPARAM(isp, chan)->sendmarker = 1; if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ABORTED); } return; case RQCS_TIMEOUT: isp_prt(isp, ISP_LOGWARN, "command timed out for %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_CMDTIMEOUT); } return; case RQCS_DATA_OVERRUN: - XS_RESID(xs) = sp->req_resid; + XS_SET_RESID(xs, sp->req_resid); isp_prt(isp, ISP_LOGERR, "data overrun for command on %d.%d.%d", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_DATAOVR); } return; case RQCS_24XX_DRE: /* data reassembly error */ - isp_prt(isp, ISP_LOGERR, "data reassembly error for target %d", - XS_TGT(xs)); + isp_prt(isp, ISP_LOGERR, + "Chan %d data reassembly error for target %d", + chan, XS_TGT(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ABORTED); } *rp = XS_XFRLEN(xs); return; case RQCS_24XX_TABORT: /* aborted by target */ - isp_prt(isp, ISP_LOGERR, "target %d sent ABTS", - XS_TGT(xs)); + isp_prt(isp, ISP_LOGERR, "Chan %d target %d sent ABTS", + chan, XS_TGT(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ABORTED); } return; case RQCS_DATA_UNDERRUN: ru_marked = (sp->req_scsi_status & RQCS_RU) != 0; /* - * We can get an underrun w/o things being marked + * We can get an underrun w/o things being marked * if we got a non-zero status. */ sv_marked = (sp->req_scsi_status & (RQCS_SV|RQCS_RV)) != 0; if ((ru_marked == 0 && sv_marked == 0) || (sp->req_resid > XS_XFRLEN(xs))) { isp_prt(isp, ISP_LOGWARN, bun, XS_TGT(xs), XS_LUN(xs), XS_XFRLEN(xs), sp->req_resid, (ru_marked)? "marked" : "not marked"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } return; } - XS_RESID(xs) = sp->req_resid; + XS_SET_RESID(xs, sp->req_resid); isp_prt(isp, ISP_LOGDEBUG0, "%d.%d.%d data underrun (%d) for command 0x%x", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), sp->req_resid, XS_CDBP(xs)[0] & 0xff); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; case RQCS_PORT_UNAVAILABLE: /* * No such port on the loop. 
Moral equivalent of SELTIMEO */ case RQCS_PORT_LOGGED_OUT: { const char *reason; uint8_t sts = sp->req_completion_status & 0xff; /* * It was there (maybe)- treat as a selection timeout. */ if (sts == RQCS_PORT_UNAVAILABLE) { reason = "unavailable"; } else { reason = "logout"; } - isp_prt(isp, ISP_LOGINFO, "port %s for target %d", - reason, XS_TGT(xs)); + isp_prt(isp, ISP_LOGINFO, "Chan %d port %s for target %d", + chan, reason, XS_TGT(xs)); /* - * If we're on a local loop, force a LIP (which is overkill) - * to force a re-login of this unit. If we're on fabric, - * then we'll have to log in again as a matter of course. + * There is no MBOX_INIT_LIP for the 24XX. */ - if (FCPARAM(isp)->isp_topo == TOPO_NL_PORT || - FCPARAM(isp)->isp_topo == TOPO_FL_PORT) { - mbreg_t mbs; - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_INIT_LIP; - if (FCPARAM(isp)->isp_2klogin) { - mbs.ibits = (1 << 10); - } - mbs.logval = MBLOGALL; - isp_mboxcmd_qnw(isp, &mbs, 1); - } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_SELTIMEOUT); } return; } case RQCS_PORT_CHANGED: isp_prt(isp, ISP_LOGWARN, - "port changed for target %d", XS_TGT(xs)); + "port changed for target %d chan %d", XS_TGT(xs), chan); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_SELTIMEOUT); } return; case RQCS_24XX_ENOMEM: /* f/w resource unavailable */ isp_prt(isp, ISP_LOGWARN, - "f/w resource unavailable for target %d", XS_TGT(xs)); + "f/w resource unavailable for target %d chan %d", + XS_TGT(xs), chan); if (XS_NOERR(xs)) { *XS_STSP(xs) = SCSI_BUSY; XS_SETERR(xs, HBA_TGTBSY); } return; case RQCS_24XX_TMO: /* task management overrun */ isp_prt(isp, ISP_LOGWARN, - "command for target %d overlapped task management", - XS_TGT(xs)); + "command for target %d overlapped task management for " + "chan %d", XS_TGT(xs), chan); if (XS_NOERR(xs)) { *XS_STSP(xs) = SCSI_BUSY; XS_SETERR(xs, HBA_TGTBSY); } return; default: - isp_prt(isp, ISP_LOGERR, "Unknown Completion Status 0x%x", - sp->req_completion_status); + isp_prt(isp, ISP_LOGERR, + "Unknown 
Completion Status 0x%x on chan %d", + sp->req_completion_status, chan); break; } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } } static void isp_fastpost_complete(ispsoftc_t *isp, uint16_t fph) { XS_T *xs; if (fph == 0) { return; } xs = isp_find_xs(isp, fph); if (xs == NULL) { - isp_prt(isp, ISP_LOGDEBUG1, + isp_prt(isp, ISP_LOGWARN, "Command for fast post handle 0x%x not found", fph); return; } isp_destroy_handle(isp, fph); /* * Since we don't have a result queue entry item, * we must believe that SCSI status is zero and * that all data transferred. */ - XS_SET_STATE_STAT(isp, xs, NULL); - XS_RESID(xs) = 0; + XS_SET_RESID(xs, 0); *XS_STSP(xs) = SCSI_GOOD; if (XS_XFRLEN(xs)) { ISP_DMAFREE(isp, xs, fph); } - if (isp->isp_nactive) + if (isp->isp_nactive) { isp->isp_nactive--; + } isp->isp_fphccmplt++; isp_done(xs); } static int isp_mbox_continue(ispsoftc_t *isp) { mbreg_t mbs; uint16_t *ptr; uint32_t offset; switch (isp->isp_lastmbxcmd) { case MBOX_WRITE_RAM_WORD: case MBOX_READ_RAM_WORD: case MBOX_WRITE_RAM_WORD_EXTENDED: case MBOX_READ_RAM_WORD_EXTENDED: break; default: return (1); } if (isp->isp_mboxtmp[0] != MBOX_COMMAND_COMPLETE) { isp->isp_mbxwrk0 = 0; return (-1); } /* * Clear the previous interrupt. */ if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT); ISP_WRITE(isp, BIU_SEMA, 0); } /* * Continue with next word. 
*/ - MEMZERO(&mbs, sizeof (mbs)); + ISP_MEMZERO(&mbs, sizeof (mbs)); ptr = isp->isp_mbxworkp; switch (isp->isp_lastmbxcmd) { case MBOX_WRITE_RAM_WORD: mbs.param[1] = isp->isp_mbxwrk1++;; mbs.param[2] = *ptr++;; break; case MBOX_READ_RAM_WORD: *ptr++ = isp->isp_mboxtmp[2]; mbs.param[1] = isp->isp_mbxwrk1++; break; case MBOX_WRITE_RAM_WORD_EXTENDED: offset = isp->isp_mbxwrk1; offset |= isp->isp_mbxwrk8 << 16; mbs.param[2] = *ptr++;; mbs.param[1] = offset; mbs.param[8] = offset >> 16; isp->isp_mbxwrk1 = ++offset; isp->isp_mbxwrk8 = offset >> 16; break; case MBOX_READ_RAM_WORD_EXTENDED: offset = isp->isp_mbxwrk1; offset |= isp->isp_mbxwrk8 << 16; *ptr++ = isp->isp_mboxtmp[2]; mbs.param[1] = offset; mbs.param[8] = offset >> 16; isp->isp_mbxwrk1 = ++offset; isp->isp_mbxwrk8 = offset >> 16; break; } isp->isp_mbxworkp = ptr; isp->isp_mbxwrk0--; mbs.param[0] = isp->isp_lastmbxcmd; mbs.logval = MBLOGALL; isp_mboxcmd_qnw(isp, &mbs, 0); return (0); } #define HIWRD(x) ((x) >> 16) #define LOWRD(x) ((x) & 0xffff) #define ISPOPMAP(a, b) (((a) << 16) | (b)) static const uint32_t mbpscsi[] = { ISPOPMAP(0x01, 0x01), /* 0x00: MBOX_NO_OP */ ISPOPMAP(0x1f, 0x01), /* 0x01: MBOX_LOAD_RAM */ ISPOPMAP(0x03, 0x01), /* 0x02: MBOX_EXEC_FIRMWARE */ ISPOPMAP(0x1f, 0x01), /* 0x03: MBOX_DUMP_RAM */ ISPOPMAP(0x07, 0x07), /* 0x04: MBOX_WRITE_RAM_WORD */ ISPOPMAP(0x03, 0x07), /* 0x05: MBOX_READ_RAM_WORD */ ISPOPMAP(0x3f, 0x3f), /* 0x06: MBOX_MAILBOX_REG_TEST */ ISPOPMAP(0x07, 0x07), /* 0x07: MBOX_VERIFY_CHECKSUM */ ISPOPMAP(0x01, 0x0f), /* 0x08: MBOX_ABOUT_FIRMWARE */ ISPOPMAP(0x00, 0x00), /* 0x09: */ ISPOPMAP(0x00, 0x00), /* 0x0a: */ ISPOPMAP(0x00, 0x00), /* 0x0b: */ ISPOPMAP(0x00, 0x00), /* 0x0c: */ ISPOPMAP(0x00, 0x00), /* 0x0d: */ ISPOPMAP(0x01, 0x05), /* 0x0e: MBOX_CHECK_FIRMWARE */ ISPOPMAP(0x00, 0x00), /* 0x0f: */ ISPOPMAP(0x1f, 0x1f), /* 0x10: MBOX_INIT_REQ_QUEUE */ ISPOPMAP(0x3f, 0x3f), /* 0x11: MBOX_INIT_RES_QUEUE */ ISPOPMAP(0x0f, 0x0f), /* 0x12: MBOX_EXECUTE_IOCB */ ISPOPMAP(0x03, 0x03), 
/* 0x13: MBOX_WAKE_UP */ ISPOPMAP(0x01, 0x3f), /* 0x14: MBOX_STOP_FIRMWARE */ ISPOPMAP(0x0f, 0x0f), /* 0x15: MBOX_ABORT */ ISPOPMAP(0x03, 0x03), /* 0x16: MBOX_ABORT_DEVICE */ ISPOPMAP(0x07, 0x07), /* 0x17: MBOX_ABORT_TARGET */ ISPOPMAP(0x07, 0x07), /* 0x18: MBOX_BUS_RESET */ ISPOPMAP(0x03, 0x07), /* 0x19: MBOX_STOP_QUEUE */ ISPOPMAP(0x03, 0x07), /* 0x1a: MBOX_START_QUEUE */ ISPOPMAP(0x03, 0x07), /* 0x1b: MBOX_SINGLE_STEP_QUEUE */ ISPOPMAP(0x03, 0x07), /* 0x1c: MBOX_ABORT_QUEUE */ ISPOPMAP(0x03, 0x4f), /* 0x1d: MBOX_GET_DEV_QUEUE_STATUS */ ISPOPMAP(0x00, 0x00), /* 0x1e: */ ISPOPMAP(0x01, 0x07), /* 0x1f: MBOX_GET_FIRMWARE_STATUS */ ISPOPMAP(0x01, 0x07), /* 0x20: MBOX_GET_INIT_SCSI_ID */ ISPOPMAP(0x01, 0x07), /* 0x21: MBOX_GET_SELECT_TIMEOUT */ ISPOPMAP(0x01, 0xc7), /* 0x22: MBOX_GET_RETRY_COUNT */ ISPOPMAP(0x01, 0x07), /* 0x23: MBOX_GET_TAG_AGE_LIMIT */ ISPOPMAP(0x01, 0x03), /* 0x24: MBOX_GET_CLOCK_RATE */ ISPOPMAP(0x01, 0x07), /* 0x25: MBOX_GET_ACT_NEG_STATE */ ISPOPMAP(0x01, 0x07), /* 0x26: MBOX_GET_ASYNC_DATA_SETUP_TIME */ ISPOPMAP(0x01, 0x07), /* 0x27: MBOX_GET_PCI_PARAMS */ ISPOPMAP(0x03, 0x4f), /* 0x28: MBOX_GET_TARGET_PARAMS */ ISPOPMAP(0x03, 0x0f), /* 0x29: MBOX_GET_DEV_QUEUE_PARAMS */ ISPOPMAP(0x01, 0x07), /* 0x2a: MBOX_GET_RESET_DELAY_PARAMS */ ISPOPMAP(0x00, 0x00), /* 0x2b: */ ISPOPMAP(0x00, 0x00), /* 0x2c: */ ISPOPMAP(0x00, 0x00), /* 0x2d: */ ISPOPMAP(0x00, 0x00), /* 0x2e: */ ISPOPMAP(0x00, 0x00), /* 0x2f: */ ISPOPMAP(0x03, 0x03), /* 0x30: MBOX_SET_INIT_SCSI_ID */ ISPOPMAP(0x07, 0x07), /* 0x31: MBOX_SET_SELECT_TIMEOUT */ ISPOPMAP(0xc7, 0xc7), /* 0x32: MBOX_SET_RETRY_COUNT */ ISPOPMAP(0x07, 0x07), /* 0x33: MBOX_SET_TAG_AGE_LIMIT */ ISPOPMAP(0x03, 0x03), /* 0x34: MBOX_SET_CLOCK_RATE */ ISPOPMAP(0x07, 0x07), /* 0x35: MBOX_SET_ACT_NEG_STATE */ ISPOPMAP(0x07, 0x07), /* 0x36: MBOX_SET_ASYNC_DATA_SETUP_TIME */ ISPOPMAP(0x07, 0x07), /* 0x37: MBOX_SET_PCI_CONTROL_PARAMS */ ISPOPMAP(0x4f, 0x4f), /* 0x38: MBOX_SET_TARGET_PARAMS */ ISPOPMAP(0x0f, 0x0f), /* 0x39: 
MBOX_SET_DEV_QUEUE_PARAMS */ ISPOPMAP(0x07, 0x07), /* 0x3a: MBOX_SET_RESET_DELAY_PARAMS */ ISPOPMAP(0x00, 0x00), /* 0x3b: */ ISPOPMAP(0x00, 0x00), /* 0x3c: */ ISPOPMAP(0x00, 0x00), /* 0x3d: */ ISPOPMAP(0x00, 0x00), /* 0x3e: */ ISPOPMAP(0x00, 0x00), /* 0x3f: */ ISPOPMAP(0x01, 0x03), /* 0x40: MBOX_RETURN_BIOS_BLOCK_ADDR */ ISPOPMAP(0x3f, 0x01), /* 0x41: MBOX_WRITE_FOUR_RAM_WORDS */ ISPOPMAP(0x03, 0x07), /* 0x42: MBOX_EXEC_BIOS_IOCB */ ISPOPMAP(0x00, 0x00), /* 0x43: */ ISPOPMAP(0x00, 0x00), /* 0x44: */ ISPOPMAP(0x03, 0x03), /* 0x45: SET SYSTEM PARAMETER */ ISPOPMAP(0x01, 0x03), /* 0x46: GET SYSTEM PARAMETER */ ISPOPMAP(0x00, 0x00), /* 0x47: */ ISPOPMAP(0x01, 0xcf), /* 0x48: GET SCAM CONFIGURATION */ ISPOPMAP(0xcf, 0xcf), /* 0x49: SET SCAM CONFIGURATION */ ISPOPMAP(0x03, 0x03), /* 0x4a: MBOX_SET_FIRMWARE_FEATURES */ ISPOPMAP(0x01, 0x03), /* 0x4b: MBOX_GET_FIRMWARE_FEATURES */ ISPOPMAP(0x00, 0x00), /* 0x4c: */ ISPOPMAP(0x00, 0x00), /* 0x4d: */ ISPOPMAP(0x00, 0x00), /* 0x4e: */ ISPOPMAP(0x00, 0x00), /* 0x4f: */ ISPOPMAP(0xdf, 0xdf), /* 0x50: LOAD RAM A64 */ ISPOPMAP(0xdf, 0xdf), /* 0x51: DUMP RAM A64 */ ISPOPMAP(0xdf, 0xff), /* 0x52: INITIALIZE REQUEST QUEUE A64 */ ISPOPMAP(0xef, 0xff), /* 0x53: INITIALIZE RESPONSE QUEUE A64 */ ISPOPMAP(0xcf, 0x01), /* 0x54: EXECUCUTE COMMAND IOCB A64 */ ISPOPMAP(0x07, 0x01), /* 0x55: ENABLE TARGET MODE */ ISPOPMAP(0x03, 0x0f), /* 0x56: GET TARGET STATUS */ ISPOPMAP(0x00, 0x00), /* 0x57: */ ISPOPMAP(0x00, 0x00), /* 0x58: */ ISPOPMAP(0x00, 0x00), /* 0x59: */ ISPOPMAP(0x03, 0x03), /* 0x5a: SET DATA OVERRUN RECOVERY MODE */ ISPOPMAP(0x01, 0x03), /* 0x5b: GET DATA OVERRUN RECOVERY MODE */ ISPOPMAP(0x0f, 0x0f), /* 0x5c: SET HOST DATA */ ISPOPMAP(0x01, 0x01) /* 0x5d: GET NOST DATA */ }; static const char *scsi_mbcmd_names[] = { "NO-OP", "LOAD RAM", "EXEC FIRMWARE", "DUMP RAM", "WRITE RAM WORD", "READ RAM WORD", "MAILBOX REG TEST", "VERIFY CHECKSUM", "ABOUT FIRMWARE", NULL, NULL, NULL, NULL, NULL, "CHECK FIRMWARE", NULL, "INIT REQUEST QUEUE", 
"INIT RESULT QUEUE", "EXECUTE IOCB", "WAKE UP", "STOP FIRMWARE", "ABORT", "ABORT DEVICE", "ABORT TARGET", "BUS RESET", "STOP QUEUE", "START QUEUE", "SINGLE STEP QUEUE", "ABORT QUEUE", "GET DEV QUEUE STATUS", NULL, "GET FIRMWARE STATUS", "GET INIT SCSI ID", "GET SELECT TIMEOUT", "GET RETRY COUNT", "GET TAG AGE LIMIT", "GET CLOCK RATE", "GET ACT NEG STATE", "GET ASYNC DATA SETUP TIME", "GET PCI PARAMS", "GET TARGET PARAMS", "GET DEV QUEUE PARAMS", "GET RESET DELAY PARAMS", NULL, NULL, NULL, NULL, NULL, "SET INIT SCSI ID", "SET SELECT TIMEOUT", "SET RETRY COUNT", "SET TAG AGE LIMIT", "SET CLOCK RATE", "SET ACT NEG STATE", "SET ASYNC DATA SETUP TIME", "SET PCI CONTROL PARAMS", "SET TARGET PARAMS", "SET DEV QUEUE PARAMS", "SET RESET DELAY PARAMS", NULL, NULL, NULL, NULL, NULL, "RETURN BIOS BLOCK ADDR", "WRITE FOUR RAM WORDS", "EXEC BIOS IOCB", NULL, NULL, "SET SYSTEM PARAMETER", "GET SYSTEM PARAMETER", NULL, "GET SCAM CONFIGURATION", "SET SCAM CONFIGURATION", "SET FIRMWARE FEATURES", "GET FIRMWARE FEATURES", NULL, NULL, NULL, NULL, "LOAD RAM A64", "DUMP RAM A64", "INITIALIZE REQUEST QUEUE A64", "INITIALIZE RESPONSE QUEUE A64", "EXECUTE IOCB A64", "ENABLE TARGET MODE", "GET TARGET MODE STATE", NULL, NULL, NULL, "SET DATA OVERRUN RECOVERY MODE", "GET DATA OVERRUN RECOVERY MODE", "SET HOST DATA", "GET NOST DATA", }; static const uint32_t mbpfc[] = { ISPOPMAP(0x01, 0x01), /* 0x00: MBOX_NO_OP */ ISPOPMAP(0x1f, 0x01), /* 0x01: MBOX_LOAD_RAM */ ISPOPMAP(0x0f, 0x01), /* 0x02: MBOX_EXEC_FIRMWARE */ ISPOPMAP(0xdf, 0x01), /* 0x03: MBOX_DUMP_RAM */ ISPOPMAP(0x07, 0x07), /* 0x04: MBOX_WRITE_RAM_WORD */ ISPOPMAP(0x03, 0x07), /* 0x05: MBOX_READ_RAM_WORD */ ISPOPMAP(0xff, 0xff), /* 0x06: MBOX_MAILBOX_REG_TEST */ - ISPOPMAP(0x03, 0x07), /* 0x07: MBOX_VERIFY_CHECKSUM */ + ISPOPMAP(0x07, 0x07), /* 0x07: MBOX_VERIFY_CHECKSUM */ ISPOPMAP(0x01, 0x4f), /* 0x08: MBOX_ABOUT_FIRMWARE */ ISPOPMAP(0xdf, 0x01), /* 0x09: MBOX_LOAD_RISC_RAM_2100 */ ISPOPMAP(0xdf, 0x01), /* 0x0a: DUMP RAM */ 
ISPOPMAP(0x1ff, 0x01), /* 0x0b: MBOX_LOAD_RISC_RAM */ ISPOPMAP(0x00, 0x00), /* 0x0c: */ ISPOPMAP(0x10f, 0x01), /* 0x0d: MBOX_WRITE_RAM_WORD_EXTENDED */ ISPOPMAP(0x01, 0x05), /* 0x0e: MBOX_CHECK_FIRMWARE */ ISPOPMAP(0x10f, 0x05), /* 0x0f: MBOX_READ_RAM_WORD_EXTENDED */ ISPOPMAP(0x1f, 0x11), /* 0x10: MBOX_INIT_REQ_QUEUE */ ISPOPMAP(0x2f, 0x21), /* 0x11: MBOX_INIT_RES_QUEUE */ ISPOPMAP(0x0f, 0x01), /* 0x12: MBOX_EXECUTE_IOCB */ ISPOPMAP(0x03, 0x03), /* 0x13: MBOX_WAKE_UP */ ISPOPMAP(0x01, 0xff), /* 0x14: MBOX_STOP_FIRMWARE */ ISPOPMAP(0x4f, 0x01), /* 0x15: MBOX_ABORT */ ISPOPMAP(0x07, 0x01), /* 0x16: MBOX_ABORT_DEVICE */ ISPOPMAP(0x07, 0x01), /* 0x17: MBOX_ABORT_TARGET */ ISPOPMAP(0x03, 0x03), /* 0x18: MBOX_BUS_RESET */ ISPOPMAP(0x07, 0x05), /* 0x19: MBOX_STOP_QUEUE */ ISPOPMAP(0x07, 0x05), /* 0x1a: MBOX_START_QUEUE */ ISPOPMAP(0x07, 0x05), /* 0x1b: MBOX_SINGLE_STEP_QUEUE */ ISPOPMAP(0x07, 0x05), /* 0x1c: MBOX_ABORT_QUEUE */ ISPOPMAP(0x07, 0x03), /* 0x1d: MBOX_GET_DEV_QUEUE_STATUS */ ISPOPMAP(0x00, 0x00), /* 0x1e: */ ISPOPMAP(0x01, 0x07), /* 0x1f: MBOX_GET_FIRMWARE_STATUS */ ISPOPMAP(0x01, 0x4f), /* 0x20: MBOX_GET_LOOP_ID */ ISPOPMAP(0x00, 0x00), /* 0x21: */ ISPOPMAP(0x01, 0x07), /* 0x22: MBOX_GET_RETRY_COUNT */ ISPOPMAP(0x00, 0x00), /* 0x23: */ ISPOPMAP(0x00, 0x00), /* 0x24: */ ISPOPMAP(0x00, 0x00), /* 0x25: */ ISPOPMAP(0x00, 0x00), /* 0x26: */ ISPOPMAP(0x00, 0x00), /* 0x27: */ ISPOPMAP(0x01, 0x03), /* 0x28: MBOX_GET_FIRMWARE_OPTIONS */ ISPOPMAP(0x03, 0x07), /* 0x29: MBOX_GET_PORT_QUEUE_PARAMS */ ISPOPMAP(0x00, 0x00), /* 0x2a: */ ISPOPMAP(0x00, 0x00), /* 0x2b: */ ISPOPMAP(0x00, 0x00), /* 0x2c: */ ISPOPMAP(0x00, 0x00), /* 0x2d: */ ISPOPMAP(0x00, 0x00), /* 0x2e: */ ISPOPMAP(0x00, 0x00), /* 0x2f: */ ISPOPMAP(0x00, 0x00), /* 0x30: */ ISPOPMAP(0x00, 0x00), /* 0x31: */ ISPOPMAP(0x07, 0x07), /* 0x32: MBOX_SET_RETRY_COUNT */ ISPOPMAP(0x00, 0x00), /* 0x33: */ ISPOPMAP(0x00, 0x00), /* 0x34: */ ISPOPMAP(0x00, 0x00), /* 0x35: */ ISPOPMAP(0x00, 0x00), /* 0x36: */ ISPOPMAP(0x00, 
0x00), /* 0x37: */ ISPOPMAP(0x0f, 0x01), /* 0x38: MBOX_SET_FIRMWARE_OPTIONS */ ISPOPMAP(0x0f, 0x07), /* 0x39: MBOX_SET_PORT_QUEUE_PARAMS */ ISPOPMAP(0x00, 0x00), /* 0x3a: */ ISPOPMAP(0x00, 0x00), /* 0x3b: */ ISPOPMAP(0x00, 0x00), /* 0x3c: */ ISPOPMAP(0x00, 0x00), /* 0x3d: */ ISPOPMAP(0x00, 0x00), /* 0x3e: */ ISPOPMAP(0x00, 0x00), /* 0x3f: */ ISPOPMAP(0x03, 0x01), /* 0x40: MBOX_LOOP_PORT_BYPASS */ ISPOPMAP(0x03, 0x01), /* 0x41: MBOX_LOOP_PORT_ENABLE */ ISPOPMAP(0x03, 0x07), /* 0x42: MBOX_GET_RESOURCE_COUNT */ ISPOPMAP(0x01, 0x01), /* 0x43: MBOX_REQUEST_OFFLINE_MODE */ ISPOPMAP(0x00, 0x00), /* 0x44: */ ISPOPMAP(0x00, 0x00), /* 0x45: */ ISPOPMAP(0x00, 0x00), /* 0x46: */ ISPOPMAP(0xcf, 0x03), /* 0x47: GET PORT_DATABASE ENHANCED */ - ISPOPMAP(0x00, 0x00), /* 0x48: */ - ISPOPMAP(0x00, 0x00), /* 0x49: */ - ISPOPMAP(0x00, 0x00), /* 0x4a: */ + ISPOPMAP(0xcd, 0x01), /* 0x48: MBOX_INIT_FIRMWARE_MULTI_ID */ + ISPOPMAP(0xcd, 0x01), /* 0x49: MBOX_GET_VP_DATABASE */ + ISPOPMAP(0x2cd, 0x01), /* 0x4a: MBOX_GET_VP_DATABASE_ENTRY */ ISPOPMAP(0x00, 0x00), /* 0x4b: */ ISPOPMAP(0x00, 0x00), /* 0x4c: */ ISPOPMAP(0x00, 0x00), /* 0x4d: */ ISPOPMAP(0x00, 0x00), /* 0x4e: */ ISPOPMAP(0x00, 0x00), /* 0x4f: */ ISPOPMAP(0x00, 0x00), /* 0x50: */ ISPOPMAP(0x00, 0x00), /* 0x51: */ ISPOPMAP(0x00, 0x00), /* 0x52: */ ISPOPMAP(0x00, 0x00), /* 0x53: */ ISPOPMAP(0xcf, 0x01), /* 0x54: EXECUTE IOCB A64 */ ISPOPMAP(0x00, 0x00), /* 0x55: */ ISPOPMAP(0x00, 0x00), /* 0x56: */ ISPOPMAP(0x00, 0x00), /* 0x57: */ ISPOPMAP(0x00, 0x00), /* 0x58: */ ISPOPMAP(0x00, 0x00), /* 0x59: */ ISPOPMAP(0x00, 0x00), /* 0x5a: */ ISPOPMAP(0x03, 0x01), /* 0x5b: MBOX_DRIVER_HEARTBEAT */ ISPOPMAP(0xcf, 0x01), /* 0x5c: MBOX_FW_HEARTBEAT */ ISPOPMAP(0x07, 0x03), /* 0x5d: MBOX_GET_SET_DATA_RATE */ ISPOPMAP(0x00, 0x00), /* 0x5e: */ ISPOPMAP(0x00, 0x00), /* 0x5f: */ ISPOPMAP(0xcd, 0x01), /* 0x60: MBOX_INIT_FIRMWARE */ ISPOPMAP(0x00, 0x00), /* 0x61: */ ISPOPMAP(0x01, 0x01), /* 0x62: MBOX_INIT_LIP */ ISPOPMAP(0xcd, 0x03), /* 0x63: 
MBOX_GET_FC_AL_POSITION_MAP */ ISPOPMAP(0xcf, 0x01), /* 0x64: MBOX_GET_PORT_DB */ ISPOPMAP(0x07, 0x01), /* 0x65: MBOX_CLEAR_ACA */ ISPOPMAP(0x07, 0x01), /* 0x66: MBOX_TARGET_RESET */ ISPOPMAP(0x07, 0x01), /* 0x67: MBOX_CLEAR_TASK_SET */ ISPOPMAP(0x07, 0x01), /* 0x68: MBOX_ABORT_TASK_SET */ ISPOPMAP(0x01, 0x07), /* 0x69: MBOX_GET_FW_STATE */ ISPOPMAP(0x03, 0xcf), /* 0x6a: MBOX_GET_PORT_NAME */ ISPOPMAP(0xcf, 0x01), /* 0x6b: MBOX_GET_LINK_STATUS */ ISPOPMAP(0x0f, 0x01), /* 0x6c: MBOX_INIT_LIP_RESET */ ISPOPMAP(0x00, 0x00), /* 0x6d: */ ISPOPMAP(0xcf, 0x03), /* 0x6e: MBOX_SEND_SNS */ ISPOPMAP(0x0f, 0x07), /* 0x6f: MBOX_FABRIC_LOGIN */ ISPOPMAP(0x03, 0x01), /* 0x70: MBOX_SEND_CHANGE_REQUEST */ ISPOPMAP(0x03, 0x03), /* 0x71: MBOX_FABRIC_LOGOUT */ ISPOPMAP(0x0f, 0x0f), /* 0x72: MBOX_INIT_LIP_LOGIN */ ISPOPMAP(0x00, 0x00), /* 0x73: */ ISPOPMAP(0x07, 0x01), /* 0x74: LOGIN LOOP PORT */ ISPOPMAP(0xcf, 0x03), /* 0x75: GET PORT/NODE NAME LIST */ ISPOPMAP(0x4f, 0x01), /* 0x76: SET VENDOR ID */ ISPOPMAP(0xcd, 0x01), /* 0x77: INITIALIZE IP MAILBOX */ ISPOPMAP(0x00, 0x00), /* 0x78: */ ISPOPMAP(0x00, 0x00), /* 0x79: */ ISPOPMAP(0x00, 0x00), /* 0x7a: */ ISPOPMAP(0x00, 0x00), /* 0x7b: */ ISPOPMAP(0x4f, 0x03), /* 0x7c: Get ID List */ ISPOPMAP(0xcf, 0x01), /* 0x7d: SEND LFA */ ISPOPMAP(0x0f, 0x01) /* 0x7e: LUN RESET */ }; /* * Footnotes * - * (1): this sets bits 21..16 in mailbox register #8, which we nominally + * (1): this sets bits 21..16 in mailbox register #8, which we nominally * do not access at this time in the core driver. The caller is * responsible for setting this register first (Gross!). The assumption * is that we won't overflow. 
*/ static const char *fc_mbcmd_names[] = { "NO-OP", "LOAD RAM", "EXEC FIRMWARE", "DUMP RAM", "WRITE RAM WORD", "READ RAM WORD", "MAILBOX REG TEST", "VERIFY CHECKSUM", "ABOUT FIRMWARE", "LOAD RAM", "DUMP RAM", "WRITE RAM WORD EXTENDED", NULL, "READ RAM WORD EXTENDED", "CHECK FIRMWARE", NULL, "INIT REQUEST QUEUE", "INIT RESULT QUEUE", "EXECUTE IOCB", "WAKE UP", "STOP FIRMWARE", "ABORT", "ABORT DEVICE", "ABORT TARGET", "BUS RESET", "STOP QUEUE", "START QUEUE", "SINGLE STEP QUEUE", "ABORT QUEUE", "GET DEV QUEUE STATUS", NULL, "GET FIRMWARE STATUS", "GET LOOP ID", NULL, "GET RETRY COUNT", NULL, NULL, NULL, NULL, NULL, "GET FIRMWARE OPTIONS", "GET PORT QUEUE PARAMS", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "SET RETRY COUNT", NULL, NULL, NULL, NULL, NULL, "SET FIRMWARE OPTIONS", "SET PORT QUEUE PARAMS", NULL, NULL, NULL, NULL, NULL, NULL, "LOOP PORT BYPASS", "LOOP PORT ENABLE", "GET RESOURCE COUNT", "REQUEST NON PARTICIPATING MODE", NULL, NULL, NULL, "GET PORT DATABASE ENHANCED", + "INIT FIRMWARE MULTI ID", + "GET VP DATABASE", + "GET VP DATABASE ENTRY", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, - NULL, - NULL, "EXECUTE IOCB A64", NULL, NULL, NULL, NULL, NULL, NULL, "DRIVER HEARTBEAT", NULL, "GET/SET DATA RATE", NULL, NULL, "INIT FIRMWARE", NULL, "INIT LIP", "GET FC-AL POSITION MAP", "GET PORT DATABASE", "CLEAR ACA", "TARGET RESET", "CLEAR TASK SET", "ABORT TASK SET", "GET FW STATE", "GET PORT NAME", "GET LINK STATUS", "INIT LIP RESET", NULL, "SEND SNS", "FABRIC LOGIN", "SEND CHANGE REQUEST", "FABRIC LOGOUT", "INIT LIP LOGIN", NULL, "LOGIN LOOP PORT", "GET PORT/NODE NAME LIST", "SET VENDOR ID", "INITIALIZE IP MAILBOX", NULL, NULL, NULL, NULL, "Get ID List", "SEND LFA", "Lun RESET" }; static void isp_mboxcmd_qnw(ispsoftc_t *isp, mbreg_t *mbp, int nodelay) { unsigned int ibits, obits, box, opcode; const uint32_t *mcp; if (IS_FC(isp)) { mcp = mbpfc; } else { mcp = mbpscsi; } opcode = mbp->param[0]; ibits = HIWRD(mcp[opcode]) & NMBOX_BMASK(isp); 
obits = LOWRD(mcp[opcode]) & NMBOX_BMASK(isp); ibits |= mbp->ibits; obits |= mbp->obits; for (box = 0; box < MAX_MAILBOX(isp); box++) { if (ibits & (1 << box)) { ISP_WRITE(isp, MBOX_OFF(box), mbp->param[box]); } if (nodelay == 0) { isp->isp_mboxtmp[box] = mbp->param[box] = 0; } } if (nodelay == 0) { isp->isp_lastmbxcmd = opcode; isp->isp_obits = obits; isp->isp_mboxbsy = 1; } if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_SET_HOST_INT); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_SET_HOST_INT); } /* * Oddly enough, if we're not delaying for an answer, * delay a bit to give the f/w a chance to pick up the * command. */ if (nodelay) { - USEC_DELAY(1000); + ISP_DELAY(1000); } } static void isp_mboxcmd(ispsoftc_t *isp, mbreg_t *mbp) { const char *cname, *xname; char tname[16], mname[16]; unsigned int lim, ibits, obits, box, opcode; const uint32_t *mcp; if (IS_FC(isp)) { mcp = mbpfc; lim = (sizeof (mbpfc) / sizeof (mbpfc[0])); } else { mcp = mbpscsi; lim = (sizeof (mbpscsi) / sizeof (mbpscsi[0])); } if ((opcode = mbp->param[0]) >= lim) { mbp->param[0] = MBOX_INVALID_COMMAND; isp_prt(isp, ISP_LOGERR, "Unknown Command 0x%x", opcode); return; } ibits = HIWRD(mcp[opcode]) & NMBOX_BMASK(isp); obits = LOWRD(mcp[opcode]) & NMBOX_BMASK(isp); /* * Pick up any additional bits that the caller might have set. */ ibits |= mbp->ibits; obits |= mbp->obits; if (ibits == 0 && obits == 0) { mbp->param[0] = MBOX_COMMAND_PARAM_ERROR; isp_prt(isp, ISP_LOGERR, "no parameters for 0x%x", opcode); return; } /* * Get exclusive usage of mailbox registers. 
*/ if (MBOX_ACQUIRE(isp)) { mbp->param[0] = MBOX_REGS_BUSY; goto out; } for (box = 0; box < MAX_MAILBOX(isp); box++) { if (ibits & (1 << box)) { - isp_prt(isp, ISP_LOGDEBUG1, "IN mbox %d = 0x%04x", box, + isp_prt(isp, ISP_LOGDEBUG3, "IN mbox %d = 0x%04x", box, mbp->param[box]); ISP_WRITE(isp, MBOX_OFF(box), mbp->param[box]); } isp->isp_mboxtmp[box] = mbp->param[box] = 0; } isp->isp_lastmbxcmd = opcode; /* * We assume that we can't overwrite a previous command. */ isp->isp_obits = obits; isp->isp_mboxbsy = 1; /* * Set Host Interrupt condition so that RISC will pick up mailbox regs. */ if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_SET_HOST_INT); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_SET_HOST_INT); } /* * While we haven't finished the command, spin our wheels here. */ MBOX_WAIT_COMPLETE(isp, mbp); /* * Did the command time out? */ if (mbp->param[0] == MBOX_TIMEOUT) { + isp->isp_mboxbsy = 0; MBOX_RELEASE(isp); goto out; } /* * Copy back output registers. */ for (box = 0; box < MAX_MAILBOX(isp); box++) { if (obits & (1 << box)) { mbp->param[box] = isp->isp_mboxtmp[box]; - isp_prt(isp, ISP_LOGDEBUG1, "OUT mbox %d = 0x%04x", box, + isp_prt(isp, ISP_LOGDEBUG3, "OUT mbox %d = 0x%04x", box, mbp->param[box]); } } + isp->isp_mboxbsy = 0; MBOX_RELEASE(isp); out: - isp->isp_mboxbsy = 0; if (mbp->logval == 0 || opcode == MBOX_EXEC_FIRMWARE) { return; } cname = (IS_FC(isp))? fc_mbcmd_names[opcode] : scsi_mbcmd_names[opcode]; if (cname == NULL) { cname = tname; - SNPRINTF(tname, sizeof tname, "opcode %x", opcode); + ISP_SNPRINTF(tname, sizeof tname, "opcode %x", opcode); } /* * Just to be chatty here... 
*/ xname = NULL; switch (mbp->param[0]) { case MBOX_COMMAND_COMPLETE: break; case MBOX_INVALID_COMMAND: if (mbp->logval & MBLOGMASK(MBOX_COMMAND_COMPLETE)) { xname = "INVALID COMMAND"; } break; case MBOX_HOST_INTERFACE_ERROR: if (mbp->logval & MBLOGMASK(MBOX_HOST_INTERFACE_ERROR)) { xname = "HOST INTERFACE ERROR"; } break; case MBOX_TEST_FAILED: if (mbp->logval & MBLOGMASK(MBOX_TEST_FAILED)) { xname = "TEST FAILED"; } break; case MBOX_COMMAND_ERROR: if (mbp->logval & MBLOGMASK(MBOX_COMMAND_ERROR)) { xname = "COMMAND ERROR"; } break; case MBOX_COMMAND_PARAM_ERROR: if (mbp->logval & MBLOGMASK(MBOX_COMMAND_PARAM_ERROR)) { xname = "COMMAND PARAMETER ERROR"; } break; case MBOX_LOOP_ID_USED: if (mbp->logval & MBLOGMASK(MBOX_LOOP_ID_USED)) { xname = "LOOP ID ALREADY IN USE"; } break; case MBOX_PORT_ID_USED: if (mbp->logval & MBLOGMASK(MBOX_PORT_ID_USED)) { xname = "PORT ID ALREADY IN USE"; } break; case MBOX_ALL_IDS_USED: if (mbp->logval & MBLOGMASK(MBOX_ALL_IDS_USED)) { xname = "ALL LOOP IDS IN USE"; } break; case MBOX_REGS_BUSY: xname = "REGISTERS BUSY"; break; case MBOX_TIMEOUT: xname = "TIMEOUT"; break; default: - SNPRINTF(mname, sizeof mname, "error 0x%x", mbp->param[0]); + ISP_SNPRINTF(mname, sizeof mname, "error 0x%x", mbp->param[0]); xname = mname; break; } if (xname) { isp_prt(isp, ISP_LOGALL, "Mailbox Command '%s' failed (%s)", cname, xname); } } static void -isp_fw_state(ispsoftc_t *isp) +isp_fw_state(ispsoftc_t *isp, int chan) { if (IS_FC(isp)) { mbreg_t mbs; - fcparam *fcp = isp->isp_param; + fcparam *fcp = FCPARAM(isp, chan); - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_GET_FW_STATE; - mbs.logval = MBLOGALL; + MBSINIT(&mbs, MBOX_GET_FW_STATE, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { fcp->isp_fwstate = mbs.param[1]; } } } static void -isp_update(ispsoftc_t *isp) +isp_spi_update(ispsoftc_t *isp, int chan) { - int bus, upmask; - - for (bus = 0, upmask = isp->isp_update; upmask != 0; bus++) { - if (upmask & (1 << 
bus)) { - isp_update_bus(isp, bus); - } - upmask &= ~(1 << bus); - } -} - -static void -isp_update_bus(ispsoftc_t *isp, int bus) -{ int tgt; mbreg_t mbs; sdparam *sdp; - isp->isp_update &= ~(1 << bus); if (IS_FC(isp)) { /* * There are no 'per-bus' settings for Fibre Channel. */ return; } - sdp = isp->isp_param; - sdp += bus; + sdp = SDPARAM(isp, chan); + sdp->update = 0; for (tgt = 0; tgt < MAX_TARGETS; tgt++) { uint16_t flags, period, offset; int get; if (sdp->isp_devparam[tgt].dev_enable == 0) { sdp->isp_devparam[tgt].dev_update = 0; sdp->isp_devparam[tgt].dev_refresh = 0; isp_prt(isp, ISP_LOGDEBUG0, - "skipping target %d bus %d update", tgt, bus); + "skipping target %d bus %d update", tgt, chan); continue; } /* * If the goal is to update the status of the device, * take what's in goal_flags and try and set the device * toward that. Otherwise, if we're just refreshing the * current device state, get the current parameters. */ - MEMZERO(&mbs, sizeof (mbs)); + MBSINIT(&mbs, 0, MBLOGALL, 0); /* * Refresh overrides set */ if (sdp->isp_devparam[tgt].dev_refresh) { mbs.param[0] = MBOX_GET_TARGET_PARAMS; get = 1; } else if (sdp->isp_devparam[tgt].dev_update) { mbs.param[0] = MBOX_SET_TARGET_PARAMS; /* * Make sure goal_flags has "Renegotiate on Error" * on and "Freeze Queue on Error" off. */ sdp->isp_devparam[tgt].goal_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].goal_flags &= ~DPARM_QFRZ; mbs.param[2] = sdp->isp_devparam[tgt].goal_flags; /* * Insist that PARITY must be enabled * if SYNC or WIDE is enabled. */ if ((mbs.param[2] & (DPARM_SYNC|DPARM_WIDE)) != 0) { mbs.param[2] |= DPARM_PARITY; } if (mbs.param[2] & DPARM_SYNC) { mbs.param[3] = (sdp->isp_devparam[tgt].goal_offset << 8) | (sdp->isp_devparam[tgt].goal_period); } /* * A command completion later that has * RQSTF_NEGOTIATION set can cause * the dev_refresh/announce cycle also. 
* * Note: It is really important to update our current * flags with at least the state of TAG capabilities- * otherwise we might try and send a tagged command * when we have it all turned off. So change it here * to say that current already matches goal. */ sdp->isp_devparam[tgt].actv_flags &= ~DPARM_TQING; sdp->isp_devparam[tgt].actv_flags |= (sdp->isp_devparam[tgt].goal_flags & DPARM_TQING); isp_prt(isp, ISP_LOGDEBUG0, "bus %d set tgt %d flags 0x%x off 0x%x period 0x%x", - bus, tgt, mbs.param[2], mbs.param[3] >> 8, + chan, tgt, mbs.param[2], mbs.param[3] >> 8, mbs.param[3] & 0xff); get = 0; } else { continue; } - mbs.param[1] = (bus << 15) | (tgt << 8); - mbs.logval = MBLOGALL; + mbs.param[1] = (chan << 15) | (tgt << 8); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { continue; } if (get == 0) { - isp->isp_sendmarker |= (1 << bus); + sdp->sendmarker = 1; sdp->isp_devparam[tgt].dev_update = 0; sdp->isp_devparam[tgt].dev_refresh = 1; } else { sdp->isp_devparam[tgt].dev_refresh = 0; flags = mbs.param[2]; period = mbs.param[3] & 0xff; offset = mbs.param[3] >> 8; sdp->isp_devparam[tgt].actv_flags = flags; sdp->isp_devparam[tgt].actv_period = period; sdp->isp_devparam[tgt].actv_offset = offset; - get = (bus << 16) | tgt; - (void) isp_async(isp, ISPASYNC_NEW_TGT_PARAMS, &get); + isp_async(isp, ISPASYNC_NEW_TGT_PARAMS, chan, tgt); } } for (tgt = 0; tgt < MAX_TARGETS; tgt++) { if (sdp->isp_devparam[tgt].dev_update || sdp->isp_devparam[tgt].dev_refresh) { - isp->isp_update |= (1 << bus); + sdp->update = 1; break; } } } -#ifndef DEFAULT_EXEC_THROTTLE -#define DEFAULT_EXEC_THROTTLE(isp) ISP_EXEC_THROTTLE -#endif - static void -isp_setdfltparm(ispsoftc_t *isp, int channel) +isp_setdfltsdparm(ispsoftc_t *isp) { int tgt; - sdparam *sdp; + sdparam *sdp, *sdp1; - sdp = (sdparam *) isp->isp_param; - sdp += channel; + sdp = SDPARAM(isp, 0); + sdp->role = GET_DEFAULT_ROLE(isp, 0); + if (IS_DUALBUS(isp)) { + sdp1 = sdp + 1; + sdp1->role = GET_DEFAULT_ROLE(isp, 
1); + } else { + sdp1 = NULL; + } /* - * Been there, done that, got the T-shirt... - */ - if (sdp->isp_gotdparms) { - return; - } - sdp->isp_gotdparms = 1; - sdp->isp_bad_nvram = 0; - /* * Establish some default parameters. */ sdp->isp_cmd_dma_burst_enable = 0; sdp->isp_data_dma_burst_enabl = 1; sdp->isp_fifo_threshold = 0; - sdp->isp_initiator_id = DEFAULT_IID(isp); + sdp->isp_initiator_id = DEFAULT_IID(isp, 0); if (isp->isp_type >= ISP_HA_SCSI_1040) { sdp->isp_async_data_setup = 9; } else { sdp->isp_async_data_setup = 6; } sdp->isp_selection_timeout = 250; sdp->isp_max_queue_depth = MAXISPREQUEST(isp); sdp->isp_tag_aging = 8; sdp->isp_bus_reset_delay = 5; /* * Don't retry selection, busy or queue full automatically- reflect * these back to us. */ sdp->isp_retry_count = 0; sdp->isp_retry_delay = 0; for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].exc_throttle = ISP_EXEC_THROTTLE; sdp->isp_devparam[tgt].dev_enable = 1; } /* - * If we've not been told to avoid reading NVRAM, try and read it. - * If we're successful reading it, we can then return because NVRAM - * will tell us what the desired settings are. Otherwise, we establish - * some reasonable 'fake' nvram and goal defaults. - */ - - if ((isp->isp_confopts & ISP_CFG_NONVRAM) == 0) { - if (isp_read_nvram(isp) == 0) { - return; - } - sdp->isp_bad_nvram = 1; - } - - /* - * Now try and see whether we have specific values for them. 
- */ - if ((isp->isp_confopts & ISP_CFG_NONVRAM) == 0) { - mbreg_t mbs; - - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_GET_ACT_NEG_STATE; - mbs.logval = MBLOGNONE; - isp_mboxcmd(isp, &mbs); - if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { - sdp->isp_req_ack_active_neg = 1; - sdp->isp_data_line_active_neg = 1; - } else { - sdp->isp_req_ack_active_neg = - (mbs.param[1+channel] >> 4) & 0x1; - sdp->isp_data_line_active_neg = - (mbs.param[1+channel] >> 5) & 0x1; - } - } - - isp_prt(isp, ISP_LOGDEBUG0, sc0, sc3, - 0, sdp->isp_fifo_threshold, sdp->isp_initiator_id, - sdp->isp_bus_reset_delay, sdp->isp_retry_count, - sdp->isp_retry_delay, sdp->isp_async_data_setup); - isp_prt(isp, ISP_LOGDEBUG0, sc1, sc3, - sdp->isp_req_ack_active_neg, sdp->isp_data_line_active_neg, - sdp->isp_data_dma_burst_enabl, sdp->isp_cmd_dma_burst_enable, - sdp->isp_selection_timeout, sdp->isp_max_queue_depth); - - /* * The trick here is to establish a default for the default (honk!) * state (goal_flags). Then try and get the current status from * the card to fill in the current state. We don't, in fact, set * the default to the SAFE default state- that's not the goal state. */ for (tgt = 0; tgt < MAX_TARGETS; tgt++) { uint8_t off, per; sdp->isp_devparam[tgt].actv_offset = 0; sdp->isp_devparam[tgt].actv_period = 0; sdp->isp_devparam[tgt].actv_flags = 0; sdp->isp_devparam[tgt].goal_flags = sdp->isp_devparam[tgt].nvrm_flags = DPARM_DEFAULT; /* * We default to Wide/Fast for versions less than a 1040 * (unless it's SBus). 
*/ if (IS_ULTRA3(isp)) { off = ISP_80M_SYNCPARMS >> 8; per = ISP_80M_SYNCPARMS & 0xff; } else if (IS_ULTRA2(isp)) { off = ISP_40M_SYNCPARMS >> 8; per = ISP_40M_SYNCPARMS & 0xff; } else if (IS_1240(isp)) { off = ISP_20M_SYNCPARMS >> 8; per = ISP_20M_SYNCPARMS & 0xff; } else if ((isp->isp_bustype == ISP_BT_SBUS && isp->isp_type < ISP_HA_SCSI_1020A) || (isp->isp_bustype == ISP_BT_PCI && isp->isp_type < ISP_HA_SCSI_1040) || (isp->isp_clock && isp->isp_clock < 60) || (sdp->isp_ultramode == 0)) { off = ISP_10M_SYNCPARMS >> 8; per = ISP_10M_SYNCPARMS & 0xff; } else { off = ISP_20M_SYNCPARMS_1040 >> 8; per = ISP_20M_SYNCPARMS_1040 & 0xff; } sdp->isp_devparam[tgt].goal_offset = sdp->isp_devparam[tgt].nvrm_offset = off; sdp->isp_devparam[tgt].goal_period = sdp->isp_devparam[tgt].nvrm_period = per; - isp_prt(isp, ISP_LOGDEBUG0, sc2, sc3, - channel, tgt, sdp->isp_devparam[tgt].nvrm_flags, - sdp->isp_devparam[tgt].nvrm_offset, - sdp->isp_devparam[tgt].nvrm_period); } + + /* + * If we're a dual bus card, just copy the data over + */ + if (sdp1) { + *sdp1 = *sdp; + sdp1->isp_initiator_id = DEFAULT_IID(isp, 1); + } + + /* + * If we've not been told to avoid reading NVRAM, try and read it. + * If we're successful reading it, we can then return because NVRAM + * will tell us what the desired settings are. Otherwise, we establish + * some reasonable 'fake' nvram and goal defaults. 
+ */ + if ((isp->isp_confopts & ISP_CFG_NONVRAM) == 0) { + mbreg_t mbs; + + if (isp_read_nvram(isp, 0) == 0) { + if (IS_DUALBUS(isp)) { + if (isp_read_nvram(isp, 1) == 0) { + return; + } + } + } + MBSINIT(&mbs, MBOX_GET_ACT_NEG_STATE, MBLOGNONE, 0); + isp_mboxcmd(isp, &mbs); + if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { + sdp->isp_req_ack_active_neg = 1; + sdp->isp_data_line_active_neg = 1; + if (sdp1) { + sdp1->isp_req_ack_active_neg = 1; + sdp1->isp_data_line_active_neg = 1; + } + } else { + sdp->isp_req_ack_active_neg = + (mbs.param[1] >> 4) & 0x1; + sdp->isp_data_line_active_neg = + (mbs.param[1] >> 5) & 0x1; + if (sdp1) { + sdp1->isp_req_ack_active_neg = + (mbs.param[2] >> 4) & 0x1; + sdp1->isp_data_line_active_neg = + (mbs.param[2] >> 5) & 0x1; + } + } + } + } -#ifndef DEFAULT_FRAMESIZE -#define DEFAULT_FRAMESIZE(isp) ICB_DFLT_FRMLEN -#endif static void -isp_setdfltfcparm(ispsoftc_t *isp) +isp_setdfltfcparm(ispsoftc_t *isp, int chan) { - fcparam *fcp = FCPARAM(isp); + fcparam *fcp = FCPARAM(isp, chan); - if (fcp->isp_gotdparms) { - return; - } - fcp->isp_gotdparms = 1; - fcp->isp_bad_nvram = 0; - fcp->isp_maxfrmlen = DEFAULT_FRAMESIZE(isp); + /* + * Establish some default parameters. + */ + fcp->role = GET_DEFAULT_ROLE(isp, chan); fcp->isp_maxalloc = ICB_DFLT_ALLOC; - fcp->isp_execthrottle = DEFAULT_EXEC_THROTTLE(isp); fcp->isp_retry_delay = ICB_DFLT_RDELAY; fcp->isp_retry_count = ICB_DFLT_RCOUNT; - /* Platform specific.... 
*/ - fcp->isp_loopid = DEFAULT_LOOPID(isp); - fcp->isp_wwnn_nvram = DEFAULT_NODEWWN(isp); - fcp->isp_wwpn_nvram = DEFAULT_PORTWWN(isp); + fcp->isp_loopid = DEFAULT_LOOPID(isp, chan); + fcp->isp_wwnn_nvram = DEFAULT_NODEWWN(isp, chan); + fcp->isp_wwpn_nvram = DEFAULT_PORTWWN(isp, chan); fcp->isp_fwoptions = 0; - fcp->isp_fwoptions |= ICBOPT_FAIRNESS; - fcp->isp_fwoptions |= ICBOPT_PDBCHANGE_AE; - fcp->isp_fwoptions |= ICBOPT_HARD_ADDRESS; - fcp->isp_fwoptions |= ICBOPT_FAST_POST; - if (isp->isp_confopts & ISP_CFG_FULL_DUPLEX) { - fcp->isp_fwoptions |= ICBOPT_FULL_DUPLEX; + fcp->isp_lasthdl = NIL_HANDLE; + + if (IS_24XX(isp)) { + fcp->isp_fwoptions |= ICB2400_OPT1_FAIRNESS; + fcp->isp_fwoptions |= ICB2400_OPT1_HARD_ADDRESS; + if (isp->isp_confopts & ISP_CFG_FULL_DUPLEX) { + fcp->isp_fwoptions |= ICB2400_OPT1_FULL_DUPLEX; + } + fcp->isp_fwoptions |= ICB2400_OPT1_BOTH_WWNS; + } else { + fcp->isp_fwoptions |= ICBOPT_FAIRNESS; + fcp->isp_fwoptions |= ICBOPT_PDBCHANGE_AE; + fcp->isp_fwoptions |= ICBOPT_HARD_ADDRESS; + fcp->isp_fwoptions |= ICBOPT_FAST_POST; + if (isp->isp_confopts & ISP_CFG_FULL_DUPLEX) { + fcp->isp_fwoptions |= ICBOPT_FULL_DUPLEX; + } + /* + * Make sure this is turned off now until we get + * extended options from NVRAM + */ + fcp->isp_fwoptions &= ~ICBOPT_EXTENDED; } - /* - * Make sure this is turned off now until we get - * extended options from NVRAM - */ - fcp->isp_fwoptions &= ~ICBOPT_EXTENDED; /* * Now try and read NVRAM unless told to not do so. * This will set fcparam's isp_wwnn_nvram && isp_wwpn_nvram. */ if ((isp->isp_confopts & ISP_CFG_NONVRAM) == 0) { int i, j = 0; /* * Give a couple of tries at reading NVRAM. 
*/ for (i = 0; i < 2; i++) { - j = isp_read_nvram(isp); + j = isp_read_nvram(isp, chan); if (j == 0) { break; } } if (j) { - fcp->isp_bad_nvram = 1; isp->isp_confopts |= ISP_CFG_NONVRAM; - isp->isp_confopts |= ISP_CFG_OWNWWPN; - isp->isp_confopts |= ISP_CFG_OWNWWNN; } - } else { - isp->isp_confopts |= ISP_CFG_OWNWWPN|ISP_CFG_OWNWWNN; } - /* - * Set node && port to override platform set defaults - * unless the nvram read failed (or none was done), - * or the platform code wants to use what had been - * set in the defaults. - */ - if (isp->isp_confopts & ISP_CFG_OWNWWNN) { - isp_prt(isp, ISP_LOGCONFIG, "Using Node WWN 0x%08x%08x", - (uint32_t) (DEFAULT_NODEWWN(isp) >> 32), - (uint32_t) (DEFAULT_NODEWWN(isp) & 0xffffffff)); - ISP_NODEWWN(isp) = DEFAULT_NODEWWN(isp); - } else { - /* - * We always start out with values derived - * from NVRAM or our platform default. - */ - ISP_NODEWWN(isp) = fcp->isp_wwnn_nvram; - if (fcp->isp_wwnn_nvram == 0) { - isp_prt(isp, ISP_LOGCONFIG, - "bad WWNN- using default"); - ISP_NODEWWN(isp) = DEFAULT_NODEWWN(isp); - } - } - if (isp->isp_confopts & ISP_CFG_OWNWWPN) { - isp_prt(isp, ISP_LOGCONFIG, "Using Port WWN 0x%08x%08x", - (uint32_t) (DEFAULT_PORTWWN(isp) >> 32), - (uint32_t) (DEFAULT_PORTWWN(isp) & 0xffffffff)); - ISP_PORTWWN(isp) = DEFAULT_PORTWWN(isp); - } else { - /* - * We always start out with values derived - * from NVRAM or our platform default. 
- */ - ISP_PORTWWN(isp) = fcp->isp_wwpn_nvram; - if (fcp->isp_wwpn_nvram == 0) { - isp_prt(isp, ISP_LOGCONFIG, - "bad WWPN- using default"); - ISP_PORTWWN(isp) = DEFAULT_PORTWWN(isp); - } - } + fcp->isp_wwnn = ACTIVE_NODEWWN(isp, chan); + fcp->isp_wwpn = ACTIVE_PORTWWN(isp, chan); + isp_prt(isp, ISP_LOGCONFIG, "Chan %d 0x%08x%08x/0x%08x%08x Role %s", + chan, (uint32_t) (fcp->isp_wwnn >> 32), (uint32_t) (fcp->isp_wwnn), + (uint32_t) (fcp->isp_wwpn >> 32), (uint32_t) (fcp->isp_wwpn), + isp_class3_roles[fcp->role]); } /* * Re-initialize the ISP and complete all orphaned commands * with a 'botched' notice. The reset/init routines should * not disturb an already active list of commands. */ void -isp_reinit(ispsoftc_t *isp) +isp_reinit(ispsoftc_t *isp, int do_load_defaults) { - XS_T *xs; - uint32_t tmp; + int i; - if (IS_FC(isp)) { - ISP_MARK_PORTDB(isp, 0); - } - isp_reset(isp); + isp_reset(isp, do_load_defaults); + if (isp->isp_state != ISP_RESETSTATE) { - isp_prt(isp, ISP_LOGERR, "isp_reinit cannot reset card"); - } else if (isp->isp_role != ISP_ROLE_NONE) { - isp_init(isp); - if (isp->isp_state == ISP_INITSTATE) { - isp->isp_state = ISP_RUNSTATE; - } - if (isp->isp_state != ISP_RUNSTATE) { - isp_prt(isp, ISP_LOGERR, - "isp_reinit cannot restart card"); - ISP_DISABLE_INTS(isp); - } - } else { + isp_prt(isp, ISP_LOGERR, "%s: cannot reset card", __func__); ISP_DISABLE_INTS(isp); + goto cleanup; + } + + isp_init(isp); + + if (isp->isp_state == ISP_INITSTATE) { + isp->isp_state = ISP_RUNSTATE; + } + + if (isp->isp_state != ISP_RUNSTATE) { +#ifndef ISP_TARGET_MODE + isp_prt(isp, ISP_LOGWARN, "%s: not at runstate", __func__); +#endif + ISP_DISABLE_INTS(isp); if (IS_FC(isp)) { /* * If we're in ISP_ROLE_NONE, turn off the lasers. 
*/ if (!IS_24XX(isp)) { ISP_WRITE(isp, BIU2100_CSR, BIU2100_FPM0_REGS); ISP_WRITE(isp, FPM_DIAG_CONFIG, FPM_SOFT_RESET); ISP_WRITE(isp, BIU2100_CSR, BIU2100_FB_REGS); ISP_WRITE(isp, FBM_CMD, FBMCMD_FIFO_RESET_ALL); ISP_WRITE(isp, BIU2100_CSR, BIU2100_RISC_REGS); } } } - isp->isp_nactive = 0; - for (tmp = 0; tmp < isp->isp_maxcmds; tmp++) { - uint32_t handle; + cleanup: - xs = isp->isp_xflist[tmp]; - if (xs == NULL) { - continue; + isp->isp_nactive = 0; + + isp_clear_commands(isp); + if (IS_FC(isp)) { + for (i = 0; i < isp->isp_nchan; i++) { + ISP_MARK_PORTDB(isp, i, -1); } - handle = isp_find_handle(isp, xs); - if (handle == 0) { - continue; - } - isp_destroy_handle(isp, handle); - if (XS_XFRLEN(xs)) { - ISP_DMAFREE(isp, xs, handle); - XS_RESID(xs) = XS_XFRLEN(xs); - } else { - XS_RESID(xs) = 0; - } - XS_SETERR(xs, HBA_BUSRESET); - isp_done(xs); } -#ifdef ISP_TARGET_MODE - MEMZERO(isp->isp_tgtlist, isp->isp_maxcmds * sizeof (void **)); -#endif } /* * NVRAM Routines */ static int -isp_read_nvram(ispsoftc_t *isp) +isp_read_nvram(ispsoftc_t *isp, int bus) { int i, amt, retval; uint8_t csum, minversion; union { - uint8_t _x[ISP2100_NVRAM_SIZE]; - uint16_t _s[ISP2100_NVRAM_SIZE>>1]; + uint8_t _x[ISP2400_NVRAM_SIZE]; + uint16_t _s[ISP2400_NVRAM_SIZE>>1]; } _n; #define nvram_data _n._x #define nvram_words _n._s if (IS_24XX(isp)) { - return (isp_read_nvram_2400(isp)); + return (isp_read_nvram_2400(isp, nvram_data)); } else if (IS_FC(isp)) { amt = ISP2100_NVRAM_SIZE; minversion = 1; } else if (IS_ULTRA2(isp)) { amt = ISP1080_NVRAM_SIZE; minversion = 0; } else { amt = ISP_NVRAM_SIZE; minversion = 2; } for (i = 0; i < amt>>1; i++) { isp_rdnvram_word(isp, i, &nvram_words[i]); } if (nvram_data[0] != 'I' || nvram_data[1] != 'S' || nvram_data[2] != 'P') { if (isp->isp_bustype != ISP_BT_SBUS) { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM header"); isp_prt(isp, ISP_LOGDEBUG0, "%x %x %x", nvram_data[0], nvram_data[1], nvram_data[2]); } retval = -1; goto out; } for (csum = 0, i = 0; i < 
amt; i++) { csum += nvram_data[i]; } if (csum != 0) { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM checksum"); retval = -1; goto out; } if (ISP_NVRAM_VERSION(nvram_data) < minversion) { isp_prt(isp, ISP_LOGWARN, "version %d NVRAM not understood", ISP_NVRAM_VERSION(nvram_data)); retval = -1; goto out; } if (IS_ULTRA3(isp)) { - isp_parse_nvram_12160(isp, 0, nvram_data); - if (IS_12160(isp)) - isp_parse_nvram_12160(isp, 1, nvram_data); + isp_parse_nvram_12160(isp, bus, nvram_data); } else if (IS_1080(isp)) { - isp_parse_nvram_1080(isp, 0, nvram_data); + isp_parse_nvram_1080(isp, bus, nvram_data); } else if (IS_1280(isp) || IS_1240(isp)) { - isp_parse_nvram_1080(isp, 0, nvram_data); - isp_parse_nvram_1080(isp, 1, nvram_data); + isp_parse_nvram_1080(isp, bus, nvram_data); } else if (IS_SCSI(isp)) { isp_parse_nvram_1020(isp, nvram_data); } else { isp_parse_nvram_2100(isp, nvram_data); } retval = 0; out: return (retval); #undef nvram_data #undef nvram_words } static int -isp_read_nvram_2400(ispsoftc_t *isp) +isp_read_nvram_2400(ispsoftc_t *isp, uint8_t *nvram_data) { - uint8_t *nvram_data = FCPARAM(isp)->isp_scratch; int retval = 0; uint32_t addr, csum, lwrds, *dptr; - + if (isp->isp_port) { addr = ISP2400_NVRAM_PORT1_ADDR; } else { addr = ISP2400_NVRAM_PORT0_ADDR; } - + dptr = (uint32_t *) nvram_data; for (lwrds = 0; lwrds < ISP2400_NVRAM_SIZE >> 2; lwrds++) { isp_rd_2400_nvram(isp, addr++, dptr++); } if (nvram_data[0] != 'I' || nvram_data[1] != 'S' || nvram_data[2] != 'P') { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM header (%x %x %x)", nvram_data[0], nvram_data[1], nvram_data[2]); retval = -1; goto out; } dptr = (uint32_t *) nvram_data; for (csum = 0, lwrds = 0; lwrds < ISP2400_NVRAM_SIZE >> 2; lwrds++) { uint32_t tmp; ISP_IOXGET_32(isp, &dptr[lwrds], tmp); csum += tmp; } if (csum != 0) { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM checksum"); retval = -1; goto out; } isp_parse_nvram_2400(isp, nvram_data); out: return (retval); } static void isp_rdnvram_word(ispsoftc_t *isp, 
int wo, uint16_t *rp) { int i, cbits; uint16_t bit, rqst, junk; ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT); - USEC_DELAY(10); + ISP_DELAY(10); ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT|BIU_NVRAM_CLOCK); - USEC_DELAY(10); + ISP_DELAY(10); if (IS_FC(isp)) { - wo &= ((ISP2100_NVRAM_SIZE >> 1) - 1); + wo &= ((ISP2100_NVRAM_SIZE >> 1) - 1); if (IS_2312(isp) && isp->isp_port) { wo += 128; } rqst = (ISP_NVRAM_READ << 8) | wo; cbits = 10; } else if (IS_ULTRA2(isp)) { wo &= ((ISP1080_NVRAM_SIZE >> 1) - 1); rqst = (ISP_NVRAM_READ << 8) | wo; cbits = 10; } else { wo &= ((ISP_NVRAM_SIZE >> 1) - 1); rqst = (ISP_NVRAM_READ << 6) | wo; cbits = 8; } /* * Clock the word select request out... */ for (i = cbits; i >= 0; i--) { if ((rqst >> i) & 1) { bit = BIU_NVRAM_SELECT | BIU_NVRAM_DATAOUT; } else { bit = BIU_NVRAM_SELECT; } ISP_WRITE(isp, BIU_NVRAM, bit); - USEC_DELAY(10); + ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ ISP_WRITE(isp, BIU_NVRAM, bit | BIU_NVRAM_CLOCK); - USEC_DELAY(10); + ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ ISP_WRITE(isp, BIU_NVRAM, bit); - USEC_DELAY(10); + ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ } /* * Now read the result back in (bits come back in MSB format). 
*/ *rp = 0; for (i = 0; i < 16; i++) { uint16_t rv; *rp <<= 1; ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT|BIU_NVRAM_CLOCK); - USEC_DELAY(10); + ISP_DELAY(10); rv = ISP_READ(isp, BIU_NVRAM); if (rv & BIU_NVRAM_DATAIN) { *rp |= 1; } - USEC_DELAY(10); + ISP_DELAY(10); ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT); - USEC_DELAY(10); + ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ } ISP_WRITE(isp, BIU_NVRAM, 0); - USEC_DELAY(10); + ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ ISP_SWIZZLE_NVRAM_WORD(isp, rp); } static void isp_rd_2400_nvram(ispsoftc_t *isp, uint32_t addr, uint32_t *rp) { int loops = 0; - const uint32_t base = 0x7ffe0000; + uint32_t base = 0x7ffe0000; uint32_t tmp = 0; + if (IS_25XX(isp)) { + base = 0x7ff00000 | 0x48000; + } ISP_WRITE(isp, BIU2400_FLASH_ADDR, base | addr); for (loops = 0; loops < 5000; loops++) { - USEC_DELAY(10); + ISP_DELAY(10); tmp = ISP_READ(isp, BIU2400_FLASH_ADDR); if ((tmp & (1U << 31)) != 0) { break; } } if (tmp & (1U << 31)) { *rp = ISP_READ(isp, BIU2400_FLASH_DATA); ISP_SWIZZLE_NVRAM_LONG(isp, rp); } else { *rp = 0xffffffff; } } static void isp_parse_nvram_1020(ispsoftc_t *isp, uint8_t *nvram_data) { - sdparam *sdp = (sdparam *) isp->isp_param; + sdparam *sdp = SDPARAM(isp, 0); int tgt; sdp->isp_fifo_threshold = ISP_NVRAM_FIFO_THRESHOLD(nvram_data) | (ISP_NVRAM_FIFO_THRESHOLD_128(nvram_data) << 2); if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) sdp->isp_initiator_id = ISP_NVRAM_INITIATOR_ID(nvram_data); sdp->isp_bus_reset_delay = ISP_NVRAM_BUS_RESET_DELAY(nvram_data); sdp->isp_retry_count = ISP_NVRAM_BUS_RETRY_COUNT(nvram_data); sdp->isp_retry_delay = ISP_NVRAM_BUS_RETRY_DELAY(nvram_data); sdp->isp_async_data_setup = ISP_NVRAM_ASYNC_DATA_SETUP_TIME(nvram_data); if (isp->isp_type >= ISP_HA_SCSI_1040) { if (sdp->isp_async_data_setup < 9) { sdp->isp_async_data_setup = 9; } } else { if (sdp->isp_async_data_setup != 6) { sdp->isp_async_data_setup = 6; } } sdp->isp_req_ack_active_neg = 
ISP_NVRAM_REQ_ACK_ACTIVE_NEGATION(nvram_data); sdp->isp_data_line_active_neg = ISP_NVRAM_DATA_LINE_ACTIVE_NEGATION(nvram_data); sdp->isp_data_dma_burst_enabl = ISP_NVRAM_DATA_DMA_BURST_ENABLE(nvram_data); sdp->isp_cmd_dma_burst_enable = ISP_NVRAM_CMD_DMA_BURST_ENABLE(nvram_data); sdp->isp_tag_aging = ISP_NVRAM_TAG_AGE_LIMIT(nvram_data); sdp->isp_selection_timeout = ISP_NVRAM_SELECTION_TIMEOUT(nvram_data); sdp->isp_max_queue_depth = ISP_NVRAM_MAX_QUEUE_DEPTH(nvram_data); sdp->isp_fast_mttr = ISP_NVRAM_FAST_MTTR_ENABLE(nvram_data); - isp_prt(isp, ISP_LOGDEBUG0, sc0, sc4, - 0, sdp->isp_fifo_threshold, sdp->isp_initiator_id, - sdp->isp_bus_reset_delay, sdp->isp_retry_count, - sdp->isp_retry_delay, sdp->isp_async_data_setup); - isp_prt(isp, ISP_LOGDEBUG0, sc1, sc4, - sdp->isp_req_ack_active_neg, sdp->isp_data_line_active_neg, - sdp->isp_data_dma_burst_enabl, sdp->isp_cmd_dma_burst_enable, - sdp->isp_selection_timeout, sdp->isp_max_queue_depth); - for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].dev_enable = ISP_NVRAM_TGT_DEVICE_ENABLE(nvram_data, tgt); sdp->isp_devparam[tgt].exc_throttle = ISP_NVRAM_TGT_EXEC_THROTTLE(nvram_data, tgt); sdp->isp_devparam[tgt].nvrm_offset = ISP_NVRAM_TGT_SYNC_OFFSET(nvram_data, tgt); sdp->isp_devparam[tgt].nvrm_period = ISP_NVRAM_TGT_SYNC_PERIOD(nvram_data, tgt); /* * We probably shouldn't lie about this, but it * it makes it much safer if we limit NVRAM values * to sanity. */ if (isp->isp_type < ISP_HA_SCSI_1040) { /* * If we're not ultra, we can't possibly * be a shorter period than this. 
*/ if (sdp->isp_devparam[tgt].nvrm_period < 0x19) { sdp->isp_devparam[tgt].nvrm_period = 0x19; } if (sdp->isp_devparam[tgt].nvrm_offset > 0xc) { sdp->isp_devparam[tgt].nvrm_offset = 0x0c; } } else { if (sdp->isp_devparam[tgt].nvrm_offset > 0x8) { sdp->isp_devparam[tgt].nvrm_offset = 0x8; } } sdp->isp_devparam[tgt].nvrm_flags = 0; if (ISP_NVRAM_TGT_RENEG(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].nvrm_flags |= DPARM_ARQ; if (ISP_NVRAM_TGT_TQING(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_TQING; if (ISP_NVRAM_TGT_SYNC(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_SYNC; if (ISP_NVRAM_TGT_WIDE(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_WIDE; if (ISP_NVRAM_TGT_PARITY(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_PARITY; if (ISP_NVRAM_TGT_DISC(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_DISC; sdp->isp_devparam[tgt].actv_flags = 0; /* we don't know */ - isp_prt(isp, ISP_LOGDEBUG0, sc2, sc4, - 0, tgt, sdp->isp_devparam[tgt].nvrm_flags, - sdp->isp_devparam[tgt].nvrm_offset, - sdp->isp_devparam[tgt].nvrm_period); sdp->isp_devparam[tgt].goal_offset = sdp->isp_devparam[tgt].nvrm_offset; sdp->isp_devparam[tgt].goal_period = sdp->isp_devparam[tgt].nvrm_period; sdp->isp_devparam[tgt].goal_flags = sdp->isp_devparam[tgt].nvrm_flags; } } static void isp_parse_nvram_1080(ispsoftc_t *isp, int bus, uint8_t *nvram_data) { - sdparam *sdp = (sdparam *) isp->isp_param; + sdparam *sdp = SDPARAM(isp, bus); int tgt; - sdp += bus; - sdp->isp_fifo_threshold = ISP1080_NVRAM_FIFO_THRESHOLD(nvram_data); if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) sdp->isp_initiator_id = ISP1080_NVRAM_INITIATOR_ID(nvram_data, bus); sdp->isp_bus_reset_delay = ISP1080_NVRAM_BUS_RESET_DELAY(nvram_data, bus); sdp->isp_retry_count = ISP1080_NVRAM_BUS_RETRY_COUNT(nvram_data, bus); sdp->isp_retry_delay = ISP1080_NVRAM_BUS_RETRY_DELAY(nvram_data, bus); sdp->isp_async_data_setup = 
ISP1080_NVRAM_ASYNC_DATA_SETUP_TIME(nvram_data, bus); sdp->isp_req_ack_active_neg = ISP1080_NVRAM_REQ_ACK_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_line_active_neg = ISP1080_NVRAM_DATA_LINE_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_dma_burst_enabl = ISP1080_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_cmd_dma_burst_enable = ISP1080_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_selection_timeout = ISP1080_NVRAM_SELECTION_TIMEOUT(nvram_data, bus); sdp->isp_max_queue_depth = ISP1080_NVRAM_MAX_QUEUE_DEPTH(nvram_data, bus); - isp_prt(isp, ISP_LOGDEBUG0, sc0, sc4, - bus, sdp->isp_fifo_threshold, sdp->isp_initiator_id, - sdp->isp_bus_reset_delay, sdp->isp_retry_count, - sdp->isp_retry_delay, sdp->isp_async_data_setup); - isp_prt(isp, ISP_LOGDEBUG0, sc1, sc4, - sdp->isp_req_ack_active_neg, sdp->isp_data_line_active_neg, - sdp->isp_data_dma_burst_enabl, sdp->isp_cmd_dma_burst_enable, - sdp->isp_selection_timeout, sdp->isp_max_queue_depth); - - for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].dev_enable = ISP1080_NVRAM_TGT_DEVICE_ENABLE(nvram_data, tgt, bus); sdp->isp_devparam[tgt].exc_throttle = ISP1080_NVRAM_TGT_EXEC_THROTTLE(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_offset = ISP1080_NVRAM_TGT_SYNC_OFFSET(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_period = ISP1080_NVRAM_TGT_SYNC_PERIOD(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_flags = 0; if (ISP1080_NVRAM_TGT_RENEG(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].nvrm_flags |= DPARM_ARQ; if (ISP1080_NVRAM_TGT_TQING(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_TQING; if (ISP1080_NVRAM_TGT_SYNC(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_SYNC; if (ISP1080_NVRAM_TGT_WIDE(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_WIDE; if (ISP1080_NVRAM_TGT_PARITY(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_PARITY; if (ISP1080_NVRAM_TGT_DISC(nvram_data, tgt, bus)) 
sdp->isp_devparam[tgt].nvrm_flags |= DPARM_DISC; sdp->isp_devparam[tgt].actv_flags = 0; - isp_prt(isp, ISP_LOGDEBUG0, sc2, sc4, - bus, tgt, sdp->isp_devparam[tgt].nvrm_flags, - sdp->isp_devparam[tgt].nvrm_offset, - sdp->isp_devparam[tgt].nvrm_period); sdp->isp_devparam[tgt].goal_offset = sdp->isp_devparam[tgt].nvrm_offset; sdp->isp_devparam[tgt].goal_period = sdp->isp_devparam[tgt].nvrm_period; sdp->isp_devparam[tgt].goal_flags = sdp->isp_devparam[tgt].nvrm_flags; } } static void isp_parse_nvram_12160(ispsoftc_t *isp, int bus, uint8_t *nvram_data) { - sdparam *sdp = (sdparam *) isp->isp_param; + sdparam *sdp = SDPARAM(isp, bus); int tgt; - sdp += bus; - sdp->isp_fifo_threshold = ISP12160_NVRAM_FIFO_THRESHOLD(nvram_data); if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) sdp->isp_initiator_id = ISP12160_NVRAM_INITIATOR_ID(nvram_data, bus); sdp->isp_bus_reset_delay = ISP12160_NVRAM_BUS_RESET_DELAY(nvram_data, bus); sdp->isp_retry_count = ISP12160_NVRAM_BUS_RETRY_COUNT(nvram_data, bus); sdp->isp_retry_delay = ISP12160_NVRAM_BUS_RETRY_DELAY(nvram_data, bus); sdp->isp_async_data_setup = ISP12160_NVRAM_ASYNC_DATA_SETUP_TIME(nvram_data, bus); sdp->isp_req_ack_active_neg = ISP12160_NVRAM_REQ_ACK_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_line_active_neg = ISP12160_NVRAM_DATA_LINE_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_dma_burst_enabl = ISP12160_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_cmd_dma_burst_enable = ISP12160_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_selection_timeout = ISP12160_NVRAM_SELECTION_TIMEOUT(nvram_data, bus); sdp->isp_max_queue_depth = ISP12160_NVRAM_MAX_QUEUE_DEPTH(nvram_data, bus); - isp_prt(isp, ISP_LOGDEBUG0, sc0, sc4, - bus, sdp->isp_fifo_threshold, sdp->isp_initiator_id, - sdp->isp_bus_reset_delay, sdp->isp_retry_count, - sdp->isp_retry_delay, sdp->isp_async_data_setup); - isp_prt(isp, ISP_LOGDEBUG0, sc1, sc4, - sdp->isp_req_ack_active_neg, sdp->isp_data_line_active_neg, - sdp->isp_data_dma_burst_enabl, sdp->isp_cmd_dma_burst_enable, - 
sdp->isp_selection_timeout, sdp->isp_max_queue_depth); - for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].dev_enable = ISP12160_NVRAM_TGT_DEVICE_ENABLE(nvram_data, tgt, bus); sdp->isp_devparam[tgt].exc_throttle = ISP12160_NVRAM_TGT_EXEC_THROTTLE(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_offset = ISP12160_NVRAM_TGT_SYNC_OFFSET(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_period = ISP12160_NVRAM_TGT_SYNC_PERIOD(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_flags = 0; if (ISP12160_NVRAM_TGT_RENEG(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].nvrm_flags |= DPARM_ARQ; if (ISP12160_NVRAM_TGT_TQING(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_TQING; if (ISP12160_NVRAM_TGT_SYNC(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_SYNC; if (ISP12160_NVRAM_TGT_WIDE(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_WIDE; if (ISP12160_NVRAM_TGT_PARITY(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_PARITY; if (ISP12160_NVRAM_TGT_DISC(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_DISC; sdp->isp_devparam[tgt].actv_flags = 0; - isp_prt(isp, ISP_LOGDEBUG0, sc2, sc4, - bus, tgt, sdp->isp_devparam[tgt].nvrm_flags, - sdp->isp_devparam[tgt].nvrm_offset, - sdp->isp_devparam[tgt].nvrm_period); sdp->isp_devparam[tgt].goal_offset = sdp->isp_devparam[tgt].nvrm_offset; sdp->isp_devparam[tgt].goal_period = sdp->isp_devparam[tgt].nvrm_period; sdp->isp_devparam[tgt].goal_flags = sdp->isp_devparam[tgt].nvrm_flags; } } static void -isp_fix_nvram_wwns(ispsoftc_t *isp) -{ - fcparam *fcp = FCPARAM(isp); - - /* - * Make sure we have both Node and Port as non-zero values. 
- */ - if (fcp->isp_wwnn_nvram != 0 && fcp->isp_wwpn_nvram == 0) { - fcp->isp_wwpn_nvram = fcp->isp_wwnn_nvram; - } else if (fcp->isp_wwnn_nvram == 0 && fcp->isp_wwpn_nvram != 0) { - fcp->isp_wwnn_nvram = fcp->isp_wwpn_nvram; - } - - /* - * Make the Node and Port values sane if they're NAA == 2. - * This means to clear bits 48..56 for the Node WWN and - * make sure that there's some non-zero value in 48..56 - * for the Port WWN. - */ - if (fcp->isp_wwnn_nvram && fcp->isp_wwpn_nvram) { - if ((fcp->isp_wwnn_nvram & (((uint64_t) 0xfff) << 48)) != 0 && - (fcp->isp_wwnn_nvram >> 60) == 2) { - fcp->isp_wwnn_nvram &= ~((uint64_t) 0xfff << 48); - } - if ((fcp->isp_wwpn_nvram & (((uint64_t) 0xfff) << 48)) == 0 && - (fcp->isp_wwpn_nvram >> 60) == 2) { - fcp->isp_wwpn_nvram |= ((uint64_t) 1 << 56); - } - } -} - -static void isp_parse_nvram_2100(ispsoftc_t *isp, uint8_t *nvram_data) { - fcparam *fcp = FCPARAM(isp); + fcparam *fcp = FCPARAM(isp, 0); uint64_t wwn; /* * There is NVRAM storage for both Port and Node entities- * but the Node entity appears to be unused on all the cards * I can find. However, we should account for this being set * at some point in the future. * * Qlogic WWNs have an NAA of 2, but usually nothing shows up in * bits 48..60. In the case of the 2202, it appears that they do * use bit 48 to distinguish between the two instances on the card. * The 2204, which I've never seen, *probably* extends this method. 
*/ wwn = ISP2100_NVRAM_PORT_NAME(nvram_data); if (wwn) { isp_prt(isp, ISP_LOGCONFIG, "NVRAM Port WWN 0x%08x%08x", - (uint32_t) (wwn >> 32), (uint32_t) (wwn & 0xffffffff)); + (uint32_t) (wwn >> 32), (uint32_t) (wwn)); if ((wwn >> 60) == 0) { wwn |= (((uint64_t) 2)<< 60); } } fcp->isp_wwpn_nvram = wwn; if (IS_2200(isp) || IS_23XX(isp)) { wwn = ISP2100_NVRAM_NODE_NAME(nvram_data); if (wwn) { isp_prt(isp, ISP_LOGCONFIG, "NVRAM Node WWN 0x%08x%08x", (uint32_t) (wwn >> 32), - (uint32_t) (wwn & 0xffffffff)); + (uint32_t) (wwn)); if ((wwn >> 60) == 0) { wwn |= (((uint64_t) 2)<< 60); } } } else { wwn &= ~((uint64_t) 0xfff << 48); } fcp->isp_wwnn_nvram = wwn; - isp_fix_nvram_wwns(isp); - fcp->isp_maxalloc = ISP2100_NVRAM_MAXIOCBALLOCATION(nvram_data); if ((isp->isp_confopts & ISP_CFG_OWNFSZ) == 0) { - fcp->isp_maxfrmlen = ISP2100_NVRAM_MAXFRAMELENGTH(nvram_data); + DEFAULT_FRAMESIZE(isp) = + ISP2100_NVRAM_MAXFRAMELENGTH(nvram_data); } fcp->isp_retry_delay = ISP2100_NVRAM_RETRY_DELAY(nvram_data); fcp->isp_retry_count = ISP2100_NVRAM_RETRY_COUNT(nvram_data); if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) { fcp->isp_loopid = ISP2100_NVRAM_HARDLOOPID(nvram_data); } if ((isp->isp_confopts & ISP_CFG_OWNEXCTHROTTLE) == 0) { - fcp->isp_execthrottle = + DEFAULT_EXEC_THROTTLE(isp) = ISP2100_NVRAM_EXECUTION_THROTTLE(nvram_data); } fcp->isp_fwoptions = ISP2100_NVRAM_OPTIONS(nvram_data); isp_prt(isp, ISP_LOGDEBUG0, "NVRAM 0x%08x%08x 0x%08x%08x maxalloc %d maxframelen %d", - (uint32_t) (fcp->isp_wwnn_nvram >> 32), (uint32_t) fcp->isp_wwnn_nvram, - (uint32_t) (fcp->isp_wwpn_nvram >> 32), (uint32_t) fcp->isp_wwpn_nvram, + (uint32_t) (fcp->isp_wwnn_nvram >> 32), + (uint32_t) fcp->isp_wwnn_nvram, + (uint32_t) (fcp->isp_wwpn_nvram >> 32), + (uint32_t) fcp->isp_wwpn_nvram, ISP2100_NVRAM_MAXIOCBALLOCATION(nvram_data), ISP2100_NVRAM_MAXFRAMELENGTH(nvram_data)); isp_prt(isp, ISP_LOGDEBUG0, "execthrottle %d fwoptions 0x%x hardloop %d tov %d", ISP2100_NVRAM_EXECUTION_THROTTLE(nvram_data), 
ISP2100_NVRAM_OPTIONS(nvram_data), ISP2100_NVRAM_HARDLOOPID(nvram_data), ISP2100_NVRAM_TOV(nvram_data)); fcp->isp_xfwoptions = ISP2100_XFW_OPTIONS(nvram_data); fcp->isp_zfwoptions = ISP2100_ZFW_OPTIONS(nvram_data); isp_prt(isp, ISP_LOGDEBUG0, "xfwoptions 0x%x zfw options 0x%x", ISP2100_XFW_OPTIONS(nvram_data), ISP2100_ZFW_OPTIONS(nvram_data)); } static void isp_parse_nvram_2400(ispsoftc_t *isp, uint8_t *nvram_data) { - fcparam *fcp = FCPARAM(isp); + fcparam *fcp = FCPARAM(isp, 0); uint64_t wwn; isp_prt(isp, ISP_LOGDEBUG0, "NVRAM 0x%08x%08x 0x%08x%08x exchg_cnt %d maxframelen %d", (uint32_t) (ISP2400_NVRAM_NODE_NAME(nvram_data) >> 32), (uint32_t) (ISP2400_NVRAM_NODE_NAME(nvram_data)), (uint32_t) (ISP2400_NVRAM_PORT_NAME(nvram_data) >> 32), (uint32_t) (ISP2400_NVRAM_PORT_NAME(nvram_data)), ISP2400_NVRAM_EXCHANGE_COUNT(nvram_data), ISP2400_NVRAM_MAXFRAMELENGTH(nvram_data)); isp_prt(isp, ISP_LOGDEBUG0, "NVRAM execthr %d loopid %d fwopt1 0x%x fwopt2 0x%x fwopt3 0x%x", ISP2400_NVRAM_EXECUTION_THROTTLE(nvram_data), ISP2400_NVRAM_HARDLOOPID(nvram_data), ISP2400_NVRAM_FIRMWARE_OPTIONS1(nvram_data), ISP2400_NVRAM_FIRMWARE_OPTIONS2(nvram_data), ISP2400_NVRAM_FIRMWARE_OPTIONS3(nvram_data)); wwn = ISP2400_NVRAM_PORT_NAME(nvram_data); if (wwn) { if ((wwn >> 60) != 2 && (wwn >> 60) != 5) { wwn = 0; } } fcp->isp_wwpn_nvram = wwn; wwn = ISP2400_NVRAM_NODE_NAME(nvram_data); if (wwn) { if ((wwn >> 60) != 2 && (wwn >> 60) != 5) { wwn = 0; } } fcp->isp_wwnn_nvram = wwn; - isp_fix_nvram_wwns(isp); - if (ISP2400_NVRAM_EXCHANGE_COUNT(nvram_data)) { fcp->isp_maxalloc = ISP2400_NVRAM_EXCHANGE_COUNT(nvram_data); } if ((isp->isp_confopts & ISP_CFG_OWNFSZ) == 0) { - fcp->isp_maxfrmlen = ISP2400_NVRAM_MAXFRAMELENGTH(nvram_data); + DEFAULT_FRAMESIZE(isp) = + ISP2400_NVRAM_MAXFRAMELENGTH(nvram_data); } if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) { fcp->isp_loopid = ISP2400_NVRAM_HARDLOOPID(nvram_data); } if ((isp->isp_confopts & ISP_CFG_OWNEXCTHROTTLE) == 0) { - fcp->isp_execthrottle = + 
DEFAULT_EXEC_THROTTLE(isp) = ISP2400_NVRAM_EXECUTION_THROTTLE(nvram_data); } fcp->isp_fwoptions = ISP2400_NVRAM_FIRMWARE_OPTIONS1(nvram_data); fcp->isp_xfwoptions = ISP2400_NVRAM_FIRMWARE_OPTIONS2(nvram_data); fcp->isp_zfwoptions = ISP2400_NVRAM_FIRMWARE_OPTIONS3(nvram_data); } - -#ifdef ISP_FW_CRASH_DUMP -static void isp2200_fw_dump(ispsoftc_t *); -static void isp2300_fw_dump(ispsoftc_t *); - -static void -isp2200_fw_dump(ispsoftc_t *isp) -{ - int i, j; - mbreg_t mbs; - uint16_t *ptr; - - MEMZERO(&mbs, sizeof (mbs)); - ptr = FCPARAM(isp)->isp_dump_data; - if (ptr == NULL) { - isp_prt(isp, ISP_LOGERR, - "No place to dump RISC registers and SRAM"); - return; - } - if (*ptr++) { - isp_prt(isp, ISP_LOGERR, - "dump area for RISC registers and SRAM already used"); - return; - } - ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); - for (i = 0; i < 100; i++) { - USEC_DELAY(100); - if (ISP_READ(isp, HCCR) & HCCR_PAUSE) { - break; - } - } - if (ISP_READ(isp, HCCR) & HCCR_PAUSE) { - /* - * PBIU Registers - */ - for (i = 0; i < 8; i++) { - *ptr++ = ISP_READ(isp, BIU_BLOCK + (i << 1)); - } - - /* - * Mailbox Registers - */ - for (i = 0; i < 8; i++) { - *ptr++ = ISP_READ(isp, MBOX_BLOCK + (i << 1)); - } - - /* - * DMA Registers - */ - for (i = 0; i < 48; i++) { - *ptr++ = ISP_READ(isp, DMA_BLOCK + 0x20 + (i << 1)); - } - - /* - * RISC H/W Registers - */ - ISP_WRITE(isp, BIU2100_CSR, 0); - for (i = 0; i < 16; i++) { - *ptr++ = ISP_READ(isp, BIU_BLOCK + 0xA0 + (i << 1)); - } - - /* - * RISC GP Registers - */ - for (j = 0; j < 8; j++) { - ISP_WRITE(isp, BIU_BLOCK + 0xA4, 0x2000 + (j << 8)); - for (i = 0; i < 16; i++) { - *ptr++ = - ISP_READ(isp, BIU_BLOCK + 0x80 + (i << 1)); - } - } - - /* - * Frame Buffer Hardware Registers - */ - ISP_WRITE(isp, BIU2100_CSR, 0x10); - for (i = 0; i < 16; i++) { - *ptr++ = ISP_READ(isp, BIU_BLOCK + 0x80 + (i << 1)); - } - - /* - * Fibre Protocol Module 0 Hardware Registers - */ - ISP_WRITE(isp, BIU2100_CSR, 0x20); - for (i = 0; i < 64; i++) { - *ptr++ = 
ISP_READ(isp, BIU_BLOCK + 0x80 + (i << 1)); - } - - /* - * Fibre Protocol Module 1 Hardware Registers - */ - ISP_WRITE(isp, BIU2100_CSR, 0x30); - for (i = 0; i < 64; i++) { - *ptr++ = ISP_READ(isp, BIU_BLOCK + 0x80 + (i << 1)); - } - } else { - isp_prt(isp, ISP_LOGERR, "RISC Would Not Pause"); - return; - } - isp_prt(isp, ISP_LOGALL, - "isp_fw_dump: RISC registers dumped successfully"); - ISP_WRITE(isp, BIU2100_CSR, BIU2100_SOFT_RESET); - for (i = 0; i < 100; i++) { - USEC_DELAY(100); - if (ISP_READ(isp, OUTMAILBOX0) == 0) { - break; - } - } - if (ISP_READ(isp, OUTMAILBOX0) != 0) { - isp_prt(isp, ISP_LOGERR, "Board Would Not Reset"); - return; - } - ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); - for (i = 0; i < 100; i++) { - USEC_DELAY(100); - if (ISP_READ(isp, HCCR) & HCCR_PAUSE) { - break; - } - } - if ((ISP_READ(isp, HCCR) & HCCR_PAUSE) == 0) { - isp_prt(isp, ISP_LOGERR, "RISC Would Not Pause After Reset"); - return; - } - ISP_WRITE(isp, RISC_EMB, 0xf2); - ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); - for (i = 0; i < 100; i++) { - USEC_DELAY(100); - if ((ISP_READ(isp, HCCR) & HCCR_PAUSE) == 0) { - break; - } - } - ISP_ENABLE_INTS(isp); - mbs.param[0] = MBOX_READ_RAM_WORD; - mbs.param[1] = 0x1000; - isp->isp_mbxworkp = (void *) ptr; - isp->isp_mbxwrk0 = 0xefff; /* continuation count */ - isp->isp_mbxwrk1 = 0x1001; /* next SRAM address */ - isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); - if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { - isp_prt(isp, ISP_LOGWARN, - "RAM DUMP FAILED @ WORD %x", isp->isp_mbxwrk1); - return; - } - ptr = isp->isp_mbxworkp; /* finish fetch of final word */ - *ptr++ = isp->isp_mboxtmp[2]; - isp_prt(isp, ISP_LOGALL, "isp_fw_dump: SRAM dumped successfully"); - FCPARAM(isp)->isp_dump_data[0] = isp->isp_type; /* now used */ - (void) isp_async(isp, ISPASYNC_FW_DUMPED, 0); -} - -static void -isp2300_fw_dump(ispsoftc_t *isp) -{ - int i, j; - mbreg_t mbs; - uint16_t *ptr; - - MEMZERO(&mbs, sizeof (mbs)); - ptr = FCPARAM(isp)->isp_dump_data; - if (ptr == NULL) { 
- isp_prt(isp, ISP_LOGERR, - "No place to dump RISC registers and SRAM"); - return; - } - if (*ptr++) { - isp_prt(isp, ISP_LOGERR, - "dump area for RISC registers and SRAM already used"); - return; - } - ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); - for (i = 0; i < 100; i++) { - USEC_DELAY(100); - if (ISP_READ(isp, HCCR) & HCCR_PAUSE) { - break; - } - } - if (ISP_READ(isp, HCCR) & HCCR_PAUSE) { - /* - * PBIU registers - */ - for (i = 0; i < 8; i++) { - *ptr++ = ISP_READ(isp, BIU_BLOCK + (i << 1)); - } - - /* - * ReqQ-RspQ-Risc2Host Status registers - */ - for (i = 0; i < 8; i++) { - *ptr++ = ISP_READ(isp, BIU_BLOCK + 0x10 + (i << 1)); - } - - /* - * Mailbox Registers - */ - for (i = 0; i < 32; i++) { - *ptr++ = - ISP_READ(isp, PCI_MBOX_REGS2300_OFF + (i << 1)); - } - - /* - * Auto Request Response DMA registers - */ - ISP_WRITE(isp, BIU2100_CSR, 0x40); - for (i = 0; i < 32; i++) { - *ptr++ = ISP_READ(isp, BIU_BLOCK + 0x80 + (i << 1)); - } - - /* - * DMA registers - */ - ISP_WRITE(isp, BIU2100_CSR, 0x50); - for (i = 0; i < 48; i++) { - *ptr++ = ISP_READ(isp, BIU_BLOCK + 0x80 + (i << 1)); - } - - /* - * RISC hardware registers - */ - ISP_WRITE(isp, BIU2100_CSR, 0); - for (i = 0; i < 16; i++) { - *ptr++ = ISP_READ(isp, BIU_BLOCK + 0xA0 + (i << 1)); - } - - /* - * RISC GP? 
registers - */ - for (j = 0; j < 8; j++) { - ISP_WRITE(isp, BIU_BLOCK + 0xA4, 0x2000 + (j << 9)); - for (i = 0; i < 16; i++) { - *ptr++ = - ISP_READ(isp, BIU_BLOCK + 0x80 + (i << 1)); - } - } - - /* - * frame buffer hardware registers - */ - ISP_WRITE(isp, BIU2100_CSR, 0x10); - for (i = 0; i < 64; i++) { - *ptr++ = ISP_READ(isp, BIU_BLOCK + 0x80 + (i << 1)); - } - - /* - * FPM B0 hardware registers - */ - ISP_WRITE(isp, BIU2100_CSR, 0x20); - for (i = 0; i < 64; i++) { - *ptr++ = ISP_READ(isp, BIU_BLOCK + 0x80 + (i << 1)); - } - - /* - * FPM B1 hardware registers - */ - ISP_WRITE(isp, BIU2100_CSR, 0x30); - for (i = 0; i < 64; i++) { - *ptr++ = ISP_READ(isp, BIU_BLOCK + 0x80 + (i << 1)); - } - } else { - isp_prt(isp, ISP_LOGERR, "RISC Would Not Pause"); - return; - } - isp_prt(isp, ISP_LOGALL, - "isp_fw_dump: RISC registers dumped successfully"); - ISP_WRITE(isp, BIU2100_CSR, BIU2100_SOFT_RESET); - for (i = 0; i < 100; i++) { - USEC_DELAY(100); - if (ISP_READ(isp, OUTMAILBOX0) == 0) { - break; - } - } - if (ISP_READ(isp, OUTMAILBOX0) != 0) { - isp_prt(isp, ISP_LOGERR, "Board Would Not Reset"); - return; - } - ISP_ENABLE_INTS(isp); - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_READ_RAM_WORD; - mbs.param[1] = 0x800; - isp->isp_mbxworkp = (void *) ptr; - isp->isp_mbxwrk0 = 0xf7ff; /* continuation count */ - isp->isp_mbxwrk1 = 0x801; /* next SRAM address */ - isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); - if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { - isp_prt(isp, ISP_LOGWARN, - "RAM DUMP FAILED @ WORD %x", isp->isp_mbxwrk1); - return; - } - ptr = isp->isp_mbxworkp; /* finish fetch of final word */ - *ptr++ = isp->isp_mboxtmp[2]; - MEMZERO(&mbs, sizeof (mbs)); - mbs.param[0] = MBOX_READ_RAM_WORD_EXTENDED; - mbs.param[8] = 1; - isp->isp_mbxworkp = (void *) ptr; - isp->isp_mbxwrk0 = 0xffff; /* continuation count */ - isp->isp_mbxwrk1 = 0x1; /* next SRAM address */ - isp->isp_mbxwrk8 = 0x1; - isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); - if (mbs.param[0] != 
MBOX_COMMAND_COMPLETE) { - isp_prt(isp, ISP_LOGWARN, - "RAM DUMP FAILED @ WORD %x", 0x10000 + isp->isp_mbxwrk1); - return; - } - ptr = isp->isp_mbxworkp; /* finish final word */ - *ptr++ = mbs.param[2]; - isp_prt(isp, ISP_LOGALL, "isp_fw_dump: SRAM dumped successfully"); - FCPARAM(isp)->isp_dump_data[0] = isp->isp_type; /* now used */ - (void) isp_async(isp, ISPASYNC_FW_DUMPED, 0); -} - -void -isp_fw_dump(ispsoftc_t *isp) -{ - if (IS_2200(isp)) - isp2200_fw_dump(isp); - else if (IS_23XX(isp)) - isp2300_fw_dump(isp); - else if (IS_24XX(isp)) - isp_prt(isp, ISP_LOGERR, "24XX dump method undefined"); - -} -#endif Index: head/sys/dev/isp/isp_freebsd.c =================================================================== --- head/sys/dev/isp/isp_freebsd.c (revision 196007) +++ head/sys/dev/isp/isp_freebsd.c (revision 196008) @@ -1,3596 +1,5515 @@ /*- - * Copyright (c) 1997-2006 by Matthew Jacob + * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. */ #include __FBSDID("$FreeBSD$"); #include #include #include -#include /* for use by isp_prt below */ #include #include #include #include -#if __FreeBSD_version >= 500000 -#include -#else #include -#endif #include #include -#if !defined(CAM_NEW_TRAN_CODE) && __FreeBSD_version >= 700025 -#define CAM_NEW_TRAN_CODE 1 +#if __FreeBSD_version < 800002 +#define THREAD_CREATE kthread_create +#else +#define THREAD_CREATE kproc_create #endif - MODULE_VERSION(isp, 1); MODULE_DEPEND(isp, cam, 1, 1, 1); int isp_announced = 0; -int isp_fabric_hysteresis = 5; -int isp_loop_down_limit = 300; /* default loop down limit */ +int isp_fabric_hysteresis = 3; +int isp_loop_down_limit = 60; /* default loop down limit */ int isp_change_is_bad = 0; /* "changed" devices are bad */ -int isp_quickboot_time = 15; /* don't wait more than N secs for loop up */ +int isp_quickboot_time = 7; /* don't wait more than N secs for loop up */ int isp_gone_device_time = 30; /* grace time before reporting device lost */ +int isp_autoconfig = 1; /* automatically attach/detach devices */ static const char *roles[4] = { "(none)", "Target", "Initiator", "Target/Initiator" }; -static const char prom3[] = - "PortID 0x%06x Departed from Target %u because of %s"; +static const char prom3[] = "Chan %d PortID 0x%06x Departed from Target %u because of %s"; +static const char rqo[] = "%s: Request Queue Overflow\n"; -static void 
isp_freeze_loopdown(ispsoftc_t *, char *); +static void isp_freeze_loopdown(ispsoftc_t *, int, char *); static d_ioctl_t ispioctl; static void isp_intr_enable(void *); static void isp_cam_async(void *, uint32_t, struct cam_path *, void *); static void isp_poll(struct cam_sim *); static timeout_t isp_watchdog; static timeout_t isp_ldt; static void isp_kthread(void *); static void isp_action(struct cam_sim *, union ccb *); - -#if __FreeBSD_version < 700000 -ispfwfunc *isp_get_firmware_p = NULL; +#ifdef ISP_INTERNAL_TARGET +static void isp_target_thread_pi(void *); +static void isp_target_thread_fc(void *); #endif +static void isp_timer(void *); -#if __FreeBSD_version < 500000 -#define ISP_CDEV_MAJOR 248 static struct cdevsw isp_cdevsw = { - /* open */ nullopen, - /* close */ nullclose, - /* read */ noread, - /* write */ nowrite, - /* ioctl */ ispioctl, - /* poll */ nopoll, - /* mmap */ nommap, - /* strategy */ nostrategy, - /* name */ "isp", - /* maj */ ISP_CDEV_MAJOR, - /* dump */ nodump, - /* psize */ nopsize, - /* flags */ D_TAPE, -}; -#define isp_sysctl_update(x) do { ; } while (0) -#else -static struct cdevsw isp_cdevsw = { .d_version = D_VERSION, -#if __FreeBSD_version < 700037 - .d_flags = D_NEEDGIANT, -#endif .d_ioctl = ispioctl, .d_name = "isp", }; -static void isp_sysctl_update(ispsoftc_t *); -#endif -static ispsoftc_t *isplist = NULL; - -void -isp_attach(ispsoftc_t *isp) +static int +isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan) { - int primary, secondary; struct ccb_setasync csa; - struct cam_devq *devq; struct cam_sim *sim; struct cam_path *path; /* - * Establish (in case of 12X0) which bus is the primary. - */ - - primary = 0; - secondary = 1; - - /* - * Create the device queue for our SIM(s). - */ - devq = cam_simq_alloc(isp->isp_maxcmds); - if (devq == NULL) { - return; - } - - /* * Construct our SIM entry. 
*/ - sim = isp_sim_alloc(isp_action, isp_poll, "isp", isp, - device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); + sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, device_get_unit(isp->isp_dev), &isp->isp_osinfo.lock, isp->isp_maxcmds, isp->isp_maxcmds, devq); + if (sim == NULL) { - cam_simq_free(devq); - return; + return (ENOMEM); } - isp->isp_osinfo.ehook.ich_func = isp_intr_enable; - isp->isp_osinfo.ehook.ich_arg = isp; - ISP_UNLOCK(isp); - if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { - ISP_LOCK(isp); - cam_sim_free(sim, TRUE); - isp_prt(isp, ISP_LOGERR, - "could not establish interrupt enable hook"); - return; - } ISP_LOCK(isp); - - if (xpt_bus_register(sim, isp->isp_dev, primary) != CAM_SUCCESS) { - cam_sim_free(sim, TRUE); - return; + if (xpt_bus_register(sim, isp->isp_dev, chan) != CAM_SUCCESS) { + ISP_UNLOCK(isp); + cam_sim_free(sim, FALSE); + return (EIO); } + ISP_UNLOCK(isp); - if (xpt_create_path(&path, NULL, cam_sim_path(sim), - CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { + if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { + ISP_LOCK(isp); xpt_bus_deregister(cam_sim_path(sim)); - cam_sim_free(sim, TRUE); - config_intrhook_disestablish(&isp->isp_osinfo.ehook); - return; + ISP_UNLOCK(isp); + cam_sim_free(sim, FALSE); + return (ENXIO); } xpt_setup_ccb(&csa.ccb_h, path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE; csa.callback = isp_cam_async; csa.callback_arg = sim; xpt_action((union ccb *)&csa); - isp->isp_sim = sim; - isp->isp_path = path; - /* - * If we have a second channel, construct SIM entry for that. 
- */ - if (IS_DUALBUS(isp)) { - sim = isp_sim_alloc(isp_action, isp_poll, "isp", isp, - device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); - if (sim == NULL) { - xpt_bus_deregister(cam_sim_path(isp->isp_sim)); - xpt_free_path(isp->isp_path); - cam_simq_free(devq); - config_intrhook_disestablish(&isp->isp_osinfo.ehook); - return; + if (IS_SCSI(isp)) { + struct isp_spi *spi = ISP_SPI_PC(isp, chan); + spi->sim = sim; + spi->path = path; +#ifdef ISP_INTERNAL_TARGET + ISP_SET_PC(isp, chan, proc_active, 1); + if (THREAD_CREATE(isp_target_thread_pi, spi, &spi->target_proc, 0, 0, "%s: isp_test_tgt%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) { + ISP_SET_PC(isp, chan, proc_active, 0); + isp_prt(isp, ISP_LOGERR, "cannot create test target thread"); } - if (xpt_bus_register(sim, isp->isp_dev, secondary) != - CAM_SUCCESS) { - xpt_bus_deregister(cam_sim_path(isp->isp_sim)); - xpt_free_path(isp->isp_path); - cam_sim_free(sim, TRUE); - config_intrhook_disestablish(&isp->isp_osinfo.ehook); - return; - } +#endif + } else { + struct isp_fc *fc = ISP_FC_PC(isp, chan); - if (xpt_create_path(&path, NULL, cam_sim_path(sim), - CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { - xpt_bus_deregister(cam_sim_path(isp->isp_sim)); - xpt_free_path(isp->isp_path); - xpt_bus_deregister(cam_sim_path(sim)); - cam_sim_free(sim, TRUE); - config_intrhook_disestablish(&isp->isp_osinfo.ehook); - return; + fc->sim = sim; + fc->path = path; + fc->isp = isp; + + callout_init_mtx(&fc->ldt, &isp->isp_osinfo.lock, 0); + callout_init_mtx(&fc->gdt, &isp->isp_osinfo.lock, 0); + + if (THREAD_CREATE(isp_kthread, fc, &fc->kproc, 0, 0, "%s: fc_thrd%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) { + xpt_free_path(fc->path); + ISP_LOCK(isp); + xpt_bus_deregister(cam_sim_path(fc->sim)); + ISP_UNLOCK(isp); + cam_sim_free(fc->sim, FALSE); } + /* + * We start by being "loop down" if we have an initiator role + */ + ISP_LOCK(isp); + if ((FCPARAM(isp, chan)->role & ISP_ROLE_INITIATOR) && 
fc->ldt_running == 0) { + isp_freeze_loopdown(isp, chan, "isp_attach"); + fc->ldt_running = 1; + callout_reset(&fc->ldt, isp_quickboot_time * hz, isp_ldt, fc); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Starting Initial Loop Down Timer @ %lu", (unsigned long) time_uptime); + } + ISP_UNLOCK(isp); +#ifdef ISP_INTERNAL_TARGET + ISP_SET_PC(isp, chan, proc_active, 1); + if (THREAD_CREATE(isp_target_thread_fc, fc, &fc->target_proc, 0, 0, "%s: isp_test_tgt%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) { + ISP_SET_PC(isp, chan, proc_active, 0); + isp_prt(isp, ISP_LOGERR, "cannot create test target thread"); + } +#endif + } + return (0); +} - xpt_setup_ccb(&csa.ccb_h, path, 5); - csa.ccb_h.func_code = XPT_SASYNC_CB; - csa.event_enable = AC_LOST_DEVICE; - csa.callback = isp_cam_async; - csa.callback_arg = sim; - xpt_action((union ccb *)&csa); - isp->isp_sim2 = sim; - isp->isp_path2 = path; +int +isp_attach(ispsoftc_t *isp) +{ + const char *nu = device_get_nameunit(isp->isp_osinfo.dev); + int du = device_get_unit(isp->isp_dev); + int chan; + + isp->isp_osinfo.ehook.ich_func = isp_intr_enable; + isp->isp_osinfo.ehook.ich_arg = isp; + if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { + isp_prt(isp, ISP_LOGERR, "could not establish interrupt enable hook"); + return (-EIO); } + isp->isp_osinfo.ehook_active = 1; + /* - * Create device nodes + * Create the device queue for our SIM(s). 
*/ - ISP_UNLOCK(isp); - (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT, - GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev)); - isp_sysctl_update(isp); - ISP_LOCK(isp); - - if (isp->isp_role != ISP_ROLE_NONE) { - isp->isp_state = ISP_RUNSTATE; - ISP_ENABLE_INTS(isp); + isp->isp_osinfo.devq = cam_simq_alloc(isp->isp_maxcmds); + if (isp->isp_osinfo.devq == NULL) { + config_intrhook_disestablish(&isp->isp_osinfo.ehook); + return (EIO); } - if (isplist == NULL) { - isplist = isp; - } else { - ispsoftc_t *tmp = isplist; - while (tmp->isp_osinfo.next) { - tmp = tmp->isp_osinfo.next; + + for (chan = 0; chan < isp->isp_nchan; chan++) { + if (isp_attach_chan(isp, isp->isp_osinfo.devq, chan)) { + goto unwind; } - tmp->isp_osinfo.next = isp; } - /* - * Create a kernel thread for fibre channel instances. - */ - if (IS_FC(isp)) { - isp_callout_init(&isp->isp_osinfo.ldt); - isp_callout_init(&isp->isp_osinfo.gdt); - ISP_UNLOCK(isp); -#if __FreeBSD_version >= 500000 - if (kproc_create(isp_kthread, isp, &isp->isp_osinfo.kproc, - RFHIGHPID, 0, "%s: fc_thrd", - device_get_nameunit(isp->isp_dev))) -#else - if (kproc_create(isp_kthread, isp, &isp->isp_osinfo.kproc, - "%s: fc_thrd", device_get_nameunit(isp->isp_dev))) -#endif - { - ISP_LOCK(isp); - xpt_bus_deregister(cam_sim_path(sim)); - cam_sim_free(sim, TRUE); - config_intrhook_disestablish(&isp->isp_osinfo.ehook); - isp_prt(isp, ISP_LOGERR, "could not create kthread"); - return; + callout_init_mtx(&isp->isp_osinfo.tmo, &isp->isp_osinfo.lock, 0); + callout_reset(&isp->isp_osinfo.tmo, hz, isp_timer, isp); + isp->isp_osinfo.timer_active = 1; + + isp->isp_osinfo.cdev = make_dev(&isp_cdevsw, du, UID_ROOT, GID_OPERATOR, 0600, "%s", nu); + if (isp->isp_osinfo.cdev) { + isp->isp_osinfo.cdev->si_drv1 = isp; + } + return (0); + +unwind: + while (--chan >= 0) { + struct cam_sim *sim; + struct cam_path *path; + if (IS_FC(isp)) { + sim = ISP_FC_PC(isp, chan)->sim; + path = ISP_FC_PC(isp, chan)->path; + } else { + 
sim = ISP_SPI_PC(isp, chan)->sim; + path = ISP_SPI_PC(isp, chan)->path; } + xpt_free_path(path); ISP_LOCK(isp); - /* - * We start by being "loop down" if we have an initiator role - */ - if (isp->isp_role & ISP_ROLE_INITIATOR) { - isp_freeze_loopdown(isp, "isp_attach"); - isp->isp_osinfo.ldt_running = 1; - callout_reset(&isp->isp_osinfo.ldt, - isp_quickboot_time * hz, isp_ldt, isp); - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "Starting Initial Loop Down Timer"); + xpt_bus_deregister(cam_sim_path(sim)); + ISP_UNLOCK(isp); + cam_sim_free(sim, FALSE); + } + if (isp->isp_osinfo.ehook_active) { + config_intrhook_disestablish(&isp->isp_osinfo.ehook); + isp->isp_osinfo.ehook_active = 0; + } + if (isp->isp_osinfo.cdev) { + destroy_dev(isp->isp_osinfo.cdev); + isp->isp_osinfo.cdev = NULL; + } + cam_simq_free(isp->isp_osinfo.devq); + isp->isp_osinfo.devq = NULL; + return (-1); +} + +void +isp_detach(ispsoftc_t *isp) +{ + int chan; + + ISP_LOCK(isp); + if (isp->isp_osinfo.timer_active) { + callout_stop(&isp->isp_osinfo.tmo); + isp->isp_osinfo.timer_active = 0; + } + ISP_UNLOCK(isp); + for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1) { + struct cam_sim *sim; + struct cam_path *path; + if (IS_FC(isp)) { + sim = ISP_FC_PC(isp, chan)->sim; + path = ISP_FC_PC(isp, chan)->path; + } else { + sim = ISP_SPI_PC(isp, chan)->sim; + path = ISP_SPI_PC(isp, chan)->path; } + xpt_free_path(path); + ISP_LOCK(isp); + xpt_bus_deregister(cam_sim_path(sim)); + ISP_UNLOCK(isp); + cam_sim_free(sim, FALSE); } + if (isp->isp_osinfo.cdev) { + destroy_dev(isp->isp_osinfo.cdev); + isp->isp_osinfo.cdev = NULL; + } + if (isp->isp_osinfo.ehook_active) { + config_intrhook_disestablish(&isp->isp_osinfo.ehook); + isp->isp_osinfo.ehook_active = 0; + } + if (isp->isp_osinfo.devq == NULL) { + cam_simq_free(isp->isp_osinfo.devq); + isp->isp_osinfo.devq = NULL; + } } static void -isp_freeze_loopdown(ispsoftc_t *isp, char *msg) +isp_freeze_loopdown(ispsoftc_t *isp, int chan, char *msg) { - if 
(isp->isp_osinfo.simqfrozen == 0) { - isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg); - isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; - xpt_freeze_simq(isp->isp_sim, 1); - } else { - isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg); - isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; + if (IS_FC(isp)) { + struct isp_fc *fc = ISP_FC_PC(isp, chan); + if (fc->simqfrozen == 0) { + isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown) chan %d", msg, chan); + fc->simqfrozen = SIMQFRZ_LOOPDOWN; + xpt_freeze_simq(fc->sim, 1); + } else { + isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown) chan %d", msg, chan); + fc->simqfrozen |= SIMQFRZ_LOOPDOWN; + } } } -#if __FreeBSD_version < 500000 -#define _DEV dev_t -#define _IOP struct proc -#else -#define _IOP struct thread -#define _DEV struct cdev * -#endif - static int -ispioctl(_DEV dev, u_long c, caddr_t addr, int flags, _IOP *td) +ispioctl(struct cdev *dev, u_long c, caddr_t addr, int flags, struct thread *td) { ispsoftc_t *isp; - int nr, retval = ENOTTY; + int nr, chan, retval = ENOTTY; - isp = isplist; - while (isp) { - if (dev2unit(dev) == device_get_unit(isp->isp_dev)) { - break; - } - isp = isp->isp_osinfo.next; - } - if (isp == NULL) { - return (ENXIO); - } + isp = dev->si_drv1; switch (c) { -#ifdef ISP_FW_CRASH_DUMP - case ISP_GET_FW_CRASH_DUMP: - if (IS_FC(isp)) { - uint16_t *ptr = FCPARAM(isp)->isp_dump_data; - size_t sz; - - retval = 0; - if (IS_2200(isp)) { - sz = QLA2200_RISC_IMAGE_DUMP_SIZE; - } else { - sz = QLA2300_RISC_IMAGE_DUMP_SIZE; - } - if (ptr && *ptr) { - void *uaddr = *((void **) addr); - if (copyout(ptr, uaddr, sz)) { - retval = EFAULT; - } else { - *ptr = 0; - } - } else { - retval = ENXIO; - } - } - break; - case ISP_FORCE_CRASH_DUMP: - if (IS_FC(isp)) { - ISP_LOCK(isp); - isp_freeze_loopdown(isp, - "ispioctl(ISP_FORCE_CRASH_DUMP)"); - isp_fw_dump(isp); - isp_reinit(isp); - ISP_UNLOCK(isp); - retval = 0; - } - break; -#endif case ISP_SDBLEV: { int olddblev = 
isp->isp_dblev; isp->isp_dblev = *(int *)addr; *(int *)addr = olddblev; retval = 0; break; } case ISP_GETROLE: - *(int *)addr = isp->isp_role; + chan = *(int *)addr; + if (chan < 0 || chan >= isp->isp_nchan) { + retval = -ENXIO; + break; + } + if (IS_FC(isp)) { + *(int *)addr = FCPARAM(isp, chan)->role; + } else { + *(int *)addr = SDPARAM(isp, chan)->role; + } retval = 0; break; case ISP_SETROLE: nr = *(int *)addr; + chan = nr >> 8; + if (chan < 0 || chan >= isp->isp_nchan) { + retval = -ENXIO; + break; + } + nr &= 0xff; if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) { retval = EINVAL; break; } - *(int *)addr = isp->isp_role; - isp->isp_role = nr; - /* FALLTHROUGH */ + if (IS_FC(isp)) { + *(int *)addr = FCPARAM(isp, chan)->role; +#ifdef ISP_INTERNAL_TARGET + ISP_LOCK(isp); + retval = isp_fc_change_role(isp, chan, nr); + ISP_UNLOCK(isp); +#else + FCPARAM(isp, chan)->role = nr; +#endif + } else { + *(int *)addr = SDPARAM(isp, chan)->role; + SDPARAM(isp, chan)->role = nr; + } + retval = 0; + break; + case ISP_RESETHBA: ISP_LOCK(isp); - isp_reinit(isp); +#ifdef ISP_TARGET_MODE + isp_del_all_wwn_entries(isp, ISP_NOCHAN); +#endif + isp_reinit(isp, 0); ISP_UNLOCK(isp); retval = 0; break; + case ISP_RESCAN: if (IS_FC(isp)) { + chan = *(int *)addr; + if (chan < 0 || chan >= isp->isp_nchan) { + retval = -ENXIO; + break; + } ISP_LOCK(isp); - if (isp_fc_runstate(isp, 5 * 1000000)) { + if (isp_fc_runstate(isp, chan, 5 * 1000000)) { retval = EIO; } else { retval = 0; } ISP_UNLOCK(isp); } break; + case ISP_FC_LIP: if (IS_FC(isp)) { + chan = *(int *)addr; + if (chan < 0 || chan >= isp->isp_nchan) { + retval = -ENXIO; + break; + } ISP_LOCK(isp); - if (isp_control(isp, ISPCTL_SEND_LIP, 0)) { + if (isp_control(isp, ISPCTL_SEND_LIP, chan)) { retval = EIO; } else { retval = 0; } ISP_UNLOCK(isp); } break; case ISP_FC_GETDINFO: { struct isp_fc_device *ifc = (struct isp_fc_device *) addr; fcportdb_t *lp; if (IS_SCSI(isp)) { break; } if (ifc->loopid >= MAX_FC_TARG) { retval = EINVAL; 
break; } - lp = &FCPARAM(isp)->portdb[ifc->loopid]; - if (lp->state == FC_PORTDB_STATE_VALID) { + lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid]; + if (lp->state == FC_PORTDB_STATE_VALID || lp->target_mode) { ifc->role = lp->roles; ifc->loopid = lp->handle; ifc->portid = lp->portid; ifc->node_wwn = lp->node_wwn; ifc->port_wwn = lp->port_wwn; retval = 0; } else { retval = ENODEV; } break; } case ISP_GET_STATS: { isp_stats_t *sp = (isp_stats_t *) addr; - MEMZERO(sp, sizeof (*sp)); + ISP_MEMZERO(sp, sizeof (*sp)); sp->isp_stat_version = ISP_STATS_VERSION; sp->isp_type = isp->isp_type; sp->isp_revision = isp->isp_revision; ISP_LOCK(isp); sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt; sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus; sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc; sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync; sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt; sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt; sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater; sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater; ISP_UNLOCK(isp); retval = 0; break; } case ISP_CLR_STATS: ISP_LOCK(isp); isp->isp_intcnt = 0; isp->isp_intbogus = 0; isp->isp_intmboxc = 0; isp->isp_intoasync = 0; isp->isp_rsltccmplt = 0; isp->isp_fphccmplt = 0; isp->isp_rscchiwater = 0; isp->isp_fpcchiwater = 0; ISP_UNLOCK(isp); retval = 0; break; case ISP_FC_GETHINFO: { struct isp_hba_device *hba = (struct isp_hba_device *) addr; - MEMZERO(hba, sizeof (*hba)); + int chan = hba->fc_channel; + if (chan < 0 || chan >= isp->isp_nchan) { + retval = ENXIO; + break; + } hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev); hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev); hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev); + hba->fc_nchannels = isp->isp_nchan; if (IS_FC(isp)) { - hba->fc_speed = FCPARAM(isp)->isp_gbspeed; - hba->fc_scsi_supported = 1; - hba->fc_topology = FCPARAM(isp)->isp_topo + 1; - hba->fc_loopid = FCPARAM(isp)->isp_loopid; - hba->nvram_node_wwn = 
FCPARAM(isp)->isp_wwnn_nvram; - hba->nvram_port_wwn = FCPARAM(isp)->isp_wwpn_nvram; - hba->active_node_wwn = ISP_NODEWWN(isp); - hba->active_port_wwn = ISP_PORTWWN(isp); + hba->fc_nports = MAX_FC_TARG; + hba->fc_speed = FCPARAM(isp, hba->fc_channel)->isp_gbspeed; + hba->fc_topology = FCPARAM(isp, chan)->isp_topo + 1; + hba->fc_loopid = FCPARAM(isp, chan)->isp_loopid; + hba->nvram_node_wwn = FCPARAM(isp, chan)->isp_wwnn_nvram; + hba->nvram_port_wwn = FCPARAM(isp, chan)->isp_wwpn_nvram; + hba->active_node_wwn = FCPARAM(isp, chan)->isp_wwnn; + hba->active_port_wwn = FCPARAM(isp, chan)->isp_wwpn; + } else { + hba->fc_nports = MAX_TARGETS; + hba->fc_speed = 0; + hba->fc_topology = 0; + hba->nvram_node_wwn = 0ull; + hba->nvram_port_wwn = 0ull; + hba->active_node_wwn = 0ull; + hba->active_port_wwn = 0ull; } retval = 0; break; } case ISP_TSK_MGMT: { int needmarker; struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr; uint16_t loopid; mbreg_t mbs; if (IS_SCSI(isp)) { break; } - memset(&mbs, 0, sizeof (mbs)); - needmarker = retval = 0; - loopid = fct->loopid; - if (FCPARAM(isp)->isp_2klogin == 0) { - loopid <<= 8; - } - switch (fct->action) { - case IPT_CLEAR_ACA: - mbs.param[0] = MBOX_CLEAR_ACA; - mbs.param[1] = loopid; - mbs.param[2] = fct->lun; + chan = fct->chan; + if (chan < 0 || chan >= isp->isp_nchan) { + retval = -ENXIO; break; - case IPT_TARGET_RESET: - mbs.param[0] = MBOX_TARGET_RESET; - mbs.param[1] = loopid; - needmarker = 1; - break; - case IPT_LUN_RESET: - mbs.param[0] = MBOX_LUN_RESET; - mbs.param[1] = loopid; - mbs.param[2] = fct->lun; - needmarker = 1; - break; - case IPT_CLEAR_TASK_SET: - mbs.param[0] = MBOX_CLEAR_TASK_SET; - mbs.param[1] = loopid; - mbs.param[2] = fct->lun; - needmarker = 1; - break; - case IPT_ABORT_TASK_SET: - mbs.param[0] = MBOX_ABORT_TASK_SET; - mbs.param[1] = loopid; - mbs.param[2] = fct->lun; - needmarker = 1; - break; - default: - retval = EINVAL; - break; } - if (retval == 0) { - if (needmarker) { - isp->isp_sendmarker |= 
1; + + needmarker = retval = 0; + loopid = fct->loopid; + ISP_LOCK(isp); + if (IS_24XX(isp)) { + uint8_t local[QENTRY_LEN]; + isp24xx_tmf_t *tmf; + isp24xx_statusreq_t *sp; + fcparam *fcp = FCPARAM(isp, chan); + fcportdb_t *lp; + int i; + + for (i = 0; i < MAX_FC_TARG; i++) { + lp = &fcp->portdb[i]; + if (lp->handle == loopid) { + break; + } } - ISP_LOCK(isp); + if (i == MAX_FC_TARG) { + retval = ENXIO; + ISP_UNLOCK(isp); + break; + } + /* XXX VALIDATE LP XXX */ + tmf = (isp24xx_tmf_t *) local; + ISP_MEMZERO(tmf, QENTRY_LEN); + tmf->tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT; + tmf->tmf_header.rqs_entry_count = 1; + tmf->tmf_nphdl = lp->handle; + tmf->tmf_delay = 2; + tmf->tmf_timeout = 2; + tmf->tmf_tidlo = lp->portid; + tmf->tmf_tidhi = lp->portid >> 16; + tmf->tmf_vpidx = ISP_GET_VPIDX(isp, chan); + tmf->tmf_lun[1] = fct->lun & 0xff; + if (fct->lun >= 256) { + tmf->tmf_lun[0] = 0x40 | (fct->lun >> 8); + } + switch (fct->action) { + case IPT_CLEAR_ACA: + tmf->tmf_flags = ISP24XX_TMF_CLEAR_ACA; + break; + case IPT_TARGET_RESET: + tmf->tmf_flags = ISP24XX_TMF_TARGET_RESET; + needmarker = 1; + break; + case IPT_LUN_RESET: + tmf->tmf_flags = ISP24XX_TMF_LUN_RESET; + needmarker = 1; + break; + case IPT_CLEAR_TASK_SET: + tmf->tmf_flags = ISP24XX_TMF_CLEAR_TASK_SET; + needmarker = 1; + break; + case IPT_ABORT_TASK_SET: + tmf->tmf_flags = ISP24XX_TMF_ABORT_TASK_SET; + needmarker = 1; + break; + default: + retval = EINVAL; + break; + } + if (retval) { + ISP_UNLOCK(isp); + break; + } + MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 5000000); + mbs.param[1] = QENTRY_LEN; + mbs.param[2] = DMA_WD1(fcp->isp_scdma); + mbs.param[3] = DMA_WD0(fcp->isp_scdma); + mbs.param[6] = DMA_WD3(fcp->isp_scdma); + mbs.param[7] = DMA_WD2(fcp->isp_scdma); + + if (FC_SCRATCH_ACQUIRE(isp, chan)) { + ISP_UNLOCK(isp); + retval = ENOMEM; + break; + } + isp_put_24xx_tmf(isp, tmf, fcp->isp_scratch); + MEMORYBARRIER(isp, SYNC_SFORDEV, 0, QENTRY_LEN); + sp = (isp24xx_statusreq_t *) local; + 
sp->req_completion_status = 1; retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); - ISP_UNLOCK(isp); - if (retval) + MEMORYBARRIER(isp, SYNC_SFORCPU, QENTRY_LEN, QENTRY_LEN); + isp_get_24xx_response(isp, &((isp24xx_statusreq_t *)fcp->isp_scratch)[1], sp); + FC_SCRATCH_RELEASE(isp, chan); + if (retval || sp->req_completion_status != 0) { + FC_SCRATCH_RELEASE(isp, chan); retval = EIO; + } + if (retval == 0) { + if (needmarker) { + fcp->sendmarker = 1; + } + } + } else { + MBSINIT(&mbs, 0, MBLOGALL, 0); + if (ISP_CAP_2KLOGIN(isp) == 0) { + loopid <<= 8; + } + switch (fct->action) { + case IPT_CLEAR_ACA: + mbs.param[0] = MBOX_CLEAR_ACA; + mbs.param[1] = loopid; + mbs.param[2] = fct->lun; + break; + case IPT_TARGET_RESET: + mbs.param[0] = MBOX_TARGET_RESET; + mbs.param[1] = loopid; + needmarker = 1; + break; + case IPT_LUN_RESET: + mbs.param[0] = MBOX_LUN_RESET; + mbs.param[1] = loopid; + mbs.param[2] = fct->lun; + needmarker = 1; + break; + case IPT_CLEAR_TASK_SET: + mbs.param[0] = MBOX_CLEAR_TASK_SET; + mbs.param[1] = loopid; + mbs.param[2] = fct->lun; + needmarker = 1; + break; + case IPT_ABORT_TASK_SET: + mbs.param[0] = MBOX_ABORT_TASK_SET; + mbs.param[1] = loopid; + mbs.param[2] = fct->lun; + needmarker = 1; + break; + default: + retval = EINVAL; + break; + } + if (retval == 0) { + if (needmarker) { + FCPARAM(isp, chan)->sendmarker = 1; + } + retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); + if (retval) { + retval = EIO; + } + } } + ISP_UNLOCK(isp); break; } default: break; } return (retval); } -#if __FreeBSD_version >= 500000 static void -isp_sysctl_update(ispsoftc_t *isp) -{ - struct sysctl_ctx_list *ctx = - device_get_sysctl_ctx(isp->isp_osinfo.dev); - struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev); - - if (IS_SCSI(isp)) { - return; - } - - snprintf(isp->isp_osinfo.sysctl_info.fc.wwnn, - sizeof (isp->isp_osinfo.sysctl_info.fc.wwnn), "0x%08x%08x", - (uint32_t) (ISP_NODEWWN(isp) >> 32), (uint32_t) ISP_NODEWWN(isp)); - - 
snprintf(isp->isp_osinfo.sysctl_info.fc.wwpn, - sizeof (isp->isp_osinfo.sysctl_info.fc.wwpn), "0x%08x%08x", - (uint32_t) (ISP_PORTWWN(isp) >> 32), (uint32_t) ISP_PORTWWN(isp)); - - SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, - "wwnn", CTLFLAG_RD, isp->isp_osinfo.sysctl_info.fc.wwnn, 0, - "World Wide Node Name"); - - SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, - "wwpn", CTLFLAG_RD, isp->isp_osinfo.sysctl_info.fc.wwpn, 0, - "World Wide Port Name"); - - SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, - "loop_down_limit", - CTLFLAG_RW, &isp->isp_osinfo.loop_down_limit, 0, - "How long to wait for loop to come back up"); - - SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, - "gone_device_time", - CTLFLAG_RW, &isp->isp_osinfo.gone_device_time, 0, - "How long to wait for a device to reappear"); -} -#endif - -static void isp_intr_enable(void *arg) { + int chan; ispsoftc_t *isp = arg; ISP_LOCK(isp); - if (isp->isp_role != ISP_ROLE_NONE) { - ISP_ENABLE_INTS(isp); + for (chan = 0; chan < isp->isp_nchan; chan++) { + if (IS_FC(isp)) { + if (FCPARAM(isp, chan)->role != ISP_ROLE_NONE) { + ISP_ENABLE_INTS(isp); + break; + } + } else { + if (SDPARAM(isp, chan)->role != ISP_ROLE_NONE) { + ISP_ENABLE_INTS(isp); + break; + } + } } ISP_UNLOCK(isp); /* Release our hook so that the boot can continue. 
*/ config_intrhook_disestablish(&isp->isp_osinfo.ehook); } /* + * Local Inlines + */ + +static ISP_INLINE int isp_get_pcmd(ispsoftc_t *, union ccb *); +static ISP_INLINE void isp_free_pcmd(ispsoftc_t *, union ccb *); + +static ISP_INLINE int +isp_get_pcmd(ispsoftc_t *isp, union ccb *ccb) +{ + ISP_PCMD(ccb) = isp->isp_osinfo.pcmd_free; + if (ISP_PCMD(ccb) == NULL) { + return (-1); + } + isp->isp_osinfo.pcmd_free = ((struct isp_pcmd *)ISP_PCMD(ccb))->next; + return (0); +} + +static ISP_INLINE void +isp_free_pcmd(ispsoftc_t *isp, union ccb *ccb) +{ + ((struct isp_pcmd *)ISP_PCMD(ccb))->next = isp->isp_osinfo.pcmd_free; + isp->isp_osinfo.pcmd_free = ISP_PCMD(ccb); + ISP_PCMD(ccb) = NULL; +} +/* * Put the target mode functions here, because some are inlines */ #ifdef ISP_TARGET_MODE - -static __inline int is_lun_enabled(ispsoftc_t *, int, lun_id_t); -static __inline int are_any_luns_enabled(ispsoftc_t *, int); -static __inline tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t); -static __inline void rls_lun_statep(ispsoftc_t *, tstate_t *); -static __inline atio_private_data_t *isp_get_atpd(ispsoftc_t *, int); -static cam_status -create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **); +static ISP_INLINE int is_lun_enabled(ispsoftc_t *, int, lun_id_t); +static ISP_INLINE tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t); +static ISP_INLINE tstate_t *get_lun_statep_from_tag(ispsoftc_t *, int, uint32_t); +static ISP_INLINE void rls_lun_statep(ispsoftc_t *, tstate_t *); +static ISP_INLINE inot_private_data_t *get_ntp_from_tagdata(ispsoftc_t *, uint32_t, uint32_t, tstate_t **); +static ISP_INLINE atio_private_data_t *isp_get_atpd(ispsoftc_t *, tstate_t *, uint32_t); +static ISP_INLINE void isp_put_atpd(ispsoftc_t *, tstate_t *, atio_private_data_t *); +static ISP_INLINE inot_private_data_t *isp_get_ntpd(ispsoftc_t *, tstate_t *); +static ISP_INLINE inot_private_data_t *isp_find_ntpd(ispsoftc_t *, tstate_t *, uint32_t, uint32_t); +static ISP_INLINE void 
isp_put_ntpd(ispsoftc_t *, tstate_t *, inot_private_data_t *); +static cam_status create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **); static void destroy_lun_state(ispsoftc_t *, tstate_t *); -static int isp_en_lun(ispsoftc_t *, union ccb *); +static void isp_enable_lun(ispsoftc_t *, union ccb *); +static void isp_enable_deferred_luns(ispsoftc_t *, int); +static cam_status isp_enable_deferred(ispsoftc_t *, int, lun_id_t); +static void isp_disable_lun(ispsoftc_t *, union ccb *); +static int isp_enable_target_mode(ispsoftc_t *, int); static void isp_ledone(ispsoftc_t *, lun_entry_t *); -static cam_status isp_abort_tgt_ccb(ispsoftc_t *, union ccb *); static timeout_t isp_refire_putback_atio; static void isp_complete_ctio(union ccb *); static void isp_target_putback_atio(union ccb *); static void isp_target_start_ctio(ispsoftc_t *, union ccb *); -static int isp_handle_platform_atio(ispsoftc_t *, at_entry_t *); -static int isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *); -static int isp_handle_platform_ctio(ispsoftc_t *, void *); -static int isp_handle_platform_notify_scsi(ispsoftc_t *, in_entry_t *); -static int isp_handle_platform_notify_fc(ispsoftc_t *, in_fcentry_t *); +static void isp_handle_platform_atio(ispsoftc_t *, at_entry_t *); +static void isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *); +static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *); +static void isp_handle_platform_ctio(ispsoftc_t *, void *); +static void isp_handle_platform_notify_scsi(ispsoftc_t *, in_entry_t *); +static void isp_handle_platform_notify_fc(ispsoftc_t *, in_fcentry_t *); +static void isp_handle_platform_notify_24xx(ispsoftc_t *, in_fcentry_24xx_t *); +static int isp_handle_platform_target_notify_ack(ispsoftc_t *, isp_notify_t *); +static void isp_handle_platform_target_tmf(ispsoftc_t *, isp_notify_t *); +static void isp_target_mark_aborted(ispsoftc_t *, union ccb *); +static void isp_target_mark_aborted_early(ispsoftc_t *, tstate_t *, 
uint32_t); -static __inline int +static ISP_INLINE int is_lun_enabled(ispsoftc_t *isp, int bus, lun_id_t lun) { tstate_t *tptr; - tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; - if (tptr == NULL) { - return (0); - } - do { - if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) { + struct tslist *lhp; + + ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(lun)], lhp); + SLIST_FOREACH(tptr, lhp, next) { + if (xpt_path_lun_id(tptr->owner) == lun) { return (1); } - } while ((tptr = tptr->next) != NULL); + } return (0); } -static __inline int -are_any_luns_enabled(ispsoftc_t *isp, int port) +static void +dump_tstates(ispsoftc_t *isp, int bus) { - int lo, hi; - if (IS_DUALBUS(isp)) { - lo = (port * (LUN_HASH_SIZE >> 1)); - hi = lo + (LUN_HASH_SIZE >> 1); - } else { - lo = 0; - hi = LUN_HASH_SIZE; + int i, j; + struct tslist *lhp; + tstate_t *tptr = NULL; + + if (bus >= isp->isp_nchan) { + return; } - for (lo = 0; lo < hi; lo++) { - if (isp->isp_osinfo.lun_hash[lo]) { - return (1); + for (i = 0; i < LUN_HASH_SIZE; i++) { + ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); + j = 0; + SLIST_FOREACH(tptr, lhp, next) { + xpt_print(tptr->owner, "[%d, %d] atio_cnt=%d inot_cnt=%d\n", i, j, tptr->atio_count, tptr->inot_count); + j++; } } - return (0); } -static __inline tstate_t * +static ISP_INLINE tstate_t * get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun) { tstate_t *tptr = NULL; + struct tslist *lhp; + int i; - if (lun == CAM_LUN_WILDCARD) { - if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) { - tptr = &isp->isp_osinfo.tsdflt[bus]; - tptr->hold++; - return (tptr); + if (bus < isp->isp_nchan) { + for (i = 0; i < LUN_HASH_SIZE; i++) { + ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); + SLIST_FOREACH(tptr, lhp, next) { + if (xpt_path_lun_id(tptr->owner) == lun) { + tptr->hold++; + return (tptr); + } + } } - return (NULL); - } else { - tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; - if (tptr == NULL) { - return (NULL); - } } + return (NULL); 
+} - do { - if (tptr->lun == lun && tptr->bus == bus) { - tptr->hold++; - return (tptr); +static ISP_INLINE tstate_t * +get_lun_statep_from_tag(ispsoftc_t *isp, int bus, uint32_t tagval) +{ + tstate_t *tptr = NULL; + atio_private_data_t *atp; + struct tslist *lhp; + int i; + + if (bus < isp->isp_nchan && tagval != 0) { + for (i = 0; i < LUN_HASH_SIZE; i++) { + ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); + SLIST_FOREACH(tptr, lhp, next) { + atp = isp_get_atpd(isp, tptr, tagval); + if (atp && atp->tag == tagval) { + tptr->hold++; + return (tptr); + } + } } - } while ((tptr = tptr->next) != NULL); - return (tptr); + } + return (NULL); } -static __inline void +static ISP_INLINE inot_private_data_t * +get_ntp_from_tagdata(ispsoftc_t *isp, uint32_t tag_id, uint32_t seq_id, tstate_t **rslt) +{ + inot_private_data_t *ntp; + tstate_t *tptr; + struct tslist *lhp; + int bus, i; + + for (bus = 0; bus < isp->isp_nchan; bus++) { + for (i = 0; i < LUN_HASH_SIZE; i++) { + ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); + SLIST_FOREACH(tptr, lhp, next) { + ntp = isp_find_ntpd(isp, tptr, tag_id, seq_id); + if (ntp) { + *rslt = tptr; + tptr->hold++; + return (ntp); + } + } + } + } + return (NULL); +} +static ISP_INLINE void rls_lun_statep(ispsoftc_t *isp, tstate_t *tptr) { - if (tptr->hold) - tptr->hold--; + KASSERT((tptr->hold), ("tptr not held")); + tptr->hold--; } -static __inline atio_private_data_t * -isp_get_atpd(ispsoftc_t *isp, int tag) +static void +isp_tmcmd_restart(ispsoftc_t *isp) { + inot_private_data_t *ntp; + tstate_t *tptr; + struct tslist *lhp; + int bus, i; + + for (bus = 0; bus < isp->isp_nchan; bus++) { + for (i = 0; i < LUN_HASH_SIZE; i++) { + ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); + SLIST_FOREACH(tptr, lhp, next) { + inot_private_data_t *restart_queue = tptr->restart_queue; + tptr->restart_queue = NULL; + while (restart_queue) { + ntp = restart_queue; + restart_queue = ntp->rd.nt.nt_hba; + if (IS_24XX(isp)) { + isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting 
resrc deprived %x", __func__, ((at7_entry_t *)ntp->rd.data)->at_rxid); + isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->rd.data); + } else { + isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at2_entry_t *)ntp->rd.data)->at_rxid); + isp_handle_platform_atio2(isp, (at2_entry_t *) ntp->rd.data); + } + isp_put_ntpd(isp, tptr, ntp); + if (tptr->restart_queue && restart_queue != NULL) { + ntp = tptr->restart_queue; + tptr->restart_queue = restart_queue; + while (restart_queue->rd.nt.nt_hba) { + restart_queue = restart_queue->rd.nt.nt_hba; + } + restart_queue->rd.nt.nt_hba = ntp; + break; + } + } + } + } + } +} + +static ISP_INLINE atio_private_data_t * +isp_get_atpd(ispsoftc_t *isp, tstate_t *tptr, uint32_t tag) +{ atio_private_data_t *atp; - for (atp = isp->isp_osinfo.atpdp; - atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) { - if (atp->tag == tag) + + if (tag == 0) { + atp = tptr->atfree; + if (atp) { + tptr->atfree = atp->next; + } + return (atp); + } + for (atp = tptr->atpool; atp < &tptr->atpool[ATPDPSIZE]; atp++) { + if (atp->tag == tag) { return (atp); + } } return (NULL); } +static ISP_INLINE void +isp_put_atpd(ispsoftc_t *isp, tstate_t *tptr, atio_private_data_t *atp) +{ + atp->tag = 0; + atp->dead = 0; + atp->next = tptr->atfree; + tptr->atfree = atp; +} + +static void +isp_dump_atpd(ispsoftc_t *isp, tstate_t *tptr) +{ + atio_private_data_t *atp; + const char *states[8] = { "Free", "ATIO", "CAM", "CTIO", "LAST_CTIO", "PDON", "?6", "7" }; + + for (atp = tptr->atpool; atp < &tptr->atpool[ATPDPSIZE]; atp++) { + if (atp->tag == 0) { + continue; + } + xpt_print(tptr->owner, "ATP: [0x%x] origdlen %u bytes_xfrd %u last_xfr %u lun %u nphdl 0x%04x s_id 0x%06x d_id 0x%06x oxid 0x%04x state %s\n", + atp->tag, atp->orig_datalen, atp->bytes_xfered, atp->last_xframt, atp->lun, atp->nphdl, atp->sid, atp->portid, atp->oxid, states[atp->state & 0x7]); + } +} + + +static ISP_INLINE inot_private_data_t * +isp_get_ntpd(ispsoftc_t *isp, tstate_t 
*tptr) +{ + inot_private_data_t *ntp; + ntp = tptr->ntfree; + if (ntp) { + tptr->ntfree = ntp->next; + } + return (ntp); +} + +static ISP_INLINE inot_private_data_t * +isp_find_ntpd(ispsoftc_t *isp, tstate_t *tptr, uint32_t tag_id, uint32_t seq_id) +{ + inot_private_data_t *ntp; + for (ntp = tptr->ntpool; ntp < &tptr->ntpool[ATPDPSIZE]; ntp++) { + if (ntp->rd.tag_id == tag_id && ntp->rd.seq_id == seq_id) { + return (ntp); + } + } + return (NULL); +} + +static ISP_INLINE void +isp_put_ntpd(ispsoftc_t *isp, tstate_t *tptr, inot_private_data_t *ntp) +{ + ntp->rd.tag_id = ntp->rd.seq_id = 0; + ntp->next = tptr->ntfree; + tptr->ntfree = ntp; +} + static cam_status -create_lun_state(ispsoftc_t *isp, int bus, - struct cam_path *path, tstate_t **rslt) +create_lun_state(ispsoftc_t *isp, int bus, struct cam_path *path, tstate_t **rslt) { cam_status status; lun_id_t lun; - int hfx; - tstate_t *tptr, *new; + struct tslist *lhp; + tstate_t *tptr; + int i; lun = xpt_path_lun_id(path); - if (lun >= ISP_MAX_LUNS(isp)) { - return (CAM_LUN_INVALID); + if (lun != CAM_LUN_WILDCARD) { + if (lun >= ISP_MAX_LUNS(isp)) { + return (CAM_LUN_INVALID); + } } if (is_lun_enabled(isp, bus, lun)) { return (CAM_LUN_ALRDY_ENA); } - new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); - if (new == NULL) { + tptr = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); + if (tptr == NULL) { return (CAM_RESRC_UNAVAIL); } - - status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path), - xpt_path_target_id(path), xpt_path_lun_id(path)); + status = xpt_create_path(&tptr->owner, NULL, xpt_path_path_id(path), xpt_path_target_id(path), lun); if (status != CAM_REQ_CMP) { - free(new, M_DEVBUF); + free(tptr, M_DEVBUF); return (status); } - new->bus = bus; - new->lun = lun; - SLIST_INIT(&new->atios); - SLIST_INIT(&new->inots); - new->hold = 1; - - hfx = LUN_HASH_FUNC(isp, new->bus, new->lun); - tptr = isp->isp_osinfo.lun_hash[hfx]; - if (tptr == NULL) { - 
isp->isp_osinfo.lun_hash[hfx] = new; - } else { - while (tptr->next) - tptr = tptr->next; - tptr->next = new; + SLIST_INIT(&tptr->atios); + SLIST_INIT(&tptr->inots); + for (i = 0; i < ATPDPSIZE-1; i++) { + tptr->atpool[i].next = &tptr->atpool[i+1]; + tptr->ntpool[i].next = &tptr->ntpool[i+1]; } - *rslt = new; + tptr->atfree = tptr->atpool; + tptr->ntfree = tptr->ntpool; + tptr->hold = 1; + ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(xpt_path_lun_id(tptr->owner))], lhp); + SLIST_INSERT_HEAD(lhp, tptr, next); + *rslt = tptr; + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, path, "created tstate\n"); return (CAM_REQ_CMP); } -static __inline void +static ISP_INLINE void destroy_lun_state(ispsoftc_t *isp, tstate_t *tptr) { - int hfx; - tstate_t *lw, *pw; - - if (tptr->hold) { - return; - } - hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun); - pw = isp->isp_osinfo.lun_hash[hfx]; - if (pw == NULL) { - return; - } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) { - isp->isp_osinfo.lun_hash[hfx] = pw->next; - } else { - lw = pw; - pw = lw->next; - while (pw) { - if (pw->lun == tptr->lun && pw->bus == tptr->bus) { - lw->next = pw->next; - break; - } - lw = pw; - pw = pw->next; - } - if (pw == NULL) { - return; - } - } + struct tslist *lhp; + KASSERT((tptr->hold == 0), ("tptr still held")); + ISP_GET_PC_ADDR(isp, xpt_path_path_id(tptr->owner), lun_hash[LUN_HASH_FUNC(xpt_path_lun_id(tptr->owner))], lhp); + SLIST_REMOVE(lhp, tptr, tstate, next); + xpt_free_path(tptr->owner); free(tptr, M_DEVBUF); } /* - * Enable luns. + * Enable a lun. 
*/ -static int -isp_en_lun(ispsoftc_t *isp, union ccb *ccb) +static void +isp_enable_lun(ispsoftc_t *isp, union ccb *ccb) { - struct ccb_en_lun *cel = &ccb->cel; tstate_t *tptr = NULL; - uint32_t seq; - int bus, cmd, av, wildcard, tm_on; + int bus, tm_enabled, target_role; + target_id_t target; lun_id_t lun; - target_id_t tgt; + /* + * We only support either a wildcard target/lun or a target ID of zero and a non-wildcard lun + */ bus = XS_CHANNEL(ccb); - if (bus > 1) { - xpt_print(ccb->ccb_h.path, "illegal bus %d\n", bus); - ccb->ccb_h.status = CAM_PATH_INVALID; - return (-1); - } - tgt = ccb->ccb_h.target_id; + target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; + if (target != CAM_TARGET_WILDCARD && target != 0) { + ccb->ccb_h.status = CAM_TID_INVALID; + xpt_done(ccb); + return; + } + if (target == CAM_TARGET_WILDCARD && lun != CAM_LUN_WILDCARD) { + ccb->ccb_h.status = CAM_LUN_INVALID; + xpt_done(ccb); + return; + } + if (target != CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { + ccb->ccb_h.status = CAM_LUN_INVALID; + xpt_done(ccb); + return; + } if (isp->isp_dblev & ISP_LOGTDEBUG0) { - xpt_print(ccb->ccb_h.path, "%sabling lun 0x%x on channel %d\n", - cel->enable? "en" : "dis", lun, bus); + xpt_print(ccb->ccb_h.path, "enabling lun 0x%x on channel %d\n", lun, bus); } - if ((lun != CAM_LUN_WILDCARD) && - (lun >= (lun_id_t) isp->isp_maxluns)) { - ccb->ccb_h.status = CAM_LUN_INVALID; - return (-1); + /* + * Wait until we're not busy with the lun enables subsystem + */ + while (isp->isp_osinfo.tmbusy) { + isp->isp_osinfo.tmwanted = 1; + mtx_sleep(isp, &isp->isp_lock, PRIBIO, "want_isp_enable_lun", 0); } + isp->isp_osinfo.tmbusy = 1; - if (IS_SCSI(isp)) { - sdparam *sdp = isp->isp_param; - sdp += bus; - if (tgt != CAM_TARGET_WILDCARD && - tgt != sdp->isp_initiator_id) { - ccb->ccb_h.status = CAM_TID_INVALID; - return (-1); - } - } else { - /* - * There's really no point in doing this yet w/o multi-tid - * capability. Even then, it's problematic. 
- */ -#if 0 - if (tgt != CAM_TARGET_WILDCARD && - tgt != FCPARAM(isp)->isp_iid) { - ccb->ccb_h.status = CAM_TID_INVALID; - return (-1); - } -#endif - /* - * This is as a good a place as any to check f/w capabilities. - */ - if (FCPARAM(isp)->isp_tmode == 0) { - xpt_print(ccb->ccb_h.path, - "firmware does not support target mode\n"); + /* + * This is as a good a place as any to check f/w capabilities. + */ + + if (IS_FC(isp)) { + if (ISP_CAP_TMODE(isp) == 0) { + xpt_print(ccb->ccb_h.path, "firmware does not support target mode\n"); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; - return (-1); + goto done; } /* - * XXX: We *could* handle non-SCCLUN f/w, but we'd have to - * XXX: dork with our already fragile enable/disable code. + * We *could* handle non-SCCLUN f/w, but we'd have to + * dork with our already fragile enable/disable code. */ - if (FCPARAM(isp)->isp_sccfw == 0) { - xpt_print(ccb->ccb_h.path, - "firmware not SCCLUN capable\n"); + if (ISP_CAP_SCCFW(isp) == 0) { + xpt_print(ccb->ccb_h.path, "firmware not SCCLUN capable\n"); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; - return (-1); + goto done; } - } - if (tgt == CAM_TARGET_WILDCARD) { - if (lun == CAM_LUN_WILDCARD) { - wildcard = 1; - } else { - ccb->ccb_h.status = CAM_LUN_INVALID; - return (-1); - } + target_role = (FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) != 0; + } else { - wildcard = 0; + target_role = (SDPARAM(isp, bus)->role & ISP_ROLE_TARGET) != 0; } - tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0; - /* - * Next check to see whether this is a target/lun wildcard action. - * - * If so, we know that we can accept commands for luns that haven't - * been enabled yet and send them upstream. Otherwise, we have to - * handle them locally (if we see them at all). + * Create the state pointer. + * It should not already exist. 
*/ - - if (wildcard) { - tptr = &isp->isp_osinfo.tsdflt[bus]; - if (cel->enable) { - if (tm_on) { - ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; - return (-1); - } - ccb->ccb_h.status = - xpt_create_path(&tptr->owner, NULL, - xpt_path_path_id(ccb->ccb_h.path), - xpt_path_target_id(ccb->ccb_h.path), - xpt_path_lun_id(ccb->ccb_h.path)); - if (ccb->ccb_h.status != CAM_REQ_CMP) { - return (-1); - } - SLIST_INIT(&tptr->atios); - SLIST_INIT(&tptr->inots); - isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED; - } else { - if (tm_on == 0) { - ccb->ccb_h.status = CAM_REQ_CMP; - return (-1); - } - if (tptr->hold) { - ccb->ccb_h.status = CAM_SCSI_BUSY; - return (-1); - } - xpt_free_path(tptr->owner); - isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED; - } + tptr = get_lun_statep(isp, bus, lun); + if (tptr) { + ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; + goto done; } + ccb->ccb_h.status = create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); + if (ccb->ccb_h.status != CAM_REQ_CMP) { + goto done; + } /* - * Now check to see whether this bus needs to be - * enabled/disabled with respect to target mode. + * We have a tricky maneuver to perform here. + * + * If target mode isn't already enabled here, + * *and* our current role includes target mode, + * we enable target mode here. 
+ * */ - av = bus << 31; - if (cel->enable && tm_on == 0) { - av |= ENABLE_TARGET_FLAG; - av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); - if (av) { - ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; - if (wildcard) { - isp->isp_osinfo.tmflags[bus] &= - ~TM_WILDCARD_ENABLED; - xpt_free_path(tptr->owner); - } - return (-1); + ISP_GET_PC(isp, bus, tm_enabled, tm_enabled); + if (tm_enabled == 0 && target_role != 0) { + if (isp_enable_target_mode(isp, bus)) { + ccb->ccb_h.status = CAM_REQ_CMP_ERR; + destroy_lun_state(isp, tptr); + tptr = NULL; + goto done; } - isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED; - xpt_print(ccb->ccb_h.path, "Target Mode Enabled\n"); - } else if (cel->enable == 0 && tm_on && wildcard) { - if (are_any_luns_enabled(isp, bus)) { - ccb->ccb_h.status = CAM_SCSI_BUSY; - return (-1); - } - av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); - if (av) { - ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; - return (-1); - } - isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; - xpt_print(ccb->ccb_h.path, "Target Mode Disabled\n"); + tm_enabled = 1; } - if (wildcard) { + /* + * Now check to see whether this bus is in target mode already. + * + * If not, a later role change into target mode will finish the job. + */ + if (tm_enabled == 0) { + ISP_SET_PC(isp, bus, tm_enable_defer, 1); ccb->ccb_h.status = CAM_REQ_CMP; - return (-1); + xpt_print(ccb->ccb_h.path, "Target Mode Not Enabled Yet- Lun Enables Deferred\n"); + goto done; } /* - * Find an empty slot + * Enable the lun. 
*/ - for (seq = 0; seq < NLEACT; seq++) { - if (isp->isp_osinfo.leact[seq] == 0) { - break; - } + ccb->ccb_h.status = isp_enable_deferred(isp, bus, lun); + +done: + if (ccb->ccb_h.status != CAM_REQ_CMP && tptr) { + destroy_lun_state(isp, tptr); + tptr = NULL; } - if (seq >= NLEACT) { - ccb->ccb_h.status = CAM_RESRC_UNAVAIL; - return (-1); - + if (tptr) { + rls_lun_statep(isp, tptr); } - isp->isp_osinfo.leact[seq] = ccb; - - if (cel->enable) { - ccb->ccb_h.status = - create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); - if (ccb->ccb_h.status != CAM_REQ_CMP) { - isp->isp_osinfo.leact[seq] = 0; - return (-1); - } - } else { - tptr = get_lun_statep(isp, bus, lun); - if (tptr == NULL) { - ccb->ccb_h.status = CAM_LUN_INVALID; - return (-1); - } + isp->isp_osinfo.tmbusy = 0; + if (isp->isp_osinfo.tmwanted) { + isp->isp_osinfo.tmwanted = 0; + wakeup(isp); } + xpt_done(ccb); +} - if (cel->enable) { - int c, n, ulun = lun; +static void +isp_enable_deferred_luns(ispsoftc_t *isp, int bus) +{ + /* + * XXX: not entirely implemented yet + */ + (void) isp_enable_deferred(isp, bus, 0); +} - cmd = RQSTYPE_ENABLE_LUN; - c = DFLT_CMND_CNT; - n = DFLT_INOT_CNT; - if (IS_FC(isp) && lun != 0) { - cmd = RQSTYPE_MODIFY_LUN; - n = 0; - /* - * For SCC firmware, we only deal with setting - * (enabling or modifying) lun 0. 
- */ - ulun = 0; - } - if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { - rls_lun_statep(isp, tptr); - ccb->ccb_h.status = CAM_REQ_INPROG; - return (seq); - } +static uint32_t +isp_enable_deferred(ispsoftc_t *isp, int bus, lun_id_t lun) +{ + cam_status status; + + isp_prt(isp, ISP_LOGTINFO, "%s: bus %d lun %u", __func__, bus, lun); + if (IS_24XX(isp) || (IS_FC(isp) && ISP_FC_PC(isp, bus)->tm_luns_enabled)) { + status = CAM_REQ_CMP; } else { - int c, n, ulun = lun; + int cmd_cnt, not_cnt; - cmd = -RQSTYPE_MODIFY_LUN; - c = DFLT_CMND_CNT; - n = DFLT_INOT_CNT; - if (IS_FC(isp) && lun != 0) { - n = 0; - /* - * For SCC firmware, we only deal with setting - * (enabling or modifying) lun 0. - */ - ulun = 0; + if (IS_23XX(isp)) { + cmd_cnt = DFLT_CMND_CNT; + not_cnt = DFLT_INOT_CNT; + } else { + cmd_cnt = 64; + not_cnt = 8; } - if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { - rls_lun_statep(isp, tptr); - ccb->ccb_h.status = CAM_REQ_INPROG; - return (seq); + status = CAM_REQ_INPROG; + isp->isp_osinfo.rptr = &status; + if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, lun, DFLT_CMND_CNT, DFLT_INOT_CNT)) { + status = CAM_RESRC_UNAVAIL; + } else { + mtx_sleep(&status, &isp->isp_lock, PRIBIO, "isp_enable_deferred", 0); } + isp->isp_osinfo.rptr = NULL; } - rls_lun_statep(isp, tptr); - xpt_print(ccb->ccb_h.path, "isp_lun_cmd failed\n"); - isp->isp_osinfo.leact[seq] = 0; - ccb->ccb_h.status = CAM_REQ_CMP_ERR; - return (-1); + + if (status == CAM_REQ_CMP) { + ISP_SET_PC(isp, bus, tm_luns_enabled, 1); + isp_prt(isp, ISP_LOGTINFO, "bus %d lun %u now enabled for target mode", bus, lun); + } + return (status); } static void -isp_ledone(ispsoftc_t *isp, lun_entry_t *lep) +isp_disable_lun(ispsoftc_t *isp, union ccb *ccb) { - const char lfmt[] = "now %sabled for target mode\n"; - union ccb *ccb; - uint32_t seq; - tstate_t *tptr; - int av; - struct ccb_en_lun *cel; + tstate_t *tptr = NULL; + int bus; + cam_status status; + target_id_t target; + lun_id_t lun; - seq 
= lep->le_reserved - 1; - if (seq >= NLEACT) { - isp_prt(isp, ISP_LOGERR, - "seq out of range (%u) in isp_ledone", seq); + bus = XS_CHANNEL(ccb); + target = ccb->ccb_h.target_id; + lun = ccb->ccb_h.target_lun; + if (target != CAM_TARGET_WILDCARD && target != 0) { + ccb->ccb_h.status = CAM_TID_INVALID; + xpt_done(ccb); return; } - ccb = isp->isp_osinfo.leact[seq]; - if (ccb == 0) { - isp_prt(isp, ISP_LOGERR, - "no ccb for seq %u in isp_ledone", seq); + if (target == CAM_TARGET_WILDCARD && lun != CAM_LUN_WILDCARD) { + ccb->ccb_h.status = CAM_LUN_INVALID; + xpt_done(ccb); return; } - cel = &ccb->cel; - tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb)); - if (tptr == NULL) { - xpt_print(ccb->ccb_h.path, "null tptr in isp_ledone\n"); - isp->isp_osinfo.leact[seq] = 0; - return; - } - if (lep->le_status != LUN_OK) { - xpt_print(ccb->ccb_h.path, - "ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status); -err: - ccb->ccb_h.status = CAM_REQ_CMP_ERR; - rls_lun_statep(isp, tptr); - isp->isp_osinfo.leact[seq] = 0; + if (target != CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { + ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return; - } else { - isp_prt(isp, ISP_LOGTDEBUG0, - "isp_ledone: ENABLE/MODIFY done okay"); } + if (isp->isp_dblev & ISP_LOGTDEBUG0) { + xpt_print(ccb->ccb_h.path, "enabling lun 0x%x on channel %d\n", lun, bus); + } + /* + * See if we're busy disabling a lun now. + */ + while (isp->isp_osinfo.tmbusy) { + isp->isp_osinfo.tmwanted = 1; + mtx_sleep(isp, &isp->isp_lock, PRIBIO, "want_isp_disable_lun", 0); + } + isp->isp_osinfo.tmbusy = 1; - if (cel->enable) { - ccb->ccb_h.status = CAM_REQ_CMP; - xpt_print(ccb->ccb_h.path, lfmt, "en"); - rls_lun_statep(isp, tptr); - isp->isp_osinfo.leact[seq] = 0; - xpt_done(ccb); - return; + /* + * Find the state pointer. 
+ */ + if ((tptr = get_lun_statep(isp, bus, lun)) == NULL) { + ccb->ccb_h.status = CAM_PATH_INVALID; + goto done; } - if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) { - if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb), - XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) { - xpt_print(ccb->ccb_h.path, - "isp_ledone: isp_lun_cmd failed\n"); - goto err; - } - rls_lun_statep(isp, tptr); - return; + /* + * If we're a 24XX card, we're done. + */ + if (IS_24XX(isp)) { + status = CAM_REQ_CMP; + goto done; } - xpt_print(ccb->ccb_h.path, lfmt, "dis"); - rls_lun_statep(isp, tptr); - destroy_lun_state(isp, tptr); - ccb->ccb_h.status = CAM_REQ_CMP; - isp->isp_osinfo.leact[seq] = 0; + /* + * For SCC FW, we only deal with lun zero. + */ + if (IS_FC(isp)) { + lun = 0; + } + + isp->isp_osinfo.rptr = &status; + status = CAM_REQ_INPROG; + if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, lun, 0, 0)) { + status = CAM_RESRC_UNAVAIL; + } else { + mtx_sleep(ccb, &isp->isp_lock, PRIBIO, "isp_disable_lun", 0); + } +done: + if (status == CAM_REQ_CMP) { + xpt_print(ccb->ccb_h.path, "now disabled for target mode\n"); + } + if (tptr) { + rls_lun_statep(isp, tptr); + } + isp->isp_osinfo.rptr = NULL; + isp->isp_osinfo.tmbusy = 0; + if (isp->isp_osinfo.tmwanted) { + isp->isp_osinfo.tmwanted = 0; + wakeup(isp); + } xpt_done(ccb); - if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) { - int bus = XS_CHANNEL(ccb); - av = bus << 31; - av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); - if (av) { - isp_prt(isp, ISP_LOGWARN, - "disable target mode on channel %d failed", bus); +} + +static int +isp_enable_target_mode(ispsoftc_t *isp, int bus) +{ + int ct; + + ISP_GET_PC(isp, bus, tm_enabled, ct); + if (ct != 0) { + return (0); + } + + if (IS_SCSI(isp)) { + mbreg_t mbs; + + MBSINIT(&mbs, MBOX_ENABLE_TARGET_MODE, MBLOGALL, 0); + mbs.param[0] = MBOX_ENABLE_TARGET_MODE; + mbs.param[1] = ENABLE_TARGET_FLAG|ENABLE_TQING_FLAG; + mbs.param[2] = bus << 7; + if (isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs) < 
0 || mbs.param[0] != MBOX_COMMAND_COMPLETE) { + isp_prt(isp, ISP_LOGERR, "Unable to add Target Role to Bus %d", bus); + return (EIO); } - isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; + SDPARAM(isp, bus)->role |= ISP_ROLE_TARGET; } + ISP_SET_PC(isp, bus, tm_enabled, 1); + isp_prt(isp, ISP_LOGINFO, "Target Role added to Bus %d", bus); + return (0); } - -static cam_status -isp_abort_tgt_ccb(ispsoftc_t *isp, union ccb *ccb) +#ifdef NEEDED +static int +isp_disable_target_mode(ispsoftc_t *isp, int bus) { - tstate_t *tptr; - struct ccb_hdr_slist *lp; - struct ccb_hdr *curelm; - int found, *ctr; - union ccb *accb = ccb->cab.abort_ccb; + int ct; - xpt_print(ccb->ccb_h.path, "aborting ccb %p\n", accb); - if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { - int badpath = 0; - if (IS_FC(isp) && (accb->ccb_h.target_id != - ((fcparam *) isp->isp_param)->isp_loopid)) { - badpath = 1; - } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != - ((sdparam *) isp->isp_param)->isp_initiator_id)) { - badpath = 1; + ISP_GET_PC(isp, bus, tm_enabled, ct); + if (ct == 0) { + return (0); + } + + if (IS_SCSI(isp)) { + mbreg_t mbs; + + MBSINIT(&mbs, MBOX_ENABLE_TARGET_MODE, MBLOGALL, 0); + mbs.param[2] = bus << 7; + if (isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs) < 0 || mbs.param[0] != MBOX_COMMAND_COMPLETE) { + isp_prt(isp, ISP_LOGERR, "Unable to subtract Target Role to Bus %d", bus); + return (EIO); } - if (badpath) { - /* - * Being restrictive about target ids is really about - * making sure we're aborting for the right multi-tid - * path. This doesn't really make much sense at present. 
- */ -#if 0 - return (CAM_PATH_INVALID); + SDPARAM(isp, bus)->role &= ~ISP_ROLE_TARGET; + } + ISP_SET_PC(isp, bus, tm_enabled, 0); + isp_prt(isp, ISP_LOGINFO, "Target Role subtracted from Bus %d", bus); + return (0); +} #endif + +static void +isp_ledone(ispsoftc_t *isp, lun_entry_t *lep) +{ + uint32_t *rptr; + + rptr = isp->isp_osinfo.rptr; + if (lep->le_status != LUN_OK) { + isp_prt(isp, ISP_LOGERR, "ENABLE/MODIFY LUN returned 0x%x", lep->le_status); + if (rptr) { + *rptr = CAM_REQ_CMP_ERR; + wakeup_one(rptr); } - } - tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun); - if (tptr == NULL) { - xpt_print(ccb->ccb_h.path, "can't get statep\n"); - return (CAM_PATH_INVALID); - } - if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { - lp = &tptr->atios; - ctr = &tptr->atio_count; - } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { - lp = &tptr->inots; - ctr = &tptr->inot_count; } else { - rls_lun_statep(isp, tptr); - xpt_print(ccb->ccb_h.path, "bad function code %d\n", - accb->ccb_h.func_code); - return (CAM_UA_ABORT); - } - curelm = SLIST_FIRST(lp); - found = 0; - if (curelm == &accb->ccb_h) { - found = 1; - SLIST_REMOVE_HEAD(lp, sim_links.sle); - } else { - while(curelm != NULL) { - struct ccb_hdr *nextelm; - - nextelm = SLIST_NEXT(curelm, sim_links.sle); - if (nextelm == &accb->ccb_h) { - found = 1; - SLIST_NEXT(curelm, sim_links.sle) = - SLIST_NEXT(nextelm, sim_links.sle); - break; - } - curelm = nextelm; + if (rptr) { + *rptr = CAM_REQ_CMP; + wakeup_one(rptr); } } - rls_lun_statep(isp, tptr); - if (found) { - (*ctr)--; - accb->ccb_h.status = CAM_REQ_ABORTED; - xpt_done(accb); - return (CAM_REQ_CMP); - } - xpt_print(ccb->ccb_h.path, "ccb %p not found\n", accb); - return (CAM_PATH_INVALID); } static void isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb) { void *qe; + tstate_t *tptr; + atio_private_data_t *atp; struct ccb_scsiio *cso = &ccb->csio; - uint32_t nxti, optr, handle; + uint32_t dmaresult, handle; uint8_t local[QENTRY_LEN]; + /* 
+ * Do some sanity checks. + */ + if (cso->dxfer_len == 0) { + if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { + xpt_print(ccb->ccb_h.path, "a data transfer length of zero but no status to send is wrong\n"); + ccb->ccb_h.status = CAM_REQ_INVALID; + xpt_done(ccb); + return; + } + } - if (isp_getrqentry(isp, &nxti, &optr, &qe)) { - xpt_print(ccb->ccb_h.path, - "Request Queue Overflow in isp_target_start_ctio\n"); - XS_SETERR(ccb, CAM_REQUEUE_REQ); + tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb)); + if (tptr == NULL) { + tptr = get_lun_statep(isp, XS_CHANNEL(ccb), CAM_LUN_WILDCARD); + if (tptr == NULL) { + xpt_print(ccb->ccb_h.path, "%s: [0x%x] cannot find tstate pointer in %s\n", __func__, cso->tag_id); + dump_tstates(isp, XS_CHANNEL(ccb)); + ccb->ccb_h.status = CAM_DEV_NOT_THERE; + xpt_done(ccb); + return; + } + } + + atp = isp_get_atpd(isp, tptr, cso->tag_id); + if (atp == NULL) { + xpt_print(ccb->ccb_h.path, "%s: [0x%x] cannot find private data adjunct\n", __func__, cso->tag_id); + isp_dump_atpd(isp, tptr); + ccb->ccb_h.status = CAM_REQ_CMP_ERR; + xpt_done(ccb); + return; + } + if (atp->dead) { + xpt_print(ccb->ccb_h.path, "%s: [0x%x] stopping sending a CTIO for a dead command\n", __func__, cso->tag_id); + ccb->ccb_h.status = CAM_REQ_ABORTED; + xpt_done(ccb); + return; + } + + /* + * Check to make sure we're still in target mode. 
+ */ + if ((FCPARAM(isp, XS_CHANNEL(ccb))->role & ISP_ROLE_TARGET) == 0) { + xpt_print(ccb->ccb_h.path, "%s: [0x%x] stopping sending a CTIO because we're no longer in target mode\n", __func__, cso->tag_id); + ccb->ccb_h.status = CAM_PROVIDE_FAIL; + xpt_done(ccb); + return; + } + + /* + * Get some resources + */ + if (isp_get_pcmd(isp, ccb)) { + rls_lun_statep(isp, tptr); + xpt_print(ccb->ccb_h.path, "out of PCMDs\n"); + cam_freeze_devq(ccb->ccb_h.path); + cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0); + ccb->ccb_h.status = CAM_REQUEUE_REQ; + xpt_done(ccb); + return; + } + qe = isp_getrqentry(isp); + if (qe == NULL) { + xpt_print(ccb->ccb_h.path, rqo, __func__); + cam_freeze_devq(ccb->ccb_h.path); + cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0); + ccb->ccb_h.status = CAM_REQUEUE_REQ; goto out; } memset(local, 0, QENTRY_LEN); /* * We're either moving data or completing a command here. */ + if (IS_24XX(isp)) { + ct7_entry_t *cto = (ct7_entry_t *) local; - if (IS_FC(isp)) { - atio_private_data_t *atp; + cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; + cto->ct_header.rqs_entry_count = 1; + cto->ct_header.rqs_seqno = 1; + cto->ct_nphdl = atp->nphdl; + cto->ct_rxid = atp->tag; + cto->ct_iid_lo = atp->portid; + cto->ct_iid_hi = atp->portid >> 16; + cto->ct_oxid = atp->oxid; + cto->ct_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(ccb)); + cto->ct_scsi_status = cso->scsi_status; + cto->ct_timeout = 120; + cto->ct_flags = atp->tattr << CT7_TASK_ATTR_SHIFT; + if (ccb->ccb_h.flags & CAM_SEND_STATUS) { + cto->ct_flags |= CT7_SENDSTATUS; + } + if (cso->dxfer_len == 0) { + cto->ct_flags |= CT7_FLAG_MODE1 | CT7_NO_DATA; + if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) { + int m = min(cso->sense_len, sizeof (struct scsi_sense_data)); + cto->rsp.m1.ct_resplen = cto->ct_senselen = min(m, MAXRESPLEN_24XX); + memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, cto->ct_senselen); + cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8); + } + } else { + 
cto->ct_flags |= CT7_FLAG_MODE0; + if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { + cto->ct_flags |= CT7_DATA_IN; + } else { + cto->ct_flags |= CT7_DATA_OUT; + } + cto->rsp.m0.reloff = atp->bytes_xfered; + /* + * Don't overrun the limits placed on us + */ + if (atp->bytes_xfered + cso->dxfer_len > atp->orig_datalen) { + cso->dxfer_len = atp->orig_datalen - atp->bytes_xfered; + } + atp->last_xframt = cso->dxfer_len; + cto->rsp.m0.ct_xfrlen = cso->dxfer_len; + } + if (cto->ct_flags & CT7_SENDSTATUS) { + int lvl = (cso->scsi_status)? ISP_LOGTINFO : ISP_LOGTDEBUG0; + cto->ct_resid = atp->orig_datalen - (atp->bytes_xfered + cso->dxfer_len); + if (cto->ct_resid < 0) { + cto->ct_scsi_status |= (FCP_RESID_OVERFLOW << 8); + } else if (cto->ct_resid > 0) { + cto->ct_scsi_status |= (FCP_RESID_UNDERFLOW << 8); + } + atp->state = ATPD_STATE_LAST_CTIO; + ISP_PATH_PRT(isp, lvl, cso->ccb_h.path, "%s: CTIO7[%x] CDB0=%x scsi status %x flags %x resid %d xfrlen %u offset %u\n", __func__, cto->ct_rxid, + atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid, cso->dxfer_len, atp->bytes_xfered); + } else { + cto->ct_resid = 0; + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, cso->ccb_h.path, "%s: CTIO7[%x] flags %x xfrlen %u offset %u\n", __func__, cto->ct_rxid, cto->ct_flags, + cso->dxfer_len, atp->bytes_xfered); + atp->state = ATPD_STATE_CTIO; + } + } else if (IS_FC(isp)) { ct2_entry_t *cto = (ct2_entry_t *) local; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; cto->ct_header.rqs_entry_count = 1; - if (FCPARAM(isp)->isp_2klogin) { + cto->ct_header.rqs_seqno = 1; + if (ISP_CAP_2KLOGIN(isp) == 0) { ((ct2e_entry_t *)cto)->ct_iid = cso->init_id; } else { cto->ct_iid = cso->init_id; - if (FCPARAM(isp)->isp_sccfw == 0) { + if (ISP_CAP_SCCFW(isp) == 0) { cto->ct_lun = ccb->ccb_h.target_lun; } } - atp = isp_get_atpd(isp, cso->tag_id); - if (atp == NULL) { - xpt_print(ccb->ccb_h.path, - "cannot find private data adjunct for tag %x\n", - cso->tag_id); - XS_SETERR(ccb, CAM_REQ_CMP_ERR); - goto 
out; - } cto->ct_rxid = cso->tag_id; if (cso->dxfer_len == 0) { - cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA; - if (ccb->ccb_h.flags & CAM_SEND_STATUS) { - cto->ct_flags |= CT2_SENDSTATUS; - cto->rsp.m1.ct_scsi_status = cso->scsi_status; - cto->ct_resid = - atp->orig_datalen - atp->bytes_xfered; - if (cto->ct_resid < 0) { - cto->rsp.m1.ct_scsi_status |= - CT2_DATA_OVER; - } else if (cto->ct_resid > 0) { - cto->rsp.m1.ct_scsi_status |= - CT2_DATA_UNDER; - } + cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA | CT2_SENDSTATUS; + cto->rsp.m1.ct_scsi_status = cso->scsi_status; + cto->ct_resid = atp->orig_datalen - atp->bytes_xfered; + if (cto->ct_resid < 0) { + cto->rsp.m1.ct_scsi_status |= CT2_DATA_OVER; + } else if (cto->ct_resid > 0) { + cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER; } if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) { int m = min(cso->sense_len, MAXRESPLEN); - memcpy(cto->rsp.m1.ct_resp, - &cso->sense_data, m); + memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, m); cto->rsp.m1.ct_senselen = m; cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; + } else if (cso->scsi_status == SCSI_STATUS_CHECK_COND) { + /* + * XXX: DEBUG + */ + xpt_print(ccb->ccb_h.path, "CHECK CONDITION being sent without associated SENSE DATA for CDB=0x%x\n", atp->cdb0); } } else { cto->ct_flags |= CT2_FLAG_MODE0; if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cto->ct_flags |= CT2_DATA_IN; } else { cto->ct_flags |= CT2_DATA_OUT; } cto->ct_reloff = atp->bytes_xfered; + cto->rsp.m0.ct_xfrlen = cso->dxfer_len; + /* + * Don't overrun the limits placed on us + */ + if (atp->bytes_xfered + cso->dxfer_len > atp->orig_datalen) { + cso->dxfer_len = atp->orig_datalen - atp->bytes_xfered; + } if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { cto->ct_flags |= CT2_SENDSTATUS; cto->rsp.m0.ct_scsi_status = cso->scsi_status; - cto->ct_resid = - atp->orig_datalen - - (atp->bytes_xfered + cso->dxfer_len); + cto->ct_resid = atp->orig_datalen - (atp->bytes_xfered + cso->dxfer_len); if (cto->ct_resid < 
0) { - cto->rsp.m0.ct_scsi_status |= - CT2_DATA_OVER; + cto->rsp.m0.ct_scsi_status |= CT2_DATA_OVER; } else if (cto->ct_resid > 0) { - cto->rsp.m0.ct_scsi_status |= - CT2_DATA_UNDER; + cto->rsp.m0.ct_scsi_status |= CT2_DATA_UNDER; } } else { atp->last_xframt = cso->dxfer_len; } /* * If we're sending data and status back together, * we can't also send back sense data as well. */ ccb->ccb_h.flags &= ~CAM_SEND_SENSE; } if (cto->ct_flags & CT2_SENDSTATUS) { - isp_prt(isp, ISP_LOGTDEBUG0, - "CTIO2[%x] STATUS %x origd %u curd %u resid %u", - cto->ct_rxid, cso->scsi_status, atp->orig_datalen, - cso->dxfer_len, cto->ct_resid); + int lvl = (cso->scsi_status)? ISP_LOGTINFO : ISP_LOGTDEBUG0; cto->ct_flags |= CT2_CCINCR; atp->state = ATPD_STATE_LAST_CTIO; + ISP_PATH_PRT(isp, lvl, cso->ccb_h.path, "%s: CTIO2[%x] CDB0=%x scsi status %x flags %x resid %d xfrlen %u offset %u\n", __func__, cto->ct_rxid, + atp->cdb0, cto->rsp.m0.ct_scsi_status, cto->ct_flags, cto->ct_resid, cso->dxfer_len, atp->bytes_xfered); } else { + cto->ct_resid = 0; atp->state = ATPD_STATE_CTIO; + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: CTIO2[%x] flags %x xfrlen %u offset %u\n", __func__, cto->ct_rxid, cto->ct_flags, + cso->dxfer_len, atp->bytes_xfered); } cto->ct_timeout = 10; } else { ct_entry_t *cto = (ct_entry_t *) local; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; cto->ct_header.rqs_entry_count = 1; + cto->ct_header.rqs_seqno = 1; cto->ct_iid = cso->init_id; cto->ct_iid |= XS_CHANNEL(ccb) << 7; cto->ct_tgt = ccb->ccb_h.target_id; cto->ct_lun = ccb->ccb_h.target_lun; - cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id); + cto->ct_fwhandle = cso->tag_id >> 16; if (AT_HAS_TAG(cso->tag_id)) { - cto->ct_tag_val = (uint8_t) AT_GET_TAG(cso->tag_id); + cto->ct_tag_val = cso->tag_id; cto->ct_flags |= CT_TQAE; } if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { cto->ct_flags |= CT_NODISC; } if (cso->dxfer_len == 0) { cto->ct_flags |= CT_NO_DATA; } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 
cto->ct_flags |= CT_DATA_IN; } else { cto->ct_flags |= CT_DATA_OUT; } if (ccb->ccb_h.flags & CAM_SEND_STATUS) { cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR; cto->ct_scsi_status = cso->scsi_status; cto->ct_resid = cso->resid; - isp_prt(isp, ISP_LOGTDEBUG0, - "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x", - cto->ct_fwhandle, cso->scsi_status, cso->resid, - cso->tag_id); + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: CTIO[%x] scsi status %x resid %d tag_id %x\n", __func__, + cto->ct_fwhandle, cso->scsi_status, cso->resid, cso->tag_id); } ccb->ccb_h.flags &= ~CAM_SEND_SENSE; cto->ct_timeout = 10; } if (isp_save_xs_tgt(isp, ccb, &handle)) { - xpt_print(ccb->ccb_h.path, - "No XFLIST pointers for isp_target_start_ctio\n"); - XS_SETERR(ccb, CAM_REQUEUE_REQ); + xpt_print(ccb->ccb_h.path, "No XFLIST pointers for %s\n", __func__); + ccb->ccb_h.status = CAM_REQUEUE_REQ; goto out; } /* * Call the dma setup routines for this entry (and any subsequent * CTIOs) if there's data to move, and then tell the f/w it's got * new things to play with. As with isp_start's usage of DMA setup, * any swizzling is done in the machine dependent layer. Because * of this, we put the request onto the queue area first in native * format. 
*/ - if (IS_FC(isp)) { + if (IS_24XX(isp)) { + ct7_entry_t *cto = (ct7_entry_t *) local; + cto->ct_syshandle = handle; + } else if (IS_FC(isp)) { ct2_entry_t *cto = (ct2_entry_t *) local; cto->ct_syshandle = handle; } else { ct_entry_t *cto = (ct_entry_t *) local; cto->ct_syshandle = handle; } - switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) { - case CMD_QUEUED: - ISP_ADD_REQUEST(isp, nxti); + dmaresult = ISP_DMASETUP(isp, cso, (ispreq_t *) local); + if (dmaresult == CMD_QUEUED) { + isp->isp_nactive++; ccb->ccb_h.status |= CAM_SIM_QUEUED; + rls_lun_statep(isp, tptr); return; - - case CMD_EAGAIN: - XS_SETERR(ccb, CAM_REQUEUE_REQ); - break; - - default: - break; } + if (dmaresult == CMD_EAGAIN) { + ccb->ccb_h.status = CAM_REQUEUE_REQ; + } else { + ccb->ccb_h.status = CAM_REQ_CMP_ERR; + } isp_destroy_tgt_handle(isp, handle); - out: + rls_lun_statep(isp, tptr); + isp_free_pcmd(isp, ccb); xpt_done(ccb); } static void isp_refire_putback_atio(void *arg) { - int s = splcam(); - isp_target_putback_atio(arg); - splx(s); + union ccb *ccb = arg; + ispsoftc_t *isp = XS_ISP(ccb); + ISP_LOCK(isp); + isp_target_putback_atio(ccb); + ISP_UNLOCK(isp); } static void isp_target_putback_atio(union ccb *ccb) { ispsoftc_t *isp; struct ccb_scsiio *cso; - uint32_t nxti, optr; void *qe; isp = XS_ISP(ccb); - if (isp_getrqentry(isp, &nxti, &optr, &qe)) { - xpt_print(ccb->ccb_h.path, - "isp_target_putback_atio: Request Queue Overflow\n"); + qe = isp_getrqentry(isp); + if (qe == NULL) { + xpt_print(ccb->ccb_h.path, rqo, __func__); (void) timeout(isp_refire_putback_atio, ccb, 10); return; } memset(qe, 0, QENTRY_LEN); cso = &ccb->csio; if (IS_FC(isp)) { at2_entry_t local, *at = &local; - MEMZERO(at, sizeof (at2_entry_t)); + ISP_MEMZERO(at, sizeof (at2_entry_t)); at->at_header.rqs_entry_type = RQSTYPE_ATIO2; at->at_header.rqs_entry_count = 1; - if (FCPARAM(isp)->isp_sccfw) { + if (ISP_CAP_SCCFW(isp)) { at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; } else { at->at_lun = 
(uint8_t) ccb->ccb_h.target_lun; } at->at_status = CT_OK; at->at_rxid = cso->tag_id; at->at_iid = cso->ccb_h.target_id; isp_put_atio2(isp, at, qe); } else { at_entry_t local, *at = &local; - MEMZERO(at, sizeof (at_entry_t)); + ISP_MEMZERO(at, sizeof (at_entry_t)); at->at_header.rqs_entry_type = RQSTYPE_ATIO; at->at_header.rqs_entry_count = 1; at->at_iid = cso->init_id; at->at_iid |= XS_CHANNEL(ccb) << 7; at->at_tgt = cso->ccb_h.target_id; at->at_lun = cso->ccb_h.target_lun; at->at_status = CT_OK; at->at_tag_val = AT_GET_TAG(cso->tag_id); at->at_handle = AT_GET_HANDLE(cso->tag_id); isp_put_atio(isp, at, qe); } - ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe); - ISP_ADD_REQUEST(isp, nxti); + ISP_TDQE(isp, "isp_target_putback_atio", isp->isp_reqidx, qe); + ISP_SYNC_REQUEST(isp); isp_complete_ctio(ccb); } static void isp_complete_ctio(union ccb *ccb) { if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { ccb->ccb_h.status |= CAM_REQ_CMP; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; + isp_free_pcmd(XS_ISP(ccb), ccb); xpt_done(ccb); } /* * Handle ATIO stuff that the generic code can't. * This means handling CDBs. */ -static int +static void isp_handle_platform_atio(ispsoftc_t *isp, at_entry_t *aep) { tstate_t *tptr; - int status, bus, iswildcard; + int status, bus; struct ccb_accept_tio *atiop; + atio_private_data_t *atp; /* * The firmware status (except for the QLTM_SVALID bit) * indicates why this ATIO was sent to us. * * If QLTM_SVALID is set, the firware has recommended Sense Data. * * If the DISCONNECTS DISABLED bit is set in the flags field, * we're still connected on the SCSI bus. */ status = aep->at_status; if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) { /* * Bus Phase Sequence error. We should have sense data * suggested by the f/w. I'm not sure quite yet what * to do about this for CAM. 
*/ isp_prt(isp, ISP_LOGWARN, "PHASE ERROR"); isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); - return (0); + return; } if ((status & ~QLTM_SVALID) != AT_CDB) { - isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", - status); + isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", status); isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); - return (0); + return; } bus = GET_BUS_VAL(aep->at_iid); tptr = get_lun_statep(isp, bus, aep->at_lun); if (tptr == NULL) { tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); if (tptr == NULL) { /* * Because we can't autofeed sense data back with * a command for parallel SCSI, we can't give back * a CHECK CONDITION. We'll give back a BUSY status * instead. This works out okay because the only * time we should, in fact, get this, is in the * case that somebody configured us without the * blackhole driver, so they get what they deserve. */ isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); - return (0); + return; } - iswildcard = 1; - } else { - iswildcard = 0; } + atp = isp_get_atpd(isp, tptr, 0); atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); - if (atiop == NULL) { + if (atiop == NULL || atp == NULL) { /* * Because we can't autofeed sense data back with * a command for parallel SCSI, we can't give back * a CHECK CONDITION. We'll give back a QUEUE FULL status * instead. This works out okay because the only time we * should, in fact, get this, is in the case that we've * run out of ATIOS. */ - xpt_print(tptr->owner, - "no ATIOS for lun %d from initiator %d on channel %d\n", - aep->at_lun, GET_IID_VAL(aep->at_iid), bus); - if (aep->at_flags & AT_TQAE) - isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); - else - isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); + xpt_print(tptr->owner, "no %s for lun %d from initiator %d\n", (atp == NULL && atiop == NULL)? "ATIOs *or* ATPS" : + ((atp == NULL)? 
"ATPs" : "ATIOs"), aep->at_lun, aep->at_iid); + isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); + if (atp) { + isp_put_atpd(isp, tptr, atp); + } rls_lun_statep(isp, tptr); - return (0); + return; } + atp->tag = aep->at_tag_val; + if (atp->tag == 0) { + atp->tag = ~0; + } + atp->state = ATPD_STATE_ATIO; SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); tptr->atio_count--; - isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d", - aep->at_lun, tptr->atio_count); - if (iswildcard) { - atiop->ccb_h.target_id = aep->at_tgt; - atiop->ccb_h.target_lun = aep->at_lun; - } + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, atiop->ccb_h.path, "Take FREE ATIO count now %d\n", tptr->atio_count); + atiop->ccb_h.target_id = aep->at_tgt; + atiop->ccb_h.target_lun = aep->at_lun; if (aep->at_flags & AT_NODISC) { atiop->ccb_h.flags = CAM_DIS_DISCONNECT; } else { atiop->ccb_h.flags = 0; } if (status & QLTM_SVALID) { size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data)); atiop->sense_len = amt; - MEMCPY(&atiop->sense_data, aep->at_sense, amt); + ISP_MEMCPY(&atiop->sense_data, aep->at_sense, amt); } else { atiop->sense_len = 0; } atiop->init_id = GET_IID_VAL(aep->at_iid); atiop->cdb_len = aep->at_cdblen; - MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen); + ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen); atiop->ccb_h.status = CAM_CDB_RECVD; /* * Construct a tag 'id' based upon tag value (which may be 0..255) * and the handle (which we have to preserve). 
*/ - AT_MAKE_TAGID(atiop->tag_id, bus, device_get_unit(isp->isp_dev), aep); + atiop->tag_id = atp->tag; if (aep->at_flags & AT_TQAE) { atiop->tag_action = aep->at_tag_type; atiop->ccb_h.status |= CAM_TAG_ACTION_VALID; } - xpt_done((union ccb*)atiop); - isp_prt(isp, ISP_LOGTDEBUG0, - "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s", - aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid), - GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff, - aep->at_tag_type, (aep->at_flags & AT_NODISC)? - "nondisc" : "disconnecting"); + atp->orig_datalen = 0; + atp->bytes_xfered = 0; + atp->last_xframt = 0; + atp->lun = aep->at_lun; + atp->nphdl = aep->at_iid; + atp->portid = PORT_NONE; + atp->oxid = 0; + atp->cdb0 = atiop->cdb_io.cdb_bytes[0]; + atp->tattr = aep->at_tag_type; + atp->state = ATPD_STATE_CAM; + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, tptr->owner, "ATIO[%x] CDB=0x%x lun %d\n", aep->at_tag_val, atp->cdb0, atp->lun); rls_lun_statep(isp, tptr); - return (0); } -static int +static void isp_handle_platform_atio2(ispsoftc_t *isp, at2_entry_t *aep) { lun_id_t lun; + fcportdb_t *lp; tstate_t *tptr; struct ccb_accept_tio *atiop; - atio_private_data_t *atp; + uint16_t nphdl; + atio_private_data_t *atp = NULL; + inot_private_data_t *ntp; /* * The firmware status (except for the QLTM_SVALID bit) * indicates why this ATIO was sent to us. * * If QLTM_SVALID is set, the firware has recommended Sense Data. 
*/ if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { - isp_prt(isp, ISP_LOGWARN, - "bogus atio (0x%x) leaked to platform", aep->at_status); + isp_prt(isp, ISP_LOGWARN, "bogus atio (0x%x) leaked to platform", aep->at_status); isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); - return (0); + return; } - if (FCPARAM(isp)->isp_sccfw) { + if (ISP_CAP_SCCFW(isp)) { lun = aep->at_scclun; } else { lun = aep->at_lun; } + if (ISP_CAP_2KLOGIN(isp)) { + nphdl = ((at2e_entry_t *)aep)->at_iid; + } else { + nphdl = aep->at_iid; + } tptr = get_lun_statep(isp, 0, lun); if (tptr == NULL) { - isp_prt(isp, ISP_LOGTDEBUG0, - "[0x%x] no state pointer for lun %d", aep->at_rxid, lun); tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); if (tptr == NULL) { - isp_endcmd(isp, aep, - SCSI_STATUS_CHECK_COND | ECMD_SVALID | - (0x5 << 12) | (0x25 << 16), 0); - return (0); + isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] no state pointer for lun %d", aep->at_rxid, lun); + isp_endcmd(isp, aep, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0); + return; } } - atp = isp_get_atpd(isp, 0); + /* + * Start any commands pending resources first. + */ + if (tptr->restart_queue) { + inot_private_data_t *restart_queue = tptr->restart_queue; + tptr->restart_queue = NULL; + while (restart_queue) { + ntp = restart_queue; + restart_queue = ntp->rd.nt.nt_hba; + isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at2_entry_t *)ntp->rd.data)->at_rxid); + isp_handle_platform_atio2(isp, (at2_entry_t *) ntp->rd.data); + isp_put_ntpd(isp, tptr, ntp); + /* + * If a recursion caused the restart queue to start to fill again, + * stop and splice the new list on top of the old list and restore + * it and go to noresrc. 
+ */ + if (tptr->restart_queue) { + ntp = tptr->restart_queue; + tptr->restart_queue = restart_queue; + while (restart_queue->rd.nt.nt_hba) { + restart_queue = restart_queue->rd.nt.nt_hba; + } + restart_queue->rd.nt.nt_hba = ntp; + goto noresrc; + } + } + } + atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); - if (atiop == NULL || atp == NULL) { + if (atiop == NULL) { + goto noresrc; + } - /* - * Because we can't autofeed sense data back with - * a command for parallel SCSI, we can't give back - * a CHECK CONDITION. We'll give back a QUEUE FULL status - * instead. This works out okay because the only time we - * should, in fact, get this, is in the case that we've - * run out of ATIOS. - */ - xpt_print(tptr->owner, - "no %s for lun %d from initiator %d\n", - (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" : - ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid); - rls_lun_statep(isp, tptr); - isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); - return (0); + atp = isp_get_atpd(isp, tptr, 0); + if (atp == NULL) { + goto noresrc; } + + atp->tag = aep->at_rxid; atp->state = ATPD_STATE_ATIO; SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); tptr->atio_count--; - isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d", - lun, tptr->atio_count); + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, atiop->ccb_h.path, "Take FREE ATIO count now %d\n", tptr->atio_count); + atiop->ccb_h.target_id = FCPARAM(isp, 0)->isp_loopid; + atiop->ccb_h.target_lun = lun; - if (tptr == &isp->isp_osinfo.tsdflt[0]) { - atiop->ccb_h.target_id = FCPARAM(isp)->isp_loopid; - atiop->ccb_h.target_lun = lun; - } /* * We don't get 'suggested' sense data as we do with SCSI cards. */ atiop->sense_len = 0; + if (ISP_CAP_2KLOGIN(isp)) { + /* + * NB: We could not possibly have 2K logins if we + * NB: also did not have SCC FW. 
+ */ + atiop->init_id = ((at2e_entry_t *)aep)->at_iid; + } else { + atiop->init_id = aep->at_iid; + } - atiop->init_id = aep->at_iid; + /* + * If we're not in the port database, add ourselves. + */ + if (!IS_2100(isp) && isp_find_pdb_by_loopid(isp, 0, atiop->init_id, &lp) == 0) { + uint64_t iid = + (((uint64_t) aep->at_wwpn[0]) << 48) | + (((uint64_t) aep->at_wwpn[1]) << 32) | + (((uint64_t) aep->at_wwpn[2]) << 16) | + (((uint64_t) aep->at_wwpn[3]) << 0); + /* + * However, make sure we delete ourselves if otherwise + * we were there but at a different loop id. + */ + if (isp_find_pdb_by_wwn(isp, 0, iid, &lp)) { + isp_del_wwn_entry(isp, 0, iid, lp->handle, lp->portid); + } + isp_add_wwn_entry(isp, 0, iid, atiop->init_id, PORT_ANY); + } atiop->cdb_len = ATIO2_CDBLEN; - MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); + ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); atiop->ccb_h.status = CAM_CDB_RECVD; - atiop->tag_id = aep->at_rxid; + atiop->tag_id = atp->tag; switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { case ATIO2_TC_ATTR_SIMPLEQ: + atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_SIMPLE_Q_TAG; break; - case ATIO2_TC_ATTR_HEADOFQ: + case ATIO2_TC_ATTR_HEADOFQ: + atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_HEAD_OF_Q_TAG; break; - case ATIO2_TC_ATTR_ORDERED: + case ATIO2_TC_ATTR_ORDERED: + atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_ORDERED_Q_TAG; break; - case ATIO2_TC_ATTR_ACAQ: /* ?? */ + case ATIO2_TC_ATTR_ACAQ: /* ?? 
*/ case ATIO2_TC_ATTR_UNTAGGED: default: atiop->tag_action = 0; break; } - atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; - atp->tag = atiop->tag_id; - atp->lun = lun; atp->orig_datalen = aep->at_datalen; - atp->last_xframt = 0; atp->bytes_xfered = 0; + atp->last_xframt = 0; + atp->lun = lun; + atp->nphdl = atiop->init_id; + atp->sid = PORT_ANY; + atp->oxid = aep->at_oxid; + atp->cdb0 = aep->at_cdb[0]; + atp->tattr = aep->at_taskflags & ATIO2_TC_ATTR_MASK; atp->state = ATPD_STATE_CAM; - xpt_done((union ccb*)atiop); + xpt_done((union ccb *)atiop); + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, tptr->owner, "ATIO2[%x] CDB=0x%x lun %d datalen %u\n", aep->at_rxid, atp->cdb0, lun, atp->orig_datalen); + rls_lun_statep(isp, tptr); + return; +noresrc: + if (atp) { + isp_put_atpd(isp, tptr, atp); + } + ntp = isp_get_ntpd(isp, tptr); + if (ntp == NULL) { + rls_lun_statep(isp, tptr); + isp_endcmd(isp, aep, nphdl, 0, SCSI_STATUS_BUSY, 0); + return; + } + memcpy(ntp->rd.data, aep, QENTRY_LEN); + ntp->rd.nt.nt_hba = tptr->restart_queue; + tptr->restart_queue = ntp; + rls_lun_statep(isp, tptr); +} - isp_prt(isp, ISP_LOGTDEBUG0, - "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u", - aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid, - lun, aep->at_taskflags, aep->at_datalen); +static void +isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep) +{ + int cdbxlen; + uint16_t lun, chan, nphdl = NIL_HANDLE; + uint32_t did, sid; + uint64_t wwn = INI_NONE; + fcportdb_t *lp; + tstate_t *tptr; + struct ccb_accept_tio *atiop; + atio_private_data_t *atp = NULL; + inot_private_data_t *ntp; + + did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2]; + sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2]; + lun = (aep->at_cmnd.fcp_cmnd_lun[0] << 8) | aep->at_cmnd.fcp_cmnd_lun[1]; + + /* + * Find the N-port handle, and Virtual Port Index for this command. 
+ * + * If we can't, we're somewhat in trouble because we can't actually respond w/o that information. + * We also, as a matter of course, need to know the WWN of the initiator too. + */ + if (ISP_CAP_MULTI_ID(isp)) { + /* + * Find the right channel based upon D_ID + */ + isp_find_chan_by_did(isp, did, &chan); + + if (chan == ISP_NOCHAN) { + NANOTIME_T now; + + /* + * If we don't recognizer our own D_DID, terminate the exchange, unless we're within 2 seconds of startup + * It's a bit tricky here as we need to stash this command *somewhere*. + */ + GET_NANOTIME(&now); + if (NANOTIME_SUB(&isp->isp_init_time, &now) > 2000000000ULL) { + isp_prt(isp, ISP_LOGWARN, "%s: [RX_ID 0x%x] D_ID %x not found on any channel- dropping", __func__, aep->at_rxid, did); + isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN, ECMD_TERMINATE, 0); + return; + } + tptr = get_lun_statep(isp, 0, 0); + if (tptr == NULL) { + tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); + if (tptr == NULL) { + isp_prt(isp, ISP_LOGWARN, "%s: [RX_ID 0x%x] D_ID %x not found on any channel and no tptr- dropping", __func__, aep->at_rxid, did); + isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN, ECMD_TERMINATE, 0); + return; + } + } + isp_prt(isp, ISP_LOGWARN, "%s: [RX_ID 0x%x] D_ID %x not found on any channel- deferring", __func__, aep->at_rxid, did); + goto noresrc; + } + isp_prt(isp, ISP_LOGTDEBUG0, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x", __func__, aep->at_rxid, did, chan, sid); + } else { + chan = 0; + } + + /* + * Find the PDB entry for this initiator + */ + if (isp_find_pdb_by_sid(isp, chan, sid, &lp) == 0) { + /* + * If we're not in the port database terminate the exchange. 
+ */ + isp_prt(isp, ISP_LOGTINFO, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x wasn't in PDB already", + __func__, aep->at_rxid, did, chan, sid); + isp_endcmd(isp, aep, NIL_HANDLE, chan, ECMD_TERMINATE, 0); + return; + } + nphdl = lp->handle; + wwn = lp->port_wwn; + + /* + * Get the tstate pointer + */ + tptr = get_lun_statep(isp, chan, lun); + if (tptr == NULL) { + tptr = get_lun_statep(isp, chan, CAM_LUN_WILDCARD); + if (tptr == NULL) { + isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] no state pointer for lun %d or wildcard", aep->at_rxid, lun); + isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0); + return; + } + } + + /* + * Start any commands pending resources first. + */ + if (tptr->restart_queue) { + inot_private_data_t *restart_queue = tptr->restart_queue; + tptr->restart_queue = NULL; + while (restart_queue) { + ntp = restart_queue; + restart_queue = ntp->rd.nt.nt_hba; + isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at7_entry_t *)ntp->rd.data)->at_rxid); + isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->rd.data); + isp_put_ntpd(isp, tptr, ntp); + /* + * If a recursion caused the restart queue to start to fill again, + * stop and splice the new list on top of the old list and restore + * it and go to noresrc. + */ + if (tptr->restart_queue) { + if (restart_queue) { + ntp = tptr->restart_queue; + tptr->restart_queue = restart_queue; + while (restart_queue->rd.nt.nt_hba) { + restart_queue = restart_queue->rd.nt.nt_hba; + } + restart_queue->rd.nt.nt_hba = ntp; + } + goto noresrc; + } + } + } + + /* + * If the f/w is out of resources, just send a BUSY status back. + */ + if (aep->at_rxid == AT7_NORESRC_RXID) { + rls_lun_statep(isp, tptr); + isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0); + return; + } + + /* + * If we're out of resources, just send a BUSY status back. 
+ */ + atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); + if (atiop == NULL) { + isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atios", aep->at_rxid); + goto noresrc; + } + + atp = isp_get_atpd(isp, tptr, 0); + if (atp == NULL) { + isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atps", aep->at_rxid); + goto noresrc; + } + if (isp_get_atpd(isp, tptr, aep->at_rxid)) { + isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] tag wraparound in isp_handle_platforms_atio7 (N-Port Handle 0x%04x S_ID 0x%04x OX_ID 0x%04x)\n", + aep->at_rxid, nphdl, sid, aep->at_hdr.ox_id); + /* + * It's not a "no resource" condition- but we can treat it like one + */ + goto noresrc; + } + + atp->tag = aep->at_rxid; + atp->state = ATPD_STATE_ATIO; + SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); + tptr->atio_count--; + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, atiop->ccb_h.path, "Take FREE ATIO count now %d\n", tptr->atio_count); + atiop->init_id = nphdl; + atiop->ccb_h.target_id = FCPARAM(isp, chan)->isp_loopid; + atiop->ccb_h.target_lun = lun; + atiop->sense_len = 0; + cdbxlen = aep->at_cmnd.fcp_cmnd_alen_datadir >> FCP_CMND_ADDTL_CDBLEN_SHIFT; + if (cdbxlen) { + isp_prt(isp, ISP_LOGWARN, "additional CDBLEN ignored"); + } + cdbxlen = sizeof (aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb); + ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb, cdbxlen); + atiop->cdb_len = cdbxlen; + atiop->ccb_h.status = CAM_CDB_RECVD; + atiop->tag_id = atp->tag; + switch (aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK) { + case FCP_CMND_TASK_ATTR_SIMPLE: + atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; + atiop->tag_action = MSG_SIMPLE_Q_TAG; + break; + case FCP_CMND_TASK_ATTR_HEAD: + atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; + atiop->tag_action = MSG_HEAD_OF_Q_TAG; + break; + case FCP_CMND_TASK_ATTR_ORDERED: + atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; + atiop->tag_action = MSG_ORDERED_Q_TAG; + break; + default: + /* FALLTHROUGH */ + case FCP_CMND_TASK_ATTR_ACA: + case FCP_CMND_TASK_ATTR_UNTAGGED: + 
atiop->tag_action = 0; + break; + } + atp->orig_datalen = aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl; + atp->bytes_xfered = 0; + atp->last_xframt = 0; + atp->lun = lun; + atp->nphdl = nphdl; + atp->portid = sid; + atp->oxid = aep->at_hdr.ox_id; + atp->cdb0 = atiop->cdb_io.cdb_bytes[0]; + atp->tattr = aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK; + atp->state = ATPD_STATE_CAM; + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, tptr->owner, "ATIO7[%x] CDB=0x%x lun %d datalen %u\n", aep->at_rxid, atp->cdb0, lun, atp->orig_datalen); + xpt_done((union ccb *)atiop); rls_lun_statep(isp, tptr); - return (0); + return; +noresrc: + if (atp) { + isp_put_atpd(isp, tptr, atp); + } + ntp = isp_get_ntpd(isp, tptr); + if (ntp == NULL) { + rls_lun_statep(isp, tptr); + isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0); + return; + } + memcpy(ntp->rd.data, aep, QENTRY_LEN); + ntp->rd.nt.nt_hba = tptr->restart_queue; + tptr->restart_queue = ntp; + rls_lun_statep(isp, tptr); } -static int +static void isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) { union ccb *ccb; int sentstatus, ok, notify_cam, resid = 0; - uint16_t tval; + tstate_t *tptr = NULL; + atio_private_data_t *atp = NULL; + int bus; + uint32_t tval, handle; /* - * CTIO and CTIO2 are close enough.... + * CTIO, CTIO2 and CTIO7 are close enough.... 
*/ - ccb = isp_find_xs_tgt(isp, ((ct_entry_t *)arg)->ct_syshandle); - KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio")); - isp_destroy_tgt_handle(isp, ((ct_entry_t *)arg)->ct_syshandle); + if (IS_SCSI(isp)) { + handle = ((ct_entry_t *)arg)->ct_syshandle; + } else { + handle = ((ct2_entry_t *)arg)->ct_syshandle; + } + ccb = isp_find_xs_tgt(isp, handle); + if (ccb == NULL) { + isp_print_bytes(isp, "null ccb in isp_handle_platform_ctio", QENTRY_LEN, arg); + return; + } + isp_destroy_tgt_handle(isp, handle); + bus = XS_CHANNEL(ccb); + tptr = get_lun_statep(isp, bus, XS_LUN(ccb)); + if (tptr == NULL) { + tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); + } + KASSERT((tptr != NULL), ("cannot get state pointer")); + if (isp->isp_nactive) { + isp->isp_nactive++; + } + if (IS_24XX(isp)) { + ct7_entry_t *ct = arg; - if (IS_FC(isp)) { + atp = isp_get_atpd(isp, tptr, ct->ct_rxid); + if (atp == NULL) { + rls_lun_statep(isp, tptr); + isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x after I/O", __func__, ct->ct_rxid); + return; + } + + sentstatus = ct->ct_flags & CT7_SENDSTATUS; + ok = (ct->ct_nphdl == CT7_OK); + if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) { + ccb->ccb_h.status |= CAM_SENT_SENSE; + } + notify_cam = ct->ct_header.rqs_seqno & 0x1; + if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA) { + resid = ct->ct_resid; + atp->bytes_xfered += (atp->last_xframt - resid); + atp->last_xframt = 0; + } + if (ct->ct_nphdl == CT_HBA_RESET) { + ok = 0; + notify_cam = 1; + sentstatus = 1; + ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR; + } else if (!ok) { + ccb->ccb_h.status |= CAM_REQ_CMP_ERR; + } + tval = atp->tag; + isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO7[%x] sts 0x%x flg 0x%x sns %d resid %d %s", __func__, + ct->ct_rxid, ct->ct_nphdl, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? 
"FIN" : "MID"); + atp->state = ATPD_STATE_PDON; /* XXX: should really come after isp_complete_ctio */ + } else if (IS_FC(isp)) { ct2_entry_t *ct = arg; - atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid); + + atp = isp_get_atpd(isp, tptr, ct->ct_rxid); if (atp == NULL) { - isp_prt(isp, ISP_LOGERR, - "cannot find adjunct for %x after I/O", - ct->ct_rxid); - return (0); + rls_lun_statep(isp, tptr); + isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x after I/O", __func__, ct->ct_rxid); + return; } sentstatus = ct->ct_flags & CT2_SENDSTATUS; ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) { ccb->ccb_h.status |= CAM_SENT_SENSE; } notify_cam = ct->ct_header.rqs_seqno & 0x1; if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) { resid = ct->ct_resid; atp->bytes_xfered += (atp->last_xframt - resid); atp->last_xframt = 0; } - if (sentstatus || !ok) { - atp->tag = 0; + if (ct->ct_status == CT_HBA_RESET) { + ok = 0; + notify_cam = 1; + sentstatus = 1; + ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR; + } else if (!ok) { + ccb->ccb_h.status |= CAM_REQ_CMP_ERR; } - isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, - "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s", - ct->ct_rxid, ct->ct_status, ct->ct_flags, - (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, - resid, sentstatus? "FIN" : "MID"); - tval = ct->ct_rxid; - - /* XXX: should really come after isp_complete_ctio */ - atp->state = ATPD_STATE_PDON; + isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s", __func__, + ct->ct_rxid, ct->ct_status, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? 
"FIN" : "MID"); + tval = atp->tag; + atp->state = ATPD_STATE_PDON; /* XXX: should really come after isp_complete_ctio */ } else { ct_entry_t *ct = arg; sentstatus = ct->ct_flags & CT_SENDSTATUS; ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; /* * We *ought* to be able to get back to the original ATIO * here, but for some reason this gets lost. It's just as * well because it's squirrelled away as part of periph * private data. * * We can live without it as long as we continue to use * the auto-replenish feature for CTIOs. */ notify_cam = ct->ct_header.rqs_seqno & 0x1; - if (ct->ct_status & QLTM_SVALID) { + if (ct->ct_status == (CT_HBA_RESET & 0xff)) { + ok = 0; + notify_cam = 1; + sentstatus = 1; + ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR; + } else if (!ok) { + ccb->ccb_h.status |= CAM_REQ_CMP_ERR; + } else if (ct->ct_status & QLTM_SVALID) { char *sp = (char *)ct; sp += CTIO_SENSE_OFFSET; - ccb->csio.sense_len = - min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN); - MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len); + ccb->csio.sense_len = min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN); + ISP_MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len); ccb->ccb_h.status |= CAM_AUTOSNS_VALID; } if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { resid = ct->ct_resid; } - isp_prt(isp, ISP_LOGTDEBUG0, - "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s", - ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun, - ct->ct_status, ct->ct_flags, resid, - sentstatus? "FIN" : "MID"); + isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO[%x] tag %x S_ID 0x%x lun %d sts %x flg %x resid %d %s", __func__, + ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun, ct->ct_status, ct->ct_flags, resid, sentstatus? "FIN" : "MID"); tval = ct->ct_fwhandle; } ccb->csio.resid += resid; /* * We're here either because intermediate data transfers are done * and/or the final status CTIO (which may have joined with a * Data Transfer) is done. 
* * In any case, for this platform, the upper layers figure out * what to do next, so all we do here is collect status and * pass information along. Any DMA handles have already been * freed. */ if (notify_cam == 0) { isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval); - return (0); + return; } + if (tptr) { + rls_lun_statep(isp, tptr); + } + isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done", (sentstatus)? " FINAL " : "MIDTERM ", tval); - isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done", - (sentstatus)? " FINAL " : "MIDTERM ", tval); - - if (!ok) { + if (!ok && !IS_24XX(isp)) { isp_target_putback_atio(ccb); } else { isp_complete_ctio(ccb); - } - return (0); } -static int -isp_handle_platform_notify_scsi(ispsoftc_t *isp, in_entry_t *inp) +static void +isp_handle_platform_notify_scsi(ispsoftc_t *isp, in_entry_t *inot) { - return (0); /* XXXX */ + (void) isp_notify_ack(isp, inot); } -static int +static void isp_handle_platform_notify_fc(ispsoftc_t *isp, in_fcentry_t *inp) { - + int needack = 1; switch (inp->in_status) { case IN_PORT_LOGOUT: - isp_prt(isp, ISP_LOGWARN, "port logout of iid %d", - inp->in_iid); + /* + * XXX: Need to delete this initiator's WWN from the database + * XXX: Need to send this LOGOUT upstream + */ + isp_prt(isp, ISP_LOGWARN, "port logout of S_ID 0x%x", inp->in_iid); break; case IN_PORT_CHANGED: - isp_prt(isp, ISP_LOGWARN, "port changed for iid %d", - inp->in_iid); + isp_prt(isp, ISP_LOGWARN, "port changed for S_ID 0x%x", inp->in_iid); break; case IN_GLOBAL_LOGO: + isp_del_all_wwn_entries(isp, 0); isp_prt(isp, ISP_LOGINFO, "all ports logged out"); break; case IN_ABORT_TASK: { - atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid); - struct ccb_immed_notify *inot = NULL; + tstate_t *tptr; + uint16_t lun; + uint32_t loopid; + uint64_t wwn; + atio_private_data_t *atp; + fcportdb_t *lp; + struct ccb_immediate_notify *inot = NULL; + if (ISP_CAP_SCCFW(isp)) { + lun = inp->in_scclun; + } else { + lun = inp->in_lun; + } + if 
(ISP_CAP_2KLOGIN(isp)) { + loopid = ((in_fcentry_e_t *)inot)->in_iid; + } else { + loopid = inp->in_iid; + } + if (isp_find_pdb_by_loopid(isp, 0, loopid, &lp)) { + wwn = lp->port_wwn; + } else { + wwn = INI_ANY; + } + tptr = get_lun_statep(isp, 0, lun); + if (tptr == NULL) { + tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); + if (tptr == NULL) { + isp_prt(isp, ISP_LOGWARN, "ABORT TASK for lun %u- but no tstate", lun); + return; + } + } + atp = isp_get_atpd(isp, tptr, inp->in_seqid); + if (atp) { - tstate_t *tptr = get_lun_statep(isp, 0, atp->lun); - if (tptr) { - inot = (struct ccb_immed_notify *) - SLIST_FIRST(&tptr->inots); - if (inot) { - tptr->inot_count--; - SLIST_REMOVE_HEAD(&tptr->inots, - sim_links.sle); - isp_prt(isp, ISP_LOGTDEBUG0, - "Take FREE INOT count now %d", - tptr->inot_count); - } + inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots); + isp_prt(isp, ISP_LOGTDEBUG0, "ABORT TASK RX_ID %x WWN 0x%016llx state %d", inp->in_seqid, (unsigned long long) wwn, atp->state); + if (inot) { + tptr->inot_count--; + SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle); + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, inot->ccb_h.path, "%s: Take FREE INOT count now %d\n", __func__, tptr->inot_count); + } else { + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, tptr->owner, "out of INOT structures\n"); } - isp_prt(isp, ISP_LOGWARN, - "abort task RX_ID %x IID %d state %d", - inp->in_seqid, inp->in_iid, atp->state); } else { - isp_prt(isp, ISP_LOGWARN, - "abort task RX_ID %x from iid %d, state unknown", - inp->in_seqid, inp->in_iid); + ISP_PATH_PRT(isp, ISP_LOGWARN, tptr->owner, "abort task RX_ID %x from wwn 0x%016llx, state unknown\n", inp->in_seqid, wwn); } if (inot) { - inot->initiator_id = inp->in_iid; - inot->sense_len = 0; - inot->message_args[0] = MSG_ABORT_TAG; - inot->message_args[1] = inp->in_seqid & 0xff; - inot->message_args[2] = (inp->in_seqid >> 8) & 0xff; - inot->ccb_h.status = CAM_MESSAGE_RECV; - xpt_done((union ccb *)inot); + isp_notify_t tmp, *nt = &tmp; + 
ISP_MEMZERO(nt, sizeof (isp_notify_t)); + nt->nt_hba = isp; + nt->nt_tgt = FCPARAM(isp, 0)->isp_wwpn; + nt->nt_wwn = wwn; + nt->nt_nphdl = loopid; + nt->nt_sid = PORT_ANY; + nt->nt_did = PORT_ANY; + nt->nt_lun = lun; + nt->nt_need_ack = 1; + nt->nt_channel = 0; + nt->nt_ncode = NT_ABORT_TASK; + nt->nt_lreserved = inot; + isp_handle_platform_target_tmf(isp, nt); + needack = 0; } + rls_lun_statep(isp, tptr); break; } default: break; } + if (needack) { + (void) isp_notify_ack(isp, inp); + } +} + +static void +isp_handle_platform_notify_24xx(ispsoftc_t *isp, in_fcentry_24xx_t *inot) +{ + uint16_t nphdl; + uint32_t portid; + fcportdb_t *lp; + uint8_t *ptr = NULL; + uint64_t wwn; + + nphdl = inot->in_nphdl; + if (nphdl != NIL_HANDLE) { + portid = inot->in_portid_hi << 16 | inot->in_portid_lo; + } else { + portid = PORT_ANY; + } + + switch (inot->in_status) { + case IN24XX_ELS_RCVD: + { + char buf[16], *msg; + int chan = ISP_GET_VPIDX(isp, inot->in_vpidx); + + /* + * Note that we're just getting notification that an ELS was received + * (possibly with some associcated information sent upstream). This is + * *not* the same as being given the ELS frame to accept or reject. + */ + switch (inot->in_status_subcode) { + case LOGO: + msg = "LOGO"; + if (ISP_FW_NEWER_THAN(isp, 4, 0, 25)) { + ptr = (uint8_t *)inot; /* point to unswizzled entry! 
*/ + wwn = (((uint64_t) ptr[IN24XX_LOGO_WWPN_OFF]) << 56) | + (((uint64_t) ptr[IN24XX_LOGO_WWPN_OFF+1]) << 48) | + (((uint64_t) ptr[IN24XX_LOGO_WWPN_OFF+2]) << 40) | + (((uint64_t) ptr[IN24XX_LOGO_WWPN_OFF+3]) << 32) | + (((uint64_t) ptr[IN24XX_LOGO_WWPN_OFF+4]) << 24) | + (((uint64_t) ptr[IN24XX_LOGO_WWPN_OFF+5]) << 16) | + (((uint64_t) ptr[IN24XX_LOGO_WWPN_OFF+6]) << 8) | + (((uint64_t) ptr[IN24XX_LOGO_WWPN_OFF+7])); + } else { + wwn = INI_ANY; + } + isp_del_wwn_entry(isp, chan, wwn, nphdl, portid); + break; + case PRLO: + msg = "PRLO"; + break; + case PLOGI: + msg = "PLOGI"; + if (ISP_FW_NEWER_THAN(isp, 4, 0, 25)) { + ptr = (uint8_t *)inot; /* point to unswizzled entry! */ + wwn = (((uint64_t) ptr[IN24XX_PLOGI_WWPN_OFF]) << 56) | + (((uint64_t) ptr[IN24XX_PLOGI_WWPN_OFF+1]) << 48) | + (((uint64_t) ptr[IN24XX_PLOGI_WWPN_OFF+2]) << 40) | + (((uint64_t) ptr[IN24XX_PLOGI_WWPN_OFF+3]) << 32) | + (((uint64_t) ptr[IN24XX_PLOGI_WWPN_OFF+4]) << 24) | + (((uint64_t) ptr[IN24XX_PLOGI_WWPN_OFF+5]) << 16) | + (((uint64_t) ptr[IN24XX_PLOGI_WWPN_OFF+6]) << 8) | + (((uint64_t) ptr[IN24XX_PLOGI_WWPN_OFF+7])); + } else { + wwn = INI_NONE; + } + isp_add_wwn_entry(isp, chan, wwn, nphdl, portid); + break; + case PRLI: + msg = "PRLI"; + break; + case PDISC: + msg = "PDISC"; + break; + case ADISC: + msg = "ADISC"; + break; + default: + ISP_SNPRINTF(buf, sizeof (buf), "ELS 0x%x", inot->in_status_subcode); + msg = buf; + break; + } + if (inot->in_flags & IN24XX_FLAG_PUREX_IOCB) { + isp_prt(isp, ISP_LOGERR, "%s Chan %d ELS N-port handle %x PortID 0x%06x marked as needing a PUREX response", msg, chan, nphdl, portid); + break; + } + isp_prt(isp, ISP_LOGTDEBUG0, "%s Chan %d ELS N-port handle %x PortID 0x%06x RX_ID 0x%x OX_ID 0x%x", msg, chan, nphdl, portid, + inot->in_rxid, inot->in_oxid); + (void) isp_notify_ack(isp, inot); + break; + } + + case IN24XX_PORT_LOGOUT: + ptr = "PORT LOGOUT"; + if (isp_find_pdb_by_loopid(isp, ISP_GET_VPIDX(isp, inot->in_vpidx), nphdl, &lp)) { + 
isp_del_wwn_entry(isp, ISP_GET_VPIDX(isp, inot->in_vpidx), lp->port_wwn, nphdl, lp->portid); + } + /* FALLTHROUGH */ + case IN24XX_PORT_CHANGED: + if (ptr == NULL) { + ptr = "PORT CHANGED"; + } + /* FALLTHROUGH */ + case IN24XX_LIP_RESET: + if (ptr == NULL) { + ptr = "LIP RESET"; + } + isp_prt(isp, ISP_LOGINFO, "Chan %d %s (sub-status 0x%x) for N-port handle 0x%x", ISP_GET_VPIDX(isp, inot->in_vpidx), ptr, inot->in_status_subcode, nphdl); + + /* + * All subcodes here are irrelevant. What is relevant + * is that we need to terminate all active commands from + * this initiator (known by N-port handle). + */ + /* XXX IMPLEMENT XXX */ + (void) isp_notify_ack(isp, inot); + break; + + case IN24XX_LINK_RESET: + case IN24XX_LINK_FAILED: + case IN24XX_SRR_RCVD: + default: + (void) isp_notify_ack(isp, inot); + break; + } +} + +static int +isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp) +{ + + if (isp->isp_state != ISP_RUNSTATE) { + isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) acked- h/w not ready (dropping)", mp->nt_ncode, mp->nt_lreserved != NULL); + return (0); + } + + /* + * This case is for a Task Management Function, which shows up as an ATIO7 entry. 
+ */ + if (IS_24XX(isp) && mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ATIO) { + ct7_entry_t local, *cto = &local; + at7_entry_t *aep = (at7_entry_t *)mp->nt_lreserved; + fcportdb_t *lp; + uint32_t sid; + uint16_t nphdl; + + sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2]; + if (isp_find_pdb_by_sid(isp, mp->nt_channel, sid, &lp)) { + nphdl = lp->handle; + } else { + nphdl = NIL_HANDLE; + } + ISP_MEMZERO(&local, sizeof (local)); + cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; + cto->ct_header.rqs_entry_count = 1; + cto->ct_nphdl = nphdl; + cto->ct_rxid = aep->at_rxid; + cto->ct_vpidx = mp->nt_channel; + cto->ct_iid_lo = sid; + cto->ct_iid_hi = sid >> 16; + cto->ct_oxid = aep->at_hdr.ox_id; + cto->ct_flags = CT7_SENDSTATUS|CT7_NOACK|CT7_NO_DATA|CT7_FLAG_MODE1; + cto->ct_flags |= (aep->at_ta_len >> 12) << CT7_TASK_ATTR_SHIFT; + return (isp_target_put_entry(isp, &local)); + } + + /* + * This case is for a responding to an ABTS frame + */ + if (IS_24XX(isp) && mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) { + + /* + * Overload nt_need_ack here to mark whether we've terminated the associated command. 
+ */ + if (mp->nt_need_ack) { + uint8_t storage[QENTRY_LEN]; + ct7_entry_t *cto = (ct7_entry_t *) storage; + abts_t *abts = (abts_t *)mp->nt_lreserved; + + ISP_MEMZERO(cto, sizeof (ct7_entry_t)); + isp_prt(isp, ISP_LOGTDEBUG0, "%s: [%x] terminating after ABTS received", __func__, abts->abts_rxid_task); + cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; + cto->ct_header.rqs_entry_count = 1; + cto->ct_nphdl = mp->nt_nphdl; + cto->ct_rxid = abts->abts_rxid_task; + cto->ct_iid_lo = mp->nt_sid; + cto->ct_iid_hi = mp->nt_sid >> 16; + cto->ct_oxid = abts->abts_ox_id; + cto->ct_vpidx = mp->nt_channel; + cto->ct_flags = CT7_NOACK|CT7_TERMINATE; + if (isp_target_put_entry(isp, cto)) { + return (ENOMEM); + } + mp->nt_need_ack = 0; + } + if (isp_acknak_abts(isp, mp->nt_lreserved, 0) == ENOMEM) { + return (ENOMEM); + } else { + return (0); + } + } + + /* + * Handle logout cases here + */ + if (mp->nt_ncode == NT_GLOBAL_LOGOUT) { + isp_del_all_wwn_entries(isp, mp->nt_channel); + } + + if (mp->nt_ncode == NT_LOGOUT) { + if (!IS_2100(isp) && IS_FC(isp)) { + isp_del_wwn_entries(isp, mp); + } + } + + /* + * General purpose acknowledgement + */ + if (mp->nt_need_ack) { + isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) being acked", mp->nt_ncode, mp->nt_lreserved != NULL); + return (isp_notify_ack(isp, mp->nt_lreserved)); + } return (0); } + +/* + * Handle task managment functions. + * + * We show up here with a notify structure filled out. 
+ * + * The nt_lreserved tag points to the original queue entry + */ +static void +isp_handle_platform_target_tmf(ispsoftc_t *isp, isp_notify_t *notify) +{ + tstate_t *tptr; + fcportdb_t *lp; + struct ccb_immediate_notify *inot; + inot_private_data_t *ntp = NULL; + lun_id_t lun; + + isp_prt(isp, ISP_LOGTDEBUG0, "%s: code 0x%x sid 0x%x tagval 0x%016llx chan %d lun 0x%x", __func__, notify->nt_ncode, + notify->nt_sid, (unsigned long long) notify->nt_tagval, notify->nt_channel, notify->nt_lun); + /* + * NB: This assignment is necessary because of tricky type conversion. + * XXX: This is tricky and I need to check this. If the lun isn't known + * XXX: for the task management function, it does not of necessity follow + * XXX: that it should go up stream to the wildcard listener. + */ + if (notify->nt_lun == LUN_ANY) { + lun = CAM_LUN_WILDCARD; + } else { + lun = notify->nt_lun; + } + tptr = get_lun_statep(isp, notify->nt_channel, lun); + if (tptr == NULL) { + tptr = get_lun_statep(isp, notify->nt_channel, CAM_LUN_WILDCARD); + if (tptr == NULL) { + isp_prt(isp, ISP_LOGWARN, "%s: no state pointer found for chan %d lun 0x%x", __func__, notify->nt_channel, lun); + goto bad; + } + } + inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots); + if (inot == NULL) { + isp_prt(isp, ISP_LOGWARN, "%s: out of immediate notify structures for chan %d lun 0x%x", __func__, notify->nt_channel, lun); + goto bad; + } + + if (isp_find_pdb_by_sid(isp, notify->nt_channel, notify->nt_sid, &lp) == 0) { + inot->initiator_id = CAM_TARGET_WILDCARD; + } else { + inot->initiator_id = lp->handle; + } + inot->seq_id = notify->nt_tagval; + inot->tag_id = notify->nt_tagval >> 32; + + switch (notify->nt_ncode) { + case NT_ABORT_TASK: + isp_target_mark_aborted_early(isp, tptr, inot->tag_id); + inot->arg = MSG_ABORT_TASK; + break; + case NT_ABORT_TASK_SET: + isp_target_mark_aborted_early(isp, tptr, TAG_ANY); + inot->arg = MSG_ABORT_TASK_SET; + break; + case NT_CLEAR_ACA: + inot->arg = 
MSG_CLEAR_ACA; + break; + case NT_CLEAR_TASK_SET: + inot->arg = MSG_CLEAR_TASK_SET; + break; + case NT_LUN_RESET: + inot->arg = MSG_LOGICAL_UNIT_RESET; + break; + case NT_TARGET_RESET: + inot->arg = MSG_TARGET_RESET; + break; + default: + isp_prt(isp, ISP_LOGWARN, "%s: unknown TMF code 0x%x for chan %d lun 0x%x", __func__, notify->nt_ncode, notify->nt_channel, lun); + goto bad; + } + + ntp = isp_get_ntpd(isp, tptr); + if (ntp == NULL) { + isp_prt(isp, ISP_LOGWARN, "%s: out of inotify private structures", __func__); + goto bad; + } + ISP_MEMCPY(&ntp->rd.nt, notify, sizeof (isp_notify_t)); + if (notify->nt_lreserved) { + ISP_MEMCPY(&ntp->rd.data, notify->nt_lreserved, QENTRY_LEN); + ntp->rd.nt.nt_lreserved = &ntp->rd.data; + } + ntp->rd.seq_id = notify->nt_tagval; + ntp->rd.tag_id = notify->nt_tagval >> 32; + + tptr->inot_count--; + SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle); + rls_lun_statep(isp, tptr); + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, inot->ccb_h.path, "%s: Take FREE INOT count now %d\n", __func__, tptr->inot_count); + inot->ccb_h.status = CAM_MESSAGE_RECV; + xpt_done((union ccb *)inot); + return; +bad: + if (tptr) { + rls_lun_statep(isp, tptr); + } + if (notify->nt_need_ack && notify->nt_lreserved) { + if (((isphdr_t *)notify->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) { + (void) isp_acknak_abts(isp, notify->nt_lreserved, ENOMEM); + } else { + (void) isp_notify_ack(isp, notify->nt_lreserved); + } + } +} + +/* + * Find the associated private data and makr it as dead so + * we don't try to work on it any further. 
+ */ +static void +isp_target_mark_aborted(ispsoftc_t *isp, union ccb *ccb) +{ + tstate_t *tptr; + atio_private_data_t *atp; + + tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb)); + if (tptr == NULL) { + tptr = get_lun_statep(isp, XS_CHANNEL(ccb), CAM_LUN_WILDCARD); + if (tptr == NULL) { + ccb->ccb_h.status = CAM_REQ_INVALID; + return; + } + } + + atp = isp_get_atpd(isp, tptr, ccb->atio.tag_id); + if (atp == NULL) { + ccb->ccb_h.status = CAM_REQ_INVALID; + return; + } + atp->dead = 1; + ccb->ccb_h.status = CAM_REQ_CMP; +} + +static void +isp_target_mark_aborted_early(ispsoftc_t *isp, tstate_t *tptr, uint32_t tag_id) +{ + atio_private_data_t *atp; + inot_private_data_t *restart_queue = tptr->restart_queue; + + /* + * First, clean any commands pending restart + */ + tptr->restart_queue = NULL; + while (restart_queue) { + uint32_t this_tag_id; + inot_private_data_t *ntp = restart_queue; + + restart_queue = ntp->rd.nt.nt_hba; + + if (IS_24XX(isp)) { + this_tag_id = ((at7_entry_t *)ntp->rd.data)->at_rxid; + } else { + this_tag_id = ((at2_entry_t *)ntp->rd.data)->at_rxid; + } + if ((uint64_t)tag_id == TAG_ANY || tag_id == this_tag_id) { + isp_put_ntpd(isp, tptr, ntp); + } else { + ntp->rd.nt.nt_hba = tptr->restart_queue; + tptr->restart_queue = ntp; + } + } + + /* + * Now mark other ones dead as well. 
+ */ + for (atp = tptr->atpool; atp < &tptr->atpool[ATPDPSIZE]; atp++) { + if ((uint64_t)tag_id == TAG_ANY || atp->tag == tag_id) { + atp->dead = 1; + } + } +} + + +#ifdef ISP_INTERNAL_TARGET +// #define ISP_FORCE_TIMEOUT 1 +#define ISP_TEST_WWNS 1 +#define ISP_TEST_SEPARATE_STATUS 1 + +#define ccb_data_offset ppriv_field0 +#define ccb_atio ppriv_ptr1 +#define ccb_inot ppriv_ptr1 + +#define MAX_ISP_TARG_TRANSFER (2 << 20) +#define NISP_TARG_CMDS 1024 +#define NISP_TARG_NOTIFIES 1024 +#define DISK_SHIFT 9 +#define JUNK_SIZE 256 + +#ifndef VERIFY_10 +#define VERIFY_10 0x2f #endif +TAILQ_HEAD(ccb_queue, ccb_hdr); +extern u_int vm_kmem_size; +static int ca; +static uint32_t disk_size; +static uint8_t *disk_data = NULL; +static uint8_t *junk_data; +static MALLOC_DEFINE(M_ISPTARG, "ISPTARG", "ISP TARGET data"); +struct isptarg_softc { + /* CCBs (CTIOs, ATIOs, INOTs) pending on the controller */ + struct ccb_queue work_queue; + struct ccb_queue rework_queue; + struct ccb_queue running_queue; + struct ccb_queue inot_queue; + struct cam_periph *periph; + struct cam_path *path; + ispsoftc_t *isp; +}; +static periph_ctor_t isptargctor; +static periph_dtor_t isptargdtor; +static periph_start_t isptargstart; +static periph_init_t isptarginit; +static void isptarg_done(struct cam_periph *, union ccb *); +static void isptargasync(void *, u_int32_t, struct cam_path *, void *); + + +static int isptarg_rwparm(uint8_t *, uint8_t *, uint64_t, uint32_t, uint8_t **, uint32_t *, int *); + +static struct periph_driver isptargdriver = +{ + isptarginit, "isptarg", TAILQ_HEAD_INITIALIZER(isptargdriver.units), /* generation */ 0 +}; + static void +isptarginit(void) +{ +} + +static void +isptargnotify(ispsoftc_t *isp, union ccb *iccb, struct ccb_immediate_notify *inot) +{ + struct ccb_notify_acknowledge *ack = &iccb->cna2; + + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, inot->ccb_h.path, "%s: [0x%x] immediate notify for 0x%x from 0x%x status 0x%x arg 0x%x\n", __func__, + inot->tag_id, 
inot->initiator_id, inot->seq_id, inot->ccb_h.status, inot->arg); + ack->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; + ack->ccb_h.flags = 0; + ack->ccb_h.retry_count = 0; + ack->ccb_h.cbfcnp = isptarg_done; + ack->ccb_h.timeout = 0; + ack->ccb_h.ccb_inot = inot; + ack->tag_id = inot->tag_id; + ack->seq_id = inot->seq_id; + ack->initiator_id = inot->initiator_id; + xpt_action(iccb); +} + +static void +isptargstart(struct cam_periph *periph, union ccb *iccb) +{ + const uint8_t niliqd[SHORT_INQUIRY_LENGTH] = { 0x7f }; + const uint8_t iqd[SHORT_INQUIRY_LENGTH] = { + 0, 0x0, 0x2, 0x2, 32, 0, 0, 0x32, + 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', + 'S', 'C', 'S', 'I', ' ', 'M', 'E', 'M', + 'O', 'R', 'Y', ' ', 'D', 'I', 'S', 'K', + '0', '0', '0', '1' + }; + int i, more = 0, last; + struct isptarg_softc *softc = periph->softc; + struct ccb_scsiio *csio; + lun_id_t return_lun; + struct ccb_accept_tio *atio; + uint8_t *cdb, *ptr, status; + uint8_t *data_ptr; + uint32_t data_len, flags; + struct ccb_hdr *ccbh; + + mtx_assert(periph->sim->mtx, MA_OWNED); + ISP_PATH_PRT(softc->isp, ISP_LOGTDEBUG0, iccb->ccb_h.path, "%s: function code 0x%x INOTQ=%c WORKQ=%c REWORKQ=%c\n", __func__, iccb->ccb_h.func_code, + TAILQ_FIRST(&softc->inot_queue)? 'y' : 'n', TAILQ_FIRST(&softc->work_queue)? 'y' : 'n', TAILQ_FIRST(&softc->rework_queue)? 'y' : 'n'); + /* + * Check for immediate notifies first + */ + ccbh = TAILQ_FIRST(&softc->inot_queue); + if (ccbh) { + TAILQ_REMOVE(&softc->inot_queue, ccbh, periph_links.tqe); + if (TAILQ_FIRST(&softc->inot_queue) || TAILQ_FIRST(&softc->work_queue) || TAILQ_FIRST(&softc->rework_queue)) { + xpt_schedule(periph, 1); + } + isptargnotify(softc->isp, iccb, (struct ccb_immediate_notify *)ccbh); + return; + } + + /* + * Check the rework (continuation) work queue first. 
+ */ + ccbh = TAILQ_FIRST(&softc->rework_queue); + if (ccbh) { + atio = (struct ccb_accept_tio *)ccbh; + TAILQ_REMOVE(&softc->rework_queue, ccbh, periph_links.tqe); + more = TAILQ_FIRST(&softc->work_queue) || TAILQ_FIRST(&softc->rework_queue); + } else { + ccbh = TAILQ_FIRST(&softc->work_queue); + if (ccbh == NULL) { + ISP_PATH_PRT(softc->isp, ISP_LOGTDEBUG0, iccb->ccb_h.path, "%s: woken up but no work?\n", __func__); + xpt_release_ccb(iccb); + return; + } + atio = (struct ccb_accept_tio *)ccbh; + TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe); + more = TAILQ_FIRST(&softc->work_queue) != NULL; + atio->ccb_h.ccb_data_offset = 0; + } + + if (atio->tag_id == 0xffffffff || atio->ccb_h.func_code != XPT_ACCEPT_TARGET_IO) { + panic("BAD ATIO"); + } + + data_ptr = NULL; + data_len = 0; + csio = &iccb->csio; + status = SCSI_STATUS_OK; + flags = CAM_SEND_STATUS; + memset(&atio->sense_data, 0, sizeof (atio->sense_data)); + cdb = atio->cdb_io.cdb_bytes; + ISP_PATH_PRT(softc->isp, ISP_LOGTDEBUG0, ccbh->path, "%s: [0x%x] processing ATIO from 0x%x CDB=0x%x data_offset=%u\n", __func__, atio->tag_id, atio->init_id, + cdb[0], atio->ccb_h.ccb_data_offset); + + return_lun = XS_LUN(atio); + if (return_lun != 0) { + xpt_print(atio->ccb_h.path, "[0x%x] Non-Zero Lun %d: cdb0=0x%x\n", atio->tag_id, return_lun, cdb[0]); + if (cdb[0] != INQUIRY && cdb[0] != REPORT_LUNS && cdb[0] != REQUEST_SENSE) { + status = SCSI_STATUS_CHECK_COND; + atio->sense_data.error_code = SSD_ERRCODE_VALID|SSD_CURRENT_ERROR|SSD_KEY_ILLEGAL_REQUEST; + atio->sense_data.add_sense_code = 0x25; + atio->sense_data.add_sense_code_qual = 0x0; + atio->sense_len = sizeof (atio->sense_data); + } + return_lun = CAM_LUN_WILDCARD; + } + + switch (cdb[0]) { + case REQUEST_SENSE: + flags |= CAM_DIR_IN; + data_len = sizeof (atio->sense_data); + junk_data[0] = SSD_ERRCODE_VALID|SSD_CURRENT_ERROR|SSD_KEY_NO_SENSE; + memset(junk_data+1, 0, data_len-1); + if (data_len > cdb[4]) { + data_len = cdb[4]; + } + if (data_len) { + 
data_ptr = junk_data; + } + break; + case READ_6: + case READ_10: + case READ_12: + case READ_16: + if (isptarg_rwparm(cdb, disk_data, disk_size, atio->ccb_h.ccb_data_offset, &data_ptr, &data_len, &last)) { + status = SCSI_STATUS_CHECK_COND; + atio->sense_data.error_code = SSD_ERRCODE_VALID|SSD_CURRENT_ERROR|SSD_KEY_UNIT_ATTENTION; + atio->sense_data.add_sense_code = 0x5; + atio->sense_data.add_sense_code_qual = 0x24; + atio->sense_len = sizeof (atio->sense_data); + } else { +#ifdef ISP_FORCE_TIMEOUT + { + static int foo; + if (foo++ == 500) { + if (more) { + xpt_schedule(periph, 1); + } + foo = 0; + return; + } + } +#endif +#ifdef ISP_TEST_SEPARATE_STATUS + if (last && data_len) { + last = 0; + } +#endif + if (last == 0) { + flags &= ~CAM_SEND_STATUS; + } + if (data_len) { + atio->ccb_h.ccb_data_offset += data_len; + flags |= CAM_DIR_IN; + } else { + flags |= CAM_DIR_NONE; + } + } + break; + case WRITE_6: + case WRITE_10: + case WRITE_12: + case WRITE_16: + if (isptarg_rwparm(cdb, disk_data, disk_size, atio->ccb_h.ccb_data_offset, &data_ptr, &data_len, &last)) { + status = SCSI_STATUS_CHECK_COND; + atio->sense_data.error_code = SSD_ERRCODE_VALID|SSD_CURRENT_ERROR|SSD_KEY_UNIT_ATTENTION; + atio->sense_data.add_sense_code = 0x5; + atio->sense_data.add_sense_code_qual = 0x24; + atio->sense_len = sizeof (atio->sense_data); + } else { +#ifdef ISP_FORCE_TIMEOUT + { + static int foo; + if (foo++ == 500) { + if (more) { + xpt_schedule(periph, 1); + } + foo = 0; + return; + } + } +#endif +#ifdef ISP_TEST_SEPARATE_STATUS + if (last && data_len) { + last = 0; + } +#endif + if (last == 0) { + flags &= ~CAM_SEND_STATUS; + } + if (data_len) { + atio->ccb_h.ccb_data_offset += data_len; + flags |= CAM_DIR_OUT; + } else { + flags |= CAM_DIR_NONE; + } + } + break; + case INQUIRY: + flags |= CAM_DIR_IN; + if (cdb[1] || cdb[2] || cdb[3]) { + status = SCSI_STATUS_CHECK_COND; + atio->sense_data.error_code = SSD_ERRCODE_VALID|SSD_CURRENT_ERROR|SSD_KEY_UNIT_ATTENTION; + 
atio->sense_data.add_sense_code = 0x5; + atio->sense_data.add_sense_code_qual = 0x20; + atio->sense_len = sizeof (atio->sense_data); + break; + } + data_len = sizeof (iqd); + if (data_len > cdb[4]) { + data_len = cdb[4]; + } + if (data_len) { + if (XS_LUN(iccb) != 0) { + memcpy(junk_data, niliqd, sizeof (iqd)); + } else { + memcpy(junk_data, iqd, sizeof (iqd)); + } + data_ptr = junk_data; + } + break; + case TEST_UNIT_READY: + flags |= CAM_DIR_NONE; + if (ca) { + ca = 0; + status = SCSI_STATUS_CHECK_COND; + atio->sense_data.error_code = SSD_ERRCODE_VALID|SSD_CURRENT_ERROR|SSD_KEY_UNIT_ATTENTION; + atio->sense_data.add_sense_code = 0x28; + atio->sense_data.add_sense_code_qual = 0x0; + atio->sense_len = sizeof (atio->sense_data); + } + break; + case SYNCHRONIZE_CACHE: + case START_STOP: + case RESERVE: + case RELEASE: + case VERIFY_10: + flags |= CAM_DIR_NONE; + break; + + case READ_CAPACITY: + flags |= CAM_DIR_IN; + if (cdb[2] || cdb[3] || cdb[4] || cdb[5]) { + status = SCSI_STATUS_CHECK_COND; + atio->sense_data.error_code = SSD_ERRCODE_VALID|SSD_CURRENT_ERROR|SSD_KEY_UNIT_ATTENTION; + atio->sense_data.add_sense_code = 0x5; + atio->sense_data.add_sense_code_qual = 0x24; + atio->sense_len = sizeof (atio->sense_data); + break; + } + if (cdb[8] & 0x1) { /* PMI */ + junk_data[0] = 0xff; + junk_data[1] = 0xff; + junk_data[2] = 0xff; + junk_data[3] = 0xff; + } else { + uint64_t last_blk = (disk_size >> DISK_SHIFT) - 1; + if (last_blk < 0xffffffffULL) { + junk_data[0] = (last_blk >> 24) & 0xff; + junk_data[1] = (last_blk >> 16) & 0xff; + junk_data[2] = (last_blk >> 8) & 0xff; + junk_data[3] = (last_blk) & 0xff; + } else { + junk_data[0] = 0xff; + junk_data[1] = 0xff; + junk_data[2] = 0xff; + junk_data[3] = 0xff; + } + } + junk_data[4] = ((1 << DISK_SHIFT) >> 24) & 0xff; + junk_data[5] = ((1 << DISK_SHIFT) >> 16) & 0xff; + junk_data[6] = ((1 << DISK_SHIFT) >> 8) & 0xff; + junk_data[7] = ((1 << DISK_SHIFT)) & 0xff; + data_ptr = junk_data; + data_len = 8; + break; + case 
REPORT_LUNS: + flags |= CAM_DIR_IN; + memset(junk_data, 0, JUNK_SIZE); + junk_data[0] = (1 << 3) >> 24; + junk_data[1] = (1 << 3) >> 16; + junk_data[2] = (1 << 3) >> 8; + junk_data[3] = (1 << 3); + ptr = NULL; + for (i = 0; i < 1; i++) { + ptr = &junk_data[8 + (1 << 3)]; + if (i >= 256) { + ptr[0] = 0x40 | ((i >> 8) & 0x3f); + } + ptr[1] = i; + } + data_ptr = junk_data; + data_len = (ptr + 8) - junk_data; + break; + + default: + flags |= CAM_DIR_NONE; + status = SCSI_STATUS_CHECK_COND; + atio->sense_data.error_code = SSD_ERRCODE_VALID|SSD_CURRENT_ERROR|SSD_KEY_UNIT_ATTENTION; + atio->sense_data.add_sense_code = 0x5; + atio->sense_data.add_sense_code_qual = 0x20; + atio->sense_len = sizeof (atio->sense_data); + break; + } + + /* + * If we are done with the transaction, tell the + * controller to send status and perform a CMD_CMPLT. + * If we have associated sense data, see if we can + * send that too. + */ + if (status == SCSI_STATUS_CHECK_COND) { + flags |= CAM_SEND_SENSE; + csio->sense_len = atio->sense_len; + csio->sense_data = atio->sense_data; + flags &= ~CAM_DIR_MASK; + data_len = 0; + data_ptr = NULL; + } + cam_fill_ctio(csio, 0, isptarg_done, flags, MSG_SIMPLE_Q_TAG, atio->tag_id, atio->init_id, status, data_ptr, data_len, 0); + iccb->ccb_h.target_id = atio->ccb_h.target_id; + iccb->ccb_h.target_lun = return_lun; + iccb->ccb_h.ccb_atio = atio; + xpt_action(iccb); + + if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) { + cam_release_devq(periph->path, 0, 0, 0, 0); + atio->ccb_h.status &= ~CAM_DEV_QFRZN; + } + if (more) { + xpt_schedule(periph, 1); + } +} + +static cam_status +isptargctor(struct cam_periph *periph, void *arg) +{ + struct isptarg_softc *softc; + + softc = (struct isptarg_softc *)arg; + periph->softc = softc; + softc->periph = periph; + softc->path = periph->path; + ISP_PATH_PRT(softc->isp, ISP_LOGTDEBUG0, periph->path, "%s called\n", __func__); + return (CAM_REQ_CMP); +} + +static void +isptargdtor(struct cam_periph *periph) +{ + struct 
isptarg_softc *softc; + softc = (struct isptarg_softc *)periph->softc; + ISP_PATH_PRT(softc->isp, ISP_LOGTDEBUG0, periph->path, "%s called\n", __func__); + softc->periph = NULL; + softc->path = NULL; + periph->softc = NULL; +} + +static void +isptarg_done(struct cam_periph *periph, union ccb *ccb) +{ + struct isptarg_softc *softc; + ispsoftc_t *isp; + struct ccb_accept_tio *atio; + struct ccb_immediate_notify *inot; + cam_status status; + + softc = (struct isptarg_softc *)periph->softc; + isp = softc->isp; + status = ccb->ccb_h.status & CAM_STATUS_MASK; + + switch (ccb->ccb_h.func_code) { + case XPT_ACCEPT_TARGET_IO: + atio = (struct ccb_accept_tio *) ccb; + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "[0x%x] ATIO seen in %s\n", atio->tag_id, __func__); + TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, periph_links.tqe); + xpt_schedule(periph, 1); + break; + case XPT_IMMEDIATE_NOTIFY: + inot = (struct ccb_immediate_notify *) ccb; + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "[0x%x] INOT for 0x%x seen in %s\n", inot->tag_id, inot->seq_id, __func__); + TAILQ_INSERT_TAIL(&softc->inot_queue, &ccb->ccb_h, periph_links.tqe); + xpt_schedule(periph, 1); + break; + case XPT_CONT_TARGET_IO: + if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { + cam_release_devq(ccb->ccb_h.path, 0, 0, 0, 0); + ccb->ccb_h.status &= ~CAM_DEV_QFRZN; + } + atio = ccb->ccb_h.ccb_atio; + if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { + cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL); + xpt_action((union ccb *)atio); + } else if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "[0x%x] MID CTIO seen in %s\n", atio->tag_id, __func__); + TAILQ_INSERT_TAIL(&softc->rework_queue, &atio->ccb_h, periph_links.tqe); + xpt_schedule(periph, 1); + } else { + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "[0x%x] FINAL CTIO seen in %s\n", atio->tag_id, __func__); + xpt_action((union ccb *)atio); + } + xpt_release_ccb(ccb); + break; + 
case XPT_NOTIFY_ACKNOWLEDGE: + if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { + cam_release_devq(ccb->ccb_h.path, 0, 0, 0, 0); + ccb->ccb_h.status &= ~CAM_DEV_QFRZN; + } + inot = ccb->ccb_h.ccb_inot; + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, inot->ccb_h.path, "[0x%x] recycle notify for tag 0x%x\n", inot->tag_id, inot->seq_id); + xpt_release_ccb(ccb); + xpt_action((union ccb *)inot); + break; + default: + xpt_print(ccb->ccb_h.path, "unexpected code 0x%x\n", ccb->ccb_h.func_code); + break; + } +} + +static void +isptargasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) +{ + struct ac_contract *acp = arg; + struct ac_device_changed *fc = (struct ac_device_changed *) acp->contract_data; + + if (code != AC_CONTRACT) { + return; + } + xpt_print(path, "0x%016llx Port ID 0x%06x %s\n", (unsigned long long) fc->wwpn, fc->port, fc->arrived? "arrived" : "departed"); +} + +static void +isp_target_thread(ispsoftc_t *isp, int chan) +{ + union ccb *ccb = NULL; + int i; + void *wchan; + cam_status status; + struct isptarg_softc *softc = NULL; + struct cam_periph *periph = NULL, *wperiph = NULL; + struct cam_path *path, *wpath; + struct cam_sim *sim; + + if (disk_data == NULL) { + disk_size = roundup2(vm_kmem_size >> 1, (1ULL << 20)); + if (disk_size < (50 << 20)) { + disk_size = 50 << 20; + } + disk_data = malloc(disk_size, M_ISPTARG, M_WAITOK | M_ZERO); + if (disk_data == NULL) { + isp_prt(isp, ISP_LOGERR, "%s: could not allocate disk data", __func__); + goto out; + } + isp_prt(isp, ISP_LOGINFO, "allocated a %ju MiB disk", (uintmax_t) (disk_size >> 20)); + } + junk_data = malloc(JUNK_SIZE, M_ISPTARG, M_WAITOK | M_ZERO); + if (junk_data == NULL) { + isp_prt(isp, ISP_LOGERR, "%s: could not allocate junk", __func__); + goto out; + } + + + softc = malloc(sizeof (*softc), M_ISPTARG, M_WAITOK | M_ZERO); + if (softc == NULL) { + isp_prt(isp, ISP_LOGERR, "%s: could not allocate softc", __func__); + goto out; + } + TAILQ_INIT(&softc->work_queue); + 
TAILQ_INIT(&softc->rework_queue); + TAILQ_INIT(&softc->running_queue); + TAILQ_INIT(&softc->inot_queue); + softc->isp = isp; + + periphdriver_register(&isptargdriver); + ISP_GET_PC(isp, chan, sim, sim); + ISP_GET_PC(isp, chan, path, path); + status = xpt_create_path_unlocked(&wpath, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); + if (status != CAM_REQ_CMP) { + isp_prt(isp, ISP_LOGERR, "%s: could not allocate wildcard path", __func__); + return; + } + status = xpt_create_path_unlocked(&path, NULL, cam_sim_path(sim), 0, 0); + if (status != CAM_REQ_CMP) { + xpt_free_path(wpath); + isp_prt(isp, ISP_LOGERR, "%s: could not allocate path", __func__); + return; + } + + ccb = xpt_alloc_ccb(); + + ISP_LOCK(isp); + status = cam_periph_alloc(isptargctor, NULL, isptargdtor, isptargstart, "isptarg", CAM_PERIPH_BIO, wpath, NULL, 0, softc); + if (status != CAM_REQ_CMP) { + ISP_UNLOCK(isp); + isp_prt(isp, ISP_LOGERR, "%s: cam_periph_alloc for wildcard failed", __func__); + goto out; + } + wperiph = cam_periph_find(wpath, "isptarg"); + if (wperiph == NULL) { + ISP_UNLOCK(isp); + isp_prt(isp, ISP_LOGERR, "%s: wildcard periph already allocated but doesn't exist", __func__); + goto out; + } + + status = cam_periph_alloc(isptargctor, NULL, isptargdtor, isptargstart, "isptarg", CAM_PERIPH_BIO, path, NULL, 0, softc); + if (status != CAM_REQ_CMP) { + ISP_UNLOCK(isp); + isp_prt(isp, ISP_LOGERR, "%s: cam_periph_alloc failed", __func__); + goto out; + } + + periph = cam_periph_find(path, "isptarg"); + if (periph == NULL) { + ISP_UNLOCK(isp); + isp_prt(isp, ISP_LOGERR, "%s: periph already allocated but doesn't exist", __func__); + goto out; + } + + status = xpt_register_async(AC_CONTRACT, isptargasync, isp, wpath); + if (status != CAM_REQ_CMP) { + ISP_UNLOCK(isp); + isp_prt(isp, ISP_LOGERR, "%s: xpt_register_async failed", __func__); + goto out; + } + + ISP_UNLOCK(isp); + + ccb = xpt_alloc_ccb(); + + /* + * Make sure role is none. 
+ */ + xpt_setup_ccb(&ccb->ccb_h, periph->path, 10); + ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; + ccb->knob.xport_specific.fc.role = KNOB_ROLE_NONE; +#ifdef ISP_TEST_WWNS + ccb->knob.xport_specific.fc.valid = KNOB_VALID_ROLE | KNOB_VALID_ADDRESS; + ccb->knob.xport_specific.fc.wwnn = 0x508004d000000000ULL | (device_get_unit(isp->isp_osinfo.dev) << 8) | (chan << 16); + ccb->knob.xport_specific.fc.wwpn = 0x508004d000000001ULL | (device_get_unit(isp->isp_osinfo.dev) << 8) | (chan << 16); +#else + ccb->knob.xport_specific.fc.valid = KNOB_VALID_ROLE; +#endif + + ISP_LOCK(isp); + xpt_action(ccb); + ISP_UNLOCK(isp); + + /* + * Now enable luns + */ + xpt_setup_ccb(&ccb->ccb_h, periph->path, 10); + ccb->ccb_h.func_code = XPT_EN_LUN; + ccb->cel.enable = 1; + ISP_LOCK(isp); + xpt_action(ccb); + ISP_UNLOCK(isp); + if (ccb->ccb_h.status != CAM_REQ_CMP) { + xpt_free_ccb(ccb); + xpt_print(periph->path, "failed to enable lun (0x%x)\n", ccb->ccb_h.status); + goto out; + } + + xpt_setup_ccb(&ccb->ccb_h, wperiph->path, 10); + ccb->ccb_h.func_code = XPT_EN_LUN; + ccb->cel.enable = 1; + ISP_LOCK(isp); + xpt_action(ccb); + ISP_UNLOCK(isp); + if (ccb->ccb_h.status != CAM_REQ_CMP) { + xpt_free_ccb(ccb); + xpt_print(wperiph->path, "failed to enable lun (0x%x)\n", ccb->ccb_h.status); + goto out; + } + xpt_free_ccb(ccb); + + /* + * Add resources + */ + ISP_GET_PC_ADDR(isp, chan, target_proc, wchan); + for (i = 0; i < 4; i++) { + ccb = malloc(sizeof (*ccb), M_ISPTARG, M_WAITOK | M_ZERO); + xpt_setup_ccb(&ccb->ccb_h, wperiph->path, 1); + ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; + ccb->ccb_h.cbfcnp = isptarg_done; + ISP_LOCK(isp); + xpt_action(ccb); + ISP_UNLOCK(isp); + } + for (i = 0; i < NISP_TARG_CMDS; i++) { + ccb = malloc(sizeof (*ccb), M_ISPTARG, M_WAITOK | M_ZERO); + xpt_setup_ccb(&ccb->ccb_h, periph->path, 1); + ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; + ccb->ccb_h.cbfcnp = isptarg_done; + ISP_LOCK(isp); + xpt_action(ccb); + ISP_UNLOCK(isp); + } + for (i = 0; i < 4; i++) { + ccb 
= malloc(sizeof (*ccb), M_ISPTARG, M_WAITOK | M_ZERO); + xpt_setup_ccb(&ccb->ccb_h, wperiph->path, 1); + ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; + ccb->ccb_h.cbfcnp = isptarg_done; + ISP_LOCK(isp); + xpt_action(ccb); + ISP_UNLOCK(isp); + } + for (i = 0; i < NISP_TARG_NOTIFIES; i++) { + ccb = malloc(sizeof (*ccb), M_ISPTARG, M_WAITOK | M_ZERO); + xpt_setup_ccb(&ccb->ccb_h, periph->path, 1); + ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; + ccb->ccb_h.cbfcnp = isptarg_done; + ISP_LOCK(isp); + xpt_action(ccb); + ISP_UNLOCK(isp); + } + + /* + * Now turn it all back on + */ + xpt_setup_ccb(&ccb->ccb_h, periph->path, 10); + ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; + ccb->knob.xport_specific.fc.valid = KNOB_VALID_ROLE; + ccb->knob.xport_specific.fc.role = KNOB_ROLE_TARGET; + ISP_LOCK(isp); + xpt_action(ccb); + ISP_UNLOCK(isp); + + /* + * Okay, while things are still active, sleep... + */ + ISP_LOCK(isp); + for (;;) { + ISP_GET_PC(isp, chan, proc_active, i); + if (i == 0) { + break; + } + msleep(wchan, &isp->isp_lock, PUSER, "tsnooze", 0); + } + ISP_UNLOCK(isp); + +out: + if (wperiph) { + cam_periph_invalidate(wperiph); + } + if (periph) { + cam_periph_invalidate(periph); + } + if (junk_data) { + free(junk_data, M_ISPTARG); + } + if (disk_data) { + free(disk_data, M_ISPTARG); + } + if (softc) { + free(softc, M_ISPTARG); + } + xpt_free_path(path); + xpt_free_path(wpath); +} + +static void +isp_target_thread_pi(void *arg) +{ + struct isp_spi *pi = arg; + isp_target_thread(cam_sim_softc(pi->sim), cam_sim_bus(pi->sim)); +} + +static void +isp_target_thread_fc(void *arg) +{ + struct isp_fc *fc = arg; + isp_target_thread(cam_sim_softc(fc->sim), cam_sim_bus(fc->sim)); +} + +static int +isptarg_rwparm(uint8_t *cdb, uint8_t *dp, uint64_t dl, uint32_t offset, uint8_t **kp, uint32_t *tl, int *lp) +{ + uint32_t cnt, curcnt; + uint64_t lba; + + switch (cdb[0]) { + case WRITE_16: + case READ_16: + cnt = (((uint32_t)cdb[10]) << 24) | + (((uint32_t)cdb[11]) << 16) | + 
(((uint32_t)cdb[12]) << 8) | + ((uint32_t)cdb[13]); + + lba = (((uint64_t)cdb[2]) << 56) | + (((uint64_t)cdb[3]) << 48) | + (((uint64_t)cdb[4]) << 40) | + (((uint64_t)cdb[5]) << 32) | + (((uint64_t)cdb[6]) << 24) | + (((uint64_t)cdb[7]) << 16) | + (((uint64_t)cdb[8]) << 8) | + ((uint64_t)cdb[9]); + break; + case WRITE_12: + case READ_12: + cnt = (((uint32_t)cdb[6]) << 16) | + (((uint32_t)cdb[7]) << 8) | + ((u_int32_t)cdb[8]); + + lba = (((uint32_t)cdb[2]) << 24) | + (((uint32_t)cdb[3]) << 16) | + (((uint32_t)cdb[4]) << 8) | + ((uint32_t)cdb[5]); + break; + case WRITE_10: + case READ_10: + cnt = (((uint32_t)cdb[7]) << 8) | + ((u_int32_t)cdb[8]); + + lba = (((uint32_t)cdb[2]) << 24) | + (((uint32_t)cdb[3]) << 16) | + (((uint32_t)cdb[4]) << 8) | + ((uint32_t)cdb[5]); + break; + case WRITE_6: + case READ_6: + cnt = cdb[4]; + if (cnt == 0) { + cnt = 256; + } + lba = (((uint32_t)cdb[1] & 0x1f) << 16) | + (((uint32_t)cdb[2]) << 8) | + ((uint32_t)cdb[3]); + break; + default: + return (-1); + } + + cnt <<= DISK_SHIFT; + lba <<= DISK_SHIFT; + + if (offset == cnt) { + *lp = 1; + return (0); + } + + if (lba + cnt > dl) { + return (-1); + } + + + curcnt = MAX_ISP_TARG_TRANSFER; + if (offset + curcnt >= cnt) { + curcnt = cnt - offset; + *lp = 1; + } else { + *lp = 0; + } + *tl = curcnt; + *kp = &dp[lba + offset]; + return (0); +} + +#endif +#endif + +static void isp_cam_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg) { struct cam_sim *sim; ispsoftc_t *isp; sim = (struct cam_sim *)cbarg; isp = (ispsoftc_t *) cam_sim_softc(sim); switch (code) { case AC_LOST_DEVICE: if (IS_SCSI(isp)) { uint16_t oflags, nflags; - sdparam *sdp = isp->isp_param; + int bus = cam_sim_bus(sim); + sdparam *sdp = SDPARAM(isp, bus); int tgt; tgt = xpt_path_target_id(path); if (tgt >= 0) { - sdp += cam_sim_bus(sim); nflags = sdp->isp_devparam[tgt].nvrm_flags; #ifndef ISP_TARGET_MODE nflags &= DPARM_SAFE_DFLT; if (isp->isp_loaded_fw) { nflags |= DPARM_NARROW | DPARM_ASYNC; } #else nflags = 
DPARM_DEFAULT; #endif oflags = sdp->isp_devparam[tgt].goal_flags; sdp->isp_devparam[tgt].goal_flags = nflags; sdp->isp_devparam[tgt].dev_update = 1; - isp->isp_update |= (1 << cam_sim_bus(sim)); - (void) isp_control(isp, - ISPCTL_UPDATE_PARAMS, NULL); + sdp->update = 1; + (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, bus); sdp->isp_devparam[tgt].goal_flags = oflags; } } break; default: isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code); break; } } static void isp_poll(struct cam_sim *sim) { ispsoftc_t *isp = cam_sim_softc(sim); uint32_t isr; uint16_t sema, mbox; if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { isp_intr(isp, isr, sema, mbox); } } -static int isp_watchdog_work(ispsoftc_t *, XS_T *); - -static int -isp_watchdog_work(ispsoftc_t *isp, XS_T *xs) +static void +isp_watchdog(void *arg) { + struct ccb_scsiio *xs = arg; + ispsoftc_t *isp; uint32_t handle; - /* - * We've decided this command is dead. Make sure we're not trying - * to kill a command that's already dead by getting it's handle and - * and seeing whether it's still alive. - */ + isp = XS_ISP(xs); + handle = isp_find_handle(isp, xs); if (handle) { - uint32_t isr; - uint16_t sema, mbox; + /* + * Make sure the command is *really* dead before we + * release the handle (and DMA resources) for reuse. 
+ */ + (void) isp_control(isp, ISPCTL_ABORT_CMD, xs); - if (XS_CMD_DONE_P(xs)) { - isp_prt(isp, ISP_LOGDEBUG1, - "watchdog found done cmd (handle 0x%x)", handle); - return (1);; - } - - if (XS_CMD_WDOG_P(xs)) { - isp_prt(isp, ISP_LOGDEBUG2, - "recursive watchdog (handle 0x%x)", handle); - return (1); - } - - XS_CMD_S_WDOG(xs); - if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { - isp_intr(isp, isr, sema, mbox); - } - if (XS_CMD_DONE_P(xs)) { - isp_prt(isp, ISP_LOGDEBUG2, - "watchdog cleanup for handle 0x%x", handle); - isp_free_pcmd(isp, (union ccb *)xs); - xpt_done((union ccb *) xs); - } else if (XS_CMD_GRACE_P(xs)) { - /* - * Make sure the command is *really* dead before we - * release the handle (and DMA resources) for reuse. - */ - (void) isp_control(isp, ISPCTL_ABORT_CMD, xs); - - /* - * After this point, the comamnd is really dead. - */ - if (XS_XFRLEN(xs)) { - ISP_DMAFREE(isp, xs, handle); - } - isp_destroy_handle(isp, handle); - xpt_print(xs->ccb_h.path, - "watchdog timeout for handle 0x%x\n", handle); - XS_SETERR(xs, CAM_CMD_TIMEOUT); - XS_CMD_C_WDOG(xs); - isp_done(xs); - } else { - XS_CMD_C_WDOG(xs); - callout_reset(&PISP_PCMD((union ccb *)xs)->wdog, hz, - isp_watchdog, xs); - XS_CMD_S_GRACE(xs); - isp->isp_sendmarker |= 1 << XS_CHANNEL(xs); - } - return (1); + /* + * After this point, the command is really dead.
+ */ + if (XS_XFRLEN(xs)) { + ISP_DMAFREE(isp, xs, handle); + } + isp_destroy_handle(isp, handle); + xpt_print(xs->ccb_h.path, "watchdog timeout for handle 0x%x\n", handle); + XS_SETERR(xs, CAM_CMD_TIMEOUT); + isp_done(xs); } - return (0); } static void -isp_watchdog(void *arg) +isp_make_here(ispsoftc_t *isp, int chan, int tgt) { - ispsoftc_t *isp; - XS_T *xs = arg; - int r; + union ccb *ccb; + struct isp_fc *fc = ISP_FC_PC(isp, chan); - for (r = 0, isp = isplist; r && isp; isp = isp->isp_osinfo.next) { - ISP_LOCK(isp); - r = isp_watchdog_work(isp, xs); - ISP_UNLOCK(isp); + if (isp_autoconfig == 0) { + return; } - if (isp == NULL) { - printf("isp_watchdog: nobody had %p active\n", arg); - } -} - -#if __FreeBSD_version >= 600000 -static void -isp_make_here(ispsoftc_t *isp, int tgt) -{ - union ccb *ccb; /* - * Allocate a CCB, create a wildcard path for this bus, - * and schedule a rescan. + * Allocate a CCB, create a wildcard path for this bus/target and schedule a rescan. */ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { - isp_prt(isp, ISP_LOGWARN, "unable to alloc CCB for rescan"); + isp_prt(isp, ISP_LOGWARN, "Chan %d unable to alloc CCB for rescan", chan); return; } - if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, - cam_sim_path(isp->isp_sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { + if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan"); xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } static void -isp_make_gone(ispsoftc_t *isp, int tgt) +isp_make_gone(ispsoftc_t *isp, int chan, int tgt) { struct cam_path *tp; - if (xpt_create_path(&tp, NULL, cam_sim_path(isp->isp_sim), tgt, - CAM_LUN_WILDCARD) == CAM_REQ_CMP) { + struct isp_fc *fc = ISP_FC_PC(isp, chan); + + if (isp_autoconfig == 0) { + return; + } + if (xpt_create_path(&tp, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) == CAM_REQ_CMP) { xpt_async(AC_LOST_DEVICE, tp, NULL); 
xpt_free_path(tp); } } -#else -#define isp_make_here(isp, tgt) do { ; } while (0) -#define isp_make_gone(isp, tgt) do { ; } while (0) -#endif - /* * Gone Device Timer Function- when we have decided that a device has gone * away, we wait a specific period of time prior to telling the OS it has * gone away. * * This timer function fires once a second and then scans the port database * for devices that are marked dead but still have a virtual target assigned. * We decrement a counter for that port database entry, and when it hits zero, * we tell the OS the device has gone away. */ static void isp_gdt(void *arg) { - ispsoftc_t *isp = arg; + struct isp_fc *fc = arg; + ispsoftc_t *isp = fc->isp; + int chan = fc - isp->isp_osinfo.pc.fc; fcportdb_t *lp; int dbidx, tgt, more_to_do = 0; - ISP_LOCK(isp); - isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired"); + isp_prt(isp, ISP_LOGDEBUG0, "Chan %d GDT timer expired", chan); for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { - lp = &FCPARAM(isp)->portdb[dbidx]; + lp = &FCPARAM(isp, chan)->portdb[dbidx]; if (lp->state != FC_PORTDB_STATE_ZOMBIE) { continue; } - if (lp->ini_map_idx == 0) { + if (lp->dev_map_idx == 0 || lp->target_mode) { continue; } if (lp->new_reserved == 0) { continue; } lp->new_reserved -= 1; if (lp->new_reserved != 0) { more_to_do++; continue; } - tgt = lp->ini_map_idx - 1; - FCPARAM(isp)->isp_ini_map[tgt] = 0; - lp->ini_map_idx = 0; + tgt = lp->dev_map_idx - 1; + FCPARAM(isp, chan)->isp_dev_map[tgt] = 0; + lp->dev_map_idx = 0; lp->state = FC_PORTDB_STATE_NIL; - isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt, - "Gone Device Timeout"); - isp_make_gone(isp, tgt); + isp_prt(isp, ISP_LOGCONFIG, prom3, chan, lp->portid, tgt, "Gone Device Timeout"); + isp_make_gone(isp, chan, tgt); } if (more_to_do) { - isp->isp_osinfo.gdt_running = 1; - callout_reset(&isp->isp_osinfo.gdt, hz, isp_gdt, isp); + fc->gdt_running = 1; + callout_reset(&fc->gdt, hz, isp_gdt, fc); } else { - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - 
"stopping Gone Device Timer"); - isp->isp_osinfo.gdt_running = 0; + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Chan %d stopping Gone Device Timer", chan); + fc->gdt_running = 0; } - ISP_UNLOCK(isp); } /* * Loop Down Timer Function- when loop goes down, a timer is started and * and after it expires we come here and take all probational devices that * the OS knows about and the tell the OS that they've gone away. * * We don't clear the devices out of our port database because, when loop * come back up, we have to do some actual cleanup with the chip at that * point (implicit PLOGO, e.g., to get the chip's port database state right). */ static void isp_ldt(void *arg) { - ispsoftc_t *isp = arg; + struct isp_fc *fc = arg; + ispsoftc_t *isp = fc->isp; + int chan = fc - isp->isp_osinfo.pc.fc; fcportdb_t *lp; int dbidx, tgt; - ISP_LOCK(isp); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Chan %d Loop Down Timer expired @ %lu", chan, (unsigned long) time_uptime); - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired"); - /* * Notify to the OS all targets who we now consider have departed. */ for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { - lp = &FCPARAM(isp)->portdb[dbidx]; + lp = &FCPARAM(isp, chan)->portdb[dbidx]; if (lp->state != FC_PORTDB_STATE_PROBATIONAL) { continue; } - if (lp->ini_map_idx == 0) { + if (lp->dev_map_idx == 0 || lp->target_mode) { continue; } /* * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST! */ /* * Mark that we've announced that this device is gone.... */ lp->reserved = 1; /* * but *don't* change the state of the entry. Just clear * any target id stuff and announce to CAM that the * device is gone. This way any necessary PLOGO stuff * will happen when loop comes back up. 
*/ - tgt = lp->ini_map_idx - 1; - FCPARAM(isp)->isp_ini_map[tgt] = 0; - lp->ini_map_idx = 0; - isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt, - "Loop Down Timeout"); - isp_make_gone(isp, tgt); + tgt = lp->dev_map_idx - 1; + FCPARAM(isp, chan)->isp_dev_map[tgt] = 0; + lp->dev_map_idx = 0; + lp->state = FC_PORTDB_STATE_NIL; + isp_prt(isp, ISP_LOGCONFIG, prom3, chan, lp->portid, tgt, "Loop Down Timeout"); + isp_make_gone(isp, chan, tgt); } /* * The loop down timer has expired. Wake up the kthread * to notice that fact (or make it false). */ - isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1; - wakeup(ISP_KT_WCHAN(isp)); - ISP_UNLOCK(isp); + fc->loop_dead = 1; + fc->loop_down_time = fc->loop_down_limit+1; + wakeup(fc); } static void isp_kthread(void *arg) { - ispsoftc_t *isp = arg; + struct isp_fc *fc = arg; + ispsoftc_t *isp = fc->isp; + int chan = fc - isp->isp_osinfo.pc.fc; int slp = 0; -#if __FreeBSD_version < 500000 - int s = splcam(); -#elif __FreeBSD_version < 700037 - mtx_lock(&Giant); -#else mtx_lock(&isp->isp_osinfo.lock); -#endif - /* - * The first loop is for our usage where we have yet to have - * gotten good fibre channel state. - */ + for (;;) { int wasfrozen, lb, lim; - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "isp_kthread: checking FC state"); - isp->isp_osinfo.mbox_sleep_ok = 1; - lb = isp_fc_runstate(isp, 250000); - isp->isp_osinfo.mbox_sleep_ok = 0; - if (lb) { + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "%s: Chan %d checking FC state", __func__, chan); + lb = isp_fc_runstate(isp, chan, 250000); + + /* + * Our action is different based upon whether we're supporting + * Initiator mode or not. If we are, we might freeze the simq + * when loop is down and set all sorts of different delays to + * check again. + * + * If not, we simply just wait for loop to come up. 
+ */ + if (lb && (fc->role & ISP_ROLE_INITIATOR)) { /* * Increment loop down time by the last sleep interval */ - isp->isp_osinfo.loop_down_time += slp; + fc->loop_down_time += slp; if (lb < 0) { - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "kthread: FC loop not up (down count %d)", - isp->isp_osinfo.loop_down_time); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "%s: Chan %d FC loop not up (down count %d)", __func__, chan, fc->loop_down_time); } else { - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "kthread: FC got to %d (down count %d)", - lb, isp->isp_osinfo.loop_down_time); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "%s: Chan %d FC got to %d (down count %d)", __func__, chan, lb, fc->loop_down_time); } - /* * If we've never seen loop up and we've waited longer * than quickboot time, or we've seen loop up but we've * waited longer than loop_down_limit, give up and go * to sleep until loop comes up. */ - if (FCPARAM(isp)->loop_seen_once == 0) { + if (FCPARAM(isp, chan)->loop_seen_once == 0) { lim = isp_quickboot_time; } else { - lim = isp->isp_osinfo.loop_down_limit; + lim = fc->loop_down_limit; } - if (isp->isp_osinfo.loop_down_time >= lim) { - isp_freeze_loopdown(isp, "loop limit hit"); + if (fc->loop_down_time >= lim) { + isp_freeze_loopdown(isp, chan, "loop limit hit"); slp = 0; - } else if (isp->isp_osinfo.loop_down_time < 10) { + } else if (fc->loop_down_time < 10) { slp = 1; - } else if (isp->isp_osinfo.loop_down_time < 30) { + } else if (fc->loop_down_time < 30) { slp = 5; - } else if (isp->isp_osinfo.loop_down_time < 60) { + } else if (fc->loop_down_time < 60) { slp = 10; - } else if (isp->isp_osinfo.loop_down_time < 120) { + } else if (fc->loop_down_time < 120) { slp = 20; } else { slp = 30; } + } else if (lb) { + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "%s: Chan %d FC Loop Down", __func__, chan); + fc->loop_down_time += slp; + slp = 60; } else { - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "isp_kthread: FC state OK"); - isp->isp_osinfo.loop_down_time = 
0; + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "%s: Chan %d FC state OK", __func__, chan); + fc->loop_down_time = 0; slp = 0; } + /* - * If we'd frozen the simq, unfreeze it now so that CAM - * can start sending us commands. If the FC state isn't - * okay yet, they'll hit that in isp_start which will - * freeze the queue again. + * If this is past the first loop up or the loop is dead and if we'd frozen the simq, unfreeze it + * now so that CAM can start sending us commands. + * + * If the FC state isn't okay yet, they'll hit that in isp_start which will freeze the queue again + * or kill the commands, as appropriate. */ - wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; - isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; - if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "isp_kthread: releasing simq"); - xpt_release_simq(isp->isp_sim, 1); + + if (FCPARAM(isp, chan)->loop_seen_once || fc->loop_dead) { + wasfrozen = fc->simqfrozen & SIMQFRZ_LOOPDOWN; + fc->simqfrozen &= ~SIMQFRZ_LOOPDOWN; + if (wasfrozen && fc->simqfrozen == 0) { + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "%s: Chan %d releasing simq", __func__, chan); + xpt_release_simq(fc->sim, 1); + } } - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "isp_kthread: sleep time %d", slp); -#if __FreeBSD_version < 700037 - tsleep(ISP_KT_WCHAN(isp), PRIBIO, "ispf", slp * hz); -#else - msleep(ISP_KT_WCHAN(isp), &isp->isp_osinfo.lock, - PRIBIO, "ispf", slp * hz); -#endif + + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "%s: Chan %d sleep time %d", __func__, chan, slp); + + msleep(fc, &isp->isp_osinfo.lock, PRIBIO, "ispf", slp * hz); + /* * If slp is zero, we're waking up for the first time after * things have been okay. In this case, we set a deferral state * for all commands and delay hysteresis seconds before starting * the FC state evaluation. This gives the loop/fabric a chance * to settle. 
*/ - if (slp == 0 && isp->isp_osinfo.hysteresis) { - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "isp_kthread: sleep hysteresis tick time %d", - isp->isp_osinfo.hysteresis * hz); -#if __FreeBSD_version < 700037 - (void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT", - (isp->isp_osinfo.hysteresis * hz)); -#else - (void) msleep(&isp_fabric_hysteresis, - &isp->isp_osinfo.lock, PRIBIO, "ispT", - (isp->isp_osinfo.hysteresis * hz)); -#endif + if (slp == 0 && fc->hysteresis) { + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "%s: Chan %d sleep hysteresis ticks %d", __func__, chan, fc->hysteresis * hz); + (void) msleep(&isp_fabric_hysteresis, &isp->isp_osinfo.lock, PRIBIO, "ispT", (fc->hysteresis * hz)); } } -#if __FreeBSD_version < 500000 - splx(s); -#elif __FreeBSD_version < 700037 - mtx_unlock(&Giant); -#else mtx_unlock(&isp->isp_osinfo.lock); -#endif } -#if __FreeBSD_version < 500000 -static void isp_action_wrk(struct cam_sim *, union ccb *); static void isp_action(struct cam_sim *sim, union ccb *ccb) { - ispsoftc_t *isp = (ispsoftc_t *)cam_sim_softc(sim); - ISP_LOCK(isp); - isp_action_wrk(sim, ccb); - ISP_UNLOCK(isp); -} -#define isp_action isp_action_wrk -#endif - -static void -isp_action(struct cam_sim *sim, union ccb *ccb) -{ int bus, tgt, ts, error, lim; ispsoftc_t *isp; struct ccb_trans_settings *cts; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); isp = (ispsoftc_t *)cam_sim_softc(sim); - if (isp->isp_state != ISP_RUNSTATE && - ccb->ccb_h.func_code == XPT_SCSI_IO) { + mtx_assert(&isp->isp_lock, MA_OWNED); + + if (isp->isp_state != ISP_RUNSTATE && ccb->ccb_h.func_code == XPT_SCSI_IO) { isp_init(isp); if (isp->isp_state != ISP_INITSTATE) { /* * Lie. Say it was a selection timeout. 
*/ ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } isp->isp_state = ISP_RUNSTATE; } isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); ISP_PCMD(ccb) = NULL; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ + bus = XS_CHANNEL(ccb); /* * Do a couple of preliminary checks... */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } #ifdef DIAGNOSTIC if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { xpt_print(ccb->ccb_h.path, "invalid target\n"); ccb->ccb_h.status = CAM_PATH_INVALID; } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { xpt_print(ccb->ccb_h.path, "invalid lun\n"); ccb->ccb_h.status = CAM_PATH_INVALID; } if (ccb->ccb_h.status == CAM_PATH_INVALID) { xpt_done(ccb); break; } #endif ccb->csio.scsi_status = SCSI_STATUS_OK; if (isp_get_pcmd(isp, ccb)) { isp_prt(isp, ISP_LOGWARN, "out of PCMDs"); cam_freeze_devq(ccb->ccb_h.path); - cam_release_devq(ccb->ccb_h.path, - RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0); + cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0); xpt_done(ccb); break; } error = isp_start((XS_T *) ccb); switch (error) { case CMD_QUEUED: XS_CMD_S_CLEAR(ccb); ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) { break; } ts = ccb->ccb_h.timeout; if (ts == CAM_TIME_DEFAULT) { ts = 60*1000; } ts = isp_mstohz(ts); - callout_reset(&PISP_PCMD(ccb)->wdog, ts, - isp_watchdog, ccb); + callout_reset(&PISP_PCMD(ccb)->wdog, ts, isp_watchdog, ccb); break; case CMD_RQLATER: /* - * Handle initial and subsequent loop down cases + * We get this result for FC devices if the loop state isn't ready yet + * or if the device in question has gone zombie on us. 
+ * + * If we've never seen Loop UP at all, we requeue this request and wait + * for the initial loop up delay to expire. */ - if (FCPARAM(isp)->loop_seen_once == 0) { - lim = isp_quickboot_time; - } else { - lim = isp->isp_osinfo.loop_down_limit; - } - if (isp->isp_osinfo.loop_down_time >= lim) { - isp_prt(isp, ISP_LOGDEBUG0, - "%d.%d downtime (%d) > lim (%d)", - XS_TGT(ccb), XS_LUN(ccb), - isp->isp_osinfo.loop_down_time, lim); - ccb->ccb_h.status = - CAM_SEL_TIMEOUT|CAM_DEV_QFRZN; + lim = ISP_FC_PC(isp, bus)->loop_down_limit; + if (FCPARAM(isp, bus)->loop_seen_once == 0 || ISP_FC_PC(isp, bus)->loop_down_time >= lim) { + if (FCPARAM(isp, bus)->loop_seen_once == 0) { + isp_prt(isp, ISP_LOGDEBUG0, "%d.%d loop not seen yet @ %lu", XS_TGT(ccb), XS_LUN(ccb), (unsigned long) time_uptime); + } else { + isp_prt(isp, ISP_LOGDEBUG0, "%d.%d downtime (%d) > lim (%d)", XS_TGT(ccb), XS_LUN(ccb), ISP_FC_PC(isp, bus)->loop_down_time, lim); + } + ccb->ccb_h.status = CAM_SEL_TIMEOUT|CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); isp_free_pcmd(isp, ccb); xpt_done(ccb); break; } - isp_prt(isp, ISP_LOGDEBUG0, - "%d.%d retry later", XS_TGT(ccb), XS_LUN(ccb)); - /* - * Otherwise, retry in a while. - */ + isp_prt(isp, ISP_LOGDEBUG0, "%d.%d retry later", XS_TGT(ccb), XS_LUN(ccb)); cam_freeze_devq(ccb->ccb_h.path); - cam_release_devq(ccb->ccb_h.path, - RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); + cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); XS_SETERR(ccb, CAM_REQUEUE_REQ); isp_free_pcmd(isp, ccb); xpt_done(ccb); break; case CMD_EAGAIN: - XS_SETERR(ccb, CAM_REQUEUE_REQ); isp_free_pcmd(isp, ccb); + cam_freeze_devq(ccb->ccb_h.path); + cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 100, 0); + XS_SETERR(ccb, CAM_REQUEUE_REQ); xpt_done(ccb); break; case CMD_COMPLETE: isp_done((struct ccb_scsiio *) ccb); break; default: - isp_prt(isp, ISP_LOGERR, - "What's this? 
0x%x at %d in file %s", - error, __LINE__, __FILE__); + isp_prt(isp, ISP_LOGERR, "What's this? 0x%x at %d in file %s", error, __LINE__, __FILE__); XS_SETERR(ccb, CAM_REQ_CMP_ERR); isp_free_pcmd(isp, ccb); xpt_done(ccb); } break; #ifdef ISP_TARGET_MODE - case XPT_EN_LUN: /* Enable LUN as a target */ - { - int seq, i; - seq = isp_en_lun(isp, ccb); - if (seq < 0) { - xpt_done(ccb); - break; + case XPT_EN_LUN: /* Enable/Disable LUN as a target */ + if (ccb->cel.enable) { + isp_enable_lun(isp, ccb); + } else { + isp_disable_lun(isp, ccb); } - for (i = 0; isp->isp_osinfo.leact[seq] && i < 30 * 1000; i++) { - uint32_t isr; - uint16_t sema, mbox; - if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { - isp_intr(isp, isr, sema, mbox); - } - DELAY(1000); - } break; - } - case XPT_NOTIFY_ACK: /* recycle notify ack */ - case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ + case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */ case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ { - tstate_t *tptr = - get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); + tstate_t *tptr = get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); if (tptr == NULL) { - ccb->ccb_h.status = CAM_LUN_INVALID; - xpt_done(ccb); + tptr = get_lun_statep(isp, XS_CHANNEL(ccb), CAM_LUN_WILDCARD); + } + if (tptr == NULL) { + const char *str; + uint32_t tag; + + if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { + str = "XPT_IMMEDIATE_NOTIFY"; + tag = ccb->cin1.seq_id; + } else { + tag = ccb->atio.tag_id; + str = "XPT_ACCEPT_TARGET_IO"; + } + ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] no state pointer found for %s\n", __func__, tag, str); + dump_tstates(isp, XS_CHANNEL(ccb)); + ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; } ccb->ccb_h.sim_priv.entries[0].field = 0; ccb->ccb_h.sim_priv.entries[1].ptr = isp; ccb->ccb_h.flags = 0; if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { - /* - * Note that the command itself may not be done- - * it may not even have had 
the first CTIO sent. - */ + if (ccb->atio.tag_id) { + atio_private_data_t *atp = isp_get_atpd(isp, tptr, ccb->atio.tag_id); + if (atp) { + isp_put_atpd(isp, tptr, atp); + } + } tptr->atio_count++; - isp_prt(isp, ISP_LOGTDEBUG0, - "Put FREE ATIO, lun %d, count now %d", - ccb->ccb_h.target_lun, tptr->atio_count); - SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, - sim_links.sle); - } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { + SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, sim_links.sle); + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "Put FREE ATIO (tag id 0x%x), count now %d\n", + ((struct ccb_accept_tio *)ccb)->tag_id, tptr->atio_count); + } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { + if (ccb->cin1.tag_id) { + inot_private_data_t *ntp = isp_find_ntpd(isp, tptr, ccb->cin1.tag_id, ccb->cin1.seq_id); + if (ntp) { + isp_put_ntpd(isp, tptr, ntp); + } + } tptr->inot_count++; - isp_prt(isp, ISP_LOGTDEBUG0, - "Put FREE INOT, lun %d, count now %d", - ccb->ccb_h.target_lun, tptr->inot_count); - SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, - sim_links.sle); - } else { - isp_prt(isp, ISP_LOGWARN, "Got Notify ACK");; + SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle); + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "Put FREE INOT, (seq id 0x%x) count now %d\n", + ((struct ccb_immediate_notify *)ccb)->seq_id, tptr->inot_count); } rls_lun_statep(isp, tptr); ccb->ccb_h.status = CAM_REQ_INPROG; break; } - case XPT_CONT_TARGET_IO: + case XPT_NOTIFY_ACKNOWLEDGE: /* notify ack */ { - isp_target_start_ctio(isp, ccb); + tstate_t *tptr; + inot_private_data_t *ntp; + + /* + * XXX: Because we cannot guarantee that the path information in the notify acknowledge ccb + * XXX: matches that for the immediate notify, we have to *search* for the notify structure + */ + /* + * All the relevant path information is in the associated immediate notify + */ + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] NOTIFY ACKNOWLEDGE for 0x%x seen\n", 
__func__, ccb->cna2.tag_id, ccb->cna2.seq_id); + ntp = get_ntp_from_tagdata(isp, ccb->cna2.tag_id, ccb->cna2.seq_id, &tptr); + if (ntp == NULL) { + ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] XPT_NOTIFY_ACKNOWLEDGE of 0x%x cannot find ntp private data\n", __func__, + ccb->cna2.tag_id, ccb->cna2.seq_id); + ccb->ccb_h.status = CAM_DEV_NOT_THERE; + xpt_done(ccb); + break; + } + if (isp_handle_platform_target_notify_ack(isp, &ntp->rd.nt)) { + rls_lun_statep(isp, tptr); + cam_freeze_devq(ccb->ccb_h.path); + cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); + XS_SETERR(ccb, CAM_REQUEUE_REQ); + break; + } + isp_put_ntpd(isp, tptr, ntp); + rls_lun_statep(isp, tptr); + ccb->ccb_h.status = CAM_REQ_CMP; + ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] calling xpt_done for tag 0x%x\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id); + xpt_done(ccb); break; } + case XPT_CONT_TARGET_IO: + isp_target_start_ctio(isp, ccb); + break; #endif case XPT_RESET_DEV: /* BDR the specified SCSI device */ bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); tgt = ccb->ccb_h.target_id; tgt |= (bus << 16); - error = isp_control(isp, ISPCTL_RESET_DEV, &tgt); + error = isp_control(isp, ISPCTL_RESET_DEV, bus, tgt); if (error) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; } else { ccb->ccb_h.status = CAM_REQ_CMP; } xpt_done(ccb); break; case XPT_ABORT: /* Abort the specified CCB */ { union ccb *accb = ccb->cab.abort_ccb; switch (accb->ccb_h.func_code) { #ifdef ISP_TARGET_MODE case XPT_ACCEPT_TARGET_IO: - case XPT_IMMED_NOTIFY: - ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb); + isp_target_mark_aborted(isp, accb); break; - case XPT_CONT_TARGET_IO: - isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet"); - ccb->ccb_h.status = CAM_UA_ABORT; - break; #endif case XPT_SCSI_IO: error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); if (error) { ccb->ccb_h.status = CAM_UA_ABORT; } else { ccb->ccb_h.status = CAM_REQ_CMP; } break; default: ccb->ccb_h.status = 
CAM_REQ_INVALID; break; } xpt_done(ccb); break; } -#ifdef CAM_NEW_TRAN_CODE #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) -#else -#define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS) -#endif case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ cts = &ccb->cts; if (!IS_CURRENT_SETTINGS(cts)) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } tgt = cts->ccb_h.target_id; + bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); if (IS_SCSI(isp)) { -#ifndef CAM_NEW_TRAN_CODE - sdparam *sdp = isp->isp_param; + struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; + struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; + sdparam *sdp = SDPARAM(isp, bus); uint16_t *dptr; - bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); - - sdp += bus; - /* - * We always update (internally) from goal_flags - * so any request to change settings just gets - * vectored to that location. - */ - dptr = &sdp->isp_devparam[tgt].goal_flags; - - /* - * Note that these operations affect the - * the goal flags (goal_flags)- not - * the current state flags. Then we mark - * things so that the next operation to - * this HBA will cause the update to occur. - */ - if (cts->valid & CCB_TRANS_DISC_VALID) { - if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { - *dptr |= DPARM_DISC; - } else { - *dptr &= ~DPARM_DISC; - } - } - if (cts->valid & CCB_TRANS_TQ_VALID) { - if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { - *dptr |= DPARM_TQING; - } else { - *dptr &= ~DPARM_TQING; - } - } - if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) { - switch (cts->bus_width) { - case MSG_EXT_WDTR_BUS_16_BIT: - *dptr |= DPARM_WIDE; - break; - default: - *dptr &= ~DPARM_WIDE; - } - } - /* - * Any SYNC RATE of nonzero and SYNC_OFFSET - * of nonzero will cause us to go to the - * selected (from NVRAM) maximum value for - * this device. At a later point, we'll - * allow finer control. 
- */ - if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && - (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) && - (cts->sync_offset > 0)) { - *dptr |= DPARM_SYNC; - } else { - *dptr &= ~DPARM_SYNC; - } - *dptr |= DPARM_SAFE_DFLT; -#else - struct ccb_trans_settings_scsi *scsi = - &cts->proto_specific.scsi; - struct ccb_trans_settings_spi *spi = - &cts->xport_specific.spi; - sdparam *sdp = isp->isp_param; - uint16_t *dptr; - if (spi->valid == 0 && scsi->valid == 0) { ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } - - bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); - sdp += bus; + /* * We always update (internally) from goal_flags * so any request to change settings just gets * vectored to that location. */ dptr = &sdp->isp_devparam[tgt].goal_flags; if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) *dptr |= DPARM_DISC; else *dptr &= ~DPARM_DISC; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) *dptr |= DPARM_TQING; else *dptr &= ~DPARM_TQING; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) *dptr |= DPARM_WIDE; else *dptr &= ~DPARM_WIDE; } /* * XXX: FIX ME */ - if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && - (spi->valid & CTS_SPI_VALID_SYNC_RATE) && - (spi->sync_period && spi->sync_offset)) { + if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && (spi->valid & CTS_SPI_VALID_SYNC_RATE) && (spi->sync_period && spi->sync_offset)) { *dptr |= DPARM_SYNC; /* * XXX: CHECK FOR LEGALITY */ - sdp->isp_devparam[tgt].goal_period = - spi->sync_period; - sdp->isp_devparam[tgt].goal_offset = - spi->sync_offset; + sdp->isp_devparam[tgt].goal_period = spi->sync_period; + sdp->isp_devparam[tgt].goal_offset = spi->sync_offset; } else { *dptr &= ~DPARM_SYNC; } -#endif - isp_prt(isp, ISP_LOGDEBUG0, - "SET (%d.%d.%d) to flags %x off %x per %x", - bus, tgt, cts->ccb_h.target_lun, - sdp->isp_devparam[tgt].goal_flags, - 
sdp->isp_devparam[tgt].goal_offset, - sdp->isp_devparam[tgt].goal_period); + isp_prt(isp, ISP_LOGDEBUG0, "SET (%d.%d.%d) to flags %x off %x per %x", bus, tgt, cts->ccb_h.target_lun, sdp->isp_devparam[tgt].goal_flags, + sdp->isp_devparam[tgt].goal_offset, sdp->isp_devparam[tgt].goal_period); sdp->isp_devparam[tgt].dev_update = 1; - isp->isp_update |= (1 << bus); + sdp->update = 1; } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: cts = &ccb->cts; tgt = cts->ccb_h.target_id; + bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); if (IS_FC(isp)) { -#ifndef CAM_NEW_TRAN_CODE - /* - * a lot of normal SCSI things don't make sense. - */ - cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; - cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; - /* - * How do you measure the width of a high - * speed serial bus? Well, in bytes. - * - * Offset and period make no sense, though, so we set - * (above) a 'base' transfer speed to be gigabit. - */ - cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; -#else - fcparam *fcp = isp->isp_param; - struct ccb_trans_settings_scsi *scsi = - &cts->proto_specific.scsi; - struct ccb_trans_settings_fc *fc = - &cts->xport_specific.fc; + fcparam *fcp = FCPARAM(isp, bus); + struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; + struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_FC; cts->transport_version = 0; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; fc->valid = CTS_FC_VALID_SPEED; fc->bitrate = 100000; - if (fcp->isp_gbspeed == 4 || fcp->isp_gbspeed == 2) - fc->bitrate *= fcp->isp_gbspeed; + fc->bitrate *= fcp->isp_gbspeed; if (tgt > 0 && tgt < MAX_FC_TARG) { fcportdb_t *lp = &fcp->portdb[tgt]; fc->wwnn = lp->node_wwn; fc->wwpn = lp->port_wwn; fc->port = lp->portid; - fc->valid |= CTS_FC_VALID_WWNN | - CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; + fc->valid |= CTS_FC_VALID_WWNN | 
CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; } -#endif } else { -#ifdef CAM_NEW_TRAN_CODE - struct ccb_trans_settings_scsi *scsi = - &cts->proto_specific.scsi; - struct ccb_trans_settings_spi *spi = - &cts->xport_specific.spi; -#endif - sdparam *sdp = isp->isp_param; - int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); + struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; + struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; + sdparam *sdp = SDPARAM(isp, bus); uint16_t dval, pval, oval; - sdp += bus; - if (IS_CURRENT_SETTINGS(cts)) { sdp->isp_devparam[tgt].dev_refresh = 1; - isp->isp_update |= (1 << bus); - (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, - NULL); + sdp->update = 1; + (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, bus); dval = sdp->isp_devparam[tgt].actv_flags; oval = sdp->isp_devparam[tgt].actv_offset; pval = sdp->isp_devparam[tgt].actv_period; } else { dval = sdp->isp_devparam[tgt].nvrm_flags; oval = sdp->isp_devparam[tgt].nvrm_offset; pval = sdp->isp_devparam[tgt].nvrm_period; } -#ifndef CAM_NEW_TRAN_CODE - cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); - - if (dval & DPARM_DISC) { - cts->flags |= CCB_TRANS_DISC_ENB; - } - if (dval & DPARM_TQING) { - cts->flags |= CCB_TRANS_TAG_ENB; - } - if (dval & DPARM_WIDE) { - cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; - } else { - cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; - } - cts->valid = CCB_TRANS_BUS_WIDTH_VALID | - CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; - - if ((dval & DPARM_SYNC) && oval != 0) { - cts->sync_period = pval; - cts->sync_offset = oval; - cts->valid |= - CCB_TRANS_SYNC_RATE_VALID | - CCB_TRANS_SYNC_OFFSET_VALID; - } -#else cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->valid = 0; scsi->valid = 0; spi->flags = 0; scsi->flags = 0; if (dval & DPARM_DISC) { spi->flags |= CTS_SPI_FLAGS_DISC_ENB; } if ((dval & DPARM_SYNC) && oval && pval) { spi->sync_offset = oval; spi->sync_period = pval; 
} else { spi->sync_offset = 0; spi->sync_period = 0; } spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->valid |= CTS_SPI_VALID_BUS_WIDTH; if (dval & DPARM_WIDE) { spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else { spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; if (dval & DPARM_TQING) { scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } spi->valid |= CTS_SPI_VALID_DISC; } -#endif - isp_prt(isp, ISP_LOGDEBUG0, - "GET %s (%d.%d.%d) to flags %x off %x per %x", - IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM", + isp_prt(isp, ISP_LOGDEBUG0, "GET %s (%d.%d.%d) to flags %x off %x per %x", IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM", bus, tgt, cts->ccb_h.target_lun, dval, oval, pval); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_CALC_GEOMETRY: -#if __FreeBSD_version < 500000 - { - struct ccb_calc_geometry *ccg; - u_int32_t secs_per_cylinder; - u_int32_t size_mb; + cam_calc_geometry(&ccb->ccg, 1); + xpt_done(ccb); + break; - ccg = &ccb->ccg; - if (ccg->block_size == 0) { - ccb->ccb_h.status = CAM_REQ_INVALID; + case XPT_RESET_BUS: /* Reset the specified bus */ + bus = cam_sim_bus(sim); + error = isp_control(isp, ISPCTL_RESET_BUS, bus); + if (error) { + ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); break; } - size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); - if (size_mb > 1024) { - ccg->heads = 255; - ccg->secs_per_track = 63; + if (bootverbose) { + xpt_print(ccb->ccb_h.path, "reset bus on channel %d\n", bus); + } + if (IS_FC(isp)) { + xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, 0); } else { - ccg->heads = 64; - ccg->secs_per_track = 32; + xpt_async(AC_BUS_RESET, ISP_SPI_PC(isp, bus)->path, 0); } - secs_per_cylinder = ccg->heads * ccg->secs_per_track; - ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; - } -#else - { - cam_calc_geometry(&ccb->ccg, /*extended*/1); + 
+ case XPT_TERM_IO: /* Terminate the I/O process */ + ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; - } + + case XPT_SET_SIM_KNOB: /* Set SIM knobs */ + { + struct ccb_sim_knob *kp = &ccb->knob; + fcparam *fcp; + + + if (!IS_FC(isp)) { + ccb->ccb_h.status = CAM_REQ_INVALID; + xpt_done(ccb); + break; + } + + bus = cam_sim_bus(xpt_path_sim(kp->ccb_h.path)); + fcp = FCPARAM(isp, bus); + + if (kp->xport_specific.fc.valid & KNOB_VALID_ADDRESS) { + fcp->isp_wwnn = ISP_FC_PC(isp, bus)->def_wwnn = kp->xport_specific.fc.wwnn; + fcp->isp_wwpn = ISP_FC_PC(isp, bus)->def_wwpn = kp->xport_specific.fc.wwpn; +isp_prt(isp, ISP_LOGALL, "Setting Channel %d wwns to 0x%jx 0x%jx", bus, fcp->isp_wwnn, fcp->isp_wwpn); + } + ccb->ccb_h.status = CAM_REQ_CMP; + if (kp->xport_specific.fc.valid & KNOB_VALID_ROLE) { + int rchange = 0; + int newrole = 0; + + switch (kp->xport_specific.fc.role) { + case KNOB_ROLE_NONE: + if (fcp->role != ISP_ROLE_NONE) { + rchange = 1; + newrole = ISP_ROLE_NONE; + } + break; + case KNOB_ROLE_TARGET: + if (fcp->role != ISP_ROLE_TARGET) { + rchange = 1; + newrole = ISP_ROLE_TARGET; + } + break; + case KNOB_ROLE_INITIATOR: + if (fcp->role != ISP_ROLE_INITIATOR) { + rchange = 1; + newrole = ISP_ROLE_INITIATOR; + } + break; + case KNOB_ROLE_BOTH: + if (fcp->role != ISP_ROLE_BOTH) { + rchange = 1; + newrole = ISP_ROLE_BOTH; + } + break; + } + if (rchange) { + if (isp_fc_change_role(isp, bus, newrole) != 0) { + ccb->ccb_h.status = CAM_REQ_CMP_ERR; +#ifdef ISP_TARGET_MODE + } else if (newrole == ISP_ROLE_TARGET || newrole == ISP_ROLE_BOTH) { + isp_enable_deferred_luns(isp, bus); #endif - case XPT_RESET_BUS: /* Reset the specified bus */ - bus = cam_sim_bus(sim); - error = isp_control(isp, ISPCTL_RESET_BUS, &bus); - if (error) - ccb->ccb_h.status = CAM_REQ_CMP_ERR; - else { - if (bootverbose) { - xpt_print(ccb->ccb_h.path, "reset bus\n"); + } } - if (cam_sim_bus(sim) && isp->isp_path2 != NULL) - xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); - else if 
(isp->isp_path != NULL) - xpt_async(AC_BUS_RESET, isp->isp_path, NULL); - ccb->ccb_h.status = CAM_REQ_CMP; } xpt_done(ccb); break; + } + case XPT_GET_SIM_KNOB: /* Set SIM knobs */ + { + struct ccb_sim_knob *kp = &ccb->knob; - case XPT_TERM_IO: /* Terminate the I/O process */ - ccb->ccb_h.status = CAM_REQ_INVALID; + if (IS_FC(isp)) { + fcparam *fcp; + + bus = cam_sim_bus(xpt_path_sim(kp->ccb_h.path)); + fcp = FCPARAM(isp, bus); + + kp->xport_specific.fc.wwnn = fcp->isp_wwnn; + kp->xport_specific.fc.wwpn = fcp->isp_wwpn; + switch (fcp->role) { + case ISP_ROLE_NONE: + kp->xport_specific.fc.role = KNOB_ROLE_NONE; + break; + case ISP_ROLE_TARGET: + kp->xport_specific.fc.role = KNOB_ROLE_TARGET; + break; + case ISP_ROLE_INITIATOR: + kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR; + break; + case ISP_ROLE_BOTH: + kp->xport_specific.fc.role = KNOB_ROLE_BOTH; + break; + } + kp->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE; + ccb->ccb_h.status = CAM_REQ_CMP; + } else { + ccb->ccb_h.status = CAM_REQ_INVALID; + } xpt_done(ccb); break; - + } case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; #ifdef ISP_TARGET_MODE cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; #else cpi->target_sprt = 0; #endif cpi->hba_eng_cnt = 0; cpi->max_target = ISP_MAX_TARGETS(isp) - 1; cpi->max_lun = ISP_MAX_LUNS(isp) - 1; cpi->bus_id = cam_sim_bus(sim); + bus = cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); if (IS_FC(isp)) { + fcparam *fcp = FCPARAM(isp, bus); + cpi->hba_misc = PIM_NOBUSRESET; + /* * Because our loop ID can shift from time to time, * make our initiator ID out of range of our bus. */ cpi->initiator_id = cpi->max_target + 1; /* - * Set base transfer capabilities for Fibre Channel. - * Technically not correct because we don't know - * what media we're running on top of- but we'll - * look good if we always say 100MB/s. + * Set base transfer capabilities for Fibre Channel, for this HBA. 
*/ - cpi->base_transfer_speed = 100000; - if (FCPARAM(isp)->isp_gbspeed == 4 || - FCPARAM(isp)->isp_gbspeed == 2) - cpi->base_transfer_speed *= - FCPARAM(isp)->isp_gbspeed; + if (IS_24XX(isp)) { + cpi->base_transfer_speed = 4000000; + } else if (IS_23XX(isp)) { + cpi->base_transfer_speed = 2000000; + } else { + cpi->base_transfer_speed = 1000000; + } cpi->hba_inquiry = PI_TAG_ABLE; -#ifdef CAM_NEW_TRAN_CODE cpi->transport = XPORT_FC; cpi->transport_version = 0; -#endif + cpi->xport_specific.fc.wwnn = fcp->isp_wwnn; + cpi->xport_specific.fc.wwpn = fcp->isp_wwpn; + cpi->xport_specific.fc.port = fcp->isp_portid; + cpi->xport_specific.fc.bitrate = fcp->isp_gbspeed * 1000; } else { - sdparam *sdp = isp->isp_param; - sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); + sdparam *sdp = SDPARAM(isp, bus); cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->hba_misc = 0; cpi->initiator_id = sdp->isp_initiator_id; cpi->base_transfer_speed = 3300; -#ifdef CAM_NEW_TRAN_CODE cpi->transport = XPORT_SPI; cpi->transport_version = 2; -#endif } -#ifdef CAM_NEW_TRAN_CODE cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; -#endif strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) void -isp_done(struct ccb_scsiio *sccb) +isp_done(XS_T *sccb) { ispsoftc_t *isp = XS_ISP(sccb); if (XS_NOERR(sccb)) XS_SETERR(sccb, CAM_REQ_CMP); - if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && - (sccb->scsi_status != SCSI_STATUS_OK)) { + if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (sccb->scsi_status != SCSI_STATUS_OK)) { sccb->ccb_h.status &= ~CAM_STATUS_MASK; - if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && - (sccb->ccb_h.status & 
CAM_AUTOSNS_VALID) == 0) { + if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; } else { sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; } } sccb->ccb_h.status &= ~CAM_SIM_QUEUED; if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { - isp_prt(isp, ISP_LOGDEBUG0, - "target %d lun %d CAM status 0x%x SCSI status 0x%x", - XS_TGT(sccb), XS_LUN(sccb), sccb->ccb_h.status, - sccb->scsi_status); + isp_prt(isp, ISP_LOGDEBUG0, "target %d lun %d CAM status 0x%x SCSI status 0x%x", XS_TGT(sccb), XS_LUN(sccb), sccb->ccb_h.status, sccb->scsi_status); if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { sccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(sccb->ccb_h.path, 1); } } - if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && - (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { - xpt_print(sccb->ccb_h.path, - "cam completion status 0x%x\n", sccb->ccb_h.status); + if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { + xpt_print(sccb->ccb_h.path, "cam completion status 0x%x\n", sccb->ccb_h.status); } XS_CMD_S_DONE(sccb); - if (XS_CMD_WDOG_P(sccb) == 0) { - callout_stop(&PISP_PCMD(sccb)->wdog); - if (XS_CMD_GRACE_P(sccb)) { - isp_prt(isp, ISP_LOGDEBUG2, - "finished command on borrowed time"); - } - XS_CMD_S_CLEAR(sccb); - isp_free_pcmd(isp, (union ccb *) sccb); - xpt_done((union ccb *) sccb); - } + callout_stop(&PISP_PCMD(sccb)->wdog); + XS_CMD_S_CLEAR(sccb); + isp_free_pcmd(isp, (union ccb *) sccb); + xpt_done((union ccb *) sccb); } -int -isp_async(ispsoftc_t *isp, ispasync_t cmd, void *arg) +void +isp_async(ispsoftc_t *isp, ispasync_t cmd, ...) 
{ - int bus, rv = 0; - static const char prom[] = - "PortID 0x%06x handle 0x%x role %s %s\n" - " WWNN 0x%08x%08x WWPN 0x%08x%08x"; - static const char prom2[] = - "PortID 0x%06x handle 0x%x role %s %s tgt %u\n" - " WWNN 0x%08x%08x WWPN 0x%08x%08x"; + int bus; + static const char prom[] = "Chan %d PortID 0x%06x handle 0x%x role %s %s WWPN 0x%08x%08x"; + static const char prom2[] = "Chan %d PortID 0x%06x handle 0x%x role %s %s tgt %u WWPN 0x%08x%08x"; char *msg = NULL; target_id_t tgt; fcportdb_t *lp; struct cam_path *tmppath; + va_list ap; switch (cmd) { case ISPASYNC_NEW_TGT_PARAMS: { -#ifdef CAM_NEW_TRAN_CODE struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; -#endif int flags, tgt; - sdparam *sdp = isp->isp_param; + sdparam *sdp; struct ccb_trans_settings cts; memset(&cts, 0, sizeof (struct ccb_trans_settings)); - tgt = *((int *)arg); - bus = (tgt >> 16) & 0xffff; - tgt &= 0xffff; - sdp += bus; - if (xpt_create_path(&tmppath, NULL, - cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim), - tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { - isp_prt(isp, ISP_LOGWARN, - "isp_async cannot make temp path for %d.%d", - tgt, bus); - rv = -1; + va_start(ap, cmd); + bus = va_arg(ap, int); + tgt = va_arg(ap, int); + va_end(ap); + sdp = SDPARAM(isp, bus); + + if (xpt_create_path(&tmppath, NULL, cam_sim_path(ISP_SPI_PC(isp, bus)->sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { + isp_prt(isp, ISP_LOGWARN, "isp_async cannot make temp path for %d.%d", tgt, bus); break; } flags = sdp->isp_devparam[tgt].actv_flags; -#ifdef CAM_NEW_TRAN_CODE cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.protocol = PROTO_SCSI; cts.transport = XPORT_SPI; scsi = &cts.proto_specific.scsi; spi = &cts.xport_specific.spi; if (flags & DPARM_TQING) { scsi->valid |= CTS_SCSI_VALID_TQ; scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } if (flags & DPARM_DISC) { spi->valid |= CTS_SPI_VALID_DISC; spi->flags |= CTS_SPI_FLAGS_DISC_ENB; } spi->flags |= CTS_SPI_VALID_BUS_WIDTH; if (flags & DPARM_WIDE) { spi->bus_width = 
MSG_EXT_WDTR_BUS_16_BIT; } else { spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } if (flags & DPARM_SYNC) { spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->sync_period = sdp->isp_devparam[tgt].actv_period; spi->sync_offset = sdp->isp_devparam[tgt].actv_offset; } -#else - cts.flags = CCB_TRANS_CURRENT_SETTINGS; - cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; - if (flags & DPARM_DISC) { - cts.flags |= CCB_TRANS_DISC_ENB; - } - if (flags & DPARM_TQING) { - cts.flags |= CCB_TRANS_TAG_ENB; - } - cts.valid |= CCB_TRANS_BUS_WIDTH_VALID; - cts.bus_width = (flags & DPARM_WIDE)? - MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT; - cts.sync_period = sdp->isp_devparam[tgt].actv_period; - cts.sync_offset = sdp->isp_devparam[tgt].actv_offset; - if (flags & DPARM_SYNC) { - cts.valid |= - CCB_TRANS_SYNC_RATE_VALID | - CCB_TRANS_SYNC_OFFSET_VALID; - } -#endif - isp_prt(isp, ISP_LOGDEBUG2, - "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", - bus, tgt, sdp->isp_devparam[tgt].actv_period, - sdp->isp_devparam[tgt].actv_offset, flags); + isp_prt(isp, ISP_LOGDEBUG2, "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", bus, tgt, sdp->isp_devparam[tgt].actv_period, sdp->isp_devparam[tgt].actv_offset, flags); xpt_setup_ccb(&cts.ccb_h, tmppath, 1); xpt_async(AC_TRANSFER_NEG, tmppath, &cts); xpt_free_path(tmppath); break; } case ISPASYNC_BUS_RESET: - bus = *((int *)arg); - isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", - bus); - if (bus > 0 && isp->isp_path2) { - xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); - } else if (isp->isp_path) { - xpt_async(AC_BUS_RESET, isp->isp_path, NULL); + { + va_start(ap, cmd); + bus = va_arg(ap, int); + va_end(ap); + isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", bus); + if (IS_FC(isp)) { + xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, NULL); + } else { + xpt_async(AC_BUS_RESET, ISP_SPI_PC(isp, bus)->path, NULL); } break; + } case ISPASYNC_LIP: if (msg == NULL) 
{ msg = "LIP Received"; } /* FALLTHROUGH */ case ISPASYNC_LOOP_RESET: if (msg == NULL) { msg = "LOOP Reset"; } /* FALLTHROUGH */ case ISPASYNC_LOOP_DOWN: + { + struct isp_fc *fc; if (msg == NULL) { msg = "LOOP Down"; } - if (isp->isp_path) { - isp_freeze_loopdown(isp, msg); + va_start(ap, cmd); + bus = va_arg(ap, int); + va_end(ap); + + FCPARAM(isp, bus)->link_active = 1; + + fc = ISP_FC_PC(isp, bus); + /* + * We don't do any simq freezing if we are only in target mode + */ + if (fc->role & ISP_ROLE_INITIATOR) { + if (fc->path) { + isp_freeze_loopdown(isp, bus, msg); + } + if (fc->ldt_running == 0) { + fc->ldt_running = 1; + callout_reset(&fc->ldt, fc->loop_down_limit * hz, isp_ldt, fc); + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "starting Loop Down Timer @ %lu", (unsigned long) time_uptime); + } } - if (isp->isp_osinfo.ldt_running == 0) { - isp->isp_osinfo.ldt_running = 1; - callout_reset(&isp->isp_osinfo.ldt, - isp->isp_osinfo.loop_down_limit * hz, isp_ldt, isp); - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "starting Loop Down Timer"); - } - isp_prt(isp, ISP_LOGINFO, msg); + isp_prt(isp, ISP_LOGINFO, "Chan %d: %s", bus, msg); break; + } case ISPASYNC_LOOP_UP: + va_start(ap, cmd); + bus = va_arg(ap, int); + va_end(ap); /* * Now we just note that Loop has come up. We don't * actually do anything because we're waiting for a * Change Notify before activating the FC cleanup * thread to look at the state of the loop again. 
*/ - isp_prt(isp, ISP_LOGINFO, "Loop UP"); + FCPARAM(isp, bus)->link_active = 1; + ISP_FC_PC(isp, bus)->loop_dead = 0; + ISP_FC_PC(isp, bus)->loop_down_time = 0; + isp_prt(isp, ISP_LOGINFO, "Chan %d Loop UP", bus); break; case ISPASYNC_DEV_ARRIVED: - lp = arg; + va_start(ap, cmd); + bus = va_arg(ap, int); + lp = va_arg(ap, fcportdb_t *); + va_end(ap); lp->reserved = 0; - if ((isp->isp_role & ISP_ROLE_INITIATOR) && - (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) { - int dbidx = lp - FCPARAM(isp)->portdb; + if ((ISP_FC_PC(isp, bus)->role & ISP_ROLE_INITIATOR) && (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) { + int dbidx = lp - FCPARAM(isp, bus)->portdb; int i; for (i = 0; i < MAX_FC_TARG; i++) { if (i >= FL_ID && i <= SNS_ID) { continue; } - if (FCPARAM(isp)->isp_ini_map[i] == 0) { + if (FCPARAM(isp, bus)->isp_dev_map[i] == 0) { break; } } if (i < MAX_FC_TARG) { - FCPARAM(isp)->isp_ini_map[i] = dbidx + 1; - lp->ini_map_idx = i + 1; + FCPARAM(isp, bus)->isp_dev_map[i] = dbidx + 1; + lp->dev_map_idx = i + 1; } else { isp_prt(isp, ISP_LOGWARN, "out of target ids"); - isp_dump_portdb(isp); + isp_dump_portdb(isp, bus); } } - if (lp->ini_map_idx) { - tgt = lp->ini_map_idx - 1; - isp_prt(isp, ISP_LOGCONFIG, prom2, - lp->portid, lp->handle, - roles[lp->roles], "arrived at", tgt, - (uint32_t) (lp->node_wwn >> 32), - (uint32_t) lp->node_wwn, - (uint32_t) (lp->port_wwn >> 32), - (uint32_t) lp->port_wwn); - isp_make_here(isp, tgt); + if (lp->dev_map_idx) { + tgt = lp->dev_map_idx - 1; + isp_prt(isp, ISP_LOGCONFIG, prom2, bus, lp->portid, lp->handle, roles[lp->roles], "arrived at", tgt, (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn); + isp_make_here(isp, bus, tgt); } else { - isp_prt(isp, ISP_LOGCONFIG, prom, - lp->portid, lp->handle, - roles[lp->roles], "arrived", - (uint32_t) (lp->node_wwn >> 32), - (uint32_t) lp->node_wwn, - (uint32_t) (lp->port_wwn >> 32), - (uint32_t) lp->port_wwn); + isp_prt(isp, ISP_LOGCONFIG, prom, bus, lp->portid, lp->handle, 
roles[lp->roles], "arrived", (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn); } break; case ISPASYNC_DEV_CHANGED: - lp = arg; + va_start(ap, cmd); + bus = va_arg(ap, int); + lp = va_arg(ap, fcportdb_t *); + va_end(ap); + lp->reserved = 0; if (isp_change_is_bad) { lp->state = FC_PORTDB_STATE_NIL; - if (lp->ini_map_idx) { - tgt = lp->ini_map_idx - 1; - FCPARAM(isp)->isp_ini_map[tgt] = 0; - lp->ini_map_idx = 0; - isp_prt(isp, ISP_LOGCONFIG, prom3, - lp->portid, tgt, "change is bad"); - isp_make_gone(isp, tgt); + if (lp->dev_map_idx) { + tgt = lp->dev_map_idx - 1; + FCPARAM(isp, bus)->isp_dev_map[tgt] = 0; + lp->dev_map_idx = 0; + isp_prt(isp, ISP_LOGCONFIG, prom3, bus, lp->portid, tgt, "change is bad"); + isp_make_gone(isp, bus, tgt); } else { - isp_prt(isp, ISP_LOGCONFIG, prom, - lp->portid, lp->handle, - roles[lp->roles], - "changed and departed", - (uint32_t) (lp->node_wwn >> 32), - (uint32_t) lp->node_wwn, - (uint32_t) (lp->port_wwn >> 32), - (uint32_t) lp->port_wwn); + isp_prt(isp, ISP_LOGCONFIG, prom, bus, lp->portid, lp->handle, roles[lp->roles], "changed and departed", + (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn); } } else { lp->portid = lp->new_portid; lp->roles = lp->new_roles; - if (lp->ini_map_idx) { - int t = lp->ini_map_idx - 1; - FCPARAM(isp)->isp_ini_map[t] = - (lp - FCPARAM(isp)->portdb) + 1; - tgt = lp->ini_map_idx - 1; - isp_prt(isp, ISP_LOGCONFIG, prom2, - lp->portid, lp->handle, - roles[lp->roles], "changed at", tgt, - (uint32_t) (lp->node_wwn >> 32), - (uint32_t) lp->node_wwn, - (uint32_t) (lp->port_wwn >> 32), - (uint32_t) lp->port_wwn); + if (lp->dev_map_idx) { + int t = lp->dev_map_idx - 1; + FCPARAM(isp, bus)->isp_dev_map[t] = (lp - FCPARAM(isp, bus)->portdb) + 1; + tgt = lp->dev_map_idx - 1; + isp_prt(isp, ISP_LOGCONFIG, prom2, bus, lp->portid, lp->handle, roles[lp->roles], "changed at", tgt, + (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn); } else { - isp_prt(isp, ISP_LOGCONFIG, prom, - lp->portid, 
lp->handle, - roles[lp->roles], "changed", - (uint32_t) (lp->node_wwn >> 32), - (uint32_t) lp->node_wwn, - (uint32_t) (lp->port_wwn >> 32), - (uint32_t) lp->port_wwn); + isp_prt(isp, ISP_LOGCONFIG, prom, bus, lp->portid, lp->handle, roles[lp->roles], "changed", (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn); } } break; case ISPASYNC_DEV_STAYED: - lp = arg; - if (lp->ini_map_idx) { - tgt = lp->ini_map_idx - 1; - isp_prt(isp, ISP_LOGCONFIG, prom2, - lp->portid, lp->handle, - roles[lp->roles], "stayed at", tgt, - (uint32_t) (lp->node_wwn >> 32), - (uint32_t) lp->node_wwn, - (uint32_t) (lp->port_wwn >> 32), - (uint32_t) lp->port_wwn); + va_start(ap, cmd); + bus = va_arg(ap, int); + lp = va_arg(ap, fcportdb_t *); + va_end(ap); + if (lp->dev_map_idx) { + tgt = lp->dev_map_idx - 1; + isp_prt(isp, ISP_LOGCONFIG, prom2, bus, lp->portid, lp->handle, roles[lp->roles], "stayed at", tgt, + (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn); } else { - isp_prt(isp, ISP_LOGCONFIG, prom, - lp->portid, lp->handle, - roles[lp->roles], "stayed", - (uint32_t) (lp->node_wwn >> 32), - (uint32_t) lp->node_wwn, - (uint32_t) (lp->port_wwn >> 32), - (uint32_t) lp->port_wwn); + isp_prt(isp, ISP_LOGCONFIG, prom, bus, lp->portid, lp->handle, roles[lp->roles], "stayed", + (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn); } break; case ISPASYNC_DEV_GONE: - lp = arg; + va_start(ap, cmd); + bus = va_arg(ap, int); + lp = va_arg(ap, fcportdb_t *); + va_end(ap); /* * If this has a virtual target and we haven't marked it * that we're going to have isp_gdt tell the OS it's gone, * set the isp_gdt timer running on it. * * If it isn't marked that isp_gdt is going to get rid of it, * announce that it's gone. 
*/ - if (lp->ini_map_idx && lp->reserved == 0) { + if (lp->dev_map_idx && lp->reserved == 0) { lp->reserved = 1; - lp->new_reserved = isp->isp_osinfo.gone_device_time; + lp->new_reserved = ISP_FC_PC(isp, bus)->gone_device_time; lp->state = FC_PORTDB_STATE_ZOMBIE; - if (isp->isp_osinfo.gdt_running == 0) { - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "starting Gone Device Timer"); - isp->isp_osinfo.gdt_running = 1; - callout_reset(&isp->isp_osinfo.gdt, hz, - isp_gdt, isp); + if (ISP_FC_PC(isp, bus)->gdt_running == 0) { + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Chan %d starting Gone Device Timer", bus); + ISP_FC_PC(isp, bus)->gdt_running = 1; + callout_reset(&ISP_FC_PC(isp, bus)->gdt, hz, isp_gdt, ISP_FC_PC(isp, bus)); } - tgt = lp->ini_map_idx - 1; - isp_prt(isp, ISP_LOGCONFIG, prom2, - lp->portid, lp->handle, - roles[lp->roles], "gone zombie at", tgt, - (uint32_t) (lp->node_wwn >> 32), - (uint32_t) lp->node_wwn, - (uint32_t) (lp->port_wwn >> 32), - (uint32_t) lp->port_wwn); + tgt = lp->dev_map_idx - 1; + isp_prt(isp, ISP_LOGCONFIG, prom2, bus, lp->portid, lp->handle, roles[lp->roles], "gone zombie at", tgt, (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn); } else if (lp->reserved == 0) { - isp_prt(isp, ISP_LOGCONFIG, prom, - lp->portid, lp->handle, - roles[lp->roles], "departed", - (uint32_t) (lp->node_wwn >> 32), - (uint32_t) lp->node_wwn, - (uint32_t) (lp->port_wwn >> 32), - (uint32_t) lp->port_wwn); + isp_prt(isp, ISP_LOGCONFIG, prom, bus, lp->portid, lp->handle, roles[lp->roles], "departed", (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn); } break; case ISPASYNC_CHANGE_NOTIFY: { char *msg; - if (arg == ISPASYNC_CHANGE_PDB) { - msg = "Port Database Changed"; - } else if (arg == ISPASYNC_CHANGE_SNS) { - msg = "Name Server Database Changed"; + int evt, nphdl, nlstate, reason; + + va_start(ap, cmd); + bus = va_arg(ap, int); + evt = va_arg(ap, int); + if (IS_24XX(isp) && evt == ISPASYNC_CHANGE_PDB) { + nphdl = va_arg(ap, int); + nlstate = 
va_arg(ap, int); + reason = va_arg(ap, int); } else { - msg = "Other Change Notify"; + nphdl = NIL_HANDLE; + nlstate = reason = 0; } + va_end(ap); + + if (evt == ISPASYNC_CHANGE_PDB) { + msg = "Chan %d Port Database Changed"; + } else if (evt == ISPASYNC_CHANGE_SNS) { + msg = "Chan %d Name Server Database Changed"; + } else { + msg = "Chan %d Other Change Notify"; + } + /* * If the loop down timer is running, cancel it. */ - if (isp->isp_osinfo.ldt_running) { - isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, - "Stopping Loop Down Timer"); - isp->isp_osinfo.ldt_running = 0; - callout_stop(&isp->isp_osinfo.ldt); + if (ISP_FC_PC(isp, bus)->ldt_running) { + isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Stopping Loop Down Timer @ %lu", (unsigned long) time_uptime); + ISP_FC_PC(isp, bus)->ldt_running = 0; + callout_stop(&ISP_FC_PC(isp, bus)->ldt); } - isp_prt(isp, ISP_LOGINFO, msg); - isp_freeze_loopdown(isp, msg); - wakeup(ISP_KT_WCHAN(isp)); + isp_prt(isp, ISP_LOGINFO, msg, bus); + if (ISP_FC_PC(isp, bus)->role & ISP_ROLE_INITIATOR) { + isp_freeze_loopdown(isp, bus, msg); + } + wakeup(ISP_FC_PC(isp, bus)); break; } #ifdef ISP_TARGET_MODE case ISPASYNC_TARGET_NOTIFY: { - tmd_notify_t *nt = arg; - isp_prt(isp, ISP_LOGALL, - "target notify code 0x%x", nt->nt_ncode); + isp_notify_t *notify; + va_start(ap, cmd); + notify = va_arg(ap, isp_notify_t *); + va_end(ap); + switch (notify->nt_ncode) { + case NT_ABORT_TASK: + case NT_ABORT_TASK_SET: + case NT_CLEAR_ACA: + case NT_CLEAR_TASK_SET: + case NT_LUN_RESET: + case NT_TARGET_RESET: + /* + * These are task management functions. + */ + isp_handle_platform_target_tmf(isp, notify); + break; + case NT_BUS_RESET: + case NT_LIP_RESET: + case NT_LINK_UP: + case NT_LINK_DOWN: + /* + * No action need be taken here. 
+ */ + break; + case NT_HBA_RESET: + isp_del_all_wwn_entries(isp, ISP_NOCHAN); + break; + case NT_LOGOUT: + /* + * This is device arrival/departure notification + */ + isp_handle_platform_target_notify_ack(isp, notify); + break; + case NT_ARRIVED: + { + struct ac_contract ac; + struct ac_device_changed *fc; + + ac.contract_number = AC_CONTRACT_DEV_CHG; + fc = (struct ac_device_changed *) ac.contract_data; + fc->wwpn = notify->nt_wwn; + fc->port = notify->nt_sid; + fc->target = notify->nt_nphdl; + fc->arrived = 1; + xpt_async(AC_CONTRACT, ISP_FC_PC(isp, notify->nt_channel)->path, &ac); + break; + } + case NT_DEPARTED: + { + struct ac_contract ac; + struct ac_device_changed *fc; + + ac.contract_number = AC_CONTRACT_DEV_CHG; + fc = (struct ac_device_changed *) ac.contract_data; + fc->wwpn = notify->nt_wwn; + fc->port = notify->nt_sid; + fc->target = notify->nt_nphdl; + fc->arrived = 0; + xpt_async(AC_CONTRACT, ISP_FC_PC(isp, notify->nt_channel)->path, &ac); + break; + } + default: + isp_prt(isp, ISP_LOGALL, "target notify code 0x%x", notify->nt_ncode); + isp_handle_platform_target_notify_ack(isp, notify); + break; + } break; } case ISPASYNC_TARGET_ACTION: - switch (((isphdr_t *)arg)->rqs_entry_type) { + { + isphdr_t *hp; + + va_start(ap, cmd); + hp = va_arg(ap, isphdr_t *); + va_end(ap); + switch (hp->rqs_entry_type) { default: - isp_prt(isp, ISP_LOGWARN, - "event 0x%x for unhandled target action", - ((isphdr_t *)arg)->rqs_entry_type); + isp_prt(isp, ISP_LOGWARN, "%s: unhandled target action 0x%x", __func__, hp->rqs_entry_type); break; case RQSTYPE_NOTIFY: if (IS_SCSI(isp)) { - rv = isp_handle_platform_notify_scsi(isp, - (in_entry_t *) arg); + isp_handle_platform_notify_scsi(isp, (in_entry_t *) hp); + } else if (IS_24XX(isp)) { + isp_handle_platform_notify_24xx(isp, (in_fcentry_24xx_t *) hp); } else { - rv = isp_handle_platform_notify_fc(isp, - (in_fcentry_t *) arg); + isp_handle_platform_notify_fc(isp, (in_fcentry_t *) hp); } break; case RQSTYPE_ATIO: - rv = 
isp_handle_platform_atio(isp, (at_entry_t *) arg); + if (IS_24XX(isp)) { + isp_handle_platform_atio7(isp, (at7_entry_t *) hp); + } else { + isp_handle_platform_atio(isp, (at_entry_t *) hp); + } break; case RQSTYPE_ATIO2: - rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); + isp_handle_platform_atio2(isp, (at2_entry_t *) hp); break; + case RQSTYPE_CTIO7: case RQSTYPE_CTIO3: case RQSTYPE_CTIO2: case RQSTYPE_CTIO: - rv = isp_handle_platform_ctio(isp, arg); + isp_handle_platform_ctio(isp, hp); break; + case RQSTYPE_ABTS_RCVD: + { + abts_t *abts = (abts_t *)hp; + isp_notify_t notify, *nt = ¬ify; + tstate_t *tptr; + fcportdb_t *lp; + uint16_t chan; + uint32_t sid, did; + + did = (abts->abts_did_hi << 16) | abts->abts_did_lo; + sid = (abts->abts_sid_hi << 16) | abts->abts_sid_lo; + ISP_MEMZERO(nt, sizeof (isp_notify_t)); + + nt->nt_hba = isp; + nt->nt_did = did; + nt->nt_nphdl = abts->abts_nphdl; + nt->nt_sid = sid; + isp_find_chan_by_did(isp, did, &chan); + if (chan == ISP_NOCHAN) { + nt->nt_tgt = TGT_ANY; + } else { + nt->nt_tgt = FCPARAM(isp, chan)->isp_wwpn; + if (isp_find_pdb_by_loopid(isp, chan, abts->abts_nphdl, &lp)) { + nt->nt_wwn = lp->port_wwn; + } else { + nt->nt_wwn = INI_ANY; + } + } + /* + * Try hard to find the lun for this command. 
+ */ + tptr = get_lun_statep_from_tag(isp, chan, abts->abts_rxid_task); + if (tptr) { + nt->nt_lun = xpt_path_lun_id(tptr->owner); + rls_lun_statep(isp, tptr); + } else { + nt->nt_lun = LUN_ANY; + } + nt->nt_need_ack = 1; + nt->nt_tagval = abts->abts_rxid_task; + nt->nt_tagval |= (((uint64_t) abts->abts_rxid_abts) << 32); + if (abts->abts_rxid_task == ISP24XX_NO_TASK) { + isp_prt(isp, ISP_LOGTINFO, "[0x%x] ABTS from N-Port handle 0x%x Port 0x%06x has no task id (rx_id 0x%04x ox_id 0x%04x)", + abts->abts_rxid_abts, abts->abts_nphdl, sid, abts->abts_rx_id, abts->abts_ox_id); + } else { + isp_prt(isp, ISP_LOGTINFO, "[0x%x] ABTS from N-Port handle 0x%x Port 0x%06x for task 0x%x (rx_id 0x%04x ox_id 0x%04x)", + abts->abts_rxid_abts, abts->abts_nphdl, sid, abts->abts_rxid_task, abts->abts_rx_id, abts->abts_ox_id); + } + nt->nt_channel = chan; + nt->nt_ncode = NT_ABORT_TASK; + nt->nt_lreserved = hp; + isp_handle_platform_target_tmf(isp, nt); + break; + } case RQSTYPE_ENABLE_LUN: case RQSTYPE_MODIFY_LUN: - isp_ledone(isp, (lun_entry_t *) arg); + isp_ledone(isp, (lun_entry_t *) hp); break; } break; + } #endif case ISPASYNC_FW_CRASH: { uint16_t mbox1, mbox6; mbox1 = ISP_READ(isp, OUTMAILBOX1); if (IS_DUALBUS(isp)) { mbox6 = ISP_READ(isp, OUTMAILBOX6); } else { mbox6 = 0; } - isp_prt(isp, ISP_LOGERR, - "Internal Firmware Error on bus %d @ RISC Address 0x%x", - mbox6, mbox1); -#ifdef ISP_FW_CRASH_DUMP + isp_prt(isp, ISP_LOGERR, "Internal Firmware Error on bus %d @ RISC Address 0x%x", mbox6, mbox1); mbox1 = isp->isp_osinfo.mbox_sleep_ok; isp->isp_osinfo.mbox_sleep_ok = 0; - if (IS_FC(isp)) { - FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; - FCPARAM(isp)->isp_loopstate = LOOP_NIL; - isp_freeze_loopdown(isp, "f/w crash"); - isp_fw_dump(isp); - } - isp_reinit(isp); + isp_reinit(isp, 1); isp->isp_osinfo.mbox_sleep_ok = mbox1; -#else - mbox1 = isp->isp_osinfo.mbox_sleep_ok; - isp->isp_osinfo.mbox_sleep_ok = 0; - isp_reinit(isp); - isp->isp_osinfo.mbox_sleep_ok = mbox1; -#endif 
isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); break; } - case ISPASYNC_UNHANDLED_RESPONSE: - break; default: isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); break; } - return (rv); } /* * Locks are held before coming here. */ void isp_uninit(ispsoftc_t *isp) { if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RESET); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); } ISP_DISABLE_INTS(isp); } +/* + * When we want to get the 'default' WWNs (when lacking NVRAM), we pick them + * up from our platform default (defww{p|n}n) and morph them based upon + * channel. + * + * When we want to get the 'active' WWNs, we get NVRAM WWNs and then morph them + * based upon channel. + */ + +uint64_t +isp_default_wwn(ispsoftc_t * isp, int chan, int isactive, int iswwnn) +{ + uint64_t seed; + struct isp_fc *fc = ISP_FC_PC(isp, chan); + + /* + * If we're asking for a active WWN, the default overrides get + * returned, otherwise the NVRAM value is picked. + * + * If we're asking for a default WWN, we just pick the default override. + */ + if (isactive) { + seed = iswwnn ? fc->def_wwnn : fc->def_wwpn; + if (seed) { + return (seed); + } + seed = iswwnn ? FCPARAM(isp, chan)->isp_wwnn_nvram : FCPARAM(isp, chan)->isp_wwpn_nvram; + } else { + seed = iswwnn ? fc->def_wwnn : fc->def_wwpn; + } + + + /* + * For channel zero just return what we have. For either ACIIVE or + * DEFAULT cases, we depend on default override of NVRAM values for + * channel zero. + */ + if (chan == 0) { + return (seed); + } + + /* + * For other channels, we are doing one of three things: + * + * 1. If what we have now is non-zero, return it. Otherwise we morph + * values from channel 0. 2. If we're here for a WWPN we synthesize + * it if Channel 0's wwpn has a type 2 NAA. 3. If we're here for a + * WWNN we synthesize it if Channel 0's wwnn has a type 2 NAA. + */ + + if (seed) { + return (seed); + } + if (isactive) { + seed = iswwnn ? 
FCPARAM(isp, 0)->isp_wwnn_nvram : FCPARAM(isp, 0)->isp_wwpn_nvram; + } else { + seed = iswwnn ? ISP_FC_PC(isp, 0)->def_wwnn : ISP_FC_PC(isp, 0)->def_wwpn; + } + + if (((seed >> 60) & 0xf) == 2) { + /* + * The type 2 NAA fields for QLogic cards appear be laid out + * thusly: + * + * bits 63..60 NAA == 2 bits 59..57 unused/zero bit 56 + * port (1) or node (0) WWN distinguishor bit 48 + * physical port on dual-port chips (23XX/24XX) + * + * This is somewhat nutty, particularly since bit 48 is + * irrelevant as they assign seperate serial numbers to + * different physical ports anyway. + * + * We'll stick our channel number plus one first into bits + * 57..59 and thence into bits 52..55 which allows for 8 bits + * of channel which is comfortably more than our maximum + * (126) now. + */ + seed &= ~0x0FF0000000000000ULL; + if (iswwnn == 0) { + seed |= ((uint64_t) (chan + 1) & 0xf) << 56; + seed |= ((uint64_t) ((chan + 1) >> 4) & 0xf) << 52; + } + } else { + seed = 0; + } + return (seed); +} + void isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...) 
{ va_list ap; if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { return; } printf("%s: ", device_get_nameunit(isp->isp_dev)); va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); printf("\n"); } uint64_t isp_nanotime_sub(struct timespec *b, struct timespec *a) { uint64_t elapsed; struct timespec x = *b; timespecsub(&x, a); elapsed = GET_NANOSEC(&x); if (elapsed == 0) elapsed++; return (elapsed); } int isp_mbox_acquire(ispsoftc_t *isp) { if (isp->isp_osinfo.mboxbsy) { return (1); } else { isp->isp_osinfo.mboxcmd_done = 0; isp->isp_osinfo.mboxbsy = 1; return (0); } } void isp_mbox_wait_complete(ispsoftc_t *isp, mbreg_t *mbp) { unsigned int usecs = mbp->timeout; unsigned int max, olim, ilim; if (usecs == 0) { usecs = MBCMD_DEFAULT_TIMEOUT; } max = isp->isp_mbxwrk0 + 1; if (isp->isp_osinfo.mbox_sleep_ok) { unsigned int ms = (usecs + 999) / 1000; isp->isp_osinfo.mbox_sleep_ok = 0; isp->isp_osinfo.mbox_sleeping = 1; for (olim = 0; olim < max; olim++) { -#if __FreeBSD_version < 700037 - tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep", - isp_mstohz(ms)); -#else - msleep(&isp->isp_mbxworkp, &isp->isp_osinfo.lock, - PRIBIO, "ispmbx_sleep", isp_mstohz(ms)); -#endif + msleep(&isp->isp_mbxworkp, &isp->isp_osinfo.lock, PRIBIO, "ispmbx_sleep", isp_mstohz(ms)); if (isp->isp_osinfo.mboxcmd_done) { break; } } isp->isp_osinfo.mbox_sleep_ok = 1; isp->isp_osinfo.mbox_sleeping = 0; } else { for (olim = 0; olim < max; olim++) { for (ilim = 0; ilim < usecs; ilim += 100) { uint32_t isr; uint16_t sema, mbox; if (isp->isp_osinfo.mboxcmd_done) { break; } if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { isp_intr(isp, isr, sema, mbox); if (isp->isp_osinfo.mboxcmd_done) { break; } } - USEC_DELAY(100); + ISP_DELAY(100); } if (isp->isp_osinfo.mboxcmd_done) { break; } } } if (isp->isp_osinfo.mboxcmd_done == 0) { - isp_prt(isp, ISP_LOGWARN, - "%s Mailbox Command (0x%x) Timeout (%uus)", - isp->isp_osinfo.mbox_sleep_ok? 
"Interrupting" : "Polled", - isp->isp_lastmbxcmd, usecs); + isp_prt(isp, ISP_LOGWARN, "%s Mailbox Command (0x%x) Timeout (%uus) (started @ %s:%d)", + isp->isp_osinfo.mbox_sleep_ok? "Interrupting" : "Polled", isp->isp_lastmbxcmd, usecs, mbp->func, mbp->lineno); mbp->param[0] = MBOX_TIMEOUT; isp->isp_osinfo.mboxcmd_done = 1; } } void isp_mbox_notify_done(ispsoftc_t *isp) { if (isp->isp_osinfo.mbox_sleeping) { wakeup(&isp->isp_mbxworkp); } isp->isp_osinfo.mboxcmd_done = 1; } void isp_mbox_release(ispsoftc_t *isp) { isp->isp_osinfo.mboxbsy = 0; } int +isp_fc_scratch_acquire(ispsoftc_t *isp, int chan) +{ + int ret = 0; + if (isp->isp_osinfo.pc.fc[chan].fcbsy) { + ret = -1; + } else { + isp->isp_osinfo.pc.fc[chan].fcbsy = 1; + } + return (ret); +} + +int isp_mstohz(int ms) { int hz; struct timeval t; t.tv_sec = ms / 1000; t.tv_usec = (ms % 1000) * 1000; hz = tvtohz(&t); if (hz < 0) { hz = 0x7fffffff; } if (hz == 0) { hz = 1; } return (hz); } void isp_platform_intr(void *arg) { ispsoftc_t *isp = arg; uint32_t isr; uint16_t sema, mbox; ISP_LOCK(isp); isp->isp_intcnt++; if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { isp->isp_intbogus++; } else { isp_intr(isp, isr, sema, mbox); } ISP_UNLOCK(isp); } void isp_common_dmateardown(ispsoftc_t *isp, struct ccb_scsiio *csio, uint32_t hdl) { if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { - bus_dmamap_sync(isp->isp_osinfo.dmat, - PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTREAD); + bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTREAD); } else { - bus_dmamap_sync(isp->isp_osinfo.dmat, - PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTWRITE); + bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); +} + +void +isp_timer(void *arg) +{ + ispsoftc_t *isp = arg; +#ifdef ISP_TARGET_MODE + isp_tmcmd_restart(isp); +#endif + callout_reset(&isp->isp_osinfo.tmo, hz, isp_timer, isp); } Index: head/sys/dev/isp/isp_freebsd.h 
=================================================================== --- head/sys/dev/isp/isp_freebsd.h (revision 196007) +++ head/sys/dev/isp/isp_freebsd.h (revision 196008) @@ -1,599 +1,637 @@ /* $FreeBSD$ */ /*- * Qlogic ISP SCSI Host Adapter FreeBSD Wrapper Definitions * - * Copyright (c) 1997-2006 by Matthew Jacob + * Copyright (c) 1997-2008 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef _ISP_FREEBSD_H #define _ISP_FREEBSD_H #include #include #include -#if __FreeBSD_version < 500000 -#include -#include -#include -#else #include #include #include #include #include #include -#endif #include #include #include -#if __FreeBSD_version < 500000 -#include -#endif #include +#include #include #include #include #include #include #include #include #include #include #include "opt_ddb.h" #include "opt_isp.h" -#if __FreeBSD_version < 500000 -#define ISP_PLATFORM_VERSION_MAJOR 4 -#define ISP_PLATFORM_VERSION_MINOR 17 -#else -#define ISP_PLATFORM_VERSION_MAJOR 5 -#define ISP_PLATFORM_VERSION_MINOR 9 -#endif +#define ISP_PLATFORM_VERSION_MAJOR 7 +#define ISP_PLATFORM_VERSION_MINOR 0 /* * Efficiency- get rid of SBus code && tests unless we need them. */ #ifdef __sparc64__ #define ISP_SBUS_SUPPORTED 1 #else #define ISP_SBUS_SUPPORTED 0 #endif - -#if __FreeBSD_version < 500000 -#define ISP_IFLAGS INTR_TYPE_CAM -#elif __FreeBSD_version < 700037 -#define ISP_IFLAGS INTR_TYPE_CAM | INTR_ENTROPY -#else #define ISP_IFLAGS INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE -#endif -#if __FreeBSD_version < 700000 -typedef void ispfwfunc(int, int, int, const void **); -#endif - #ifdef ISP_TARGET_MODE #define ISP_TARGET_FUNCTIONS 1 -#define ATPDPSIZE 256 +#define ATPDPSIZE 4096 + +#include + typedef struct { + void * next; uint32_t orig_datalen; uint32_t bytes_xfered; uint32_t last_xframt; - uint32_t tag : 16, - lun : 13, /* not enough */ + uint32_t tag; + uint32_t lun; + uint32_t nphdl; + uint32_t sid; + uint32_t portid; + uint32_t + oxid : 16, + cdb0 : 8, + : 1, + dead : 1, + tattr : 3, state : 3; } atio_private_data_t; #define ATPD_STATE_FREE 0 #define ATPD_STATE_ATIO 1 #define ATPD_STATE_CAM 2 #define ATPD_STATE_CTIO 3 #define ATPD_STATE_LAST_CTIO 4 #define ATPD_STATE_PDON 5 +typedef union inot_private_data inot_private_data_t; +union inot_private_data { + inot_private_data_t *next; + struct { + isp_notify_t nt; /* must be first! 
*/ + uint8_t data[64]; /* sb QENTRY_LEN, but order of definitions is wrong */ + uint32_t tag_id, seq_id; + } rd; +}; + typedef struct tstate { - struct tstate *next; + SLIST_ENTRY(tstate) next; struct cam_path *owner; struct ccb_hdr_slist atios; struct ccb_hdr_slist inots; - lun_id_t lun; - int bus; uint32_t hold; int atio_count; int inot_count; + inot_private_data_t * restart_queue; + inot_private_data_t * ntfree; + inot_private_data_t ntpool[ATPDPSIZE]; + atio_private_data_t * atfree; + atio_private_data_t atpool[ATPDPSIZE]; } tstate_t; -#define LUN_HASH_SIZE 32 -#define LUN_HASH_FUNC(isp, port, lun) \ - ((IS_DUALBUS(isp)) ? \ - (((lun) & ((LUN_HASH_SIZE >> 1) - 1)) << (port)) : \ - ((lun) & (LUN_HASH_SIZE - 1))) +#define LUN_HASH_SIZE 32 +#define LUN_HASH_FUNC(lun) ((lun) & (LUN_HASH_SIZE - 1)) + #endif /* * Per command info. */ struct isp_pcmd { struct isp_pcmd * next; bus_dmamap_t dmap; /* dma map for this command */ struct ispsoftc * isp; /* containing isp */ struct callout wdog; /* watchdog timer */ }; #define ISP_PCMD(ccb) (ccb)->ccb_h.spriv_ptr1 #define PISP_PCMD(ccb) ((struct isp_pcmd *)ISP_PCMD(ccb)) -struct isposinfo { - struct ispsoftc * next; - bus_space_tag_t bus_tag; - bus_space_handle_t bus_handle; - bus_dma_tag_t dmat; - uint64_t default_port_wwn; - uint64_t default_node_wwn; - uint32_t default_id; - device_t dev; - struct cam_sim *sim; - struct cam_path *path; - struct cam_sim *sim2; - struct cam_path *path2; - struct intr_config_hook ehook; - uint32_t loop_down_time; - uint32_t loop_down_limit; - uint32_t gone_device_time; - uint32_t : 5, +/* + * Per channel information + */ +SLIST_HEAD(tslist, tstate); + +struct isp_fc { + struct cam_sim *sim; + struct cam_path *path; + struct ispsoftc *isp; + struct proc *kproc; + bus_dma_tag_t tdmat; + bus_dmamap_t tdmap; + uint64_t def_wwpn; + uint64_t def_wwnn; + uint32_t loop_down_time; + uint32_t loop_down_limit; + uint32_t gone_device_time; + uint32_t +#ifdef ISP_TARGET_MODE +#ifdef ISP_INTERNAL_TARGET + 
proc_active : 1, +#endif + tm_luns_enabled : 1, + tm_enable_defer : 1, + tm_enabled : 1, +#endif simqfrozen : 3, + default_id : 8, hysteresis : 8, + role : 2, gdt_running : 1, ldt_running : 1, - disabled : 1, - fcbsy : 1, - mbox_sleeping : 1, - mbox_sleep_ok : 1, - mboxcmd_done : 1, - mboxbsy : 1; - struct callout ldt; /* loop down timer */ - struct callout gdt; /* gone device timer */ -#if __FreeBSD_version < 500000 - uint32_t splcount; - uint32_t splsaved; -#else + loop_dead : 1, + fcbsy : 1; + struct callout ldt; /* loop down timer */ + struct callout gdt; /* gone device timer */ +#ifdef ISP_TARGET_MODE + struct tslist lun_hash[LUN_HASH_SIZE]; +#ifdef ISP_INTERNAL_TARGET + struct proc * target_proc; +#endif +#endif +}; + +struct isp_spi { + struct cam_sim *sim; + struct cam_path *path; + uint32_t +#ifdef ISP_TARGET_MODE +#ifdef ISP_INTERNAL_TARGET + proc_active : 1, +#endif + tm_luns_enabled : 1, + tm_enable_defer : 1, + tm_enabled : 1, +#endif + simqfrozen : 3, + role : 3, + iid : 4; +#ifdef ISP_TARGET_MODE + struct tslist lun_hash[LUN_HASH_SIZE]; +#ifdef ISP_INTERNAL_TARGET + struct proc * target_proc; +#endif +#endif +}; + +struct isposinfo { + /* + * Linkage, locking, and identity + */ struct mtx lock; + device_t dev; + struct cdev * cdev; + struct intr_config_hook ehook; + struct cam_devq * devq; + + /* + * Firmware pointer + */ const struct firmware * fw; - union { - struct { - char wwnn[19]; - char wwpn[19]; - } fc; - } sysctl_info; -#endif - struct proc *kproc; + + /* + * DMA related sdtuff + */ + bus_space_tag_t bus_tag; + bus_dma_tag_t dmat; + bus_space_handle_t bus_handle; bus_dma_tag_t cdmat; bus_dmamap_t cdmap; -#define isp_cdmat isp_osinfo.cdmat -#define isp_cdmap isp_osinfo.cdmap + /* - * Per command information. 
+ * Command and transaction related related stuff */ struct isp_pcmd * pcmd_pool; struct isp_pcmd * pcmd_free; + uint32_t #ifdef ISP_TARGET_MODE -#define TM_WILDCARD_ENABLED 0x02 -#define TM_TMODE_ENABLED 0x01 - uint8_t tmflags[2]; /* two busses */ -#define NLEACT 4 - union ccb * leact[NLEACT]; - tstate_t tsdflt[2]; /* two busses */ - tstate_t *lun_hash[LUN_HASH_SIZE]; - atio_private_data_t atpdp[ATPDPSIZE]; + tmwanted : 1, + tmbusy : 1, +#else + : 2, #endif + forcemulti : 1, + timer_active : 1, + autoconf : 1, + ehook_active : 1, + disabled : 1, + mbox_sleeping : 1, + mbox_sleep_ok : 1, + mboxcmd_done : 1, + mboxbsy : 1; + + struct callout tmo; /* general timer */ + + /* + * misc- needs to be sorted better XXXXXX + */ + int framesize; + int exec_throttle; + int cont_max; + +#ifdef ISP_TARGET_MODE + cam_status * rptr; +#endif + + /* + * Per-type private storage... + */ + union { + struct isp_fc *fc; + struct isp_spi *spi; + void *ptr; + } pc; }; -#define ISP_KT_WCHAN(isp) (&(isp)->isp_osinfo.kproc) +#define ISP_FC_PC(isp, chan) (&(isp)->isp_osinfo.pc.fc[(chan)]) +#define ISP_SPI_PC(isp, chan) (&(isp)->isp_osinfo.pc.spi[(chan)]) +#define ISP_GET_PC(isp, chan, tag, rslt) \ + if (IS_SCSI(isp)) { \ + rslt = ISP_SPI_PC(isp, chan)-> tag; \ + } else { \ + rslt = ISP_FC_PC(isp, chan)-> tag; \ + } +#define ISP_GET_PC_ADDR(isp, chan, tag, rp) \ + if (IS_SCSI(isp)) { \ + rp = &ISP_SPI_PC(isp, chan)-> tag; \ + } else { \ + rp = &ISP_FC_PC(isp, chan)-> tag; \ + } +#define ISP_SET_PC(isp, chan, tag, val) \ + if (IS_SCSI(isp)) { \ + ISP_SPI_PC(isp, chan)-> tag = val; \ + } else { \ + ISP_FC_PC(isp, chan)-> tag = val; \ + } #define isp_lock isp_osinfo.lock #define isp_bus_tag isp_osinfo.bus_tag #define isp_bus_handle isp_osinfo.bus_handle /* * Locking macros... 
*/ -#if __FreeBSD_version < 500000 -#define ISP_LOCK(isp) \ - if (isp->isp_osinfo.splcount++ == 0) { \ - isp->isp_osinfo.splsaved = splcam(); \ - } -#define ISP_UNLOCK(isp) \ - if (isp->isp_osinfo.splcount > 1) { \ - isp->isp_osinfo.splcount--; \ - } else { \ - isp->isp_osinfo.splcount = 0; \ - splx(isp->isp_osinfo.splsaved); \ - } -#elif __FreeBSD_version < 700037 -#define ISP_LOCK(isp) do {} while (0) -#define ISP_UNLOCK(isp) do {} while (0) -#else #define ISP_LOCK(isp) mtx_lock(&isp->isp_osinfo.lock) #define ISP_UNLOCK(isp) mtx_unlock(&isp->isp_osinfo.lock) -#endif /* * Required Macros/Defines */ -#define ISP2100_SCRLEN 0x1000 +#define ISP_FC_SCRLEN 0x1000 -#define MEMZERO(a, b) memset(a, 0, b) -#define MEMCPY memcpy -#define SNPRINTF snprintf -#define USEC_DELAY DELAY -#define USEC_SLEEP(isp, x) DELAY(x) +#define ISP_MEMZERO(a, b) memset(a, 0, b) +#define ISP_MEMCPY memcpy +#define ISP_SNPRINTF snprintf +#define ISP_DELAY DELAY +#define ISP_SLEEP(isp, x) DELAY(x) +#ifndef DIAGNOSTIC +#define ISP_INLINE __inline +#else +#define ISP_INLINE +#endif + #define NANOTIME_T struct timespec #define GET_NANOTIME nanotime #define GET_NANOSEC(x) ((x)->tv_sec * 1000000000 + (x)->tv_nsec) #define NANOTIME_SUB isp_nanotime_sub #define MAXISPREQUEST(isp) ((IS_FC(isp) || IS_ULTRA2(isp))? 
1024 : 256) #define MEMORYBARRIER(isp, type, offset, size) \ switch (type) { \ case SYNC_SFORDEV: \ case SYNC_REQUEST: \ - bus_dmamap_sync(isp->isp_cdmat, isp->isp_cdmap, \ + bus_dmamap_sync(isp->isp_osinfo.cdmat, \ + isp->isp_osinfo.cdmap, \ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \ break; \ case SYNC_SFORCPU: \ case SYNC_RESULT: \ - bus_dmamap_sync(isp->isp_cdmat, isp->isp_cdmap, \ + bus_dmamap_sync(isp->isp_osinfo.cdmat, \ + isp->isp_osinfo.cdmap, \ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); \ break; \ case SYNC_REG: \ - bus_space_barrier(isp->isp_bus_tag, \ - isp->isp_bus_handle, offset, size, \ + bus_space_barrier(isp->isp_osinfo.bus_tag, \ + isp->isp_osinfo.bus_handle, offset, size, \ BUS_SPACE_BARRIER_READ); \ break; \ default: \ break; \ } #define MBOX_ACQUIRE isp_mbox_acquire #define MBOX_WAIT_COMPLETE isp_mbox_wait_complete #define MBOX_NOTIFY_COMPLETE isp_mbox_notify_done #define MBOX_RELEASE isp_mbox_release -#define FC_SCRATCH_ACQUIRE(isp) \ - if (isp->isp_osinfo.fcbsy) { \ - isp_prt(isp, ISP_LOGWARN, \ - "FC scratch area busy (line %d)!", __LINE__); \ - } else \ - isp->isp_osinfo.fcbsy = 1 -#define FC_SCRATCH_RELEASE(isp) isp->isp_osinfo.fcbsy = 0 +#define FC_SCRATCH_ACQUIRE isp_fc_scratch_acquire +#define FC_SCRATCH_RELEASE(isp, chan) isp->isp_osinfo.pc.fc[chan].fcbsy = 0 #ifndef SCSI_GOOD #define SCSI_GOOD SCSI_STATUS_OK #endif #ifndef SCSI_CHECK #define SCSI_CHECK SCSI_STATUS_CHECK_COND #endif #ifndef SCSI_BUSY #define SCSI_BUSY SCSI_STATUS_BUSY #endif #ifndef SCSI_QFULL #define SCSI_QFULL SCSI_STATUS_QUEUE_FULL #endif #define XS_T struct ccb_scsiio #define XS_DMA_ADDR_T bus_addr_t +#define XS_GET_DMA64_SEG(a, b, c) \ +{ \ + ispds64_t *d = a; \ + bus_dma_segment_t *e = b; \ + uint32_t f = c; \ + e += f; \ + d->ds_base = DMA_LO32(e->ds_addr); \ + d->ds_basehi = DMA_HI32(e->ds_addr); \ + d->ds_count = e->ds_len; \ +} +#define XS_GET_DMA_SEG(a, b, c) \ +{ \ + ispds_t *d = a; \ + bus_dma_segment_t *e = b; \ + uint32_t f = c; \ + e += f; \ + 
d->ds_base = DMA_LO32(e->ds_addr); \ + d->ds_count = e->ds_len; \ +} #define XS_ISP(ccb) cam_sim_softc(xpt_path_sim((ccb)->ccb_h.path)) #define XS_CHANNEL(ccb) cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path)) #define XS_TGT(ccb) (ccb)->ccb_h.target_id #define XS_LUN(ccb) (ccb)->ccb_h.target_lun #define XS_CDBP(ccb) \ (((ccb)->ccb_h.flags & CAM_CDB_POINTER)? \ (ccb)->cdb_io.cdb_ptr : (ccb)->cdb_io.cdb_bytes) #define XS_CDBLEN(ccb) (ccb)->cdb_len #define XS_XFRLEN(ccb) (ccb)->dxfer_len #define XS_TIME(ccb) (ccb)->ccb_h.timeout -#define XS_RESID(ccb) (ccb)->resid +#define XS_GET_RESID(ccb) (ccb)->resid +#define XS_SET_RESID(ccb, r) (ccb)->resid = r #define XS_STSP(ccb) (&(ccb)->scsi_status) #define XS_SNSP(ccb) (&(ccb)->sense_data) #define XS_SNSLEN(ccb) \ imin((sizeof((ccb)->sense_data)), ccb->sense_len) #define XS_SNSKEY(ccb) ((ccb)->sense_data.flags & 0xf) #define XS_TAG_P(ccb) \ (((ccb)->ccb_h.flags & CAM_TAG_ACTION_VALID) && \ (ccb)->tag_action != CAM_TAG_ACTION_NONE) #define XS_TAG_TYPE(ccb) \ ((ccb->tag_action == MSG_SIMPLE_Q_TAG)? REQFLAG_STAG : \ ((ccb->tag_action == MSG_HEAD_OF_Q_TAG)? 
REQFLAG_HTAG : REQFLAG_OTAG)) #define XS_SETERR(ccb, v) (ccb)->ccb_h.status &= ~CAM_STATUS_MASK, \ (ccb)->ccb_h.status |= v, \ (ccb)->ccb_h.spriv_field0 |= ISP_SPRIV_ERRSET # define HBA_NOERROR CAM_REQ_INPROG # define HBA_BOTCH CAM_UNREC_HBA_ERROR # define HBA_CMDTIMEOUT CAM_CMD_TIMEOUT # define HBA_SELTIMEOUT CAM_SEL_TIMEOUT # define HBA_TGTBSY CAM_SCSI_STATUS_ERROR # define HBA_BUSRESET CAM_SCSI_BUS_RESET # define HBA_ABORTED CAM_REQ_ABORTED # define HBA_DATAOVR CAM_DATA_RUN_ERR # define HBA_ARQFAIL CAM_AUTOSENSE_FAIL #define XS_ERR(ccb) ((ccb)->ccb_h.status & CAM_STATUS_MASK) #define XS_NOERR(ccb) \ (((ccb)->ccb_h.spriv_field0 & ISP_SPRIV_ERRSET) == 0 || \ ((ccb)->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) #define XS_INITERR(ccb) \ XS_SETERR(ccb, CAM_REQ_INPROG), (ccb)->ccb_h.spriv_field0 = 0 #define XS_SAVE_SENSE(xs, sense_ptr, sense_len) \ (xs)->ccb_h.status |= CAM_AUTOSNS_VALID; \ memcpy(&(xs)->sense_data, sense_ptr, imin(XS_SNSLEN(xs), sense_len)) #define XS_SET_STATE_STAT(a, b, c) -#define DEFAULT_IID(x) (isp)->isp_osinfo.default_id -#define DEFAULT_LOOPID(x) (isp)->isp_osinfo.default_id -#define DEFAULT_NODEWWN(isp) (isp)->isp_osinfo.default_node_wwn -#define DEFAULT_PORTWWN(isp) (isp)->isp_osinfo.default_port_wwn -#define ISP_NODEWWN(isp) FCPARAM(isp)->isp_wwnn_nvram -#define ISP_PORTWWN(isp) FCPARAM(isp)->isp_wwpn_nvram +#define DEFAULT_FRAMESIZE(isp) isp->isp_osinfo.framesize +#define DEFAULT_EXEC_THROTTLE(isp) isp->isp_osinfo.exec_throttle +#define GET_DEFAULT_ROLE(isp, chan) \ + (IS_FC(isp)? 
ISP_FC_PC(isp, chan)->role : ISP_SPI_PC(isp, chan)->role) +#define SET_DEFAULT_ROLE(isp, chan, val) \ + if (IS_FC(isp)) { \ + ISP_FC_PC(isp, chan)->role = val; \ + } else { \ + ISP_SPI_PC(isp, chan)->role = val; \ + } -#if __FreeBSD_version < 500000 -#if _BYTE_ORDER == _LITTLE_ENDIAN -#define bswap16 htobe16 -#define bswap32 htobe32 -#else -#define bswap16 htole16 -#define bswap32 htole32 -#endif -#endif +#define DEFAULT_IID(isp, chan) isp->isp_osinfo.pc.spi[chan].iid +#define DEFAULT_LOOPID(x, chan) isp->isp_osinfo.pc.fc[chan].default_id + +#define DEFAULT_NODEWWN(isp, chan) isp_default_wwn(isp, chan, 0, 1) +#define DEFAULT_PORTWWN(isp, chan) isp_default_wwn(isp, chan, 0, 0) +#define ACTIVE_NODEWWN(isp, chan) isp_default_wwn(isp, chan, 1, 1) +#define ACTIVE_PORTWWN(isp, chan) isp_default_wwn(isp, chan, 1, 0) + + #if BYTE_ORDER == BIG_ENDIAN #ifdef ISP_SBUS_SUPPORTED #define ISP_IOXPUT_8(isp, s, d) *(d) = s #define ISP_IOXPUT_16(isp, s, d) \ *(d) = (isp->isp_bustype == ISP_BT_SBUS)? s : bswap16(s) #define ISP_IOXPUT_32(isp, s, d) \ *(d) = (isp->isp_bustype == ISP_BT_SBUS)? s : bswap32(s) #define ISP_IOXGET_8(isp, s, d) d = (*((uint8_t *)s)) #define ISP_IOXGET_16(isp, s, d) \ d = (isp->isp_bustype == ISP_BT_SBUS)? \ *((uint16_t *)s) : bswap16(*((uint16_t *)s)) #define ISP_IOXGET_32(isp, s, d) \ d = (isp->isp_bustype == ISP_BT_SBUS)? 
\ *((uint32_t *)s) : bswap32(*((uint32_t *)s)) #else /* ISP_SBUS_SUPPORTED */ #define ISP_IOXPUT_8(isp, s, d) *(d) = s #define ISP_IOXPUT_16(isp, s, d) *(d) = bswap16(s) #define ISP_IOXPUT_32(isp, s, d) *(d) = bswap32(s) #define ISP_IOXGET_8(isp, s, d) d = (*((uint8_t *)s)) #define ISP_IOXGET_16(isp, s, d) d = bswap16(*((uint16_t *)s)) #define ISP_IOXGET_32(isp, s, d) d = bswap32(*((uint32_t *)s)) #endif #define ISP_SWIZZLE_NVRAM_WORD(isp, rp) *rp = bswap16(*rp) #define ISP_SWIZZLE_NVRAM_LONG(isp, rp) *rp = bswap32(*rp) #define ISP_IOZGET_8(isp, s, d) d = (*((uint8_t *)s)) #define ISP_IOZGET_16(isp, s, d) d = (*((uint16_t *)s)) #define ISP_IOZGET_32(isp, s, d) d = (*((uint32_t *)s)) #define ISP_IOZPUT_8(isp, s, d) *(d) = s #define ISP_IOZPUT_16(isp, s, d) *(d) = s #define ISP_IOZPUT_32(isp, s, d) *(d) = s #else #define ISP_IOXPUT_8(isp, s, d) *(d) = s #define ISP_IOXPUT_16(isp, s, d) *(d) = s #define ISP_IOXPUT_32(isp, s, d) *(d) = s #define ISP_IOXGET_8(isp, s, d) d = *(s) #define ISP_IOXGET_16(isp, s, d) d = *(s) #define ISP_IOXGET_32(isp, s, d) d = *(s) #define ISP_SWIZZLE_NVRAM_WORD(isp, rp) #define ISP_SWIZZLE_NVRAM_LONG(isp, rp) #define ISP_IOZPUT_8(isp, s, d) *(d) = s #define ISP_IOZPUT_16(isp, s, d) *(d) = bswap16(s) #define ISP_IOZPUT_32(isp, s, d) *(d) = bswap32(s) #define ISP_IOZGET_8(isp, s, d) d = (*((uint8_t *)(s))) #define ISP_IOZGET_16(isp, s, d) d = bswap16(*((uint16_t *)(s))) #define ISP_IOZGET_32(isp, s, d) d = bswap32(*((uint32_t *)(s))) #endif #define ISP_SWAP16(isp, s) bswap16(s) #define ISP_SWAP32(isp, s) bswap32(s) /* * Includes of common header files */ #include #include #include -#ifdef ISP_TARGET_MODE -#include -#endif - /* * isp_osinfo definiitions && shorthand */ #define SIMQFRZ_RESOURCE 0x1 #define SIMQFRZ_LOOPDOWN 0x2 #define SIMQFRZ_TIMED 0x4 -#define isp_sim isp_osinfo.sim -#define isp_path isp_osinfo.path -#define isp_sim2 isp_osinfo.sim2 -#define isp_path2 isp_osinfo.path2 #define isp_dev isp_osinfo.dev /* * prototypes for isp_pci 
&& isp_freebsd to share */ -extern void isp_attach(ispsoftc_t *); +extern int isp_attach(ispsoftc_t *); +extern void isp_detach(ispsoftc_t *); extern void isp_uninit(ispsoftc_t *); +extern uint64_t isp_default_wwn(ispsoftc_t *, int, int, int); /* * driver global data */ extern int isp_announced; extern int isp_fabric_hysteresis; extern int isp_loop_down_limit; extern int isp_gone_device_time; extern int isp_quickboot_time; +extern int isp_autoconfig; /* * Platform private flags */ #define ISP_SPRIV_ERRSET 0x1 -#define ISP_SPRIV_INWDOG 0x2 -#define ISP_SPRIV_GRACE 0x4 #define ISP_SPRIV_DONE 0x8 -#define XS_CMD_S_WDOG(sccb) (sccb)->ccb_h.spriv_field0 |= ISP_SPRIV_INWDOG -#define XS_CMD_C_WDOG(sccb) (sccb)->ccb_h.spriv_field0 &= ~ISP_SPRIV_INWDOG -#define XS_CMD_WDOG_P(sccb) ((sccb)->ccb_h.spriv_field0 & ISP_SPRIV_INWDOG) - -#define XS_CMD_S_GRACE(sccb) (sccb)->ccb_h.spriv_field0 |= ISP_SPRIV_GRACE -#define XS_CMD_C_GRACE(sccb) (sccb)->ccb_h.spriv_field0 &= ~ISP_SPRIV_GRACE -#define XS_CMD_GRACE_P(sccb) ((sccb)->ccb_h.spriv_field0 & ISP_SPRIV_GRACE) - #define XS_CMD_S_DONE(sccb) (sccb)->ccb_h.spriv_field0 |= ISP_SPRIV_DONE #define XS_CMD_C_DONE(sccb) (sccb)->ccb_h.spriv_field0 &= ~ISP_SPRIV_DONE #define XS_CMD_DONE_P(sccb) ((sccb)->ccb_h.spriv_field0 & ISP_SPRIV_DONE) #define XS_CMD_S_CLEAR(sccb) (sccb)->ccb_h.spriv_field0 = 0 /* * Platform Library Functions */ void isp_prt(ispsoftc_t *, int level, const char *, ...) 
__printflike(3, 4); uint64_t isp_nanotime_sub(struct timespec *, struct timespec *); int isp_mbox_acquire(ispsoftc_t *); void isp_mbox_wait_complete(ispsoftc_t *, mbreg_t *); void isp_mbox_notify_done(ispsoftc_t *); void isp_mbox_release(ispsoftc_t *); +int isp_fc_scratch_acquire(ispsoftc_t *, int); int isp_mstohz(int); void isp_platform_intr(void *); void isp_common_dmateardown(ispsoftc_t *, struct ccb_scsiio *, uint32_t); /* * Platform Version specific defines */ -#if __FreeBSD_version < 500000 -#define BUS_DMA_ROOTARG(x) NULL -#define isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) \ - bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) -#elif __FreeBSD_version < 700020 -#define BUS_DMA_ROOTARG(x) NULL -#define isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) \ - bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \ - busdma_lock_mutex, &Giant, z) -#elif __FreeBSD_version < 700037 #define BUS_DMA_ROOTARG(x) bus_get_dma_tag(x) #define isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) \ bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \ - busdma_lock_mutex, &Giant, z) -#else -#define BUS_DMA_ROOTARG(x) bus_get_dma_tag(x) -#define isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) \ - bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \ busdma_lock_mutex, &isp->isp_osinfo.lock, z) -#endif -#if __FreeBSD_version < 700031 -#define isp_setup_intr(d, i, f, U, if, ifa, hp) \ - bus_setup_intr(d, i, f, if, ifa, hp) -#else #define isp_setup_intr bus_setup_intr -#endif -#if __FreeBSD_version < 500000 -#define isp_sim_alloc cam_sim_alloc -#define isp_callout_init(x) callout_init(x) -#elif __FreeBSD_version < 700037 -#define isp_callout_init(x) callout_init(x, 0) -#define isp_sim_alloc cam_sim_alloc -#else -#define isp_callout_init(x) callout_init(x, 1) #define isp_sim_alloc(a, b, c, d, e, f, g, h) \ cam_sim_alloc(a, b, c, d, e, &(d)->isp_osinfo.lock, f, g, h) -#endif /* Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE */ 
-#define ISP_MAXPHYS (128 * 1024) -#define ISP_NSEGS ((ISP_MAXPHYS / PAGE_SIZE) + 1) +#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1) +#define ISP_PATH_PRT(i, l, p, ...) \ + if ((l) == ISP_LOGALL || ((l)& (i)->isp_dblev) != 0) { \ + xpt_print(p, __VA_ARGS__); \ + } + /* * Platform specific inline functions */ -static __inline int isp_get_pcmd(ispsoftc_t *, union ccb *); -static __inline void isp_free_pcmd(ispsoftc_t *, union ccb *); - -static __inline int -isp_get_pcmd(ispsoftc_t *isp, union ccb *ccb) -{ - ISP_PCMD(ccb) = isp->isp_osinfo.pcmd_free; - if (ISP_PCMD(ccb) == NULL) { - return (-1); - } - isp->isp_osinfo.pcmd_free = ((struct isp_pcmd *)ISP_PCMD(ccb))->next; - return (0); -} - -static __inline void -isp_free_pcmd(ispsoftc_t *isp, union ccb *ccb) -{ - ((struct isp_pcmd *)ISP_PCMD(ccb))->next = isp->isp_osinfo.pcmd_free; - isp->isp_osinfo.pcmd_free = ISP_PCMD(ccb); - ISP_PCMD(ccb) = NULL; -} - /* * ISP General Library functions */ #include #endif /* _ISP_FREEBSD_H */ Index: head/sys/dev/isp/isp_ioctl.h =================================================================== --- head/sys/dev/isp/isp_ioctl.h (revision 196007) +++ head/sys/dev/isp/isp_ioctl.h (revision 196008) @@ -1,172 +1,190 @@ /* $FreeBSD$ */ /*- * Copyright (c) 1997-2006 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * ioctl definitions for Qlogic FC/SCSI HBA driver */ #define ISP_IOC (021) /* 'Ctrl-Q' */ /* * This ioctl sets/retrieves the debugging level for this hba instance. * Note that this is not a simple integer level- see ispvar.h for definitions. * * The arguments is a pointer to an integer with the new debugging level. * The old value is written into this argument. */ #define ISP_SDBLEV _IOWR(ISP_IOC, 1, int) /* * This ioctl resets the HBA. Use with caution. */ #define ISP_RESETHBA _IO(ISP_IOC, 2) /* * This ioctl performs a fibre channel rescan. */ #define ISP_RESCAN _IO(ISP_IOC, 3) /* * This ioctl performs a reset and then will set the adapter to the * role that was passed in (the old role will be returned). It almost * goes w/o saying: use with caution. + * + * Channel selector stored in bits 8..32 as input to driver. */ #define ISP_SETROLE _IOWR(ISP_IOC, 4, int) #define ISP_ROLE_NONE 0x0 #define ISP_ROLE_TARGET 0x1 #define ISP_ROLE_INITIATOR 0x2 #define ISP_ROLE_BOTH (ISP_ROLE_TARGET|ISP_ROLE_INITIATOR) /* * Get the current adapter role + * Channel selector passed in first argument. 
*/ #define ISP_GETROLE _IOR(ISP_IOC, 5, int) /* * Get/Clear Stats */ #define ISP_STATS_VERSION 0 typedef struct { uint8_t isp_stat_version; uint8_t isp_type; /* (ro) reflects chip type */ uint8_t isp_revision; /* (ro) reflects chip version */ uint8_t unused1; uint32_t unused2; /* * Statistics Counters */ #define ISP_NSTATS 16 #define ISP_INTCNT 0 #define ISP_INTBOGUS 1 #define ISP_INTMBOXC 2 #define ISP_INGOASYNC 3 #define ISP_RSLTCCMPLT 4 #define ISP_FPHCCMCPLT 5 #define ISP_RSCCHIWAT 6 #define ISP_FPCCHIWAT 7 uint64_t isp_stats[ISP_NSTATS]; } isp_stats_t; #define ISP_GET_STATS _IOR(ISP_IOC, 6, isp_stats_t) #define ISP_CLR_STATS _IO(ISP_IOC, 7) /* * Initiate a LIP */ #define ISP_FC_LIP _IO(ISP_IOC, 8) /* * Return the Port Database structure for the named device, or ENODEV if none. * Caller fills in virtual loopid (0..255), aka 'target'. The driver returns * ENODEV (if nothing valid there) or the actual loopid (for local loop devices * only), 24 bit Port ID and Node and Port WWNs. */ struct isp_fc_device { - uint32_t loopid; /* 0..255 */ - uint32_t : 6, + uint32_t loopid; /* 0..255,2047 */ + uint32_t + chan : 6, role : 2, portid : 24; /* 24 bit Port ID */ uint64_t node_wwn; uint64_t port_wwn; }; #define ISP_FC_GETDINFO _IOWR(ISP_IOC, 9, struct isp_fc_device) /* * Get F/W crash dump */ #define ISP_GET_FW_CRASH_DUMP _IO(ISP_IOC, 10) #define ISP_FORCE_CRASH_DUMP _IO(ISP_IOC, 11) /* * Get information about this Host Adapter, including current connection * topology and capabilities. 
*/ struct isp_hba_device { uint32_t : 8, - : 4, fc_speed : 4, /* Gbps */ - : 2, - fc_class2 : 1, - fc_ip_supported : 1, - fc_scsi_supported : 1, + : 1, fc_topology : 3, - fc_loopid : 8; + fc_channel : 8, + fc_loopid : 16; uint8_t fc_fw_major; uint8_t fc_fw_minor; uint8_t fc_fw_micro; - uint8_t reserved; + uint8_t fc_nchannels; /* number of supported channels */ + uint16_t fc_nports; /* number of supported ports */ uint64_t nvram_node_wwn; uint64_t nvram_port_wwn; uint64_t active_node_wwn; uint64_t active_port_wwn; }; #define ISP_TOPO_UNKNOWN 0 /* connection topology unknown */ #define ISP_TOPO_FCAL 1 /* private or PL_DA */ #define ISP_TOPO_LPORT 2 /* public loop */ #define ISP_TOPO_NPORT 3 /* N-port */ #define ISP_TOPO_FPORT 4 /* F-port */ -#define ISP_FC_GETHINFO _IOR(ISP_IOC, 12, struct isp_hba_device) +/* don't use 12 any more */ +#define ISP_FC_GETHINFO _IOWR(ISP_IOC, 13, struct isp_hba_device) /* * Various Reset Goodies */ struct isp_fc_tsk_mgmt { - uint32_t loopid; /* 0..255 */ - uint32_t lun; + uint32_t loopid; /* 0..255/2048 */ + uint16_t lun; + uint16_t chan; enum { IPT_CLEAR_ACA, IPT_TARGET_RESET, IPT_LUN_RESET, IPT_CLEAR_TASK_SET, IPT_ABORT_TASK_SET } action; }; -#define ISP_TSK_MGMT _IOWR(ISP_IOC, 97, struct isp_fc_tsk_mgmt) +/* don't use 97 any more */ +#define ISP_TSK_MGMT _IOWR(ISP_IOC, 98, struct isp_fc_tsk_mgmt) + +/* + * Just gimme a list of WWPNs that are logged into us. + */ +typedef struct { + uint16_t count; + uint16_t channel; + struct wwnpair { + uint64_t wwnn; + uint64_t wwpn; + } wwns[1]; +} isp_dlist_t; +#define ISP_FC_GETDLIST _IO(ISP_IOC, 14) Index: head/sys/dev/isp/isp_library.c =================================================================== --- head/sys/dev/isp/isp_library.c (revision 196007) +++ head/sys/dev/isp/isp_library.c (revision 196008) @@ -1,2904 +1,3810 @@ /*- - * Copyright (c) 1997-2007 by Matthew Jacob + * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. 
- * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * + * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * + * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * */ /* * Qlogic Host Adapter Internal Library Functions */ #ifdef __NetBSD__ #include __KERNEL_RCSID(0, "$NetBSD$"); #include #endif #ifdef __FreeBSD__ #include __FBSDID("$FreeBSD$"); #include #endif #ifdef __OpenBSD__ #include #endif #ifdef __linux__ #include "isp_linux.h" #endif #ifdef __svr4__ #include "isp_solaris.h" #endif +const char *isp_class3_roles[4] = { + "None", "Target", "Initiator", "Target/Initiator" +}; + +/* + * Command shipping- finish off first queue entry and do dma mapping and additional segments as needed. + * + * Called with the first queue entry at least partially filled out. 
+ */ int +isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t totalcnt, isp_ddir_t ddir) +{ + uint8_t storage[QENTRY_LEN]; + uint8_t type, nqe; + uint32_t seg, curseg, seglim, nxt, nxtnxt, ddf; + ispds_t *dsp = NULL; + ispds64_t *dsp64 = NULL; + void *qe0, *qe1; + + qe0 = isp_getrqentry(isp); + if (qe0 == NULL) { + return (CMD_EAGAIN); + } + nxt = ISP_NXT_QENTRY(isp->isp_reqidx, RQUEST_QUEUE_LEN(isp)); + + type = ((isphdr_t *)fqe)->rqs_entry_type; + nqe = 1; + + /* + * If we have no data to transmit, just copy the first IOCB and start it up. + */ + if (ddir == ISP_NOXFR) { + if (type == RQSTYPE_T2RQS || type == RQSTYPE_T3RQS) { + ddf = CT2_NO_DATA; + } else { + ddf = 0; + } + goto copy_and_sync; + } + + /* + * First figure out how many pieces of data to transfer and what kind and how many we can put into the first queue entry. + */ + switch (type) { + case RQSTYPE_REQUEST: + ddf = (ddir == ISP_TO_DEVICE)? REQFLAG_DATA_OUT : REQFLAG_DATA_IN; + dsp = ((ispreq_t *)fqe)->req_dataseg; + seglim = ISP_RQDSEG; + break; + case RQSTYPE_CMDONLY: + ddf = (ddir == ISP_TO_DEVICE)? REQFLAG_DATA_OUT : REQFLAG_DATA_IN; + seglim = 0; + break; + case RQSTYPE_T2RQS: + ddf = (ddir == ISP_TO_DEVICE)? REQFLAG_DATA_OUT : REQFLAG_DATA_IN; + dsp = ((ispreqt2_t *)fqe)->req_dataseg; + seglim = ISP_RQDSEG_T2; + break; + case RQSTYPE_A64: + ddf = (ddir == ISP_TO_DEVICE)? REQFLAG_DATA_OUT : REQFLAG_DATA_IN; + dsp64 = ((ispreqt3_t *)fqe)->req_dataseg; + seglim = ISP_RQDSEG_T3; + break; + case RQSTYPE_T3RQS: + ddf = (ddir == ISP_TO_DEVICE)? REQFLAG_DATA_OUT : REQFLAG_DATA_IN; + dsp64 = ((ispreqt3_t *)fqe)->req_dataseg; + seglim = ISP_RQDSEG_T3; + break; + case RQSTYPE_T7RQS: + ddf = (ddir == ISP_TO_DEVICE)? 
FCP_CMND_DATA_WRITE : FCP_CMND_DATA_READ; + dsp64 = &((ispreqt7_t *)fqe)->req_dataseg; + seglim = 1; + break; + default: + return (CMD_COMPLETE); + } + + if (seglim > nsegs) { + seglim = nsegs; + } + + for (seg = curseg = 0; curseg < seglim; curseg++) { + if (dsp64) { + XS_GET_DMA64_SEG(dsp64++, segp, seg++); + } else { + XS_GET_DMA_SEG(dsp++, segp, seg++); + } + } + + + /* + * Second, start building additional continuation segments as needed. + */ + while (seg < nsegs) { + nxtnxt = ISP_NXT_QENTRY(nxt, RQUEST_QUEUE_LEN(isp)); + if (nxtnxt == isp->isp_reqodx) { + return (CMD_EAGAIN); + } + ISP_MEMZERO(storage, QENTRY_LEN); + qe1 = ISP_QUEUE_ENTRY(isp->isp_rquest, nxt); + nxt = nxtnxt; + if (dsp64) { + ispcontreq64_t *crq = (ispcontreq64_t *) storage; + seglim = ISP_CDSEG64; + crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; + crq->req_header.rqs_entry_count = 1; + dsp64 = crq->req_dataseg; + } else { + ispcontreq_t *crq = (ispcontreq_t *) storage; + seglim = ISP_CDSEG; + crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; + crq->req_header.rqs_entry_count = 1; + dsp = crq->req_dataseg; + } + if (seg + seglim > nsegs) { + seglim = nsegs - seg; + } + for (curseg = 0; curseg < seglim; curseg++) { + if (dsp64) { + XS_GET_DMA64_SEG(dsp64++, segp, seg++); + } else { + XS_GET_DMA_SEG(dsp++, segp, seg++); + } + } + if (dsp64) { + isp_put_cont64_req(isp, (ispcontreq64_t *)storage, qe1); + } else { + isp_put_cont_req(isp, (ispcontreq_t *)storage, qe1); + } + if (isp->isp_dblev & ISP_LOGDEBUG1) { + isp_print_bytes(isp, "additional queue entry", QENTRY_LEN, storage); + } + nqe++; + } + +copy_and_sync: + ((isphdr_t *)fqe)->rqs_entry_count = nqe; + switch (type) { + case RQSTYPE_REQUEST: + ((ispreq_t *)fqe)->req_flags |= ddf; + /* + * This is historical and not clear whether really needed. 
+ */ + if (nsegs == 0) { + nsegs = 1; + } + ((ispreq_t *)fqe)->req_seg_count = nsegs; + isp_put_request(isp, fqe, qe0); + break; + case RQSTYPE_CMDONLY: + ((ispreq_t *)fqe)->req_flags |= ddf; + /* + * This is historical and not clear whether really needed. + */ + if (nsegs == 0) { + nsegs = 1; + } + ((ispextreq_t *)fqe)->req_seg_count = nsegs; + isp_put_extended_request(isp, fqe, qe0); + break; + case RQSTYPE_T2RQS: + ((ispreqt2_t *)fqe)->req_flags |= ddf; + ((ispreqt2_t *)fqe)->req_seg_count = nsegs; + ((ispreqt2_t *)fqe)->req_totalcnt = totalcnt; + if (ISP_CAP_2KLOGIN(isp)) { + isp_put_request_t2e(isp, fqe, qe0); + } else { + isp_put_request_t2(isp, fqe, qe0); + } + break; + case RQSTYPE_A64: + case RQSTYPE_T3RQS: + ((ispreqt3_t *)fqe)->req_flags |= ddf; + ((ispreqt3_t *)fqe)->req_seg_count = nsegs; + ((ispreqt3_t *)fqe)->req_totalcnt = totalcnt; + if (ISP_CAP_2KLOGIN(isp)) { + isp_put_request_t3e(isp, fqe, qe0); + } else { + isp_put_request_t3(isp, fqe, qe0); + } + break; + case RQSTYPE_T7RQS: + ((ispreqt7_t *)fqe)->req_alen_datadir = ddf; + ((ispreqt7_t *)fqe)->req_seg_count = nsegs; + ((ispreqt7_t *)fqe)->req_dl = totalcnt; + isp_put_request_t7(isp, fqe, qe0); + break; + default: + return (CMD_COMPLETE); + } + if (isp->isp_dblev & ISP_LOGDEBUG1) { + isp_print_bytes(isp, "first queue entry", QENTRY_LEN, fqe); + } + ISP_ADD_REQUEST(isp, nxt); + return (CMD_QUEUED); +} + +int isp_save_xs(ispsoftc_t *isp, XS_T *xs, uint32_t *handlep) { uint16_t i, j; for (j = isp->isp_lasthdls, i = 0; i < isp->isp_maxcmds; i++) { if (isp->isp_xflist[j] == NULL) { break; } if (++j == isp->isp_maxcmds) { j = 0; } } if (i == isp->isp_maxcmds) { return (-1); } isp->isp_xflist[j] = xs; *handlep = j+1; if (++j == isp->isp_maxcmds) { j = 0; } isp->isp_lasthdls = (uint32_t)j; return (0); } XS_T * isp_find_xs(ispsoftc_t *isp, uint32_t handle) { if (handle < 1 || handle > (uint32_t) isp->isp_maxcmds) { return (NULL); } else { return (isp->isp_xflist[handle - 1]); } } uint32_t 
isp_find_handle(ispsoftc_t *isp, XS_T *xs) { uint16_t i; if (xs != NULL) { for (i = 0; i < isp->isp_maxcmds; i++) { if (isp->isp_xflist[i] == xs) { return ((uint32_t) (i+1)); } } } return (0); } uint32_t isp_handle_index(uint32_t handle) { return (handle - 1); } void isp_destroy_handle(ispsoftc_t *isp, uint32_t handle) { if (handle > 0 && handle <= (uint32_t) isp->isp_maxcmds) { isp->isp_xflist[handle - 1] = NULL; } } -int -isp_getrqentry(ispsoftc_t *isp, uint32_t *iptrp, - uint32_t *optrp, void **resultp) +/* + * Make sure we have space to put something on the request queue. + * Return a pointer to that entry if we do. A side effect of this + * function is to update the output index. The input index + * stays the same. + */ +void * +isp_getrqentry(ispsoftc_t *isp) { - volatile uint32_t iptr, optr; - - optr = isp->isp_reqodx = ISP_READ(isp, isp->isp_rqstoutrp); - iptr = isp->isp_reqidx; - *resultp = ISP_QUEUE_ENTRY(isp->isp_rquest, iptr); - iptr = ISP_NXT_QENTRY(iptr, RQUEST_QUEUE_LEN(isp)); - if (iptr == optr) { - return (1); + isp->isp_reqodx = ISP_READ(isp, isp->isp_rqstoutrp); + if (ISP_NXT_QENTRY(isp->isp_reqidx, RQUEST_QUEUE_LEN(isp)) == isp->isp_reqodx) { + return (NULL); } - if (optrp) - *optrp = optr; - if (iptrp) - *iptrp = iptr; - return (0); + return (ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx)); } #define TBA (4 * (((QENTRY_LEN >> 2) * 3) + 1) + 1) void -isp_print_qentry(ispsoftc_t *isp, char *msg, int idx, void *arg) +isp_print_qentry(ispsoftc_t *isp, const char *msg, int idx, void *arg) { char buf[TBA]; int amt, i, j; uint8_t *ptr = arg; isp_prt(isp, ISP_LOGALL, "%s index %d=>", msg, idx); for (buf[0] = 0, amt = i = 0; i < 4; i++) { buf[0] = 0; - SNPRINTF(buf, TBA, " "); + ISP_SNPRINTF(buf, TBA, " "); for (j = 0; j < (QENTRY_LEN >> 2); j++) { - SNPRINTF(buf, TBA, "%s %02x", buf, ptr[amt++] & 0xff); + ISP_SNPRINTF(buf, TBA, "%s %02x", buf, ptr[amt++] & 0xff); } isp_prt(isp, ISP_LOGALL, buf); } } void isp_print_bytes(ispsoftc_t *isp, const char 
*msg, int amt, void *arg) { char buf[128]; uint8_t *ptr = arg; int off; if (msg) isp_prt(isp, ISP_LOGALL, "%s:", msg); off = 0; buf[0] = 0; while (off < amt) { int j, to; to = off; for (j = 0; j < 16; j++) { - SNPRINTF(buf, 128, "%s %02x", buf, ptr[off++] & 0xff); - if (off == amt) + ISP_SNPRINTF(buf, 128, "%s %02x", buf, ptr[off++] & 0xff); + if (off == amt) { break; + } } isp_prt(isp, ISP_LOGALL, "0x%08x:%s", to, buf); buf[0] = 0; } } /* * Do the common path to try and ensure that link is up, we've scanned * the fabric (if we're on a fabric), and that we've synchronized this * all with our own database and done the appropriate logins. * * We repeatedly check for firmware state and loop state after each * action because things may have changed while we were doing this. * Any failure or change of state causes us to return a nonzero value. * * We assume we enter here with any locks held. */ int -isp_fc_runstate(ispsoftc_t *isp, int tval) +isp_fc_runstate(ispsoftc_t *isp, int chan, int tval) { fcparam *fcp; - int *tptr; - if (isp->isp_role == ISP_ROLE_NONE) { + fcp = FCPARAM(isp, chan); + if (fcp->role == ISP_ROLE_NONE) { return (0); } - fcp = FCPARAM(isp); - tptr = &tval; - if (fcp->isp_fwstate < FW_READY || - fcp->isp_loopstate < LOOP_PDB_RCVD) { - if (isp_control(isp, ISPCTL_FCLINK_TEST, tptr) != 0) { - isp_prt(isp, ISP_LOGSANCFG, - "isp_fc_runstate: linktest failed"); + if (fcp->isp_fwstate < FW_READY || fcp->isp_loopstate < LOOP_PDB_RCVD) { + if (isp_control(isp, ISPCTL_FCLINK_TEST, chan, tval) != 0) { + isp_prt(isp, ISP_LOGSANCFG, "isp_fc_runstate: linktest failed for channel %d", chan); return (-1); } - if (fcp->isp_fwstate != FW_READY || - fcp->isp_loopstate < LOOP_PDB_RCVD) { - isp_prt(isp, ISP_LOGSANCFG, - "isp_fc_runstate: f/w not ready"); + if (fcp->isp_fwstate != FW_READY || fcp->isp_loopstate < LOOP_PDB_RCVD) { + isp_prt(isp, ISP_LOGSANCFG, "isp_fc_runstate: f/w not ready for channel %d", chan); return (-1); } } - if ((isp->isp_role & 
ISP_ROLE_INITIATOR) == 0) { + + if ((fcp->role & ISP_ROLE_INITIATOR) == 0) { return (0); } - if (isp_control(isp, ISPCTL_SCAN_LOOP, NULL) != 0) { - isp_prt(isp, ISP_LOGSANCFG, - "isp_fc_runstate: scan loop fails"); + + if (isp_control(isp, ISPCTL_SCAN_LOOP, chan) != 0) { + isp_prt(isp, ISP_LOGSANCFG, "isp_fc_runstate: scan loop fails on channel %d", chan); return (LOOP_PDB_RCVD); } - if (isp_control(isp, ISPCTL_SCAN_FABRIC, NULL) != 0) { - isp_prt(isp, ISP_LOGSANCFG, - "isp_fc_runstate: scan fabric fails"); + if (isp_control(isp, ISPCTL_SCAN_FABRIC, chan) != 0) { + isp_prt(isp, ISP_LOGSANCFG, "isp_fc_runstate: scan fabric fails on channel %d", chan); return (LOOP_LSCAN_DONE); } - if (isp_control(isp, ISPCTL_PDB_SYNC, NULL) != 0) { - isp_prt(isp, ISP_LOGSANCFG, "isp_fc_runstate: pdb_sync fails"); + if (isp_control(isp, ISPCTL_PDB_SYNC, chan) != 0) { + isp_prt(isp, ISP_LOGSANCFG, "isp_fc_runstate: pdb_sync fails on channel %d", chan); return (LOOP_FSCAN_DONE); } if (fcp->isp_fwstate != FW_READY || fcp->isp_loopstate != LOOP_READY) { - isp_prt(isp, ISP_LOGSANCFG, - "isp_fc_runstate: f/w not ready again"); + isp_prt(isp, ISP_LOGSANCFG, "isp_fc_runstate: f/w not ready again on channel %d", chan); return (-1); } return (0); } /* - * Fibre Channel Support- get the port database for the id. 
+ * Fibre Channel Support routines */ void -isp_dump_portdb(ispsoftc_t *isp) +isp_dump_portdb(ispsoftc_t *isp, int chan) { - fcparam *fcp = (fcparam *) isp->isp_param; + fcparam *fcp = FCPARAM(isp, chan); int i; for (i = 0; i < MAX_FC_TARG; i++) { char mb[4]; const char *dbs[8] = { "NIL ", "PROB", "DEAD", "CHGD", "NEW ", "PVLD", "ZOMB", "VLD " }; const char *roles[4] = { " UNK", " TGT", " INI", "TINI" }; fcportdb_t *lp = &fcp->portdb[i]; - if (lp->state == FC_PORTDB_STATE_NIL) { + if (lp->state == FC_PORTDB_STATE_NIL && lp->target_mode == 0) { continue; } - if (lp->ini_map_idx) { - SNPRINTF(mb, sizeof (mb), "%3d", - ((int) lp->ini_map_idx) - 1); + if (lp->dev_map_idx) { + ISP_SNPRINTF(mb, sizeof (mb), "%3d", ((int) lp->dev_map_idx) - 1); } else { - SNPRINTF(mb, sizeof (mb), "---"); + ISP_SNPRINTF(mb, sizeof (mb), "---"); } - isp_prt(isp, ISP_LOGALL, "%d: hdl 0x%x %s al%d tgt %s %s " - "0x%06x =>%s 0x%06x; WWNN 0x%08x%08x WWPN 0x%08x%08x", i, - lp->handle, dbs[lp->state], lp->autologin, mb, - roles[lp->roles], lp->portid, - roles[lp->new_roles], lp->new_portid, - (uint32_t) (lp->node_wwn >> 32), - (uint32_t) (lp->node_wwn), - (uint32_t) (lp->port_wwn >> 32), - (uint32_t) (lp->port_wwn)); + isp_prt(isp, ISP_LOGALL, "Chan %d [%d]: hdl 0x%x %s al%d tgt %s %s 0x%06x =>%s 0x%06x; WWNN 0x%08x%08x WWPN 0x%08x%08x", + chan, i, lp->handle, dbs[lp->state], lp->autologin, mb, roles[lp->roles], lp->portid, roles[lp->new_roles], lp->new_portid, + (uint32_t) (lp->node_wwn >> 32), (uint32_t) (lp->node_wwn), (uint32_t) (lp->port_wwn >> 32), (uint32_t) (lp->port_wwn)); } } +const char * +isp_fc_fw_statename(int state) +{ + switch (state) { + case FW_CONFIG_WAIT: return "Config Wait"; + case FW_WAIT_AL_PA: return "Waiting for AL_PA"; + case FW_WAIT_LOGIN: return "Wait Login"; + case FW_READY: return "Ready"; + case FW_LOSS_OF_SYNC: return "Loss Of Sync"; + case FW_ERROR: return "Error"; + case FW_REINIT: return "Re-Init"; + case FW_NON_PART: return "Nonparticipating"; + default: 
return "?????"; + } +} + +const char * +isp_fc_loop_statename(int state) +{ + switch (state) { + case LOOP_NIL: return "NIL"; + case LOOP_LIP_RCVD: return "LIP Received"; + case LOOP_PDB_RCVD: return "PDB Received"; + case LOOP_SCANNING_LOOP: return "Scanning"; + case LOOP_LSCAN_DONE: return "Loop Scan Done"; + case LOOP_SCANNING_FABRIC: return "Scanning Fabric"; + case LOOP_FSCAN_DONE: return "Fabric Scan Done"; + case LOOP_SYNCING_PDB: return "Syncing PDB"; + case LOOP_READY: return "Ready"; + default: return "?????"; + } +} + +const char * +isp_fc_toponame(fcparam *fcp) +{ + + if (fcp->isp_fwstate != FW_READY) { + return "Unavailable"; + } + switch (fcp->isp_topo) { + case TOPO_NL_PORT: return "Private Loop"; + case TOPO_FL_PORT: return "FL Port"; + case TOPO_N_PORT: return "N-Port to N-Port"; + case TOPO_F_PORT: return "F Port"; + case TOPO_PTP_STUB: return "F Port (no FLOGI_ACC response)"; + default: return "?????"; + } +} + +/* + * Change Roles + */ +int +isp_fc_change_role(ispsoftc_t *isp, int chan, int new_role) +{ + fcparam *fcp = FCPARAM(isp, chan); + + if (chan >= isp->isp_nchan) { + isp_prt(isp, ISP_LOGWARN, "%s: bad channel %d", __func__, chan); + return (ENXIO); + } + if (chan == 0) { +#ifdef ISP_TARGET_MODE + isp_del_all_wwn_entries(isp, chan); +#endif + isp_clear_commands(isp); + + isp_reset(isp, 0); + if (isp->isp_state != ISP_RESETSTATE) { + isp_prt(isp, ISP_LOGERR, "%s: cannot reset card", __func__); + return (EIO); + } + fcp->role = new_role; + isp_init(isp); + if (isp->isp_state != ISP_INITSTATE) { + isp_prt(isp, ISP_LOGERR, "%s: cannot init card", __func__); + return (EIO); + } + isp->isp_state = ISP_RUNSTATE; + return (0); + } else if (ISP_CAP_MULTI_ID(isp)) { + mbreg_t mbs; + vp_modify_t *vp; + uint8_t qe[QENTRY_LEN], *scp; + + ISP_MEMZERO(qe, QENTRY_LEN); + /* Acquire Scratch */ + + if (FC_SCRATCH_ACQUIRE(isp, chan)) { + return (EBUSY); + } + scp = fcp->isp_scratch; + + /* + * Build a VP MODIFY command in memory + */ + vp = (vp_modify_t *) 
qe; + vp->vp_mod_hdr.rqs_entry_type = RQSTYPE_VP_MODIFY; + vp->vp_mod_hdr.rqs_entry_count = 1; + vp->vp_mod_cnt = 1; + vp->vp_mod_idx0 = chan; + vp->vp_mod_cmd = VP_MODIFY_ENA; + vp->vp_mod_ports[0].options = ICB2400_VPOPT_ENABLED; + if (new_role & ISP_ROLE_INITIATOR) { + vp->vp_mod_ports[0].options |= ICB2400_VPOPT_INI_ENABLE; + } + if ((new_role & ISP_ROLE_TARGET) == 0) { + vp->vp_mod_ports[0].options |= ICB2400_VPOPT_TGT_DISABLE; + } + MAKE_NODE_NAME_FROM_WWN(vp->vp_mod_ports[0].wwpn, fcp->isp_wwpn); + MAKE_NODE_NAME_FROM_WWN(vp->vp_mod_ports[0].wwnn, fcp->isp_wwnn); + isp_put_vp_modify(isp, vp, (vp_modify_t *) scp); + + /* + * Build a EXEC IOCB A64 command that points to the VP MODIFY command + */ + MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 0); + mbs.param[1] = QENTRY_LEN; + mbs.param[2] = DMA_WD1(fcp->isp_scdma); + mbs.param[3] = DMA_WD0(fcp->isp_scdma); + mbs.param[6] = DMA_WD3(fcp->isp_scdma); + mbs.param[7] = DMA_WD2(fcp->isp_scdma); + MEMORYBARRIER(isp, SYNC_SFORDEV, 0, 2 * QENTRY_LEN); + isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); + if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { + FC_SCRATCH_RELEASE(isp, chan); + return (EIO); + } + MEMORYBARRIER(isp, SYNC_SFORCPU, QENTRY_LEN, QENTRY_LEN); + isp_get_vp_modify(isp, (vp_modify_t *)&scp[QENTRY_LEN], vp); + +#ifdef ISP_TARGET_MODE + isp_del_all_wwn_entries(isp, chan); +#endif + /* + * Release Scratch + */ + FC_SCRATCH_RELEASE(isp, chan); + + if (vp->vp_mod_status != VP_STS_OK) { + isp_prt(isp, ISP_LOGERR, "%s: VP_MODIFY of Chan %d failed with status %d", __func__, chan, vp->vp_mod_status); + return (EIO); + } + fcp->role = new_role; + return (0); + } else { + return (EINVAL); + } +} + void +isp_clear_commands(ispsoftc_t *isp) +{ + XS_T *xs; + uint32_t tmp, handle; +#ifdef ISP_TARGET_MODE + isp_notify_t notify; +#endif + + for (tmp = 0; isp->isp_xflist && tmp < isp->isp_maxcmds; tmp++) { + xs = isp->isp_xflist[tmp]; + if (xs == NULL) { + continue; + } + handle = isp_find_handle(isp, xs); + if (handle 
== 0) { + continue; + } + if (XS_XFRLEN(xs)) { + ISP_DMAFREE(isp, xs, handle); + XS_SET_RESID(xs, XS_XFRLEN(xs)); + } else { + XS_SET_RESID(xs, 0); + } + isp_destroy_handle(isp, handle); + XS_SETERR(xs, HBA_BUSRESET); + isp_done(xs); + } +#ifdef ISP_TARGET_MODE + for (tmp = 0; isp->isp_tgtlist && tmp < isp->isp_maxcmds; tmp++) { + uint8_t local[QENTRY_LEN]; + + xs = isp->isp_tgtlist[tmp]; + if (xs == NULL) { + continue; + } + handle = isp_find_tgt_handle(isp, xs); + if (handle == 0) { + continue; + } + ISP_DMAFREE(isp, xs, handle); + + ISP_MEMZERO(local, QENTRY_LEN); + if (IS_24XX(isp)) { + ct7_entry_t *ctio = (ct7_entry_t *) local; + ctio->ct_syshandle = handle; + ctio->ct_nphdl = CT_HBA_RESET; + ctio->ct_header.rqs_entry_type = RQSTYPE_CTIO7; + } else if (IS_FC(isp)) { + ct2_entry_t *ctio = (ct2_entry_t *) local; + ctio->ct_syshandle = handle; + ctio->ct_status = CT_HBA_RESET; + ctio->ct_header.rqs_entry_type = RQSTYPE_CTIO2; + } else { + ct_entry_t *ctio = (ct_entry_t *) local; + ctio->ct_syshandle = handle & 0xffff; + ctio->ct_status = CT_HBA_RESET & 0xff;; + ctio->ct_header.rqs_entry_type = RQSTYPE_CTIO; + } + isp_async(isp, ISPASYNC_TARGET_ACTION, local); + } + for (tmp = 0; tmp < isp->isp_nchan; tmp++) { + ISP_MEMZERO(&notify, sizeof (isp_notify_t)); + notify.nt_ncode = NT_HBA_RESET; + notify.nt_hba = isp; + notify.nt_wwn = INI_ANY; + notify.nt_nphdl = NIL_HANDLE; + notify.nt_sid = PORT_ANY; + notify.nt_did = PORT_ANY; + notify.nt_tgt = TGT_ANY; + notify.nt_channel = tmp; + notify.nt_lun = LUN_ANY; + notify.nt_tagval = TAG_ANY; + isp_async(isp, ISPASYNC_TARGET_NOTIFY, &notify); + } +#endif +} + +void isp_shutdown(ispsoftc_t *isp) { if (IS_FC(isp)) { if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_ICR, 0); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_PAUSE); } else { ISP_WRITE(isp, BIU_ICR, 0); ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); ISP_WRITE(isp, BIU2100_CSR, BIU2100_FPM0_REGS); ISP_WRITE(isp, FPM_DIAG_CONFIG, FPM_SOFT_RESET); ISP_WRITE(isp, BIU2100_CSR, BIU2100_FB_REGS);
ISP_WRITE(isp, FBM_CMD, FBMCMD_FIFO_RESET_ALL); ISP_WRITE(isp, BIU2100_CSR, BIU2100_RISC_REGS); } } else { ISP_WRITE(isp, BIU_ICR, 0); ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); } } /* * Functions to move stuff to a form that the QLogic RISC engine understands * and functions to move stuff back to a form the processor understands. * * Each platform is required to provide the 8, 16 and 32 bit * swizzle and unswizzle macros (ISP_IOX{PUT|GET}_{8,16,32}) * * The assumption is that swizzling and unswizzling is mostly done 'in place' * (with a few exceptions for efficiency). */ -#define ISP_IS_SBUS(isp) \ - (ISP_SBUS_SUPPORTED && (isp)->isp_bustype == ISP_BT_SBUS) +#define ISP_IS_SBUS(isp) (ISP_SBUS_SUPPORTED && (isp)->isp_bustype == ISP_BT_SBUS) #define ASIZE(x) (sizeof (x) / sizeof (x[0])) /* * Swizzle/Copy Functions */ void isp_put_hdr(ispsoftc_t *isp, isphdr_t *hpsrc, isphdr_t *hpdst) { if (ISP_IS_SBUS(isp)) { - ISP_IOXPUT_8(isp, hpsrc->rqs_entry_type, - &hpdst->rqs_entry_count); - ISP_IOXPUT_8(isp, hpsrc->rqs_entry_count, - &hpdst->rqs_entry_type); - ISP_IOXPUT_8(isp, hpsrc->rqs_seqno, - &hpdst->rqs_flags); - ISP_IOXPUT_8(isp, hpsrc->rqs_flags, - &hpdst->rqs_seqno); + ISP_IOXPUT_8(isp, hpsrc->rqs_entry_type, &hpdst->rqs_entry_count); + ISP_IOXPUT_8(isp, hpsrc->rqs_entry_count, &hpdst->rqs_entry_type); + ISP_IOXPUT_8(isp, hpsrc->rqs_seqno, &hpdst->rqs_flags); + ISP_IOXPUT_8(isp, hpsrc->rqs_flags, &hpdst->rqs_seqno); } else { - ISP_IOXPUT_8(isp, hpsrc->rqs_entry_type, - &hpdst->rqs_entry_type); - ISP_IOXPUT_8(isp, hpsrc->rqs_entry_count, - &hpdst->rqs_entry_count); - ISP_IOXPUT_8(isp, hpsrc->rqs_seqno, - &hpdst->rqs_seqno); - ISP_IOXPUT_8(isp, hpsrc->rqs_flags, - &hpdst->rqs_flags); + ISP_IOXPUT_8(isp, hpsrc->rqs_entry_type, &hpdst->rqs_entry_type); + ISP_IOXPUT_8(isp, hpsrc->rqs_entry_count, &hpdst->rqs_entry_count); + ISP_IOXPUT_8(isp, hpsrc->rqs_seqno, &hpdst->rqs_seqno); + ISP_IOXPUT_8(isp, hpsrc->rqs_flags, &hpdst->rqs_flags); } } void isp_get_hdr(ispsoftc_t *isp, 
isphdr_t *hpsrc, isphdr_t *hpdst) { if (ISP_IS_SBUS(isp)) { - ISP_IOXGET_8(isp, &hpsrc->rqs_entry_type, - hpdst->rqs_entry_count); - ISP_IOXGET_8(isp, &hpsrc->rqs_entry_count, - hpdst->rqs_entry_type); - ISP_IOXGET_8(isp, &hpsrc->rqs_seqno, - hpdst->rqs_flags); - ISP_IOXGET_8(isp, &hpsrc->rqs_flags, - hpdst->rqs_seqno); + ISP_IOXGET_8(isp, &hpsrc->rqs_entry_type, hpdst->rqs_entry_count); + ISP_IOXGET_8(isp, &hpsrc->rqs_entry_count, hpdst->rqs_entry_type); + ISP_IOXGET_8(isp, &hpsrc->rqs_seqno, hpdst->rqs_flags); + ISP_IOXGET_8(isp, &hpsrc->rqs_flags, hpdst->rqs_seqno); } else { - ISP_IOXGET_8(isp, &hpsrc->rqs_entry_type, - hpdst->rqs_entry_type); - ISP_IOXGET_8(isp, &hpsrc->rqs_entry_count, - hpdst->rqs_entry_count); - ISP_IOXGET_8(isp, &hpsrc->rqs_seqno, - hpdst->rqs_seqno); - ISP_IOXGET_8(isp, &hpsrc->rqs_flags, - hpdst->rqs_flags); + ISP_IOXGET_8(isp, &hpsrc->rqs_entry_type, hpdst->rqs_entry_type); + ISP_IOXGET_8(isp, &hpsrc->rqs_entry_count, hpdst->rqs_entry_count); + ISP_IOXGET_8(isp, &hpsrc->rqs_seqno, hpdst->rqs_seqno); + ISP_IOXGET_8(isp, &hpsrc->rqs_flags, hpdst->rqs_flags); } } int isp_get_response_type(ispsoftc_t *isp, isphdr_t *hp) { uint8_t type; if (ISP_IS_SBUS(isp)) { ISP_IOXGET_8(isp, &hp->rqs_entry_count, type); } else { ISP_IOXGET_8(isp, &hp->rqs_entry_type, type); } return ((int)type); } void isp_put_request(ispsoftc_t *isp, ispreq_t *rqsrc, ispreq_t *rqdst) { int i; isp_put_hdr(isp, &rqsrc->req_header, &rqdst->req_header); ISP_IOXPUT_32(isp, rqsrc->req_handle, &rqdst->req_handle); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, rqsrc->req_lun_trn, &rqdst->req_target); ISP_IOXPUT_8(isp, rqsrc->req_target, &rqdst->req_lun_trn); } else { ISP_IOXPUT_8(isp, rqsrc->req_lun_trn, &rqdst->req_lun_trn); ISP_IOXPUT_8(isp, rqsrc->req_target, &rqdst->req_target); } ISP_IOXPUT_16(isp, rqsrc->req_cdblen, &rqdst->req_cdblen); ISP_IOXPUT_16(isp, rqsrc->req_flags, &rqdst->req_flags); ISP_IOXPUT_16(isp, rqsrc->req_time, &rqdst->req_time); ISP_IOXPUT_16(isp, 
rqsrc->req_seg_count, &rqdst->req_seg_count); for (i = 0; i < ASIZE(rqsrc->req_cdb); i++) { ISP_IOXPUT_8(isp, rqsrc->req_cdb[i], &rqdst->req_cdb[i]); } for (i = 0; i < ISP_RQDSEG; i++) { - ISP_IOXPUT_32(isp, rqsrc->req_dataseg[i].ds_base, - &rqdst->req_dataseg[i].ds_base); - ISP_IOXPUT_32(isp, rqsrc->req_dataseg[i].ds_count, - &rqdst->req_dataseg[i].ds_count); + ISP_IOXPUT_32(isp, rqsrc->req_dataseg[i].ds_base, &rqdst->req_dataseg[i].ds_base); + ISP_IOXPUT_32(isp, rqsrc->req_dataseg[i].ds_count, &rqdst->req_dataseg[i].ds_count); } } void isp_put_marker(ispsoftc_t *isp, isp_marker_t *src, isp_marker_t *dst) { int i; isp_put_hdr(isp, &src->mrk_header, &dst->mrk_header); ISP_IOXPUT_32(isp, src->mrk_handle, &dst->mrk_handle); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->mrk_reserved0, &dst->mrk_target); ISP_IOXPUT_8(isp, src->mrk_target, &dst->mrk_reserved0); } else { ISP_IOXPUT_8(isp, src->mrk_reserved0, &dst->mrk_reserved0); ISP_IOXPUT_8(isp, src->mrk_target, &dst->mrk_target); } ISP_IOXPUT_16(isp, src->mrk_modifier, &dst->mrk_modifier); ISP_IOXPUT_16(isp, src->mrk_flags, &dst->mrk_flags); ISP_IOXPUT_16(isp, src->mrk_lun, &dst->mrk_lun); for (i = 0; i < ASIZE(src->mrk_reserved1); i++) { - ISP_IOXPUT_8(isp, src->mrk_reserved1[i], - &dst->mrk_reserved1[i]); + ISP_IOXPUT_8(isp, src->mrk_reserved1[i], &dst->mrk_reserved1[i]); } } void -isp_put_marker_24xx(ispsoftc_t *isp, - isp_marker_24xx_t *src, isp_marker_24xx_t *dst) +isp_put_marker_24xx(ispsoftc_t *isp, isp_marker_24xx_t *src, isp_marker_24xx_t *dst) { int i; isp_put_hdr(isp, &src->mrk_header, &dst->mrk_header); ISP_IOXPUT_32(isp, src->mrk_handle, &dst->mrk_handle); ISP_IOXPUT_16(isp, src->mrk_nphdl, &dst->mrk_nphdl); ISP_IOXPUT_8(isp, src->mrk_modifier, &dst->mrk_modifier); ISP_IOXPUT_8(isp, src->mrk_reserved0, &dst->mrk_reserved0); ISP_IOXPUT_8(isp, src->mrk_reserved1, &dst->mrk_reserved1); ISP_IOXPUT_8(isp, src->mrk_vphdl, &dst->mrk_vphdl); ISP_IOXPUT_8(isp, src->mrk_reserved2, &dst->mrk_reserved2); for (i = 0; 
i < ASIZE(src->mrk_lun); i++) { ISP_IOXPUT_8(isp, src->mrk_lun[i], &dst->mrk_lun[i]); } for (i = 0; i < ASIZE(src->mrk_reserved3); i++) { - ISP_IOXPUT_8(isp, src->mrk_reserved3[i], - &dst->mrk_reserved3[i]); + ISP_IOXPUT_8(isp, src->mrk_reserved3[i], &dst->mrk_reserved3[i]); } } void isp_put_request_t2(ispsoftc_t *isp, ispreqt2_t *src, ispreqt2_t *dst) { int i; isp_put_hdr(isp, &src->req_header, &dst->req_header); ISP_IOXPUT_32(isp, src->req_handle, &dst->req_handle); ISP_IOXPUT_8(isp, src->req_lun_trn, &dst->req_lun_trn); ISP_IOXPUT_8(isp, src->req_target, &dst->req_target); ISP_IOXPUT_16(isp, src->req_scclun, &dst->req_scclun); ISP_IOXPUT_16(isp, src->req_flags, &dst->req_flags); ISP_IOXPUT_16(isp, src->req_reserved, &dst->req_reserved); ISP_IOXPUT_16(isp, src->req_time, &dst->req_time); ISP_IOXPUT_16(isp, src->req_seg_count, &dst->req_seg_count); for (i = 0; i < ASIZE(src->req_cdb); i++) { ISP_IOXPUT_8(isp, src->req_cdb[i], &dst->req_cdb[i]); } ISP_IOXPUT_32(isp, src->req_totalcnt, &dst->req_totalcnt); for (i = 0; i < ISP_RQDSEG_T2; i++) { - ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, - &dst->req_dataseg[i].ds_base); - ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, - &dst->req_dataseg[i].ds_count); + ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, &dst->req_dataseg[i].ds_base); + ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, &dst->req_dataseg[i].ds_count); } } void isp_put_request_t2e(ispsoftc_t *isp, ispreqt2e_t *src, ispreqt2e_t *dst) { int i; isp_put_hdr(isp, &src->req_header, &dst->req_header); ISP_IOXPUT_32(isp, src->req_handle, &dst->req_handle); ISP_IOXPUT_16(isp, src->req_target, &dst->req_target); ISP_IOXPUT_16(isp, src->req_scclun, &dst->req_scclun); ISP_IOXPUT_16(isp, src->req_flags, &dst->req_flags); ISP_IOXPUT_16(isp, src->req_reserved, &dst->req_reserved); ISP_IOXPUT_16(isp, src->req_time, &dst->req_time); ISP_IOXPUT_16(isp, src->req_seg_count, &dst->req_seg_count); for (i = 0; i < ASIZE(src->req_cdb); i++) { ISP_IOXPUT_8(isp, 
src->req_cdb[i], &dst->req_cdb[i]); } ISP_IOXPUT_32(isp, src->req_totalcnt, &dst->req_totalcnt); for (i = 0; i < ISP_RQDSEG_T2; i++) { - ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, - &dst->req_dataseg[i].ds_base); - ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, - &dst->req_dataseg[i].ds_count); + ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, &dst->req_dataseg[i].ds_base); + ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, &dst->req_dataseg[i].ds_count); } } void isp_put_request_t3(ispsoftc_t *isp, ispreqt3_t *src, ispreqt3_t *dst) { int i; isp_put_hdr(isp, &src->req_header, &dst->req_header); ISP_IOXPUT_32(isp, src->req_handle, &dst->req_handle); ISP_IOXPUT_8(isp, src->req_lun_trn, &dst->req_lun_trn); ISP_IOXPUT_8(isp, src->req_target, &dst->req_target); ISP_IOXPUT_16(isp, src->req_scclun, &dst->req_scclun); ISP_IOXPUT_16(isp, src->req_flags, &dst->req_flags); ISP_IOXPUT_16(isp, src->req_reserved, &dst->req_reserved); ISP_IOXPUT_16(isp, src->req_time, &dst->req_time); ISP_IOXPUT_16(isp, src->req_seg_count, &dst->req_seg_count); for (i = 0; i < ASIZE(src->req_cdb); i++) { ISP_IOXPUT_8(isp, src->req_cdb[i], &dst->req_cdb[i]); } ISP_IOXPUT_32(isp, src->req_totalcnt, &dst->req_totalcnt); for (i = 0; i < ISP_RQDSEG_T3; i++) { - ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, - &dst->req_dataseg[i].ds_base); - ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_basehi, - &dst->req_dataseg[i].ds_basehi); - ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, - &dst->req_dataseg[i].ds_count); + ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, &dst->req_dataseg[i].ds_base); + ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_basehi, &dst->req_dataseg[i].ds_basehi); + ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, &dst->req_dataseg[i].ds_count); } } void isp_put_request_t3e(ispsoftc_t *isp, ispreqt3e_t *src, ispreqt3e_t *dst) { int i; isp_put_hdr(isp, &src->req_header, &dst->req_header); ISP_IOXPUT_32(isp, src->req_handle, &dst->req_handle); ISP_IOXPUT_16(isp, src->req_target, 
&dst->req_target); ISP_IOXPUT_16(isp, src->req_scclun, &dst->req_scclun); ISP_IOXPUT_16(isp, src->req_flags, &dst->req_flags); ISP_IOXPUT_16(isp, src->req_reserved, &dst->req_reserved); ISP_IOXPUT_16(isp, src->req_time, &dst->req_time); ISP_IOXPUT_16(isp, src->req_seg_count, &dst->req_seg_count); for (i = 0; i < ASIZE(src->req_cdb); i++) { ISP_IOXPUT_8(isp, src->req_cdb[i], &dst->req_cdb[i]); } ISP_IOXPUT_32(isp, src->req_totalcnt, &dst->req_totalcnt); for (i = 0; i < ISP_RQDSEG_T3; i++) { - ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, - &dst->req_dataseg[i].ds_base); - ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_basehi, - &dst->req_dataseg[i].ds_basehi); - ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, - &dst->req_dataseg[i].ds_count); + ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, &dst->req_dataseg[i].ds_base); + ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_basehi, &dst->req_dataseg[i].ds_basehi); + ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, &dst->req_dataseg[i].ds_count); } } void isp_put_extended_request(ispsoftc_t *isp, ispextreq_t *src, ispextreq_t *dst) { int i; isp_put_hdr(isp, &src->req_header, &dst->req_header); ISP_IOXPUT_32(isp, src->req_handle, &dst->req_handle); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->req_lun_trn, &dst->req_target); ISP_IOXPUT_8(isp, src->req_target, &dst->req_lun_trn); } else { ISP_IOXPUT_8(isp, src->req_lun_trn, &dst->req_lun_trn); ISP_IOXPUT_8(isp, src->req_target, &dst->req_target); } ISP_IOXPUT_16(isp, src->req_cdblen, &dst->req_cdblen); ISP_IOXPUT_16(isp, src->req_flags, &dst->req_flags); ISP_IOXPUT_16(isp, src->req_time, &dst->req_time); ISP_IOXPUT_16(isp, src->req_seg_count, &dst->req_seg_count); for (i = 0; i < ASIZE(src->req_cdb); i++) { ISP_IOXPUT_8(isp, src->req_cdb[i], &dst->req_cdb[i]); } } void isp_put_request_t7(ispsoftc_t *isp, ispreqt7_t *src, ispreqt7_t *dst) { int i; uint32_t *a, *b; isp_put_hdr(isp, &src->req_header, &dst->req_header); ISP_IOXPUT_32(isp, src->req_handle, &dst->req_handle); 
ISP_IOXPUT_16(isp, src->req_nphdl, &dst->req_nphdl); ISP_IOXPUT_16(isp, src->req_time, &dst->req_time); ISP_IOXPUT_16(isp, src->req_seg_count, &dst->req_seg_count); ISP_IOXPUT_16(isp, src->req_reserved, &dst->req_reserved); a = (uint32_t *) src->req_lun; b = (uint32_t *) dst->req_lun; for (i = 0; i < (ASIZE(src->req_lun) >> 2); i++ ) { *b++ = ISP_SWAP32(isp, *a++); } ISP_IOXPUT_8(isp, src->req_alen_datadir, &dst->req_alen_datadir); ISP_IOXPUT_8(isp, src->req_task_management, &dst->req_task_management); ISP_IOXPUT_8(isp, src->req_task_attribute, &dst->req_task_attribute); ISP_IOXPUT_8(isp, src->req_crn, &dst->req_crn); a = (uint32_t *) src->req_cdb; b = (uint32_t *) dst->req_cdb; for (i = 0; i < (ASIZE(src->req_cdb) >> 2); i++) { *b++ = ISP_SWAP32(isp, *a++); } ISP_IOXPUT_32(isp, src->req_dl, &dst->req_dl); ISP_IOXPUT_16(isp, src->req_tidlo, &dst->req_tidlo); ISP_IOXPUT_8(isp, src->req_tidhi, &dst->req_tidhi); ISP_IOXPUT_8(isp, src->req_vpidx, &dst->req_vpidx); - ISP_IOXPUT_32(isp, src->req_dataseg.ds_base, - &dst->req_dataseg.ds_base); - ISP_IOXPUT_32(isp, src->req_dataseg.ds_basehi, - &dst->req_dataseg.ds_basehi); - ISP_IOXPUT_32(isp, src->req_dataseg.ds_count, - &dst->req_dataseg.ds_count); + ISP_IOXPUT_32(isp, src->req_dataseg.ds_base, &dst->req_dataseg.ds_base); + ISP_IOXPUT_32(isp, src->req_dataseg.ds_basehi, &dst->req_dataseg.ds_basehi); + ISP_IOXPUT_32(isp, src->req_dataseg.ds_count, &dst->req_dataseg.ds_count); } void +isp_put_24xx_tmf(ispsoftc_t *isp, isp24xx_tmf_t *src, isp24xx_tmf_t *dst) +{ + int i; + uint32_t *a, *b; + + isp_put_hdr(isp, &src->tmf_header, &dst->tmf_header); + ISP_IOXPUT_32(isp, src->tmf_handle, &dst->tmf_handle); + ISP_IOXPUT_16(isp, src->tmf_nphdl, &dst->tmf_nphdl); + ISP_IOXPUT_16(isp, src->tmf_delay, &dst->tmf_delay); + ISP_IOXPUT_16(isp, src->tmf_timeout, &dst->tmf_timeout); + for (i = 0; i < ASIZE(src->tmf_reserved0); i++) { + ISP_IOXPUT_8(isp, src->tmf_reserved0[i], &dst->tmf_reserved0[i]); + } + a = (uint32_t *) src->tmf_lun; + 
b = (uint32_t *) dst->tmf_lun; + for (i = 0; i < (ASIZE(src->tmf_lun) >> 2); i++ ) { + *b++ = ISP_SWAP32(isp, *a++); + } + ISP_IOXPUT_32(isp, src->tmf_flags, &dst->tmf_flags); + for (i = 0; i < ASIZE(src->tmf_reserved1); i++) { + ISP_IOXPUT_8(isp, src->tmf_reserved1[i], &dst->tmf_reserved1[i]); + } + ISP_IOXPUT_16(isp, src->tmf_tidlo, &dst->tmf_tidlo); + ISP_IOXPUT_8(isp, src->tmf_tidhi, &dst->tmf_tidhi); + ISP_IOXPUT_8(isp, src->tmf_vpidx, &dst->tmf_vpidx); + for (i = 0; i < ASIZE(src->tmf_reserved2); i++) { + ISP_IOXPUT_8(isp, src->tmf_reserved2[i], &dst->tmf_reserved2[i]); + } +} + +void isp_put_24xx_abrt(ispsoftc_t *isp, isp24xx_abrt_t *src, isp24xx_abrt_t *dst) { int i; isp_put_hdr(isp, &src->abrt_header, &dst->abrt_header); ISP_IOXPUT_32(isp, src->abrt_handle, &dst->abrt_handle); ISP_IOXPUT_16(isp, src->abrt_nphdl, &dst->abrt_nphdl); ISP_IOXPUT_16(isp, src->abrt_options, &dst->abrt_options); ISP_IOXPUT_32(isp, src->abrt_cmd_handle, &dst->abrt_cmd_handle); for (i = 0; i < ASIZE(src->abrt_reserved); i++) { - ISP_IOXPUT_8(isp, src->abrt_reserved[i], - &dst->abrt_reserved[i]); + ISP_IOXPUT_8(isp, src->abrt_reserved[i], &dst->abrt_reserved[i]); } ISP_IOXPUT_16(isp, src->abrt_tidlo, &dst->abrt_tidlo); ISP_IOXPUT_8(isp, src->abrt_tidhi, &dst->abrt_tidhi); ISP_IOXPUT_8(isp, src->abrt_vpidx, &dst->abrt_vpidx); for (i = 0; i < ASIZE(src->abrt_reserved1); i++) { - ISP_IOXPUT_8(isp, src->abrt_reserved1[i], - &dst->abrt_reserved1[i]); + ISP_IOXPUT_8(isp, src->abrt_reserved1[i], &dst->abrt_reserved1[i]); } } void isp_put_cont_req(ispsoftc_t *isp, ispcontreq_t *src, ispcontreq_t *dst) { int i; isp_put_hdr(isp, &src->req_header, &dst->req_header); for (i = 0; i < ISP_CDSEG; i++) { - ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, - &dst->req_dataseg[i].ds_base); - ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, - &dst->req_dataseg[i].ds_count); + ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, &dst->req_dataseg[i].ds_base); + ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, 
&dst->req_dataseg[i].ds_count); } } void isp_put_cont64_req(ispsoftc_t *isp, ispcontreq64_t *src, ispcontreq64_t *dst) { int i; isp_put_hdr(isp, &src->req_header, &dst->req_header); for (i = 0; i < ISP_CDSEG64; i++) { - ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, - &dst->req_dataseg[i].ds_base); - ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_basehi, - &dst->req_dataseg[i].ds_basehi); - ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, - &dst->req_dataseg[i].ds_count); + ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, &dst->req_dataseg[i].ds_base); + ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_basehi, &dst->req_dataseg[i].ds_basehi); + ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, &dst->req_dataseg[i].ds_count); } } void isp_get_response(ispsoftc_t *isp, ispstatusreq_t *src, ispstatusreq_t *dst) { int i; isp_get_hdr(isp, &src->req_header, &dst->req_header); ISP_IOXGET_32(isp, &src->req_handle, dst->req_handle); ISP_IOXGET_16(isp, &src->req_scsi_status, dst->req_scsi_status); - ISP_IOXGET_16(isp, &src->req_completion_status, - dst->req_completion_status); + ISP_IOXGET_16(isp, &src->req_completion_status, dst->req_completion_status); ISP_IOXGET_16(isp, &src->req_state_flags, dst->req_state_flags); ISP_IOXGET_16(isp, &src->req_status_flags, dst->req_status_flags); ISP_IOXGET_16(isp, &src->req_time, dst->req_time); ISP_IOXGET_16(isp, &src->req_sense_len, dst->req_sense_len); ISP_IOXGET_32(isp, &src->req_resid, dst->req_resid); for (i = 0; i < 8; i++) { - ISP_IOXGET_8(isp, &src->req_response[i], - dst->req_response[i]); + ISP_IOXGET_8(isp, &src->req_response[i], dst->req_response[i]); } for (i = 0; i < 32; i++) { - ISP_IOXGET_8(isp, &src->req_sense_data[i], - dst->req_sense_data[i]); + ISP_IOXGET_8(isp, &src->req_sense_data[i], dst->req_sense_data[i]); } } void -isp_get_24xx_response(ispsoftc_t *isp, isp24xx_statusreq_t *src, - isp24xx_statusreq_t *dst) +isp_get_24xx_response(ispsoftc_t *isp, isp24xx_statusreq_t *src, isp24xx_statusreq_t *dst) { int i; uint32_t *s, *d; 
isp_get_hdr(isp, &src->req_header, &dst->req_header); ISP_IOXGET_32(isp, &src->req_handle, dst->req_handle); - ISP_IOXGET_16(isp, &src->req_completion_status, - dst->req_completion_status); + ISP_IOXGET_16(isp, &src->req_completion_status, dst->req_completion_status); ISP_IOXGET_16(isp, &src->req_oxid, dst->req_oxid); ISP_IOXGET_32(isp, &src->req_resid, dst->req_resid); ISP_IOXGET_16(isp, &src->req_reserved0, dst->req_reserved0); ISP_IOXGET_16(isp, &src->req_state_flags, dst->req_state_flags); ISP_IOXGET_16(isp, &src->req_reserved1, dst->req_reserved1); ISP_IOXGET_16(isp, &src->req_scsi_status, dst->req_scsi_status); ISP_IOXGET_32(isp, &src->req_fcp_residual, dst->req_fcp_residual); ISP_IOXGET_32(isp, &src->req_sense_len, dst->req_sense_len); ISP_IOXGET_32(isp, &src->req_response_len, dst->req_response_len); s = (uint32_t *)src->req_rsp_sense; d = (uint32_t *)dst->req_rsp_sense; for (i = 0; i < (ASIZE(src->req_rsp_sense) >> 2); i++) { d[i] = ISP_SWAP32(isp, s[i]); } } void isp_get_24xx_abrt(ispsoftc_t *isp, isp24xx_abrt_t *src, isp24xx_abrt_t *dst) { int i; isp_get_hdr(isp, &src->abrt_header, &dst->abrt_header); ISP_IOXGET_32(isp, &src->abrt_handle, dst->abrt_handle); ISP_IOXGET_16(isp, &src->abrt_nphdl, dst->abrt_nphdl); ISP_IOXGET_16(isp, &src->abrt_options, dst->abrt_options); ISP_IOXGET_32(isp, &src->abrt_cmd_handle, dst->abrt_cmd_handle); for (i = 0; i < ASIZE(src->abrt_reserved); i++) { - ISP_IOXGET_8(isp, &src->abrt_reserved[i], - dst->abrt_reserved[i]); + ISP_IOXGET_8(isp, &src->abrt_reserved[i], dst->abrt_reserved[i]); } ISP_IOXGET_16(isp, &src->abrt_tidlo, dst->abrt_tidlo); ISP_IOXGET_8(isp, &src->abrt_tidhi, dst->abrt_tidhi); ISP_IOXGET_8(isp, &src->abrt_vpidx, dst->abrt_vpidx); for (i = 0; i < ASIZE(src->abrt_reserved1); i++) { - ISP_IOXGET_8(isp, &src->abrt_reserved1[i], - dst->abrt_reserved1[i]); + ISP_IOXGET_8(isp, &src->abrt_reserved1[i], dst->abrt_reserved1[i]); } } void isp_get_rio2(ispsoftc_t *isp, isp_rio2_t *r2src, isp_rio2_t *r2dst) { int i; 
isp_get_hdr(isp, &r2src->req_header, &r2dst->req_header); if (r2dst->req_header.rqs_seqno > 30) { r2dst->req_header.rqs_seqno = 30; } for (i = 0; i < r2dst->req_header.rqs_seqno; i++) { - ISP_IOXGET_16(isp, &r2src->req_handles[i], - r2dst->req_handles[i]); + ISP_IOXGET_16(isp, &r2src->req_handles[i], r2dst->req_handles[i]); } while (i < 30) { r2dst->req_handles[i++] = 0; } } void isp_put_icb(ispsoftc_t *isp, isp_icb_t *src, isp_icb_t *dst) { int i; if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->icb_version, &dst->icb_reserved0); ISP_IOXPUT_8(isp, src->icb_reserved0, &dst->icb_version); } else { ISP_IOXPUT_8(isp, src->icb_version, &dst->icb_version); ISP_IOXPUT_8(isp, src->icb_reserved0, &dst->icb_reserved0); } ISP_IOXPUT_16(isp, src->icb_fwoptions, &dst->icb_fwoptions); ISP_IOXPUT_16(isp, src->icb_maxfrmlen, &dst->icb_maxfrmlen); ISP_IOXPUT_16(isp, src->icb_maxalloc, &dst->icb_maxalloc); ISP_IOXPUT_16(isp, src->icb_execthrottle, &dst->icb_execthrottle); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->icb_retry_count, &dst->icb_retry_delay); ISP_IOXPUT_8(isp, src->icb_retry_delay, &dst->icb_retry_count); } else { ISP_IOXPUT_8(isp, src->icb_retry_count, &dst->icb_retry_count); ISP_IOXPUT_8(isp, src->icb_retry_delay, &dst->icb_retry_delay); } for (i = 0; i < 8; i++) { ISP_IOXPUT_8(isp, src->icb_portname[i], &dst->icb_portname[i]); } ISP_IOXPUT_16(isp, src->icb_hardaddr, &dst->icb_hardaddr); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->icb_iqdevtype, &dst->icb_logintime); ISP_IOXPUT_8(isp, src->icb_logintime, &dst->icb_iqdevtype); } else { ISP_IOXPUT_8(isp, src->icb_iqdevtype, &dst->icb_iqdevtype); ISP_IOXPUT_8(isp, src->icb_logintime, &dst->icb_logintime); } for (i = 0; i < 8; i++) { ISP_IOXPUT_8(isp, src->icb_nodename[i], &dst->icb_nodename[i]); } ISP_IOXPUT_16(isp, src->icb_rqstout, &dst->icb_rqstout); ISP_IOXPUT_16(isp, src->icb_rspnsin, &dst->icb_rspnsin); ISP_IOXPUT_16(isp, src->icb_rqstqlen, &dst->icb_rqstqlen); ISP_IOXPUT_16(isp, src->icb_rsltqlen, 
&dst->icb_rsltqlen); for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->icb_rqstaddr[i], &dst->icb_rqstaddr[i]); } for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->icb_respaddr[i], &dst->icb_respaddr[i]); } ISP_IOXPUT_16(isp, src->icb_lunenables, &dst->icb_lunenables); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->icb_ccnt, &dst->icb_icnt); ISP_IOXPUT_8(isp, src->icb_icnt, &dst->icb_ccnt); } else { ISP_IOXPUT_8(isp, src->icb_ccnt, &dst->icb_ccnt); ISP_IOXPUT_8(isp, src->icb_icnt, &dst->icb_icnt); } ISP_IOXPUT_16(isp, src->icb_lunetimeout, &dst->icb_lunetimeout); ISP_IOXPUT_16(isp, src->icb_reserved1, &dst->icb_reserved1); ISP_IOXPUT_16(isp, src->icb_xfwoptions, &dst->icb_xfwoptions); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->icb_racctimer, &dst->icb_idelaytimer); ISP_IOXPUT_8(isp, src->icb_idelaytimer, &dst->icb_racctimer); } else { ISP_IOXPUT_8(isp, src->icb_racctimer, &dst->icb_racctimer); ISP_IOXPUT_8(isp, src->icb_idelaytimer, &dst->icb_idelaytimer); } ISP_IOXPUT_16(isp, src->icb_zfwoptions, &dst->icb_zfwoptions); } void isp_put_icb_2400(ispsoftc_t *isp, isp_icb_2400_t *src, isp_icb_2400_t *dst) { int i; ISP_IOXPUT_16(isp, src->icb_version, &dst->icb_version); ISP_IOXPUT_16(isp, src->icb_reserved0, &dst->icb_reserved0); ISP_IOXPUT_16(isp, src->icb_maxfrmlen, &dst->icb_maxfrmlen); ISP_IOXPUT_16(isp, src->icb_execthrottle, &dst->icb_execthrottle); ISP_IOXPUT_16(isp, src->icb_xchgcnt, &dst->icb_xchgcnt); ISP_IOXPUT_16(isp, src->icb_hardaddr, &dst->icb_hardaddr); for (i = 0; i < 8; i++) { ISP_IOXPUT_8(isp, src->icb_portname[i], &dst->icb_portname[i]); } for (i = 0; i < 8; i++) { ISP_IOXPUT_8(isp, src->icb_nodename[i], &dst->icb_nodename[i]); } ISP_IOXPUT_16(isp, src->icb_rspnsin, &dst->icb_rspnsin); ISP_IOXPUT_16(isp, src->icb_rqstout, &dst->icb_rqstout); ISP_IOXPUT_16(isp, src->icb_retry_count, &dst->icb_retry_count); ISP_IOXPUT_16(isp, src->icb_priout, &dst->icb_priout); ISP_IOXPUT_16(isp, src->icb_rsltqlen, &dst->icb_rsltqlen); ISP_IOXPUT_16(isp, 
src->icb_rqstqlen, &dst->icb_rqstqlen); ISP_IOXPUT_16(isp, src->icb_ldn_nols, &dst->icb_ldn_nols); ISP_IOXPUT_16(isp, src->icb_prqstqlen, &dst->icb_prqstqlen); for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->icb_rqstaddr[i], &dst->icb_rqstaddr[i]); } for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->icb_respaddr[i], &dst->icb_respaddr[i]); } for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->icb_priaddr[i], &dst->icb_priaddr[i]); } for (i = 0; i < 4; i++) { - ISP_IOXPUT_16(isp, src->icb_reserved1[i], - &dst->icb_reserved1[i]); + ISP_IOXPUT_16(isp, src->icb_reserved1[i], &dst->icb_reserved1[i]); } ISP_IOXPUT_16(isp, src->icb_atio_in, &dst->icb_atio_in); ISP_IOXPUT_16(isp, src->icb_atioqlen, &dst->icb_atioqlen); for (i = 0; i < 4; i++) { - ISP_IOXPUT_16(isp, src->icb_atioqaddr[i], - &dst->icb_atioqaddr[i]); + ISP_IOXPUT_16(isp, src->icb_atioqaddr[i], &dst->icb_atioqaddr[i]); } ISP_IOXPUT_16(isp, src->icb_idelaytimer, &dst->icb_idelaytimer); ISP_IOXPUT_16(isp, src->icb_logintime, &dst->icb_logintime); ISP_IOXPUT_32(isp, src->icb_fwoptions1, &dst->icb_fwoptions1); ISP_IOXPUT_32(isp, src->icb_fwoptions2, &dst->icb_fwoptions2); ISP_IOXPUT_32(isp, src->icb_fwoptions3, &dst->icb_fwoptions3); for (i = 0; i < 12; i++) { - ISP_IOXPUT_16(isp, src->icb_reserved2[i], - &dst->icb_reserved2[i]); + ISP_IOXPUT_16(isp, src->icb_reserved2[i], &dst->icb_reserved2[i]); } } void +isp_put_icb_2400_vpinfo(ispsoftc_t *isp, isp_icb_2400_vpinfo_t *src, isp_icb_2400_vpinfo_t *dst) +{ + ISP_IOXPUT_16(isp, src->vp_count, &dst->vp_count); + ISP_IOXPUT_16(isp, src->vp_global_options, &dst->vp_global_options); +} + +void +isp_put_vp_port_info(ispsoftc_t *isp, vp_port_info_t *src, vp_port_info_t *dst) +{ + int i; + ISP_IOXPUT_16(isp, src->vp_port_status, &dst->vp_port_status); + ISP_IOXPUT_8(isp, src->vp_port_options, &dst->vp_port_options); + ISP_IOXPUT_8(isp, src->vp_port_loopid, &dst->vp_port_loopid); + for (i = 0; i < 8; i++) { + ISP_IOXPUT_8(isp, src->vp_port_portname[i], 
&dst->vp_port_portname[i]); + } + for (i = 0; i < 8; i++) { + ISP_IOXPUT_8(isp, src->vp_port_nodename[i], &dst->vp_port_nodename[i]); + } + /* we never *put* portid_lo/portid_hi */ +} + +void +isp_get_vp_port_info(ispsoftc_t *isp, vp_port_info_t *src, vp_port_info_t *dst) +{ + int i; + ISP_IOXGET_16(isp, &src->vp_port_status, dst->vp_port_status); + ISP_IOXGET_8(isp, &src->vp_port_options, dst->vp_port_options); + ISP_IOXGET_8(isp, &src->vp_port_loopid, dst->vp_port_loopid); + for (i = 0; i < ASIZE(src->vp_port_portname); i++) { + ISP_IOXGET_8(isp, &src->vp_port_portname[i], dst->vp_port_portname[i]); + } + for (i = 0; i < ASIZE(src->vp_port_nodename); i++) { + ISP_IOXGET_8(isp, &src->vp_port_nodename[i], dst->vp_port_nodename[i]); + } + ISP_IOXGET_16(isp, &src->vp_port_portid_lo, dst->vp_port_portid_lo); + ISP_IOXGET_16(isp, &src->vp_port_portid_hi, dst->vp_port_portid_hi); +} + +void +isp_put_vp_ctrl_info(ispsoftc_t *isp, vp_ctrl_info_t *src, vp_ctrl_info_t *dst) +{ + int i; + isp_put_hdr(isp, &src->vp_ctrl_hdr, &dst->vp_ctrl_hdr); + ISP_IOXPUT_32(isp, src->vp_ctrl_handle, &dst->vp_ctrl_handle); + ISP_IOXPUT_16(isp, src->vp_ctrl_index_fail, &dst->vp_ctrl_index_fail); + ISP_IOXPUT_16(isp, src->vp_ctrl_status, &dst->vp_ctrl_status); + ISP_IOXPUT_16(isp, src->vp_ctrl_command, &dst->vp_ctrl_command); + ISP_IOXPUT_16(isp, src->vp_ctrl_vp_count, &dst->vp_ctrl_vp_count); + for (i = 0; i < ASIZE(src->vp_ctrl_idmap); i++) { + ISP_IOXPUT_16(isp, src->vp_ctrl_idmap[i], &dst->vp_ctrl_idmap[i]); + } + for (i = 0; i < ASIZE(src->vp_ctrl_reserved); i++) { + ISP_IOXPUT_8(isp, src->vp_ctrl_reserved[i], &dst->vp_ctrl_reserved[i]); + } +} + +void +isp_get_vp_ctrl_info(ispsoftc_t *isp, vp_ctrl_info_t *src, vp_ctrl_info_t *dst) +{ + int i; + isp_get_hdr(isp, &src->vp_ctrl_hdr, &dst->vp_ctrl_hdr); + ISP_IOXGET_32(isp, &src->vp_ctrl_handle, dst->vp_ctrl_handle); + ISP_IOXGET_16(isp, &src->vp_ctrl_index_fail, dst->vp_ctrl_index_fail); + ISP_IOXGET_16(isp, &src->vp_ctrl_status,
dst->vp_ctrl_status); + ISP_IOXGET_16(isp, &src->vp_ctrl_command, dst->vp_ctrl_command); + ISP_IOXGET_16(isp, &src->vp_ctrl_vp_count, dst->vp_ctrl_vp_count); + for (i = 0; i < ASIZE(src->vp_ctrl_idmap); i++) { + ISP_IOXGET_16(isp, &src->vp_ctrl_idmap[i], dst->vp_ctrl_idmap[i]); + } + for (i = 0; i < ASIZE(src->vp_ctrl_reserved); i++) { + ISP_IOXGET_8(isp, &src->vp_ctrl_reserved[i], dst->vp_ctrl_reserved[i]); + } +} + +void +isp_put_vp_modify(ispsoftc_t *isp, vp_modify_t *src, vp_modify_t *dst) +{ + int i, j; + isp_put_hdr(isp, &src->vp_mod_hdr, &dst->vp_mod_hdr); + ISP_IOXPUT_32(isp, src->vp_mod_hdl, &dst->vp_mod_hdl); + ISP_IOXPUT_16(isp, src->vp_mod_reserved0, &dst->vp_mod_reserved0); + ISP_IOXPUT_16(isp, src->vp_mod_status, &dst->vp_mod_status); + ISP_IOXPUT_8(isp, src->vp_mod_cmd, &dst->vp_mod_cmd); + ISP_IOXPUT_8(isp, src->vp_mod_cnt, &dst->vp_mod_cnt); + ISP_IOXPUT_8(isp, src->vp_mod_idx0, &dst->vp_mod_idx0); + ISP_IOXPUT_8(isp, src->vp_mod_idx1, &dst->vp_mod_idx1); + for (i = 0; i < ASIZE(src->vp_mod_ports); i++) { + ISP_IOXPUT_8(isp, src->vp_mod_ports[i].options, &dst->vp_mod_ports[i].options); + ISP_IOXPUT_8(isp, src->vp_mod_ports[i].loopid, &dst->vp_mod_ports[i].loopid); + ISP_IOXPUT_16(isp, src->vp_mod_ports[i].reserved1, &dst->vp_mod_ports[i].reserved1); + for (j = 0; j < ASIZE(src->vp_mod_ports[i].wwpn); j++) { + ISP_IOXPUT_8(isp, src->vp_mod_ports[i].wwpn[j], &dst->vp_mod_ports[i].wwpn[j]); + } + for (j = 0; j < ASIZE(src->vp_mod_ports[i].wwnn); j++) { + ISP_IOXPUT_8(isp, src->vp_mod_ports[i].wwnn[j], &dst->vp_mod_ports[i].wwnn[j]); + } + } + for (i = 0; i < ASIZE(src->vp_mod_reserved2); i++) { + ISP_IOXPUT_8(isp, src->vp_mod_reserved2[i], &dst->vp_mod_reserved2[i]); + } +} + +void +isp_get_vp_modify(ispsoftc_t *isp, vp_modify_t *src, vp_modify_t *dst) +{ + int i, j; + isp_get_hdr(isp, &src->vp_mod_hdr, &dst->vp_mod_hdr); + ISP_IOXGET_32(isp, &src->vp_mod_hdl, dst->vp_mod_hdl); + ISP_IOXGET_16(isp, &src->vp_mod_reserved0, dst->vp_mod_reserved0); + 
ISP_IOXGET_16(isp, &src->vp_mod_status, dst->vp_mod_status); + ISP_IOXGET_8(isp, &src->vp_mod_cmd, dst->vp_mod_cmd); + ISP_IOXGET_8(isp, &src->vp_mod_cnt, dst->vp_mod_cnt); + ISP_IOXGET_8(isp, &src->vp_mod_idx0, dst->vp_mod_idx0); + ISP_IOXGET_8(isp, &src->vp_mod_idx1, dst->vp_mod_idx1); + for (i = 0; i < ASIZE(src->vp_mod_ports); i++) { + ISP_IOXGET_8(isp, &src->vp_mod_ports[i].options, dst->vp_mod_ports[i].options); + ISP_IOXGET_8(isp, &src->vp_mod_ports[i].loopid, dst->vp_mod_ports[i].loopid); + ISP_IOXGET_16(isp, &src->vp_mod_ports[i].reserved1, dst->vp_mod_ports[i].reserved1); + for (j = 0; j < ASIZE(src->vp_mod_ports[i].wwpn); j++) { + ISP_IOXGET_8(isp, &src->vp_mod_ports[i].wwpn[j], dst->vp_mod_ports[i].wwpn[j]); + } + for (j = 0; j < ASIZE(src->vp_mod_ports[i].wwnn); j++) { + ISP_IOXGET_8(isp, &src->vp_mod_ports[i].wwnn[j], dst->vp_mod_ports[i].wwnn[j]); + } + } + for (i = 0; i < ASIZE(src->vp_mod_reserved2); i++) { + ISP_IOXGET_8(isp, &src->vp_mod_reserved2[i], dst->vp_mod_reserved2[i]); + } +} + +void isp_get_pdb_21xx(ispsoftc_t *isp, isp_pdb_21xx_t *src, isp_pdb_21xx_t *dst) { int i; ISP_IOXGET_16(isp, &src->pdb_options, dst->pdb_options); ISP_IOXGET_8(isp, &src->pdb_mstate, dst->pdb_mstate); ISP_IOXGET_8(isp, &src->pdb_sstate, dst->pdb_sstate); for (i = 0; i < 4; i++) { - ISP_IOXGET_8(isp, &src->pdb_hardaddr_bits[i], - dst->pdb_hardaddr_bits[i]); + ISP_IOXGET_8(isp, &src->pdb_hardaddr_bits[i], dst->pdb_hardaddr_bits[i]); } for (i = 0; i < 4; i++) { - ISP_IOXGET_8(isp, &src->pdb_portid_bits[i], - dst->pdb_portid_bits[i]); + ISP_IOXGET_8(isp, &src->pdb_portid_bits[i], dst->pdb_portid_bits[i]); } for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->pdb_nodename[i], dst->pdb_nodename[i]); } for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->pdb_portname[i], dst->pdb_portname[i]); } ISP_IOXGET_16(isp, &src->pdb_execthrottle, dst->pdb_execthrottle); ISP_IOXGET_16(isp, &src->pdb_exec_count, dst->pdb_exec_count); ISP_IOXGET_8(isp, &src->pdb_retry_count, 
dst->pdb_retry_count); ISP_IOXGET_8(isp, &src->pdb_retry_delay, dst->pdb_retry_delay); ISP_IOXGET_16(isp, &src->pdb_resalloc, dst->pdb_resalloc); ISP_IOXGET_16(isp, &src->pdb_curalloc, dst->pdb_curalloc); ISP_IOXGET_16(isp, &src->pdb_qhead, dst->pdb_qhead); ISP_IOXGET_16(isp, &src->pdb_qtail, dst->pdb_qtail); ISP_IOXGET_16(isp, &src->pdb_tl_next, dst->pdb_tl_next); ISP_IOXGET_16(isp, &src->pdb_tl_last, dst->pdb_tl_last); ISP_IOXGET_16(isp, &src->pdb_features, dst->pdb_features); ISP_IOXGET_16(isp, &src->pdb_pconcurrnt, dst->pdb_pconcurrnt); ISP_IOXGET_16(isp, &src->pdb_roi, dst->pdb_roi); ISP_IOXGET_8(isp, &src->pdb_target, dst->pdb_target); ISP_IOXGET_8(isp, &src->pdb_initiator, dst->pdb_initiator); ISP_IOXGET_16(isp, &src->pdb_rdsiz, dst->pdb_rdsiz); ISP_IOXGET_16(isp, &src->pdb_ncseq, dst->pdb_ncseq); ISP_IOXGET_16(isp, &src->pdb_noseq, dst->pdb_noseq); ISP_IOXGET_16(isp, &src->pdb_labrtflg, dst->pdb_labrtflg); ISP_IOXGET_16(isp, &src->pdb_lstopflg, dst->pdb_lstopflg); ISP_IOXGET_16(isp, &src->pdb_sqhead, dst->pdb_sqhead); ISP_IOXGET_16(isp, &src->pdb_sqtail, dst->pdb_sqtail); ISP_IOXGET_16(isp, &src->pdb_ptimer, dst->pdb_ptimer); ISP_IOXGET_16(isp, &src->pdb_nxt_seqid, dst->pdb_nxt_seqid); ISP_IOXGET_16(isp, &src->pdb_fcount, dst->pdb_fcount); ISP_IOXGET_16(isp, &src->pdb_prli_len, dst->pdb_prli_len); ISP_IOXGET_16(isp, &src->pdb_prli_svc0, dst->pdb_prli_svc0); ISP_IOXGET_16(isp, &src->pdb_prli_svc3, dst->pdb_prli_svc3); ISP_IOXGET_16(isp, &src->pdb_loopid, dst->pdb_loopid); ISP_IOXGET_16(isp, &src->pdb_il_ptr, dst->pdb_il_ptr); ISP_IOXGET_16(isp, &src->pdb_sl_ptr, dst->pdb_sl_ptr); } void isp_get_pdb_24xx(ispsoftc_t *isp, isp_pdb_24xx_t *src, isp_pdb_24xx_t *dst) { int i; ISP_IOXGET_16(isp, &src->pdb_flags, dst->pdb_flags); ISP_IOXGET_8(isp, &src->pdb_curstate, dst->pdb_curstate); ISP_IOXGET_8(isp, &src->pdb_laststate, dst->pdb_laststate); for (i = 0; i < 4; i++) { - ISP_IOXGET_8(isp, &src->pdb_hardaddr_bits[i], - dst->pdb_hardaddr_bits[i]); + 
ISP_IOXGET_8(isp, &src->pdb_hardaddr_bits[i], dst->pdb_hardaddr_bits[i]); } for (i = 0; i < 4; i++) { - ISP_IOXGET_8(isp, &src->pdb_portid_bits[i], - dst->pdb_portid_bits[i]); + ISP_IOXGET_8(isp, &src->pdb_portid_bits[i], dst->pdb_portid_bits[i]); } ISP_IOXGET_16(isp, &src->pdb_retry_timer, dst->pdb_retry_timer); ISP_IOXGET_16(isp, &src->pdb_handle, dst->pdb_handle); ISP_IOXGET_16(isp, &src->pdb_rcv_dsize, dst->pdb_rcv_dsize); ISP_IOXGET_16(isp, &src->pdb_reserved0, dst->pdb_reserved0); ISP_IOXGET_16(isp, &src->pdb_prli_svc0, dst->pdb_prli_svc0); ISP_IOXGET_16(isp, &src->pdb_prli_svc3, dst->pdb_prli_svc3); for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->pdb_nodename[i], dst->pdb_nodename[i]); } for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->pdb_portname[i], dst->pdb_portname[i]); } for (i = 0; i < 24; i++) { - ISP_IOXGET_8(isp, &src->pdb_reserved1[i], - dst->pdb_reserved1[i]); + ISP_IOXGET_8(isp, &src->pdb_reserved1[i], dst->pdb_reserved1[i]); } } /* * PLOGI/LOGO IOCB canonicalization */ void isp_get_plogx(ispsoftc_t *isp, isp_plogx_t *src, isp_plogx_t *dst) { int i; isp_get_hdr(isp, &src->plogx_header, &dst->plogx_header); ISP_IOXGET_32(isp, &src->plogx_handle, dst->plogx_handle); ISP_IOXGET_16(isp, &src->plogx_status, dst->plogx_status); ISP_IOXGET_16(isp, &src->plogx_nphdl, dst->plogx_nphdl); ISP_IOXGET_16(isp, &src->plogx_flags, dst->plogx_flags); ISP_IOXGET_16(isp, &src->plogx_vphdl, dst->plogx_vphdl); ISP_IOXGET_16(isp, &src->plogx_portlo, dst->plogx_portlo); ISP_IOXGET_16(isp, &src->plogx_rspsz_porthi, dst->plogx_rspsz_porthi); for (i = 0; i < 11; i++) { - ISP_IOXGET_16(isp, &src->plogx_ioparm[i].lo16, - dst->plogx_ioparm[i].lo16); - ISP_IOXGET_16(isp, &src->plogx_ioparm[i].hi16, - dst->plogx_ioparm[i].hi16); + ISP_IOXGET_16(isp, &src->plogx_ioparm[i].lo16, dst->plogx_ioparm[i].lo16); + ISP_IOXGET_16(isp, &src->plogx_ioparm[i].hi16, dst->plogx_ioparm[i].hi16); } } void isp_put_plogx(ispsoftc_t *isp, isp_plogx_t *src, isp_plogx_t *dst) { int i; 
isp_put_hdr(isp, &src->plogx_header, &dst->plogx_header); ISP_IOXPUT_32(isp, src->plogx_handle, &dst->plogx_handle); ISP_IOXPUT_16(isp, src->plogx_status, &dst->plogx_status); ISP_IOXPUT_16(isp, src->plogx_nphdl, &dst->plogx_nphdl); ISP_IOXPUT_16(isp, src->plogx_flags, &dst->plogx_flags); ISP_IOXPUT_16(isp, src->plogx_vphdl, &dst->plogx_vphdl); ISP_IOXPUT_16(isp, src->plogx_portlo, &dst->plogx_portlo); ISP_IOXPUT_16(isp, src->plogx_rspsz_porthi, &dst->plogx_rspsz_porthi); for (i = 0; i < 11; i++) { - ISP_IOXPUT_16(isp, src->plogx_ioparm[i].lo16, - &dst->plogx_ioparm[i].lo16); - ISP_IOXPUT_16(isp, src->plogx_ioparm[i].hi16, - &dst->plogx_ioparm[i].hi16); + ISP_IOXPUT_16(isp, src->plogx_ioparm[i].lo16, &dst->plogx_ioparm[i].lo16); + ISP_IOXPUT_16(isp, src->plogx_ioparm[i].hi16, &dst->plogx_ioparm[i].hi16); } } /* + * Report ID canonicalization + */ +void +isp_get_ridacq(ispsoftc_t *isp, isp_ridacq_t *src, isp_ridacq_t *dst) +{ + int i; + isp_get_hdr(isp, &src->ridacq_hdr, &dst->ridacq_hdr); + ISP_IOXGET_32(isp, &src->ridacq_handle, dst->ridacq_handle); + ISP_IOXGET_16(isp, &src->ridacq_vp_port_lo, dst->ridacq_vp_port_lo); + ISP_IOXGET_8(isp, &src->ridacq_vp_port_hi, dst->ridacq_vp_port_hi); + ISP_IOXGET_8(isp, &src->ridacq_format, dst->ridacq_format); + for (i = 0; i < sizeof (src->ridacq_map) / sizeof (src->ridacq_map[0]); i++) { + ISP_IOXGET_16(isp, &src->ridacq_map[i], dst->ridacq_map[i]); + } + for (i = 0; i < sizeof (src->ridacq_reserved1) / sizeof (src->ridacq_reserved1[0]); i++) { + ISP_IOXGET_16(isp, &src->ridacq_reserved1[i], dst->ridacq_reserved1[i]); + } + if (dst->ridacq_format == 0) { + ISP_IOXGET_8(isp, &src->un.type0.ridacq_vp_acquired, dst->un.type0.ridacq_vp_acquired); + ISP_IOXGET_8(isp, &src->un.type0.ridacq_vp_setup, dst->un.type0.ridacq_vp_setup); + ISP_IOXGET_16(isp, &src->un.type0.ridacq_reserved0, dst->un.type0.ridacq_reserved0); + } else if (dst->ridacq_format == 1) { + ISP_IOXGET_16(isp, &src->un.type1.ridacq_vp_count, 
dst->un.type1.ridacq_vp_count); + ISP_IOXGET_8(isp, &src->un.type1.ridacq_vp_index, dst->un.type1.ridacq_vp_index); + ISP_IOXGET_8(isp, &src->un.type1.ridacq_vp_status, dst->un.type1.ridacq_vp_status); + } else { + ISP_MEMZERO(&dst->un, sizeof (dst->un)); + } +} + + +/* * CT Passthru canonicalization */ void isp_get_ct_pt(ispsoftc_t *isp, isp_ct_pt_t *src, isp_ct_pt_t *dst) { int i; isp_get_hdr(isp, &src->ctp_header, &dst->ctp_header); ISP_IOXGET_32(isp, &src->ctp_handle, dst->ctp_handle); ISP_IOXGET_16(isp, &src->ctp_status, dst->ctp_status); ISP_IOXGET_16(isp, &src->ctp_nphdl, dst->ctp_nphdl); ISP_IOXGET_16(isp, &src->ctp_cmd_cnt, dst->ctp_cmd_cnt); - ISP_IOXGET_16(isp, &src->ctp_vpidx, dst->ctp_vpidx); + ISP_IOXGET_8(isp, &src->ctp_vpidx, dst->ctp_vpidx); + ISP_IOXGET_8(isp, &src->ctp_reserved0, dst->ctp_reserved0); ISP_IOXGET_16(isp, &src->ctp_time, dst->ctp_time); - ISP_IOXGET_16(isp, &src->ctp_reserved0, dst->ctp_reserved0); + ISP_IOXGET_16(isp, &src->ctp_reserved1, dst->ctp_reserved1); ISP_IOXGET_16(isp, &src->ctp_rsp_cnt, dst->ctp_rsp_cnt); for (i = 0; i < 5; i++) { - ISP_IOXGET_16(isp, &src->ctp_reserved1[i], - dst->ctp_reserved1[i]); + ISP_IOXGET_16(isp, &src->ctp_reserved2[i], dst->ctp_reserved2[i]); } ISP_IOXGET_32(isp, &src->ctp_rsp_bcnt, dst->ctp_rsp_bcnt); ISP_IOXGET_32(isp, &src->ctp_cmd_bcnt, dst->ctp_cmd_bcnt); for (i = 0; i < 2; i++) { - ISP_IOXGET_32(isp, &src->ctp_dataseg[i].ds_base, - dst->ctp_dataseg[i].ds_base); - ISP_IOXGET_32(isp, &src->ctp_dataseg[i].ds_basehi, - dst->ctp_dataseg[i].ds_basehi); - ISP_IOXGET_32(isp, &src->ctp_dataseg[i].ds_count, - dst->ctp_dataseg[i].ds_count); + ISP_IOXGET_32(isp, &src->ctp_dataseg[i].ds_base, dst->ctp_dataseg[i].ds_base); + ISP_IOXGET_32(isp, &src->ctp_dataseg[i].ds_basehi, dst->ctp_dataseg[i].ds_basehi); + ISP_IOXGET_32(isp, &src->ctp_dataseg[i].ds_count, dst->ctp_dataseg[i].ds_count); } } void isp_get_ms(ispsoftc_t *isp, isp_ms_t *src, isp_ms_t *dst) { int i; isp_get_hdr(isp, &src->ms_header, 
&dst->ms_header); ISP_IOXGET_32(isp, &src->ms_handle, dst->ms_handle); ISP_IOXGET_16(isp, &src->ms_nphdl, dst->ms_nphdl); ISP_IOXGET_16(isp, &src->ms_status, dst->ms_status); ISP_IOXGET_16(isp, &src->ms_flags, dst->ms_flags); ISP_IOXGET_16(isp, &src->ms_reserved1, dst->ms_reserved1); ISP_IOXGET_16(isp, &src->ms_time, dst->ms_time); ISP_IOXGET_16(isp, &src->ms_cmd_cnt, dst->ms_cmd_cnt); ISP_IOXGET_16(isp, &src->ms_tot_cnt, dst->ms_tot_cnt); ISP_IOXGET_8(isp, &src->ms_type, dst->ms_type); ISP_IOXGET_8(isp, &src->ms_r_ctl, dst->ms_r_ctl); ISP_IOXGET_16(isp, &src->ms_rxid, dst->ms_rxid); ISP_IOXGET_16(isp, &src->ms_reserved2, dst->ms_reserved2); ISP_IOXGET_32(isp, &src->ms_rsp_bcnt, dst->ms_rsp_bcnt); ISP_IOXGET_32(isp, &src->ms_cmd_bcnt, dst->ms_cmd_bcnt); for (i = 0; i < 2; i++) { - ISP_IOXGET_32(isp, &src->ms_dataseg[i].ds_base, - dst->ms_dataseg[i].ds_base); - ISP_IOXGET_32(isp, &src->ms_dataseg[i].ds_basehi, - dst->ms_dataseg[i].ds_basehi); - ISP_IOXGET_32(isp, &src->ms_dataseg[i].ds_count, - dst->ms_dataseg[i].ds_count); + ISP_IOXGET_32(isp, &src->ms_dataseg[i].ds_base, dst->ms_dataseg[i].ds_base); + ISP_IOXGET_32(isp, &src->ms_dataseg[i].ds_basehi, dst->ms_dataseg[i].ds_basehi); + ISP_IOXGET_32(isp, &src->ms_dataseg[i].ds_count, dst->ms_dataseg[i].ds_count); } } void isp_put_ct_pt(ispsoftc_t *isp, isp_ct_pt_t *src, isp_ct_pt_t *dst) { int i; isp_put_hdr(isp, &src->ctp_header, &dst->ctp_header); ISP_IOXPUT_32(isp, src->ctp_handle, &dst->ctp_handle); ISP_IOXPUT_16(isp, src->ctp_status, &dst->ctp_status); ISP_IOXPUT_16(isp, src->ctp_nphdl, &dst->ctp_nphdl); ISP_IOXPUT_16(isp, src->ctp_cmd_cnt, &dst->ctp_cmd_cnt); - ISP_IOXPUT_16(isp, src->ctp_vpidx, &dst->ctp_vpidx); + ISP_IOXPUT_8(isp, src->ctp_vpidx, &dst->ctp_vpidx); + ISP_IOXPUT_8(isp, src->ctp_reserved0, &dst->ctp_reserved0); ISP_IOXPUT_16(isp, src->ctp_time, &dst->ctp_time); - ISP_IOXPUT_16(isp, src->ctp_reserved0, &dst->ctp_reserved0); + ISP_IOXPUT_16(isp, src->ctp_reserved1, &dst->ctp_reserved1); 
ISP_IOXPUT_16(isp, src->ctp_rsp_cnt, &dst->ctp_rsp_cnt); for (i = 0; i < 5; i++) { - ISP_IOXPUT_16(isp, src->ctp_reserved1[i], - &dst->ctp_reserved1[i]); + ISP_IOXPUT_16(isp, src->ctp_reserved2[i], &dst->ctp_reserved2[i]); } ISP_IOXPUT_32(isp, src->ctp_rsp_bcnt, &dst->ctp_rsp_bcnt); ISP_IOXPUT_32(isp, src->ctp_cmd_bcnt, &dst->ctp_cmd_bcnt); for (i = 0; i < 2; i++) { - ISP_IOXPUT_32(isp, src->ctp_dataseg[i].ds_base, - &dst->ctp_dataseg[i].ds_base); - ISP_IOXPUT_32(isp, src->ctp_dataseg[i].ds_basehi, - &dst->ctp_dataseg[i].ds_basehi); - ISP_IOXPUT_32(isp, src->ctp_dataseg[i].ds_count, - &dst->ctp_dataseg[i].ds_count); + ISP_IOXPUT_32(isp, src->ctp_dataseg[i].ds_base, &dst->ctp_dataseg[i].ds_base); + ISP_IOXPUT_32(isp, src->ctp_dataseg[i].ds_basehi, &dst->ctp_dataseg[i].ds_basehi); + ISP_IOXPUT_32(isp, src->ctp_dataseg[i].ds_count, &dst->ctp_dataseg[i].ds_count); } } void isp_put_ms(ispsoftc_t *isp, isp_ms_t *src, isp_ms_t *dst) { int i; isp_put_hdr(isp, &src->ms_header, &dst->ms_header); ISP_IOXPUT_32(isp, src->ms_handle, &dst->ms_handle); ISP_IOXPUT_16(isp, src->ms_nphdl, &dst->ms_nphdl); ISP_IOXPUT_16(isp, src->ms_status, &dst->ms_status); ISP_IOXPUT_16(isp, src->ms_flags, &dst->ms_flags); ISP_IOXPUT_16(isp, src->ms_reserved1, &dst->ms_reserved1); ISP_IOXPUT_16(isp, src->ms_time, &dst->ms_time); ISP_IOXPUT_16(isp, src->ms_cmd_cnt, &dst->ms_cmd_cnt); ISP_IOXPUT_16(isp, src->ms_tot_cnt, &dst->ms_tot_cnt); ISP_IOXPUT_8(isp, src->ms_type, &dst->ms_type); ISP_IOXPUT_8(isp, src->ms_r_ctl, &dst->ms_r_ctl); ISP_IOXPUT_16(isp, src->ms_rxid, &dst->ms_rxid); ISP_IOXPUT_16(isp, src->ms_reserved2, &dst->ms_reserved2); ISP_IOXPUT_32(isp, src->ms_rsp_bcnt, &dst->ms_rsp_bcnt); ISP_IOXPUT_32(isp, src->ms_cmd_bcnt, &dst->ms_cmd_bcnt); for (i = 0; i < 2; i++) { - ISP_IOXPUT_32(isp, src->ms_dataseg[i].ds_base, - &dst->ms_dataseg[i].ds_base); - ISP_IOXPUT_32(isp, src->ms_dataseg[i].ds_basehi, - &dst->ms_dataseg[i].ds_basehi); - ISP_IOXPUT_32(isp, src->ms_dataseg[i].ds_count, - 
&dst->ms_dataseg[i].ds_count); + ISP_IOXPUT_32(isp, src->ms_dataseg[i].ds_base, &dst->ms_dataseg[i].ds_base); + ISP_IOXPUT_32(isp, src->ms_dataseg[i].ds_basehi, &dst->ms_dataseg[i].ds_basehi); + ISP_IOXPUT_32(isp, src->ms_dataseg[i].ds_count, &dst->ms_dataseg[i].ds_count); } } /* * Generic SNS request - not particularly useful since the per-command data * isn't always 16 bit words. */ void isp_put_sns_request(ispsoftc_t *isp, sns_screq_t *src, sns_screq_t *dst) { int i, nw = (int) src->snscb_sblen; ISP_IOXPUT_16(isp, src->snscb_rblen, &dst->snscb_rblen); for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->snscb_addr[i], &dst->snscb_addr[i]); } ISP_IOXPUT_16(isp, src->snscb_sblen, &dst->snscb_sblen); for (i = 0; i < nw; i++) { ISP_IOXPUT_16(isp, src->snscb_data[i], &dst->snscb_data[i]); } - } void -isp_put_gid_ft_request(ispsoftc_t *isp, sns_gid_ft_req_t *src, - sns_gid_ft_req_t *dst) +isp_put_gid_ft_request(ispsoftc_t *isp, sns_gid_ft_req_t *src, sns_gid_ft_req_t *dst) { ISP_IOXPUT_16(isp, src->snscb_rblen, &dst->snscb_rblen); ISP_IOXPUT_16(isp, src->snscb_reserved0, &dst->snscb_reserved0); ISP_IOXPUT_16(isp, src->snscb_addr[0], &dst->snscb_addr[0]); ISP_IOXPUT_16(isp, src->snscb_addr[1], &dst->snscb_addr[1]); ISP_IOXPUT_16(isp, src->snscb_addr[2], &dst->snscb_addr[2]); ISP_IOXPUT_16(isp, src->snscb_addr[3], &dst->snscb_addr[3]); ISP_IOXPUT_16(isp, src->snscb_sblen, &dst->snscb_sblen); ISP_IOXPUT_16(isp, src->snscb_reserved1, &dst->snscb_reserved1); ISP_IOXPUT_16(isp, src->snscb_cmd, &dst->snscb_cmd); ISP_IOXPUT_16(isp, src->snscb_mword_div_2, &dst->snscb_mword_div_2); ISP_IOXPUT_32(isp, src->snscb_reserved3, &dst->snscb_reserved3); ISP_IOXPUT_32(isp, src->snscb_fc4_type, &dst->snscb_fc4_type); } void -isp_put_gxn_id_request(ispsoftc_t *isp, sns_gxn_id_req_t *src, - sns_gxn_id_req_t *dst) +isp_put_gxn_id_request(ispsoftc_t *isp, sns_gxn_id_req_t *src, sns_gxn_id_req_t *dst) { ISP_IOXPUT_16(isp, src->snscb_rblen, &dst->snscb_rblen); ISP_IOXPUT_16(isp, 
src->snscb_reserved0, &dst->snscb_reserved0); ISP_IOXPUT_16(isp, src->snscb_addr[0], &dst->snscb_addr[0]); ISP_IOXPUT_16(isp, src->snscb_addr[1], &dst->snscb_addr[1]); ISP_IOXPUT_16(isp, src->snscb_addr[2], &dst->snscb_addr[2]); ISP_IOXPUT_16(isp, src->snscb_addr[3], &dst->snscb_addr[3]); ISP_IOXPUT_16(isp, src->snscb_sblen, &dst->snscb_sblen); ISP_IOXPUT_16(isp, src->snscb_reserved1, &dst->snscb_reserved1); ISP_IOXPUT_16(isp, src->snscb_cmd, &dst->snscb_cmd); ISP_IOXPUT_16(isp, src->snscb_reserved2, &dst->snscb_reserved2); ISP_IOXPUT_32(isp, src->snscb_reserved3, &dst->snscb_reserved3); ISP_IOXPUT_32(isp, src->snscb_portid, &dst->snscb_portid); } /* * Generic SNS response - not particularly useful since the per-command data * isn't always 16 bit words. */ void -isp_get_sns_response(ispsoftc_t *isp, sns_scrsp_t *src, - sns_scrsp_t *dst, int nwords) +isp_get_sns_response(ispsoftc_t *isp, sns_scrsp_t *src, sns_scrsp_t *dst, int nwords) { int i; isp_get_ct_hdr(isp, &src->snscb_cthdr, &dst->snscb_cthdr); ISP_IOXGET_8(isp, &src->snscb_port_type, dst->snscb_port_type); for (i = 0; i < 3; i++) { ISP_IOXGET_8(isp, &src->snscb_port_id[i], dst->snscb_port_id[i]); } for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->snscb_portname[i], dst->snscb_portname[i]); } for (i = 0; i < nwords; i++) { ISP_IOXGET_16(isp, &src->snscb_data[i], dst->snscb_data[i]); } } void -isp_get_gid_ft_response(ispsoftc_t *isp, sns_gid_ft_rsp_t *src, - sns_gid_ft_rsp_t *dst, int nwords) +isp_get_gid_ft_response(ispsoftc_t *isp, sns_gid_ft_rsp_t *src, sns_gid_ft_rsp_t *dst, int nwords) { int i; isp_get_ct_hdr(isp, &src->snscb_cthdr, &dst->snscb_cthdr); for (i = 0; i < nwords; i++) { int j; - ISP_IOXGET_8(isp, - &src->snscb_ports[i].control, - dst->snscb_ports[i].control); + ISP_IOXGET_8(isp, &src->snscb_ports[i].control, dst->snscb_ports[i].control); for (j = 0; j < 3; j++) { - ISP_IOXGET_8(isp, - &src->snscb_ports[i].portid[j], - dst->snscb_ports[i].portid[j]); + ISP_IOXGET_8(isp, 
&src->snscb_ports[i].portid[j], dst->snscb_ports[i].portid[j]); } if (dst->snscb_ports[i].control & 0x80) { break; } } } void -isp_get_gxn_id_response(ispsoftc_t *isp, sns_gxn_id_rsp_t *src, - sns_gxn_id_rsp_t *dst) +isp_get_gxn_id_response(ispsoftc_t *isp, sns_gxn_id_rsp_t *src, sns_gxn_id_rsp_t *dst) { int i; isp_get_ct_hdr(isp, &src->snscb_cthdr, &dst->snscb_cthdr); - for (i = 0; i < 8; i++) + for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->snscb_wwn[i], dst->snscb_wwn[i]); + } } void -isp_get_gff_id_response(ispsoftc_t *isp, sns_gff_id_rsp_t *src, - sns_gff_id_rsp_t *dst) +isp_get_gff_id_response(ispsoftc_t *isp, sns_gff_id_rsp_t *src, sns_gff_id_rsp_t *dst) { int i; isp_get_ct_hdr(isp, &src->snscb_cthdr, &dst->snscb_cthdr); for (i = 0; i < 32; i++) { - ISP_IOXGET_32(isp, &src->snscb_fc4_features[i], - dst->snscb_fc4_features[i]); + ISP_IOXGET_32(isp, &src->snscb_fc4_features[i], dst->snscb_fc4_features[i]); } } void -isp_get_ga_nxt_response(ispsoftc_t *isp, sns_ga_nxt_rsp_t *src, - sns_ga_nxt_rsp_t *dst) +isp_get_ga_nxt_response(ispsoftc_t *isp, sns_ga_nxt_rsp_t *src, sns_ga_nxt_rsp_t *dst) { int i; isp_get_ct_hdr(isp, &src->snscb_cthdr, &dst->snscb_cthdr); ISP_IOXGET_8(isp, &src->snscb_port_type, dst->snscb_port_type); for (i = 0; i < 3; i++) { - ISP_IOXGET_8(isp, &src->snscb_port_id[i], - dst->snscb_port_id[i]); + ISP_IOXGET_8(isp, &src->snscb_port_id[i], dst->snscb_port_id[i]); } for (i = 0; i < 8; i++) { - ISP_IOXGET_8(isp, &src->snscb_portname[i], - dst->snscb_portname[i]); + ISP_IOXGET_8(isp, &src->snscb_portname[i], dst->snscb_portname[i]); } ISP_IOXGET_8(isp, &src->snscb_pnlen, dst->snscb_pnlen); for (i = 0; i < 255; i++) { ISP_IOXGET_8(isp, &src->snscb_pname[i], dst->snscb_pname[i]); } for (i = 0; i < 8; i++) { - ISP_IOXGET_8(isp, &src->snscb_nodename[i], - dst->snscb_nodename[i]); + ISP_IOXGET_8(isp, &src->snscb_nodename[i], dst->snscb_nodename[i]); } ISP_IOXGET_8(isp, &src->snscb_nnlen, dst->snscb_nnlen); for (i = 0; i < 255; i++) { 
ISP_IOXGET_8(isp, &src->snscb_nname[i], dst->snscb_nname[i]); } for (i = 0; i < 8; i++) { - ISP_IOXGET_8(isp, &src->snscb_ipassoc[i], - dst->snscb_ipassoc[i]); + ISP_IOXGET_8(isp, &src->snscb_ipassoc[i], dst->snscb_ipassoc[i]); } for (i = 0; i < 16; i++) { ISP_IOXGET_8(isp, &src->snscb_ipaddr[i], dst->snscb_ipaddr[i]); } for (i = 0; i < 4; i++) { - ISP_IOXGET_8(isp, &src->snscb_svc_class[i], - dst->snscb_svc_class[i]); + ISP_IOXGET_8(isp, &src->snscb_svc_class[i], dst->snscb_svc_class[i]); } for (i = 0; i < 32; i++) { - ISP_IOXGET_8(isp, &src->snscb_fc4_types[i], - dst->snscb_fc4_types[i]); + ISP_IOXGET_8(isp, &src->snscb_fc4_types[i], dst->snscb_fc4_types[i]); } for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->snscb_fpname[i], dst->snscb_fpname[i]); } ISP_IOXGET_8(isp, &src->snscb_reserved, dst->snscb_reserved); for (i = 0; i < 3; i++) { - ISP_IOXGET_8(isp, &src->snscb_hardaddr[i], - dst->snscb_hardaddr[i]); + ISP_IOXGET_8(isp, &src->snscb_hardaddr[i], dst->snscb_hardaddr[i]); } } void isp_get_els(ispsoftc_t *isp, els_t *src, els_t *dst) { int i; isp_get_hdr(isp, &src->els_hdr, &dst->els_hdr); ISP_IOXGET_32(isp, &src->els_handle, dst->els_handle); ISP_IOXGET_16(isp, &src->els_status, dst->els_status); ISP_IOXGET_16(isp, &src->els_nphdl, dst->els_nphdl); ISP_IOXGET_16(isp, &src->els_xmit_dsd_count, dst->els_xmit_dsd_count); ISP_IOXGET_8(isp, &src->els_vphdl, dst->els_vphdl); ISP_IOXGET_8(isp, &src->els_sof, dst->els_sof); ISP_IOXGET_32(isp, &src->els_rxid, dst->els_rxid); ISP_IOXGET_16(isp, &src->els_recv_dsd_count, dst->els_recv_dsd_count); ISP_IOXGET_8(isp, &src->els_opcode, dst->els_opcode); ISP_IOXGET_8(isp, &src->els_reserved2, dst->els_reserved1); ISP_IOXGET_8(isp, &src->els_did_lo, dst->els_did_lo); ISP_IOXGET_8(isp, &src->els_did_mid, dst->els_did_mid); ISP_IOXGET_8(isp, &src->els_did_hi, dst->els_did_hi); ISP_IOXGET_8(isp, &src->els_reserved2, dst->els_reserved2); ISP_IOXGET_16(isp, &src->els_reserved3, dst->els_reserved3); ISP_IOXGET_16(isp, 
&src->els_ctl_flags, dst->els_ctl_flags); ISP_IOXGET_32(isp, &src->els_bytecnt, dst->els_bytecnt); ISP_IOXGET_32(isp, &src->els_subcode1, dst->els_subcode1); ISP_IOXGET_32(isp, &src->els_subcode2, dst->els_subcode2); for (i = 0; i < 20; i++) { - ISP_IOXGET_8(isp, &src->els_reserved4[i], - dst->els_reserved4[i]); + ISP_IOXGET_8(isp, &src->els_reserved4[i], dst->els_reserved4[i]); } } void isp_put_els(ispsoftc_t *isp, els_t *src, els_t *dst) { isp_put_hdr(isp, &src->els_hdr, &dst->els_hdr); ISP_IOXPUT_32(isp, src->els_handle, &dst->els_handle); ISP_IOXPUT_16(isp, src->els_status, &dst->els_status); ISP_IOXPUT_16(isp, src->els_nphdl, &dst->els_nphdl); ISP_IOXPUT_16(isp, src->els_xmit_dsd_count, &dst->els_xmit_dsd_count); ISP_IOXPUT_8(isp, src->els_vphdl, &dst->els_vphdl); ISP_IOXPUT_8(isp, src->els_sof, &dst->els_sof); ISP_IOXPUT_32(isp, src->els_rxid, &dst->els_rxid); ISP_IOXPUT_16(isp, src->els_recv_dsd_count, &dst->els_recv_dsd_count); ISP_IOXPUT_8(isp, src->els_opcode, &dst->els_opcode); ISP_IOXPUT_8(isp, src->els_reserved2, &dst->els_reserved1); ISP_IOXPUT_8(isp, src->els_did_lo, &dst->els_did_lo); ISP_IOXPUT_8(isp, src->els_did_mid, &dst->els_did_mid); ISP_IOXPUT_8(isp, src->els_did_hi, &dst->els_did_hi); ISP_IOXPUT_8(isp, src->els_reserved2, &dst->els_reserved2); ISP_IOXPUT_16(isp, src->els_reserved3, &dst->els_reserved3); ISP_IOXPUT_16(isp, src->els_ctl_flags, &dst->els_ctl_flags); ISP_IOXPUT_32(isp, src->els_recv_bytecnt, &dst->els_recv_bytecnt); ISP_IOXPUT_32(isp, src->els_xmit_bytecnt, &dst->els_xmit_bytecnt); ISP_IOXPUT_32(isp, src->els_xmit_dsd_length, &dst->els_xmit_dsd_length); ISP_IOXPUT_16(isp, src->els_xmit_dsd_a1500, &dst->els_xmit_dsd_a1500); ISP_IOXPUT_16(isp, src->els_xmit_dsd_a3116, &dst->els_xmit_dsd_a3116); ISP_IOXPUT_16(isp, src->els_xmit_dsd_a4732, &dst->els_xmit_dsd_a4732); ISP_IOXPUT_16(isp, src->els_xmit_dsd_a6348, &dst->els_xmit_dsd_a6348); ISP_IOXPUT_32(isp, src->els_recv_dsd_length, &dst->els_recv_dsd_length); ISP_IOXPUT_16(isp, 
src->els_recv_dsd_a1500, &dst->els_recv_dsd_a1500); ISP_IOXPUT_16(isp, src->els_recv_dsd_a3116, &dst->els_recv_dsd_a3116); ISP_IOXPUT_16(isp, src->els_recv_dsd_a4732, &dst->els_recv_dsd_a4732); ISP_IOXPUT_16(isp, src->els_recv_dsd_a6348, &dst->els_recv_dsd_a6348); } /* * FC Structure Canonicalization */ void isp_get_fc_hdr(ispsoftc_t *isp, fc_hdr_t *src, fc_hdr_t *dst) { ISP_IOZGET_8(isp, &src->r_ctl, dst->r_ctl); ISP_IOZGET_8(isp, &src->d_id[0], dst->d_id[0]); ISP_IOZGET_8(isp, &src->d_id[1], dst->d_id[1]); ISP_IOZGET_8(isp, &src->d_id[2], dst->d_id[2]); ISP_IOZGET_8(isp, &src->cs_ctl, dst->cs_ctl); ISP_IOZGET_8(isp, &src->s_id[0], dst->s_id[0]); ISP_IOZGET_8(isp, &src->s_id[1], dst->s_id[1]); ISP_IOZGET_8(isp, &src->s_id[2], dst->s_id[2]); ISP_IOZGET_8(isp, &src->type, dst->type); - ISP_IOZGET_8(isp, &src->f_ctl, dst->f_ctl); + ISP_IOZGET_8(isp, &src->f_ctl[0], dst->f_ctl[0]); + ISP_IOZGET_8(isp, &src->f_ctl[1], dst->f_ctl[1]); + ISP_IOZGET_8(isp, &src->f_ctl[2], dst->f_ctl[2]); ISP_IOZGET_8(isp, &src->seq_id, dst->seq_id); ISP_IOZGET_8(isp, &src->df_ctl, dst->df_ctl); ISP_IOZGET_16(isp, &src->seq_cnt, dst->seq_cnt); - /* XXX SOMETHING WAS AND STILL CONTINUES WRONG HERE XXX */ -#if 0 ISP_IOZGET_16(isp, &src->ox_id, dst->ox_id); ISP_IOZGET_16(isp, &src->rx_id, dst->rx_id); -#else - ISP_IOZGET_32(isp, &src->ox_id, dst->parameter); - dst->ox_id = dst->parameter; - dst->rx_id = dst->parameter >> 16; -#endif ISP_IOZGET_32(isp, &src->parameter, dst->parameter); } void isp_get_fcp_cmnd_iu(ispsoftc_t *isp, fcp_cmnd_iu_t *src, fcp_cmnd_iu_t *dst) { int i; for (i = 0; i < 8; i++) { ISP_IOZGET_8(isp, &src->fcp_cmnd_lun[i], dst->fcp_cmnd_lun[i]); } ISP_IOZGET_8(isp, &src->fcp_cmnd_crn, dst->fcp_cmnd_crn); - ISP_IOZGET_8(isp, &src->fcp_cmnd_task_attribute, - dst->fcp_cmnd_task_attribute); - ISP_IOZGET_8(isp, &src->fcp_cmnd_task_management, - dst->fcp_cmnd_task_management); - ISP_IOZGET_8(isp, &src->fcp_cmnd_alen_datadir, - dst->fcp_cmnd_alen_datadir); + ISP_IOZGET_8(isp, 
&src->fcp_cmnd_task_attribute, dst->fcp_cmnd_task_attribute); + ISP_IOZGET_8(isp, &src->fcp_cmnd_task_management, dst->fcp_cmnd_task_management); + ISP_IOZGET_8(isp, &src->fcp_cmnd_alen_datadir, dst->fcp_cmnd_alen_datadir); for (i = 0; i < 16; i++) { - ISP_IOZGET_8(isp, &src->cdb_dl.sf.fcp_cmnd_cdb[i], - dst->cdb_dl.sf.fcp_cmnd_cdb[i]); + ISP_IOZGET_8(isp, &src->cdb_dl.sf.fcp_cmnd_cdb[i], dst->cdb_dl.sf.fcp_cmnd_cdb[i]); } - ISP_IOZGET_32(isp, &src->cdb_dl.sf.fcp_cmnd_dl, - dst->cdb_dl.sf.fcp_cmnd_dl); + ISP_IOZGET_32(isp, &src->cdb_dl.sf.fcp_cmnd_dl, dst->cdb_dl.sf.fcp_cmnd_dl); } void isp_put_rft_id(ispsoftc_t *isp, rft_id_t *src, rft_id_t *dst) { int i; isp_put_ct_hdr(isp, &src->rftid_hdr, &dst->rftid_hdr); ISP_IOZPUT_8(isp, src->rftid_reserved, &dst->rftid_reserved); for (i = 0; i < 3; i++) { ISP_IOZPUT_8(isp, src->rftid_portid[i], &dst->rftid_portid[i]); } for (i = 0; i < 8; i++) { - ISP_IOZPUT_32(isp, src->rftid_fc4types[i], - &dst->rftid_fc4types[i]); + ISP_IOZPUT_32(isp, src->rftid_fc4types[i], &dst->rftid_fc4types[i]); } } void isp_get_ct_hdr(ispsoftc_t *isp, ct_hdr_t *src, ct_hdr_t *dst) { ISP_IOZGET_8(isp, &src->ct_revision, dst->ct_revision); ISP_IOZGET_8(isp, &src->ct_in_id[0], dst->ct_in_id[0]); ISP_IOZGET_8(isp, &src->ct_in_id[1], dst->ct_in_id[1]); ISP_IOZGET_8(isp, &src->ct_in_id[2], dst->ct_in_id[2]); ISP_IOZGET_8(isp, &src->ct_fcs_type, dst->ct_fcs_type); ISP_IOZGET_8(isp, &src->ct_fcs_subtype, dst->ct_fcs_subtype); ISP_IOZGET_8(isp, &src->ct_options, dst->ct_options); ISP_IOZGET_8(isp, &src->ct_reserved0, dst->ct_reserved0); ISP_IOZGET_16(isp, &src->ct_cmd_resp, dst->ct_cmd_resp); ISP_IOZGET_16(isp, &src->ct_bcnt_resid, dst->ct_bcnt_resid); ISP_IOZGET_8(isp, &src->ct_reserved1, dst->ct_reserved1); ISP_IOZGET_8(isp, &src->ct_reason, dst->ct_reason); ISP_IOZGET_8(isp, &src->ct_explanation, dst->ct_explanation); ISP_IOZGET_8(isp, &src->ct_vunique, dst->ct_vunique); } void isp_put_ct_hdr(ispsoftc_t *isp, ct_hdr_t *src, ct_hdr_t *dst) { 
ISP_IOZPUT_8(isp, src->ct_revision, &dst->ct_revision); ISP_IOZPUT_8(isp, src->ct_in_id[0], &dst->ct_in_id[0]); ISP_IOZPUT_8(isp, src->ct_in_id[1], &dst->ct_in_id[1]); ISP_IOZPUT_8(isp, src->ct_in_id[2], &dst->ct_in_id[2]); ISP_IOZPUT_8(isp, src->ct_fcs_type, &dst->ct_fcs_type); ISP_IOZPUT_8(isp, src->ct_fcs_subtype, &dst->ct_fcs_subtype); ISP_IOZPUT_8(isp, src->ct_options, &dst->ct_options); ISP_IOZPUT_8(isp, src->ct_reserved0, &dst->ct_reserved0); ISP_IOZPUT_16(isp, src->ct_cmd_resp, &dst->ct_cmd_resp); ISP_IOZPUT_16(isp, src->ct_bcnt_resid, &dst->ct_bcnt_resid); ISP_IOZPUT_8(isp, src->ct_reserved1, &dst->ct_reserved1); ISP_IOZPUT_8(isp, src->ct_reason, &dst->ct_reason); ISP_IOZPUT_8(isp, src->ct_explanation, &dst->ct_explanation); ISP_IOZPUT_8(isp, src->ct_vunique, &dst->ct_vunique); } #ifdef ISP_TARGET_MODE + +/* + * Command shipping- finish off first queue entry and do dma mapping and + * additional segments as needed. + * + * Called with the first queue entry at least partially filled out. + */ int +isp_send_tgt_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t totalcnt, isp_ddir_t ddir, void *snsptr, uint32_t snslen) +{ + uint8_t storage[QENTRY_LEN], storage2[QENTRY_LEN]; + uint8_t type, nqe; + uint32_t seg, curseg, seglim, nxt, nxtnxt; + ispds_t *dsp = NULL; + ispds64_t *dsp64 = NULL; + void *qe0, *qe1, *sqe = NULL; + + qe0 = isp_getrqentry(isp); + if (qe0 == NULL) { + return (CMD_EAGAIN); + } + nxt = ISP_NXT_QENTRY(isp->isp_reqidx, RQUEST_QUEUE_LEN(isp)); + + type = ((isphdr_t *)fqe)->rqs_entry_type; + nqe = 1; + seglim = 0; + + /* + * If we have no data to transmit, just copy the first IOCB and start it up. + */ + if (ddir != ISP_NOXFR) { + /* + * First, figure out how many pieces of data to transfer and what kind and how many we can put into the first queue entry. 
+ */ + switch (type) { + case RQSTYPE_CTIO: + dsp = ((ct_entry_t *)fqe)->ct_dataseg; + seglim = ISP_RQDSEG; + break; + case RQSTYPE_CTIO2: + case RQSTYPE_CTIO3: + { + ct2_entry_t *ct = fqe, *ct2 = (ct2_entry_t *) storage2; + uint16_t swd = ct->rsp.m0.ct_scsi_status & 0xff; + + if ((ct->ct_flags & CT2_SENDSTATUS) && (swd || ct->ct_resid)) { + memcpy(ct2, ct, QENTRY_LEN); + /* + * Clear fields from first CTIO2 that now need to be cleared + */ + ct->ct_header.rqs_seqno = 0; + ct->ct_flags &= ~(CT2_SENDSTATUS|CT2_CCINCR|CT2_FASTPOST); + ct->ct_resid = 0; + ct->ct_syshandle = 0; + ct->rsp.m0.ct_scsi_status = 0; + + /* + * Reset fields in the second CTIO2 as appropriate. + */ + ct2->ct_flags &= ~(CT2_FLAG_MMASK|CT2_DATAMASK|CT2_FASTPOST); + ct2->ct_flags |= CT2_NO_DATA|CT2_FLAG_MODE1; + ct2->ct_seg_count = 0; + ct2->ct_reloff = 0; + memset(&ct2->rsp, 0, sizeof (ct2->rsp)); + if (swd == SCSI_CHECK && snsptr && snslen) { + ct2->rsp.m1.ct_senselen = min(snslen, MAXRESPLEN); + memcpy(ct2->rsp.m1.ct_resp, snsptr, ct2->rsp.m1.ct_senselen); + swd |= CT2_SNSLEN_VALID; + } + if (ct2->ct_resid > 0) { + swd |= CT2_DATA_UNDER; + } else if (ct2->ct_resid < 0) { + swd |= CT2_DATA_OVER; + } + ct2->rsp.m1.ct_scsi_status = swd; + sqe = storage2; + } + if (type == RQSTYPE_CTIO2) { + dsp = ct->rsp.m0.u.ct_dataseg; + seglim = ISP_RQDSEG_T2; + } else { + dsp64 = ct->rsp.m0.u.ct_dataseg64; + seglim = ISP_RQDSEG_T3; + } + break; + } + case RQSTYPE_CTIO7: + { + ct7_entry_t *ct = fqe, *ct2 = (ct7_entry_t *)storage2; + uint16_t swd = ct->ct_scsi_status & 0xff; + + dsp64 = &ct->rsp.m0.ds; + seglim = 1; + if ((ct->ct_flags & CT7_SENDSTATUS) && (swd || ct->ct_resid)) { + memcpy(ct2, ct, sizeof (ct7_entry_t)); + + /* + * Clear fields from first CTIO7 that now need to be cleared + */ + ct->ct_header.rqs_seqno = 0; + ct->ct_flags &= ~CT7_SENDSTATUS; + ct->ct_resid = 0; + ct->ct_syshandle = 0; + ct->ct_scsi_status = 0; + + /* + * Reset fields in the second CTIO7 as appropriate. 
+ */ + ct2->ct_flags &= ~(CT7_FLAG_MMASK|CT7_DATAMASK); + ct2->ct_flags |= CT7_NO_DATA|CT7_NO_DATA|CT7_FLAG_MODE1; + ct2->ct_seg_count = 0; + memset(&ct2->rsp, 0, sizeof (ct2->rsp)); + if (swd == SCSI_CHECK && snsptr && snslen) { + ct2->rsp.m1.ct_resplen = min(snslen, MAXRESPLEN_24XX); + memcpy(ct2->rsp.m1.ct_resp, snsptr, ct2->rsp.m1.ct_resplen); + swd |= (FCP_SNSLEN_VALID << 8); + } + if (ct2->ct_resid < 0) { + swd |= (FCP_RESID_OVERFLOW << 8); + } else if (ct2->ct_resid > 0) { + swd |= (FCP_RESID_UNDERFLOW << 8); + } + ct2->ct_scsi_status = swd; + sqe = storage2; + } + break; + } + default: + return (CMD_COMPLETE); + } + } + + /* + * Fill out the data transfer stuff in the first queue entry + */ + if (seglim > nsegs) { + seglim = nsegs; + } + + for (seg = curseg = 0; curseg < seglim; curseg++) { + if (dsp64) { + XS_GET_DMA64_SEG(dsp64++, segp, seg++); + } else { + XS_GET_DMA_SEG(dsp++, segp, seg++); + } + } + + /* + * First, if we are sending status with data and we have a non-zero + * status or non-zero residual, we have to make a synthetic extra CTIO + * that contains the status that we'll ship separately (FC cards only). + */ + + /* + * Second, start building additional continuation segments as needed. 
+ */ + while (seg < nsegs) { + nxtnxt = ISP_NXT_QENTRY(nxt, RQUEST_QUEUE_LEN(isp)); + if (nxtnxt == isp->isp_reqodx) { + return (CMD_EAGAIN); + } + ISP_MEMZERO(storage, QENTRY_LEN); + qe1 = ISP_QUEUE_ENTRY(isp->isp_rquest, nxt); + nxt = nxtnxt; + if (dsp64) { + ispcontreq64_t *crq = (ispcontreq64_t *) storage; + seglim = ISP_CDSEG64; + crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; + crq->req_header.rqs_entry_count = 1; + dsp64 = crq->req_dataseg; + } else { + ispcontreq_t *crq = (ispcontreq_t *) storage; + seglim = ISP_CDSEG; + crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; + crq->req_header.rqs_entry_count = 1; + dsp = crq->req_dataseg; + } + if (seg + seglim > nsegs) { + seglim = nsegs - seg; + } + for (curseg = 0; curseg < seglim; curseg++) { + if (dsp64) { + XS_GET_DMA64_SEG(dsp64++, segp, seg++); + } else { + XS_GET_DMA_SEG(dsp++, segp, seg++); + } + } + if (dsp64) { + isp_put_cont64_req(isp, (ispcontreq64_t *)storage, qe1); + } else { + isp_put_cont_req(isp, (ispcontreq_t *)storage, qe1); + } + if (isp->isp_dblev & ISP_LOGTDEBUG1) { + isp_print_bytes(isp, "additional queue entry", QENTRY_LEN, storage); + } + nqe++; + } + + /* + * If we have a synthetic queue entry to complete things, do it here. 
+ */ + if (sqe) { + nxtnxt = ISP_NXT_QENTRY(nxt, RQUEST_QUEUE_LEN(isp)); + if (nxtnxt == isp->isp_reqodx) { + return (CMD_EAGAIN); + } + qe1 = ISP_QUEUE_ENTRY(isp->isp_rquest, nxt); + nxt = nxtnxt; + if (type == RQSTYPE_CTIO7) { + isp_put_ctio7(isp, sqe, qe1); + } else { + isp_put_ctio2(isp, sqe, qe1); + } + if (isp->isp_dblev & ISP_LOGTDEBUG1) { + isp_print_bytes(isp, "synthetic final queue entry", QENTRY_LEN, storage2); + } + } + + ((isphdr_t *)fqe)->rqs_entry_count = nqe; + switch (type) { + case RQSTYPE_CTIO: + ((ct_entry_t *)fqe)->ct_seg_count = nsegs; + isp_put_ctio(isp, fqe, qe0); + break; + case RQSTYPE_CTIO2: + case RQSTYPE_CTIO3: + ((ct2_entry_t *)fqe)->ct_seg_count = nsegs; + if (ISP_CAP_2KLOGIN(isp)) { + isp_put_ctio2e(isp, fqe, qe0); + } else { + isp_put_ctio2(isp, fqe, qe0); + } + break; + case RQSTYPE_CTIO7: + ((ct7_entry_t *)fqe)->ct_seg_count = nsegs; + isp_put_ctio7(isp, fqe, qe0); + break; + default: + return (CMD_COMPLETE); + } + if (isp->isp_dblev & ISP_LOGTDEBUG1) { + isp_print_bytes(isp, "first queue entry", QENTRY_LEN, fqe); + } + ISP_ADD_REQUEST(isp, nxt); + return (CMD_QUEUED); +} + +int isp_save_xs_tgt(ispsoftc_t *isp, void *xs, uint32_t *handlep) { int i; for (i = 0; i < (int) isp->isp_maxcmds; i++) { if (isp->isp_tgtlist[i] == NULL) { break; } } if (i == isp->isp_maxcmds) { return (-1); } isp->isp_tgtlist[i] = xs; *handlep = (i+1) | 0x8000; return (0); } void * isp_find_xs_tgt(ispsoftc_t *isp, uint32_t handle) { - if (handle == 0 || IS_TARGET_HANDLE(handle) == 0 || - (handle & ISP_HANDLE_MASK) > isp->isp_maxcmds) { - isp_prt(isp, ISP_LOGERR, "bad handle in isp_find_xs_tgt"); + if (handle == 0 || IS_TARGET_HANDLE(handle) == 0 || (handle & ISP_HANDLE_MASK) > isp->isp_maxcmds) { + isp_prt(isp, ISP_LOGERR, "bad handle %u in isp_find_xs_tgt", handle); return (NULL); } else { return (isp->isp_tgtlist[(handle & ISP_HANDLE_MASK) - 1]); } } uint32_t isp_find_tgt_handle(ispsoftc_t *isp, void *xs) { int i; if (xs != NULL) { for (i = 0; i < 
isp->isp_maxcmds; i++) { if (isp->isp_tgtlist[i] == xs) { - return ((i+1) & ISP_HANDLE_MASK); + uint32_t handle = i; + handle += 1; + handle &= ISP_HANDLE_MASK; + handle |= 0x8000; + return (handle); } } } return (0); } void isp_destroy_tgt_handle(ispsoftc_t *isp, uint32_t handle) { - if (handle == 0 || IS_TARGET_HANDLE(handle) == 0 || - (handle & ISP_HANDLE_MASK) > isp->isp_maxcmds) { - isp_prt(isp, ISP_LOGERR, - "bad handle in isp_destroy_tgt_handle"); + if (handle == 0 || IS_TARGET_HANDLE(handle) == 0 || (handle & ISP_HANDLE_MASK) > isp->isp_maxcmds) { + isp_prt(isp, ISP_LOGERR, "bad handle in isp_destroy_tgt_handle"); } else { isp->isp_tgtlist[(handle & ISP_HANDLE_MASK) - 1] = NULL; } } +/* + * Find target mode entries + */ +int +isp_find_pdb_by_wwn(ispsoftc_t *isp, int chan, uint64_t wwn, fcportdb_t **lptr) +{ + fcparam *fcp; + int i; + + if (chan < isp->isp_nchan) { + fcp = FCPARAM(isp, chan); + for (i = 0; i < MAX_FC_TARG; i++) { + fcportdb_t *lp = &fcp->portdb[i]; + + if (lp->target_mode == 0) { + continue; + } + if (lp->port_wwn == wwn) { + *lptr = lp; + return (1); + } + } + } + return (0); +} + +int +isp_find_pdb_by_loopid(ispsoftc_t *isp, int chan, uint32_t loopid, fcportdb_t **lptr) +{ + fcparam *fcp; + int i; + + if (chan < isp->isp_nchan) { + fcp = FCPARAM(isp, chan); + for (i = 0; i < MAX_FC_TARG; i++) { + fcportdb_t *lp = &fcp->portdb[i]; + + if (lp->target_mode == 0) { + continue; + } + if (lp->handle == loopid) { + *lptr = lp; + return (1); + } + } + } + return (0); +} + +int +isp_find_pdb_by_sid(ispsoftc_t *isp, int chan, uint32_t sid, fcportdb_t **lptr) +{ + fcparam *fcp; + int i; + + if (chan >= isp->isp_nchan) { + return (0); + } + + fcp = FCPARAM(isp, chan); + for (i = 0; i < MAX_FC_TARG; i++) { + fcportdb_t *lp = &fcp->portdb[i]; + + if (lp->target_mode == 0) { + continue; + } + if (lp->portid == sid) { + *lptr = lp; + return (1); + } + } + return (0); +} + void +isp_find_chan_by_did(ispsoftc_t *isp, uint32_t did, uint16_t *cp) +{ + 
uint16_t chan; + + *cp = ISP_NOCHAN; + for (chan = 0; chan < isp->isp_nchan; chan++) { + fcparam *fcp = FCPARAM(isp, chan); + if ((fcp->role & ISP_ROLE_TARGET) == 0 || fcp->isp_fwstate != FW_READY || fcp->isp_loopstate < LOOP_PDB_RCVD) { + continue; + } + if (fcp->isp_portid == did) { + *cp = chan; + break; + } + } +} + +/* + * Add an initiator device to the port database + */ +void +isp_add_wwn_entry(ispsoftc_t *isp, int chan, uint64_t ini, uint16_t nphdl, uint32_t s_id) +{ + fcparam *fcp; + fcportdb_t *lp; + isp_notify_t nt; + int i; + + fcp = FCPARAM(isp, chan); + + if (nphdl >= MAX_NPORT_HANDLE) { + isp_prt(isp, ISP_LOGWARN, "%s: Chan %d IID 0x%016llx bad N-Port handle 0x%04x Port ID 0x%06x", + __func__, chan, (unsigned long long) ini, nphdl, s_id); + return; + } + + lp = NULL; + if (fcp->isp_tgt_map[nphdl]) { + lp = &fcp->portdb[fcp->isp_tgt_map[nphdl] - 1]; + } else { + /* + * Make sure the addition of a new target mode entry doesn't duplicate entries + * with the same N-Port handles, the same portids or the same Port WWN. 
+ */ + for (i = 0; i < MAX_FC_TARG; i++) { + lp = &fcp->portdb[i]; + if (lp->target_mode == 0) { + lp = NULL; + continue; + } + if (lp->handle == nphdl) { + break; + } + if (s_id != PORT_ANY && lp->portid == s_id) { + break; + } + if (VALID_INI(ini) && lp->port_wwn == ini) { + break; + } + lp = NULL; + } + + } + + if (lp) { + int something = 0; + if (lp->handle != nphdl) { + isp_prt(isp, ISP_LOGWARN, "%s: Chan %d attempt to re-enter N-port handle 0x%04x IID 0x%016llx Port ID 0x%06x finds IID 0x%016llx N-Port Handle 0x%04x Port ID 0x%06x", + __func__, chan, nphdl, (unsigned long long)ini, s_id, (unsigned long long) lp->port_wwn, lp->handle, lp->portid); + isp_dump_portdb(isp, chan); + return; + } + if (s_id != PORT_NONE) { + if (lp->portid == PORT_NONE) { + lp->portid = s_id; + isp_prt(isp, ISP_LOGTINFO, "%s: Chan %d N-port handle 0x%04x gets Port ID 0x%06x", __func__, chan, nphdl, s_id); + something++; + } else if (lp->portid != s_id) { + isp_prt(isp, ISP_LOGTINFO, "%s: Chan %d N-port handle 0x%04x tries to change Port ID 0x%06x to 0x%06x", __func__, chan, nphdl, + lp->portid, s_id); + isp_dump_portdb(isp, chan); + return; + } + } + if (VALID_INI(ini)) { + if (!VALID_INI(lp->port_wwn)) { + lp->port_wwn = ini; + isp_prt(isp, ISP_LOGTINFO, "%s: Chan %d N-port handle 0x%04x gets WWN 0x%016llxx", __func__, chan, nphdl, (unsigned long long) ini); + something++; + } else if (lp->port_wwn != ini) { + isp_prt(isp, ISP_LOGWARN, "%s: Chan %d N-port handle 0x%04x tries to change WWN 0x%016llx to 0x%016llx", __func__, chan, nphdl, + (unsigned long long) lp->port_wwn, (unsigned long long) ini); + isp_dump_portdb(isp, chan); + return; + } + } + + if (!something) { + isp_prt(isp, ISP_LOGWARN, "%s: Chan %d IID 0x%016llx N-Port Handle 0x%04x Port ID 0x%06x reentered", __func__, chan, + (unsigned long long) lp->port_wwn, lp->handle, lp->portid); + } + return; + } + + /* + * Find a new spot + */ + for (i = MAX_FC_TARG - 1; i >= 0; i--) { + if (fcp->portdb[i].target_mode == 1) { + 
continue; + } + if (fcp->portdb[i].state == FC_PORTDB_STATE_NIL) { + break; + } + } + if (i < 0) { + isp_prt(isp, ISP_LOGWARN, "%s: Chan %d IID 0x%016llx N-Port Handle 0x%04x Port ID 0x%06x- no room in port database", + __func__, chan, (unsigned long long) ini, nphdl, s_id); + return; + } + + lp = &fcp->portdb[i]; + ISP_MEMZERO(lp, sizeof (fcportdb_t)); + lp->target_mode = 1; + lp->handle = nphdl; + lp->portid = s_id; + lp->port_wwn = ini; + fcp->isp_tgt_map[nphdl] = i + 1; + + isp_prt(isp, ISP_LOGTINFO, "%s: Chan %d IID 0x%016llx N-Port Handle 0x%04x Port ID 0x%06x vtgt %d added", __func__, chan, (unsigned long long) ini, nphdl, s_id, fcp->isp_tgt_map[nphdl] - 1); + + ISP_MEMZERO(&nt, sizeof (nt)); + nt.nt_hba = isp; + nt.nt_wwn = ini; + nt.nt_tgt = FCPARAM(isp, chan)->isp_wwpn; + nt.nt_sid = s_id; + nt.nt_did = FCPARAM(isp, chan)->isp_portid; + nt.nt_nphdl = nphdl; + nt.nt_channel = chan; + nt.nt_ncode = NT_ARRIVED; + isp_async(isp, ISPASYNC_TARGET_NOTIFY, &nt); +} + +/* + * Remove a target device to the port database + */ +void +isp_del_wwn_entry(ispsoftc_t *isp, int chan, uint64_t ini, uint16_t nphdl, uint32_t s_id) +{ + fcparam *fcp; + isp_notify_t nt; + fcportdb_t *lp; + + if (nphdl >= MAX_NPORT_HANDLE) { + isp_prt(isp, ISP_LOGWARN, "%s: Chan %d IID 0x%016llx bad N-Port handle 0x%04x Port ID 0x%06x", + __func__, chan, (unsigned long long) ini, nphdl, s_id); + return; + } + + fcp = FCPARAM(isp, chan); + if (fcp->isp_tgt_map[nphdl] == 0) { + lp = NULL; + } else { + lp = &fcp->portdb[fcp->isp_tgt_map[nphdl] - 1]; + if (lp->target_mode == 0) { + lp = NULL; + } + } + if (lp == NULL) { + isp_prt(isp, ISP_LOGWARN, "%s: Chan %d IID 0x%016llx N-Port Handle 0x%04x Port ID 0x%06x cannot be found to be cleared", + __func__, chan, (unsigned long long) ini, nphdl, s_id); + isp_dump_portdb(isp, chan); + return; + } + isp_prt(isp, ISP_LOGTINFO, "%s: Chan %d IID 0x%016llx N-Port Handle 0x%04x Port ID 0x%06x vtgt %d cleared", + __func__, chan, (unsigned long long) 
lp->port_wwn, nphdl, lp->portid, fcp->isp_tgt_map[nphdl] - 1); + fcp->isp_tgt_map[nphdl] = 0; + + ISP_MEMZERO(&nt, sizeof (nt)); + nt.nt_hba = isp; + nt.nt_wwn = lp->port_wwn; + nt.nt_tgt = FCPARAM(isp, chan)->isp_wwpn; + nt.nt_sid = lp->portid; + nt.nt_did = FCPARAM(isp, chan)->isp_portid; + nt.nt_nphdl = nphdl; + nt.nt_channel = chan; + nt.nt_ncode = NT_DEPARTED; + isp_async(isp, ISPASYNC_TARGET_NOTIFY, &nt); +} + +void +isp_del_all_wwn_entries(ispsoftc_t *isp, int chan) +{ + fcparam *fcp; + int i; + + if (!IS_FC(isp)) { + return; + } + + /* + * Handle iterations over all channels via recursion + */ + if (chan == ISP_NOCHAN) { + for (chan = 0; chan < isp->isp_nchan; chan++) { + isp_del_all_wwn_entries(isp, chan); + } + return; + } + + if (chan > isp->isp_nchan) { + return; + } + + fcp = FCPARAM(isp, chan); + if (fcp == NULL) { + return; + } + for (i = 0; i < MAX_NPORT_HANDLE; i++) { + if (fcp->isp_tgt_map[i]) { + fcportdb_t *lp = &fcp->portdb[fcp->isp_tgt_map[i] - 1]; + isp_del_wwn_entry(isp, chan, lp->port_wwn, lp->handle, lp->portid); + } + } +} + +void +isp_del_wwn_entries(ispsoftc_t *isp, isp_notify_t *mp) +{ + fcportdb_t *lp; + + /* + * Handle iterations over all channels via recursion + */ + if (mp->nt_channel == ISP_NOCHAN) { + for (mp->nt_channel = 0; mp->nt_channel < isp->isp_nchan; mp->nt_channel++) { + isp_del_wwn_entries(isp, mp); + } + mp->nt_channel = ISP_NOCHAN; + return; + } + + /* + * We have an entry which is only partially identified. + * + * It's only known by WWN, N-Port handle, or Port ID. + * We need to find the actual entry so we can delete it. 
+ */ + if (mp->nt_nphdl != NIL_HANDLE) { + if (isp_find_pdb_by_loopid(isp, mp->nt_channel, mp->nt_nphdl, &lp)) { + isp_del_wwn_entry(isp, mp->nt_channel, lp->port_wwn, lp->handle, lp->portid); + return; + } + } + if (mp->nt_wwn != INI_ANY) { + if (isp_find_pdb_by_wwn(isp, mp->nt_channel, mp->nt_wwn, &lp)) { + isp_del_wwn_entry(isp, mp->nt_channel, lp->port_wwn, lp->handle, lp->portid); + return; + } + } + if (mp->nt_sid != PORT_ANY && mp->nt_sid != PORT_NONE) { + if (isp_find_pdb_by_sid(isp, mp->nt_channel, mp->nt_sid, &lp)) { + isp_del_wwn_entry(isp, mp->nt_channel, lp->port_wwn, lp->handle, lp->portid); + return; + } + } + isp_prt(isp, ISP_LOGWARN, "%s: Chan %d unable to find entry to delete N-port handle 0x%04x initiator WWN 0x%016llx Port ID 0x%06x", __func__, + mp->nt_channel, mp->nt_nphdl, (unsigned long long) mp->nt_wwn, mp->nt_sid); +} + +void isp_put_atio(ispsoftc_t *isp, at_entry_t *src, at_entry_t *dst) { int i; isp_put_hdr(isp, &src->at_header, &dst->at_header); ISP_IOXPUT_16(isp, src->at_reserved, &dst->at_reserved); ISP_IOXPUT_16(isp, src->at_handle, &dst->at_handle); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->at_lun, &dst->at_iid); ISP_IOXPUT_8(isp, src->at_iid, &dst->at_lun); ISP_IOXPUT_8(isp, src->at_cdblen, &dst->at_tgt); ISP_IOXPUT_8(isp, src->at_tgt, &dst->at_cdblen); ISP_IOXPUT_8(isp, src->at_status, &dst->at_scsi_status); ISP_IOXPUT_8(isp, src->at_scsi_status, &dst->at_status); ISP_IOXPUT_8(isp, src->at_tag_val, &dst->at_tag_type); ISP_IOXPUT_8(isp, src->at_tag_type, &dst->at_tag_val); } else { ISP_IOXPUT_8(isp, src->at_lun, &dst->at_lun); ISP_IOXPUT_8(isp, src->at_iid, &dst->at_iid); ISP_IOXPUT_8(isp, src->at_cdblen, &dst->at_cdblen); ISP_IOXPUT_8(isp, src->at_tgt, &dst->at_tgt); ISP_IOXPUT_8(isp, src->at_status, &dst->at_status); - ISP_IOXPUT_8(isp, src->at_scsi_status, - &dst->at_scsi_status); + ISP_IOXPUT_8(isp, src->at_scsi_status, &dst->at_scsi_status); ISP_IOXPUT_8(isp, src->at_tag_val, &dst->at_tag_val); ISP_IOXPUT_8(isp, 
src->at_tag_type, &dst->at_tag_type); } ISP_IOXPUT_32(isp, src->at_flags, &dst->at_flags); for (i = 0; i < ATIO_CDBLEN; i++) { ISP_IOXPUT_8(isp, src->at_cdb[i], &dst->at_cdb[i]); } for (i = 0; i < QLTM_SENSELEN; i++) { ISP_IOXPUT_8(isp, src->at_sense[i], &dst->at_sense[i]); } } void isp_get_atio(ispsoftc_t *isp, at_entry_t *src, at_entry_t *dst) { int i; isp_get_hdr(isp, &src->at_header, &dst->at_header); ISP_IOXGET_16(isp, &src->at_reserved, dst->at_reserved); ISP_IOXGET_16(isp, &src->at_handle, dst->at_handle); if (ISP_IS_SBUS(isp)) { ISP_IOXGET_8(isp, &src->at_lun, dst->at_iid); ISP_IOXGET_8(isp, &src->at_iid, dst->at_lun); ISP_IOXGET_8(isp, &src->at_cdblen, dst->at_tgt); ISP_IOXGET_8(isp, &src->at_tgt, dst->at_cdblen); ISP_IOXGET_8(isp, &src->at_status, dst->at_scsi_status); ISP_IOXGET_8(isp, &src->at_scsi_status, dst->at_status); ISP_IOXGET_8(isp, &src->at_tag_val, dst->at_tag_type); ISP_IOXGET_8(isp, &src->at_tag_type, dst->at_tag_val); } else { ISP_IOXGET_8(isp, &src->at_lun, dst->at_lun); ISP_IOXGET_8(isp, &src->at_iid, dst->at_iid); ISP_IOXGET_8(isp, &src->at_cdblen, dst->at_cdblen); ISP_IOXGET_8(isp, &src->at_tgt, dst->at_tgt); ISP_IOXGET_8(isp, &src->at_status, dst->at_status); - ISP_IOXGET_8(isp, &src->at_scsi_status, - dst->at_scsi_status); + ISP_IOXGET_8(isp, &src->at_scsi_status, dst->at_scsi_status); ISP_IOXGET_8(isp, &src->at_tag_val, dst->at_tag_val); ISP_IOXGET_8(isp, &src->at_tag_type, dst->at_tag_type); } ISP_IOXGET_32(isp, &src->at_flags, dst->at_flags); for (i = 0; i < ATIO_CDBLEN; i++) { ISP_IOXGET_8(isp, &src->at_cdb[i], dst->at_cdb[i]); } for (i = 0; i < QLTM_SENSELEN; i++) { ISP_IOXGET_8(isp, &src->at_sense[i], dst->at_sense[i]); } } void isp_put_atio2(ispsoftc_t *isp, at2_entry_t *src, at2_entry_t *dst) { int i; isp_put_hdr(isp, &src->at_header, &dst->at_header); ISP_IOXPUT_32(isp, src->at_reserved, &dst->at_reserved); ISP_IOXPUT_8(isp, src->at_lun, &dst->at_lun); ISP_IOXPUT_8(isp, src->at_iid, &dst->at_iid); ISP_IOXPUT_16(isp, 
src->at_rxid, &dst->at_rxid); ISP_IOXPUT_16(isp, src->at_flags, &dst->at_flags); ISP_IOXPUT_16(isp, src->at_status, &dst->at_status); ISP_IOXPUT_8(isp, src->at_crn, &dst->at_crn); ISP_IOXPUT_8(isp, src->at_taskcodes, &dst->at_taskcodes); ISP_IOXPUT_8(isp, src->at_taskflags, &dst->at_taskflags); ISP_IOXPUT_8(isp, src->at_execodes, &dst->at_execodes); for (i = 0; i < ATIO2_CDBLEN; i++) { ISP_IOXPUT_8(isp, src->at_cdb[i], &dst->at_cdb[i]); } ISP_IOXPUT_32(isp, src->at_datalen, &dst->at_datalen); ISP_IOXPUT_16(isp, src->at_scclun, &dst->at_scclun); for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->at_wwpn[i], &dst->at_wwpn[i]); } for (i = 0; i < 6; i++) { - ISP_IOXPUT_16(isp, src->at_reserved2[i], - &dst->at_reserved2[i]); + ISP_IOXPUT_16(isp, src->at_reserved2[i], &dst->at_reserved2[i]); } ISP_IOXPUT_16(isp, src->at_oxid, &dst->at_oxid); } void isp_put_atio2e(ispsoftc_t *isp, at2e_entry_t *src, at2e_entry_t *dst) { int i; isp_put_hdr(isp, &src->at_header, &dst->at_header); ISP_IOXPUT_32(isp, src->at_reserved, &dst->at_reserved); ISP_IOXPUT_16(isp, src->at_iid, &dst->at_iid); ISP_IOXPUT_16(isp, src->at_rxid, &dst->at_rxid); ISP_IOXPUT_16(isp, src->at_flags, &dst->at_flags); ISP_IOXPUT_16(isp, src->at_status, &dst->at_status); ISP_IOXPUT_8(isp, src->at_crn, &dst->at_crn); ISP_IOXPUT_8(isp, src->at_taskcodes, &dst->at_taskcodes); ISP_IOXPUT_8(isp, src->at_taskflags, &dst->at_taskflags); ISP_IOXPUT_8(isp, src->at_execodes, &dst->at_execodes); for (i = 0; i < ATIO2_CDBLEN; i++) { ISP_IOXPUT_8(isp, src->at_cdb[i], &dst->at_cdb[i]); } ISP_IOXPUT_32(isp, src->at_datalen, &dst->at_datalen); ISP_IOXPUT_16(isp, src->at_scclun, &dst->at_scclun); for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->at_wwpn[i], &dst->at_wwpn[i]); } for (i = 0; i < 6; i++) { - ISP_IOXPUT_16(isp, src->at_reserved2[i], - &dst->at_reserved2[i]); + ISP_IOXPUT_16(isp, src->at_reserved2[i], &dst->at_reserved2[i]); } ISP_IOXPUT_16(isp, src->at_oxid, &dst->at_oxid); } void isp_get_atio2(ispsoftc_t *isp, 
at2_entry_t *src, at2_entry_t *dst) { int i; isp_get_hdr(isp, &src->at_header, &dst->at_header); ISP_IOXGET_32(isp, &src->at_reserved, dst->at_reserved); ISP_IOXGET_8(isp, &src->at_lun, dst->at_lun); ISP_IOXGET_8(isp, &src->at_iid, dst->at_iid); ISP_IOXGET_16(isp, &src->at_rxid, dst->at_rxid); ISP_IOXGET_16(isp, &src->at_flags, dst->at_flags); ISP_IOXGET_16(isp, &src->at_status, dst->at_status); ISP_IOXGET_8(isp, &src->at_crn, dst->at_crn); ISP_IOXGET_8(isp, &src->at_taskcodes, dst->at_taskcodes); ISP_IOXGET_8(isp, &src->at_taskflags, dst->at_taskflags); ISP_IOXGET_8(isp, &src->at_execodes, dst->at_execodes); for (i = 0; i < ATIO2_CDBLEN; i++) { ISP_IOXGET_8(isp, &src->at_cdb[i], dst->at_cdb[i]); } ISP_IOXGET_32(isp, &src->at_datalen, dst->at_datalen); ISP_IOXGET_16(isp, &src->at_scclun, dst->at_scclun); for (i = 0; i < 4; i++) { ISP_IOXGET_16(isp, &src->at_wwpn[i], dst->at_wwpn[i]); } for (i = 0; i < 6; i++) { - ISP_IOXGET_16(isp, &src->at_reserved2[i], - dst->at_reserved2[i]); + ISP_IOXGET_16(isp, &src->at_reserved2[i], dst->at_reserved2[i]); } ISP_IOXGET_16(isp, &src->at_oxid, dst->at_oxid); } void isp_get_atio2e(ispsoftc_t *isp, at2e_entry_t *src, at2e_entry_t *dst) { int i; isp_get_hdr(isp, &src->at_header, &dst->at_header); ISP_IOXGET_32(isp, &src->at_reserved, dst->at_reserved); ISP_IOXGET_16(isp, &src->at_iid, dst->at_iid); ISP_IOXGET_16(isp, &src->at_rxid, dst->at_rxid); ISP_IOXGET_16(isp, &src->at_flags, dst->at_flags); ISP_IOXGET_16(isp, &src->at_status, dst->at_status); ISP_IOXGET_8(isp, &src->at_crn, dst->at_crn); ISP_IOXGET_8(isp, &src->at_taskcodes, dst->at_taskcodes); ISP_IOXGET_8(isp, &src->at_taskflags, dst->at_taskflags); ISP_IOXGET_8(isp, &src->at_execodes, dst->at_execodes); for (i = 0; i < ATIO2_CDBLEN; i++) { ISP_IOXGET_8(isp, &src->at_cdb[i], dst->at_cdb[i]); } ISP_IOXGET_32(isp, &src->at_datalen, dst->at_datalen); ISP_IOXGET_16(isp, &src->at_scclun, dst->at_scclun); for (i = 0; i < 4; i++) { ISP_IOXGET_16(isp, &src->at_wwpn[i], 
dst->at_wwpn[i]); } for (i = 0; i < 6; i++) { - ISP_IOXGET_16(isp, &src->at_reserved2[i], - dst->at_reserved2[i]); + ISP_IOXGET_16(isp, &src->at_reserved2[i], dst->at_reserved2[i]); } ISP_IOXGET_16(isp, &src->at_oxid, dst->at_oxid); } void isp_get_atio7(ispsoftc_t *isp, at7_entry_t *src, at7_entry_t *dst) { ISP_IOXGET_8(isp, &src->at_type, dst->at_type); ISP_IOXGET_8(isp, &src->at_count, dst->at_count); ISP_IOXGET_16(isp, &src->at_ta_len, dst->at_ta_len); ISP_IOXGET_32(isp, &src->at_rxid, dst->at_rxid); isp_get_fc_hdr(isp, &src->at_hdr, &dst->at_hdr); isp_get_fcp_cmnd_iu(isp, &src->at_cmnd, &dst->at_cmnd); } void isp_put_ctio(ispsoftc_t *isp, ct_entry_t *src, ct_entry_t *dst) { int i; isp_put_hdr(isp, &src->ct_header, &dst->ct_header); ISP_IOXPUT_16(isp, src->ct_syshandle, &dst->ct_syshandle); ISP_IOXPUT_16(isp, src->ct_fwhandle, &dst->ct_fwhandle); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->ct_iid, &dst->ct_lun); ISP_IOXPUT_8(isp, src->ct_lun, &dst->ct_iid); ISP_IOXPUT_8(isp, src->ct_tgt, &dst->ct_reserved2); ISP_IOXPUT_8(isp, src->ct_reserved2, &dst->ct_tgt); ISP_IOXPUT_8(isp, src->ct_status, &dst->ct_scsi_status); ISP_IOXPUT_8(isp, src->ct_scsi_status, &dst->ct_status); ISP_IOXPUT_8(isp, src->ct_tag_type, &dst->ct_tag_val); ISP_IOXPUT_8(isp, src->ct_tag_val, &dst->ct_tag_type); } else { ISP_IOXPUT_8(isp, src->ct_iid, &dst->ct_iid); ISP_IOXPUT_8(isp, src->ct_lun, &dst->ct_lun); ISP_IOXPUT_8(isp, src->ct_tgt, &dst->ct_tgt); ISP_IOXPUT_8(isp, src->ct_reserved2, &dst->ct_reserved2); ISP_IOXPUT_8(isp, src->ct_scsi_status, &dst->ct_scsi_status); ISP_IOXPUT_8(isp, src->ct_status, &dst->ct_status); ISP_IOXPUT_8(isp, src->ct_tag_type, &dst->ct_tag_type); ISP_IOXPUT_8(isp, src->ct_tag_val, &dst->ct_tag_val); } ISP_IOXPUT_32(isp, src->ct_flags, &dst->ct_flags); ISP_IOXPUT_32(isp, src->ct_xfrlen, &dst->ct_xfrlen); ISP_IOXPUT_32(isp, src->ct_resid, &dst->ct_resid); ISP_IOXPUT_16(isp, src->ct_timeout, &dst->ct_timeout); ISP_IOXPUT_16(isp, src->ct_seg_count, 
&dst->ct_seg_count); for (i = 0; i < ISP_RQDSEG; i++) { - ISP_IOXPUT_32(isp, src->ct_dataseg[i].ds_base, - &dst->ct_dataseg[i].ds_base); - ISP_IOXPUT_32(isp, src->ct_dataseg[i].ds_count, - &dst->ct_dataseg[i].ds_count); + ISP_IOXPUT_32(isp, src->ct_dataseg[i].ds_base, &dst->ct_dataseg[i].ds_base); + ISP_IOXPUT_32(isp, src->ct_dataseg[i].ds_count, &dst->ct_dataseg[i].ds_count); } } void isp_get_ctio(ispsoftc_t *isp, ct_entry_t *src, ct_entry_t *dst) { int i; isp_get_hdr(isp, &src->ct_header, &dst->ct_header); ISP_IOXGET_16(isp, &src->ct_syshandle, dst->ct_syshandle); ISP_IOXGET_16(isp, &src->ct_fwhandle, dst->ct_fwhandle); if (ISP_IS_SBUS(isp)) { ISP_IOXGET_8(isp, &src->ct_lun, dst->ct_iid); ISP_IOXGET_8(isp, &src->ct_iid, dst->ct_lun); ISP_IOXGET_8(isp, &src->ct_reserved2, dst->ct_tgt); ISP_IOXGET_8(isp, &src->ct_tgt, dst->ct_reserved2); ISP_IOXGET_8(isp, &src->ct_status, dst->ct_scsi_status); ISP_IOXGET_8(isp, &src->ct_scsi_status, dst->ct_status); ISP_IOXGET_8(isp, &src->ct_tag_val, dst->ct_tag_type); ISP_IOXGET_8(isp, &src->ct_tag_type, dst->ct_tag_val); } else { ISP_IOXGET_8(isp, &src->ct_lun, dst->ct_lun); ISP_IOXGET_8(isp, &src->ct_iid, dst->ct_iid); ISP_IOXGET_8(isp, &src->ct_reserved2, dst->ct_reserved2); ISP_IOXGET_8(isp, &src->ct_tgt, dst->ct_tgt); ISP_IOXGET_8(isp, &src->ct_status, dst->ct_status); - ISP_IOXGET_8(isp, &src->ct_scsi_status, - dst->ct_scsi_status); + ISP_IOXGET_8(isp, &src->ct_scsi_status, dst->ct_scsi_status); ISP_IOXGET_8(isp, &src->ct_tag_val, dst->ct_tag_val); ISP_IOXGET_8(isp, &src->ct_tag_type, dst->ct_tag_type); } ISP_IOXGET_32(isp, &src->ct_flags, dst->ct_flags); ISP_IOXGET_32(isp, &src->ct_xfrlen, dst->ct_xfrlen); ISP_IOXGET_32(isp, &src->ct_resid, dst->ct_resid); ISP_IOXGET_16(isp, &src->ct_timeout, dst->ct_timeout); ISP_IOXGET_16(isp, &src->ct_seg_count, dst->ct_seg_count); for (i = 0; i < ISP_RQDSEG; i++) { - ISP_IOXGET_32(isp, - &src->ct_dataseg[i].ds_base, - dst->ct_dataseg[i].ds_base); - ISP_IOXGET_32(isp, - 
&src->ct_dataseg[i].ds_count, - dst->ct_dataseg[i].ds_count); + ISP_IOXGET_32(isp, &src->ct_dataseg[i].ds_base, dst->ct_dataseg[i].ds_base); + ISP_IOXGET_32(isp, &src->ct_dataseg[i].ds_count, dst->ct_dataseg[i].ds_count); } } void isp_put_ctio2(ispsoftc_t *isp, ct2_entry_t *src, ct2_entry_t *dst) { int i; isp_put_hdr(isp, &src->ct_header, &dst->ct_header); ISP_IOXPUT_32(isp, src->ct_syshandle, &dst->ct_syshandle); ISP_IOXPUT_8(isp, src->ct_lun, &dst->ct_lun); ISP_IOXPUT_8(isp, src->ct_iid, &dst->ct_iid); ISP_IOXPUT_16(isp, src->ct_rxid, &dst->ct_rxid); ISP_IOXPUT_16(isp, src->ct_flags, &dst->ct_flags); ISP_IOXPUT_16(isp, src->ct_timeout, &dst->ct_timeout); ISP_IOXPUT_16(isp, src->ct_seg_count, &dst->ct_seg_count); ISP_IOXPUT_32(isp, src->ct_resid, &dst->ct_resid); ISP_IOXPUT_32(isp, src->ct_reloff, &dst->ct_reloff); if ((src->ct_flags & CT2_FLAG_MMASK) == CT2_FLAG_MODE0) { - ISP_IOXPUT_32(isp, src->rsp.m0._reserved, - &dst->rsp.m0._reserved); - ISP_IOXPUT_16(isp, src->rsp.m0._reserved2, - &dst->rsp.m0._reserved2); - ISP_IOXPUT_16(isp, src->rsp.m0.ct_scsi_status, - &dst->rsp.m0.ct_scsi_status); - ISP_IOXPUT_32(isp, src->rsp.m0.ct_xfrlen, - &dst->rsp.m0.ct_xfrlen); + ISP_IOXPUT_32(isp, src->rsp.m0._reserved, &dst->rsp.m0._reserved); + ISP_IOXPUT_16(isp, src->rsp.m0._reserved2, &dst->rsp.m0._reserved2); + ISP_IOXPUT_16(isp, src->rsp.m0.ct_scsi_status, &dst->rsp.m0.ct_scsi_status); + ISP_IOXPUT_32(isp, src->rsp.m0.ct_xfrlen, &dst->rsp.m0.ct_xfrlen); if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO2) { for (i = 0; i < ISP_RQDSEG_T2; i++) { - ISP_IOXPUT_32(isp, - src->rsp.m0.u.ct_dataseg[i].ds_base, - &dst->rsp.m0.u.ct_dataseg[i].ds_base); - ISP_IOXPUT_32(isp, - src->rsp.m0.u.ct_dataseg[i].ds_count, - &dst->rsp.m0.u.ct_dataseg[i].ds_count); + ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg[i].ds_base, &dst->rsp.m0.u.ct_dataseg[i].ds_base); + ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg[i].ds_count, &dst->rsp.m0.u.ct_dataseg[i].ds_count); } } else if 
(src->ct_header.rqs_entry_type == RQSTYPE_CTIO3) { for (i = 0; i < ISP_RQDSEG_T3; i++) { - ISP_IOXPUT_32(isp, - src->rsp.m0.u.ct_dataseg64[i].ds_base, - &dst->rsp.m0.u.ct_dataseg64[i].ds_base); - ISP_IOXPUT_32(isp, - src->rsp.m0.u.ct_dataseg64[i].ds_basehi, - &dst->rsp.m0.u.ct_dataseg64[i].ds_basehi); - ISP_IOXPUT_32(isp, - src->rsp.m0.u.ct_dataseg64[i].ds_count, - &dst->rsp.m0.u.ct_dataseg64[i].ds_count); + ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg64[i].ds_base, &dst->rsp.m0.u.ct_dataseg64[i].ds_base); + ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg64[i].ds_basehi, &dst->rsp.m0.u.ct_dataseg64[i].ds_basehi); + ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg64[i].ds_count, &dst->rsp.m0.u.ct_dataseg64[i].ds_count); } } else if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO4) { - ISP_IOXPUT_16(isp, src->rsp.m0.u.ct_dslist.ds_type, - &dst->rsp.m0.u.ct_dslist.ds_type); - ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dslist.ds_segment, + ISP_IOXPUT_16(isp, src->rsp.m0.u.ct_dslist.ds_type, &dst->rsp.m0.u.ct_dslist.ds_type); ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dslist.ds_segment, &dst->rsp.m0.u.ct_dslist.ds_segment); - ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dslist.ds_base, - &dst->rsp.m0.u.ct_dslist.ds_base); + ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dslist.ds_base, &dst->rsp.m0.u.ct_dslist.ds_base); } } else if ((src->ct_flags & CT2_FLAG_MMASK) == CT2_FLAG_MODE1) { - ISP_IOXPUT_16(isp, src->rsp.m1._reserved, - &dst->rsp.m1._reserved); - ISP_IOXPUT_16(isp, src->rsp.m1._reserved2, - &dst->rsp.m1._reserved2); - ISP_IOXPUT_16(isp, src->rsp.m1.ct_senselen, - &dst->rsp.m1.ct_senselen); - ISP_IOXPUT_16(isp, src->rsp.m1.ct_scsi_status, - &dst->rsp.m1.ct_scsi_status); - ISP_IOXPUT_16(isp, src->rsp.m1.ct_resplen, - &dst->rsp.m1.ct_resplen); + ISP_IOXPUT_16(isp, src->rsp.m1._reserved, &dst->rsp.m1._reserved); + ISP_IOXPUT_16(isp, src->rsp.m1._reserved2, &dst->rsp.m1._reserved2); + ISP_IOXPUT_16(isp, src->rsp.m1.ct_senselen, &dst->rsp.m1.ct_senselen); + ISP_IOXPUT_16(isp, src->rsp.m1.ct_scsi_status, 
&dst->rsp.m1.ct_scsi_status); + ISP_IOXPUT_16(isp, src->rsp.m1.ct_resplen, &dst->rsp.m1.ct_resplen); for (i = 0; i < MAXRESPLEN; i++) { - ISP_IOXPUT_8(isp, src->rsp.m1.ct_resp[i], - &dst->rsp.m1.ct_resp[i]); + ISP_IOXPUT_8(isp, src->rsp.m1.ct_resp[i], &dst->rsp.m1.ct_resp[i]); } } else { - ISP_IOXPUT_32(isp, src->rsp.m2._reserved, - &dst->rsp.m2._reserved); - ISP_IOXPUT_16(isp, src->rsp.m2._reserved2, - &dst->rsp.m2._reserved2); - ISP_IOXPUT_16(isp, src->rsp.m2._reserved3, - &dst->rsp.m2._reserved3); - ISP_IOXPUT_32(isp, src->rsp.m2.ct_datalen, - &dst->rsp.m2.ct_datalen); - ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_base, - &dst->rsp.m2.ct_fcp_rsp_iudata.ds_base); - ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_count, - &dst->rsp.m2.ct_fcp_rsp_iudata.ds_count); + ISP_IOXPUT_32(isp, src->rsp.m2._reserved, &dst->rsp.m2._reserved); + ISP_IOXPUT_16(isp, src->rsp.m2._reserved2, &dst->rsp.m2._reserved2); + ISP_IOXPUT_16(isp, src->rsp.m2._reserved3, &dst->rsp.m2._reserved3); + ISP_IOXPUT_32(isp, src->rsp.m2.ct_datalen, &dst->rsp.m2.ct_datalen); + ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_base, &dst->rsp.m2.ct_fcp_rsp_iudata.ds_base); + ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_count, &dst->rsp.m2.ct_fcp_rsp_iudata.ds_count); } } void isp_put_ctio2e(ispsoftc_t *isp, ct2e_entry_t *src, ct2e_entry_t *dst) { int i; isp_put_hdr(isp, &src->ct_header, &dst->ct_header); ISP_IOXPUT_32(isp, src->ct_syshandle, &dst->ct_syshandle); ISP_IOXPUT_16(isp, src->ct_iid, &dst->ct_iid); ISP_IOXPUT_16(isp, src->ct_rxid, &dst->ct_rxid); ISP_IOXPUT_16(isp, src->ct_flags, &dst->ct_flags); ISP_IOXPUT_16(isp, src->ct_timeout, &dst->ct_timeout); ISP_IOXPUT_16(isp, src->ct_seg_count, &dst->ct_seg_count); ISP_IOXPUT_32(isp, src->ct_resid, &dst->ct_resid); ISP_IOXPUT_32(isp, src->ct_reloff, &dst->ct_reloff); if ((src->ct_flags & CT2_FLAG_MMASK) == CT2_FLAG_MODE0) { - ISP_IOXPUT_32(isp, src->rsp.m0._reserved, - &dst->rsp.m0._reserved); - ISP_IOXPUT_16(isp, 
src->rsp.m0._reserved2, - &dst->rsp.m0._reserved2); - ISP_IOXPUT_16(isp, src->rsp.m0.ct_scsi_status, - &dst->rsp.m0.ct_scsi_status); - ISP_IOXPUT_32(isp, src->rsp.m0.ct_xfrlen, - &dst->rsp.m0.ct_xfrlen); + ISP_IOXPUT_32(isp, src->rsp.m0._reserved, &dst->rsp.m0._reserved); + ISP_IOXPUT_16(isp, src->rsp.m0._reserved2, &dst->rsp.m0._reserved2); + ISP_IOXPUT_16(isp, src->rsp.m0.ct_scsi_status, &dst->rsp.m0.ct_scsi_status); + ISP_IOXPUT_32(isp, src->rsp.m0.ct_xfrlen, &dst->rsp.m0.ct_xfrlen); if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO2) { for (i = 0; i < ISP_RQDSEG_T2; i++) { - ISP_IOXPUT_32(isp, - src->rsp.m0.u.ct_dataseg[i].ds_base, - &dst->rsp.m0.u.ct_dataseg[i].ds_base); - ISP_IOXPUT_32(isp, - src->rsp.m0.u.ct_dataseg[i].ds_count, - &dst->rsp.m0.u.ct_dataseg[i].ds_count); + ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg[i].ds_base, &dst->rsp.m0.u.ct_dataseg[i].ds_base); + ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg[i].ds_count, &dst->rsp.m0.u.ct_dataseg[i].ds_count); } } else if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO3) { for (i = 0; i < ISP_RQDSEG_T3; i++) { - ISP_IOXPUT_32(isp, - src->rsp.m0.u.ct_dataseg64[i].ds_base, - &dst->rsp.m0.u.ct_dataseg64[i].ds_base); - ISP_IOXPUT_32(isp, - src->rsp.m0.u.ct_dataseg64[i].ds_basehi, - &dst->rsp.m0.u.ct_dataseg64[i].ds_basehi); - ISP_IOXPUT_32(isp, - src->rsp.m0.u.ct_dataseg64[i].ds_count, - &dst->rsp.m0.u.ct_dataseg64[i].ds_count); + ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg64[i].ds_base, &dst->rsp.m0.u.ct_dataseg64[i].ds_base); + ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg64[i].ds_basehi, &dst->rsp.m0.u.ct_dataseg64[i].ds_basehi); + ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg64[i].ds_count, &dst->rsp.m0.u.ct_dataseg64[i].ds_count); } } else if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO4) { - ISP_IOXPUT_16(isp, src->rsp.m0.u.ct_dslist.ds_type, - &dst->rsp.m0.u.ct_dslist.ds_type); - ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dslist.ds_segment, - &dst->rsp.m0.u.ct_dslist.ds_segment); - ISP_IOXPUT_32(isp, 
src->rsp.m0.u.ct_dslist.ds_base, - &dst->rsp.m0.u.ct_dslist.ds_base); + ISP_IOXPUT_16(isp, src->rsp.m0.u.ct_dslist.ds_type, &dst->rsp.m0.u.ct_dslist.ds_type); + ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dslist.ds_segment, &dst->rsp.m0.u.ct_dslist.ds_segment); + ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dslist.ds_base, &dst->rsp.m0.u.ct_dslist.ds_base); } } else if ((src->ct_flags & CT2_FLAG_MMASK) == CT2_FLAG_MODE1) { - ISP_IOXPUT_16(isp, src->rsp.m1._reserved, - &dst->rsp.m1._reserved); - ISP_IOXPUT_16(isp, src->rsp.m1._reserved2, - &dst->rsp.m1._reserved2); - ISP_IOXPUT_16(isp, src->rsp.m1.ct_senselen, - &dst->rsp.m1.ct_senselen); - ISP_IOXPUT_16(isp, src->rsp.m1.ct_scsi_status, - &dst->rsp.m1.ct_scsi_status); - ISP_IOXPUT_16(isp, src->rsp.m1.ct_resplen, - &dst->rsp.m1.ct_resplen); + ISP_IOXPUT_16(isp, src->rsp.m1._reserved, &dst->rsp.m1._reserved); + ISP_IOXPUT_16(isp, src->rsp.m1._reserved2, &dst->rsp.m1._reserved2); + ISP_IOXPUT_16(isp, src->rsp.m1.ct_senselen, &dst->rsp.m1.ct_senselen); + ISP_IOXPUT_16(isp, src->rsp.m1.ct_scsi_status, &dst->rsp.m1.ct_scsi_status); + ISP_IOXPUT_16(isp, src->rsp.m1.ct_resplen, &dst->rsp.m1.ct_resplen); for (i = 0; i < MAXRESPLEN; i++) { - ISP_IOXPUT_8(isp, src->rsp.m1.ct_resp[i], - &dst->rsp.m1.ct_resp[i]); + ISP_IOXPUT_8(isp, src->rsp.m1.ct_resp[i], &dst->rsp.m1.ct_resp[i]); } } else { - ISP_IOXPUT_32(isp, src->rsp.m2._reserved, - &dst->rsp.m2._reserved); - ISP_IOXPUT_16(isp, src->rsp.m2._reserved2, - &dst->rsp.m2._reserved2); - ISP_IOXPUT_16(isp, src->rsp.m2._reserved3, - &dst->rsp.m2._reserved3); - ISP_IOXPUT_32(isp, src->rsp.m2.ct_datalen, - &dst->rsp.m2.ct_datalen); - ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_base, - &dst->rsp.m2.ct_fcp_rsp_iudata.ds_base); - ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_count, - &dst->rsp.m2.ct_fcp_rsp_iudata.ds_count); + ISP_IOXPUT_32(isp, src->rsp.m2._reserved, &dst->rsp.m2._reserved); + ISP_IOXPUT_16(isp, src->rsp.m2._reserved2, &dst->rsp.m2._reserved2); + ISP_IOXPUT_16(isp, 
src->rsp.m2._reserved3, &dst->rsp.m2._reserved3); + ISP_IOXPUT_32(isp, src->rsp.m2.ct_datalen, &dst->rsp.m2.ct_datalen); + ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_base, &dst->rsp.m2.ct_fcp_rsp_iudata.ds_base); + ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_count, &dst->rsp.m2.ct_fcp_rsp_iudata.ds_count); } } void isp_put_ctio7(ispsoftc_t *isp, ct7_entry_t *src, ct7_entry_t *dst) { int i; isp_put_hdr(isp, &src->ct_header, &dst->ct_header); ISP_IOXPUT_32(isp, src->ct_syshandle, &dst->ct_syshandle); ISP_IOXPUT_16(isp, src->ct_nphdl, &dst->ct_nphdl); ISP_IOXPUT_16(isp, src->ct_timeout, &dst->ct_timeout); ISP_IOXPUT_16(isp, src->ct_seg_count, &dst->ct_seg_count); - ISP_IOXPUT_8(isp, src->ct_vpindex, &dst->ct_vpindex); + ISP_IOXPUT_8(isp, src->ct_vpidx, &dst->ct_vpidx); ISP_IOXPUT_8(isp, src->ct_xflags, &dst->ct_xflags); ISP_IOXPUT_16(isp, src->ct_iid_lo, &dst->ct_iid_lo); ISP_IOXPUT_8(isp, src->ct_iid_hi, &dst->ct_iid_hi); ISP_IOXPUT_8(isp, src->ct_reserved, &dst->ct_reserved); ISP_IOXPUT_32(isp, src->ct_rxid, &dst->ct_rxid); ISP_IOXPUT_16(isp, src->ct_senselen, &dst->ct_senselen); ISP_IOXPUT_16(isp, src->ct_flags, &dst->ct_flags); ISP_IOXPUT_32(isp, src->ct_resid, &dst->ct_resid); ISP_IOXPUT_16(isp, src->ct_oxid, &dst->ct_oxid); ISP_IOXPUT_16(isp, src->ct_scsi_status, &dst->ct_scsi_status); if ((dst->ct_flags & CT7_FLAG_MMASK) == CT7_FLAG_MODE0) { ISP_IOXPUT_32(isp, src->rsp.m0.reloff, &dst->rsp.m0.reloff); - ISP_IOXPUT_32(isp, src->rsp.m0.reserved0, - &dst->rsp.m0.reserved0); - ISP_IOXPUT_32(isp, src->rsp.m0.ct_xfrlen, - &dst->rsp.m0.ct_xfrlen); - ISP_IOXPUT_32(isp, src->rsp.m0.reserved1, - &dst->rsp.m0.reserved1); - ISP_IOXPUT_32(isp, src->rsp.m0.ds.ds_base, - &dst->rsp.m0.ds.ds_base); - ISP_IOXPUT_32(isp, src->rsp.m0.ds.ds_basehi, - &dst->rsp.m0.ds.ds_basehi); - ISP_IOXPUT_32(isp, src->rsp.m0.ds.ds_count, - &dst->rsp.m0.ds.ds_count); + ISP_IOXPUT_32(isp, src->rsp.m0.reserved0, &dst->rsp.m0.reserved0); + ISP_IOXPUT_32(isp, src->rsp.m0.ct_xfrlen, 
&dst->rsp.m0.ct_xfrlen); + ISP_IOXPUT_32(isp, src->rsp.m0.reserved1, &dst->rsp.m0.reserved1); + ISP_IOXPUT_32(isp, src->rsp.m0.ds.ds_base, &dst->rsp.m0.ds.ds_base); + ISP_IOXPUT_32(isp, src->rsp.m0.ds.ds_basehi, &dst->rsp.m0.ds.ds_basehi); + ISP_IOXPUT_32(isp, src->rsp.m0.ds.ds_count, &dst->rsp.m0.ds.ds_count); } else if ((dst->ct_flags & CT7_FLAG_MMASK) == CT7_FLAG_MODE1) { - ISP_IOXPUT_16(isp, src->rsp.m1.ct_resplen, - &dst->rsp.m1.ct_resplen); + uint32_t *a, *b; + + ISP_IOXPUT_16(isp, src->rsp.m1.ct_resplen, &dst->rsp.m1.ct_resplen); ISP_IOXPUT_16(isp, src->rsp.m1.reserved, &dst->rsp.m1.reserved); - for (i = 0; i < MAXRESPLEN_24XX; i++) { - ISP_IOXPUT_8(isp, src->rsp.m1.ct_resp[i], - &dst->rsp.m1.ct_resp[i]); + a = (uint32_t *) src->rsp.m1.ct_resp; + b = (uint32_t *) dst->rsp.m1.ct_resp; + for (i = 0; i < (ASIZE(src->rsp.m1.ct_resp) >> 2); i++) { + *b++ = ISP_SWAP32(isp, *a++); } } else { - ISP_IOXPUT_32(isp, src->rsp.m2.reserved0, - &dst->rsp.m2.reserved0); - ISP_IOXPUT_32(isp, src->rsp.m2.ct_datalen, - &dst->rsp.m2.ct_datalen); - ISP_IOXPUT_32(isp, src->rsp.m2.reserved1, - &dst->rsp.m2.reserved1); - ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_base, - &dst->rsp.m2.ct_fcp_rsp_iudata.ds_base); - ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_basehi, - &dst->rsp.m2.ct_fcp_rsp_iudata.ds_basehi); - ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_count, - &dst->rsp.m2.ct_fcp_rsp_iudata.ds_count); + ISP_IOXPUT_32(isp, src->rsp.m2.reserved0, &dst->rsp.m2.reserved0); + ISP_IOXPUT_32(isp, src->rsp.m2.ct_datalen, &dst->rsp.m2.ct_datalen); + ISP_IOXPUT_32(isp, src->rsp.m2.reserved1, &dst->rsp.m2.reserved1); + ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_base, &dst->rsp.m2.ct_fcp_rsp_iudata.ds_base); + ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_basehi, &dst->rsp.m2.ct_fcp_rsp_iudata.ds_basehi); + ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_count, &dst->rsp.m2.ct_fcp_rsp_iudata.ds_count); } } void isp_get_ctio2(ispsoftc_t *isp, 
ct2_entry_t *src, ct2_entry_t *dst) { int i; isp_get_hdr(isp, &src->ct_header, &dst->ct_header); ISP_IOXGET_32(isp, &src->ct_syshandle, dst->ct_syshandle); ISP_IOXGET_8(isp, &src->ct_lun, dst->ct_lun); ISP_IOXGET_8(isp, &src->ct_iid, dst->ct_iid); ISP_IOXGET_16(isp, &src->ct_rxid, dst->ct_rxid); ISP_IOXGET_16(isp, &src->ct_flags, dst->ct_flags); ISP_IOXGET_16(isp, &src->ct_status, dst->ct_status); ISP_IOXGET_16(isp, &src->ct_timeout, dst->ct_timeout); ISP_IOXGET_16(isp, &src->ct_seg_count, dst->ct_seg_count); ISP_IOXGET_32(isp, &src->ct_reloff, dst->ct_reloff); ISP_IOXGET_32(isp, &src->ct_resid, dst->ct_resid); if ((dst->ct_flags & CT2_FLAG_MMASK) == CT2_FLAG_MODE0) { - ISP_IOXGET_32(isp, &src->rsp.m0._reserved, - dst->rsp.m0._reserved); - ISP_IOXGET_16(isp, &src->rsp.m0._reserved2, - dst->rsp.m0._reserved2); - ISP_IOXGET_16(isp, &src->rsp.m0.ct_scsi_status, - dst->rsp.m0.ct_scsi_status); - ISP_IOXGET_32(isp, &src->rsp.m0.ct_xfrlen, - dst->rsp.m0.ct_xfrlen); + ISP_IOXGET_32(isp, &src->rsp.m0._reserved, dst->rsp.m0._reserved); + ISP_IOXGET_16(isp, &src->rsp.m0._reserved2, dst->rsp.m0._reserved2); + ISP_IOXGET_16(isp, &src->rsp.m0.ct_scsi_status, dst->rsp.m0.ct_scsi_status); + ISP_IOXGET_32(isp, &src->rsp.m0.ct_xfrlen, dst->rsp.m0.ct_xfrlen); if (dst->ct_header.rqs_entry_type == RQSTYPE_CTIO2) { for (i = 0; i < ISP_RQDSEG_T2; i++) { - ISP_IOXGET_32(isp, - &src->rsp.m0.u.ct_dataseg[i].ds_base, - dst->rsp.m0.u.ct_dataseg[i].ds_base); - ISP_IOXGET_32(isp, - &src->rsp.m0.u.ct_dataseg[i].ds_count, - dst->rsp.m0.u.ct_dataseg[i].ds_count); + ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg[i].ds_base, dst->rsp.m0.u.ct_dataseg[i].ds_base); + ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg[i].ds_count, dst->rsp.m0.u.ct_dataseg[i].ds_count); } } else if (dst->ct_header.rqs_entry_type == RQSTYPE_CTIO3) { for (i = 0; i < ISP_RQDSEG_T3; i++) { - ISP_IOXGET_32(isp, - &src->rsp.m0.u.ct_dataseg64[i].ds_base, - dst->rsp.m0.u.ct_dataseg64[i].ds_base); - ISP_IOXGET_32(isp, - 
&src->rsp.m0.u.ct_dataseg64[i].ds_basehi, - dst->rsp.m0.u.ct_dataseg64[i].ds_basehi); - ISP_IOXGET_32(isp, - &src->rsp.m0.u.ct_dataseg64[i].ds_count, - dst->rsp.m0.u.ct_dataseg64[i].ds_count); + ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg64[i].ds_base, dst->rsp.m0.u.ct_dataseg64[i].ds_base); + ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg64[i].ds_basehi, dst->rsp.m0.u.ct_dataseg64[i].ds_basehi); + ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg64[i].ds_count, dst->rsp.m0.u.ct_dataseg64[i].ds_count); } } else if (dst->ct_header.rqs_entry_type == RQSTYPE_CTIO4) { - ISP_IOXGET_16(isp, &src->rsp.m0.u.ct_dslist.ds_type, - dst->rsp.m0.u.ct_dslist.ds_type); - ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dslist.ds_segment, - dst->rsp.m0.u.ct_dslist.ds_segment); - ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dslist.ds_base, - dst->rsp.m0.u.ct_dslist.ds_base); + ISP_IOXGET_16(isp, &src->rsp.m0.u.ct_dslist.ds_type, dst->rsp.m0.u.ct_dslist.ds_type); + ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dslist.ds_segment, dst->rsp.m0.u.ct_dslist.ds_segment); + ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dslist.ds_base, dst->rsp.m0.u.ct_dslist.ds_base); } } else if ((dst->ct_flags & CT2_FLAG_MMASK) == CT2_FLAG_MODE1) { - ISP_IOXGET_16(isp, &src->rsp.m1._reserved, - dst->rsp.m1._reserved); - ISP_IOXGET_16(isp, &src->rsp.m1._reserved2, - dst->rsp.m1._reserved2); - ISP_IOXGET_16(isp, &src->rsp.m1.ct_senselen, - dst->rsp.m1.ct_senselen); - ISP_IOXGET_16(isp, &src->rsp.m1.ct_scsi_status, - dst->rsp.m1.ct_scsi_status); - ISP_IOXGET_16(isp, &src->rsp.m1.ct_resplen, - dst->rsp.m1.ct_resplen); + ISP_IOXGET_16(isp, &src->rsp.m1._reserved, dst->rsp.m1._reserved); + ISP_IOXGET_16(isp, &src->rsp.m1._reserved2, dst->rsp.m1._reserved2); + ISP_IOXGET_16(isp, &src->rsp.m1.ct_senselen, dst->rsp.m1.ct_senselen); + ISP_IOXGET_16(isp, &src->rsp.m1.ct_scsi_status, dst->rsp.m1.ct_scsi_status); + ISP_IOXGET_16(isp, &src->rsp.m1.ct_resplen, dst->rsp.m1.ct_resplen); for (i = 0; i < MAXRESPLEN; i++) { - ISP_IOXGET_8(isp, &src->rsp.m1.ct_resp[i], 
- dst->rsp.m1.ct_resp[i]); + ISP_IOXGET_8(isp, &src->rsp.m1.ct_resp[i], dst->rsp.m1.ct_resp[i]); } } else { - ISP_IOXGET_32(isp, &src->rsp.m2._reserved, - dst->rsp.m2._reserved); - ISP_IOXGET_16(isp, &src->rsp.m2._reserved2, - dst->rsp.m2._reserved2); - ISP_IOXGET_16(isp, &src->rsp.m2._reserved3, - dst->rsp.m2._reserved3); - ISP_IOXGET_32(isp, &src->rsp.m2.ct_datalen, - dst->rsp.m2.ct_datalen); - ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_base, - dst->rsp.m2.ct_fcp_rsp_iudata.ds_base); - ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_count, - dst->rsp.m2.ct_fcp_rsp_iudata.ds_count); + ISP_IOXGET_32(isp, &src->rsp.m2._reserved, dst->rsp.m2._reserved); + ISP_IOXGET_16(isp, &src->rsp.m2._reserved2, dst->rsp.m2._reserved2); + ISP_IOXGET_16(isp, &src->rsp.m2._reserved3, dst->rsp.m2._reserved3); + ISP_IOXGET_32(isp, &src->rsp.m2.ct_datalen, dst->rsp.m2.ct_datalen); + ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_base, dst->rsp.m2.ct_fcp_rsp_iudata.ds_base); + ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_count, dst->rsp.m2.ct_fcp_rsp_iudata.ds_count); } } void isp_get_ctio2e(ispsoftc_t *isp, ct2e_entry_t *src, ct2e_entry_t *dst) { int i; isp_get_hdr(isp, &src->ct_header, &dst->ct_header); ISP_IOXGET_32(isp, &src->ct_syshandle, dst->ct_syshandle); ISP_IOXGET_16(isp, &src->ct_iid, dst->ct_iid); ISP_IOXGET_16(isp, &src->ct_rxid, dst->ct_rxid); ISP_IOXGET_16(isp, &src->ct_flags, dst->ct_flags); ISP_IOXGET_16(isp, &src->ct_status, dst->ct_status); ISP_IOXGET_16(isp, &src->ct_timeout, dst->ct_timeout); ISP_IOXGET_16(isp, &src->ct_seg_count, dst->ct_seg_count); ISP_IOXGET_32(isp, &src->ct_reloff, dst->ct_reloff); ISP_IOXGET_32(isp, &src->ct_resid, dst->ct_resid); if ((dst->ct_flags & CT2_FLAG_MMASK) == CT2_FLAG_MODE0) { - ISP_IOXGET_32(isp, &src->rsp.m0._reserved, - dst->rsp.m0._reserved); - ISP_IOXGET_16(isp, &src->rsp.m0._reserved2, - dst->rsp.m0._reserved2); - ISP_IOXGET_16(isp, &src->rsp.m0.ct_scsi_status, - dst->rsp.m0.ct_scsi_status); - 
ISP_IOXGET_32(isp, &src->rsp.m0.ct_xfrlen, - dst->rsp.m0.ct_xfrlen); + ISP_IOXGET_32(isp, &src->rsp.m0._reserved, dst->rsp.m0._reserved); + ISP_IOXGET_16(isp, &src->rsp.m0._reserved2, dst->rsp.m0._reserved2); + ISP_IOXGET_16(isp, &src->rsp.m0.ct_scsi_status, dst->rsp.m0.ct_scsi_status); + ISP_IOXGET_32(isp, &src->rsp.m0.ct_xfrlen, dst->rsp.m0.ct_xfrlen); if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO2) { for (i = 0; i < ISP_RQDSEG_T2; i++) { - ISP_IOXGET_32(isp, - &src->rsp.m0.u.ct_dataseg[i].ds_base, - dst->rsp.m0.u.ct_dataseg[i].ds_base); - ISP_IOXGET_32(isp, - &src->rsp.m0.u.ct_dataseg[i].ds_count, - dst->rsp.m0.u.ct_dataseg[i].ds_count); + ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg[i].ds_base, dst->rsp.m0.u.ct_dataseg[i].ds_base); + ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg[i].ds_count, dst->rsp.m0.u.ct_dataseg[i].ds_count); } } else if (dst->ct_header.rqs_entry_type == RQSTYPE_CTIO3) { for (i = 0; i < ISP_RQDSEG_T3; i++) { - ISP_IOXGET_32(isp, - &src->rsp.m0.u.ct_dataseg64[i].ds_base, - dst->rsp.m0.u.ct_dataseg64[i].ds_base); - ISP_IOXGET_32(isp, - &src->rsp.m0.u.ct_dataseg64[i].ds_basehi, - dst->rsp.m0.u.ct_dataseg64[i].ds_basehi); - ISP_IOXGET_32(isp, - &src->rsp.m0.u.ct_dataseg64[i].ds_count, - dst->rsp.m0.u.ct_dataseg64[i].ds_count); + ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg64[i].ds_base, dst->rsp.m0.u.ct_dataseg64[i].ds_base); + ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg64[i].ds_basehi, dst->rsp.m0.u.ct_dataseg64[i].ds_basehi); + ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg64[i].ds_count, dst->rsp.m0.u.ct_dataseg64[i].ds_count); } } else if (dst->ct_header.rqs_entry_type == RQSTYPE_CTIO4) { - ISP_IOXGET_16(isp, &src->rsp.m0.u.ct_dslist.ds_type, - dst->rsp.m0.u.ct_dslist.ds_type); - ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dslist.ds_segment, - dst->rsp.m0.u.ct_dslist.ds_segment); - ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dslist.ds_base, - dst->rsp.m0.u.ct_dslist.ds_base); + ISP_IOXGET_16(isp, &src->rsp.m0.u.ct_dslist.ds_type, 
dst->rsp.m0.u.ct_dslist.ds_type); + ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dslist.ds_segment, dst->rsp.m0.u.ct_dslist.ds_segment); + ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dslist.ds_base, dst->rsp.m0.u.ct_dslist.ds_base); } } else if ((dst->ct_flags & CT2_FLAG_MMASK) == CT2_FLAG_MODE1) { - ISP_IOXGET_16(isp, &src->rsp.m1._reserved, - dst->rsp.m1._reserved); - ISP_IOXGET_16(isp, &src->rsp.m1._reserved2, - dst->rsp.m1._reserved2); - ISP_IOXGET_16(isp, &src->rsp.m1.ct_senselen, - dst->rsp.m1.ct_senselen); - ISP_IOXGET_16(isp, &src->rsp.m1.ct_scsi_status, - dst->rsp.m1.ct_scsi_status); - ISP_IOXGET_16(isp, &src->rsp.m1.ct_resplen, - dst->rsp.m1.ct_resplen); + ISP_IOXGET_16(isp, &src->rsp.m1._reserved, dst->rsp.m1._reserved); + ISP_IOXGET_16(isp, &src->rsp.m1._reserved2, dst->rsp.m1._reserved2); + ISP_IOXGET_16(isp, &src->rsp.m1.ct_senselen, dst->rsp.m1.ct_senselen); + ISP_IOXGET_16(isp, &src->rsp.m1.ct_scsi_status, dst->rsp.m1.ct_scsi_status); + ISP_IOXGET_16(isp, &src->rsp.m1.ct_resplen, dst->rsp.m1.ct_resplen); for (i = 0; i < MAXRESPLEN; i++) { - ISP_IOXGET_8(isp, &src->rsp.m1.ct_resp[i], - dst->rsp.m1.ct_resp[i]); + ISP_IOXGET_8(isp, &src->rsp.m1.ct_resp[i], dst->rsp.m1.ct_resp[i]); } } else { - ISP_IOXGET_32(isp, &src->rsp.m2._reserved, - dst->rsp.m2._reserved); - ISP_IOXGET_16(isp, &src->rsp.m2._reserved2, - dst->rsp.m2._reserved2); - ISP_IOXGET_16(isp, &src->rsp.m2._reserved3, - dst->rsp.m2._reserved3); - ISP_IOXGET_32(isp, &src->rsp.m2.ct_datalen, - dst->rsp.m2.ct_datalen); - ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_base, - dst->rsp.m2.ct_fcp_rsp_iudata.ds_base); - ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_count, - dst->rsp.m2.ct_fcp_rsp_iudata.ds_count); + ISP_IOXGET_32(isp, &src->rsp.m2._reserved, dst->rsp.m2._reserved); + ISP_IOXGET_16(isp, &src->rsp.m2._reserved2, dst->rsp.m2._reserved2); + ISP_IOXGET_16(isp, &src->rsp.m2._reserved3, dst->rsp.m2._reserved3); + ISP_IOXGET_32(isp, &src->rsp.m2.ct_datalen, dst->rsp.m2.ct_datalen); + 
ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_base, dst->rsp.m2.ct_fcp_rsp_iudata.ds_base); + ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_count, dst->rsp.m2.ct_fcp_rsp_iudata.ds_count); } } void isp_get_ctio7(ispsoftc_t *isp, ct7_entry_t *src, ct7_entry_t *dst) { int i; isp_get_hdr(isp, &src->ct_header, &dst->ct_header); ISP_IOXGET_32(isp, &src->ct_syshandle, dst->ct_syshandle); ISP_IOXGET_16(isp, &src->ct_nphdl, dst->ct_nphdl); ISP_IOXGET_16(isp, &src->ct_timeout, dst->ct_timeout); ISP_IOXGET_16(isp, &src->ct_seg_count, dst->ct_seg_count); - ISP_IOXGET_8(isp, &src->ct_vpindex, dst->ct_vpindex); + ISP_IOXGET_8(isp, &src->ct_vpidx, dst->ct_vpidx); ISP_IOXGET_8(isp, &src->ct_xflags, dst->ct_xflags); ISP_IOXGET_16(isp, &src->ct_iid_lo, dst->ct_iid_lo); ISP_IOXGET_8(isp, &src->ct_iid_hi, dst->ct_iid_hi); ISP_IOXGET_8(isp, &src->ct_reserved, dst->ct_reserved); ISP_IOXGET_32(isp, &src->ct_rxid, dst->ct_rxid); ISP_IOXGET_16(isp, &src->ct_senselen, dst->ct_senselen); ISP_IOXGET_16(isp, &src->ct_flags, dst->ct_flags); ISP_IOXGET_32(isp, &src->ct_resid, dst->ct_resid); ISP_IOXGET_16(isp, &src->ct_oxid, dst->ct_oxid); ISP_IOXGET_16(isp, &src->ct_scsi_status, dst->ct_scsi_status); if ((dst->ct_flags & CT7_FLAG_MMASK) == CT7_FLAG_MODE0) { ISP_IOXGET_32(isp, &src->rsp.m0.reloff, dst->rsp.m0.reloff); - ISP_IOXGET_32(isp, &src->rsp.m0.reserved0, - dst->rsp.m0.reserved0); - ISP_IOXGET_32(isp, &src->rsp.m0.ct_xfrlen, - dst->rsp.m0.ct_xfrlen); - ISP_IOXGET_32(isp, &src->rsp.m0.reserved1, - dst->rsp.m0.reserved1); - ISP_IOXGET_32(isp, &src->rsp.m0.ds.ds_base, - dst->rsp.m0.ds.ds_base); - ISP_IOXGET_32(isp, &src->rsp.m0.ds.ds_basehi, - dst->rsp.m0.ds.ds_basehi); - ISP_IOXGET_32(isp, &src->rsp.m0.ds.ds_count, - dst->rsp.m0.ds.ds_count); + ISP_IOXGET_32(isp, &src->rsp.m0.reserved0, dst->rsp.m0.reserved0); + ISP_IOXGET_32(isp, &src->rsp.m0.ct_xfrlen, dst->rsp.m0.ct_xfrlen); + ISP_IOXGET_32(isp, &src->rsp.m0.reserved1, dst->rsp.m0.reserved1); + ISP_IOXGET_32(isp, 
&src->rsp.m0.ds.ds_base, dst->rsp.m0.ds.ds_base); + ISP_IOXGET_32(isp, &src->rsp.m0.ds.ds_basehi, dst->rsp.m0.ds.ds_basehi); + ISP_IOXGET_32(isp, &src->rsp.m0.ds.ds_count, dst->rsp.m0.ds.ds_count); } else if ((dst->ct_flags & CT7_FLAG_MMASK) == CT7_FLAG_MODE1) { - ISP_IOXGET_16(isp, &src->rsp.m1.ct_resplen, - dst->rsp.m1.ct_resplen); + uint32_t *a, *b; + + ISP_IOXGET_16(isp, &src->rsp.m1.ct_resplen, dst->rsp.m1.ct_resplen); ISP_IOXGET_16(isp, &src->rsp.m1.reserved, dst->rsp.m1.reserved); + a = (uint32_t *) src->rsp.m1.ct_resp; + b = (uint32_t *) dst->rsp.m1.ct_resp; for (i = 0; i < MAXRESPLEN_24XX; i++) { - ISP_IOXGET_8(isp, &src->rsp.m1.ct_resp[i], - dst->rsp.m1.ct_resp[i]); + ISP_IOXGET_8(isp, &src->rsp.m1.ct_resp[i], dst->rsp.m1.ct_resp[i]); } + for (i = 0; i < (ASIZE(src->rsp.m1.ct_resp) >> 2); i++) { + *b++ = ISP_SWAP32(isp, *a++); + } } else { - ISP_IOXGET_32(isp, &src->rsp.m2.reserved0, - dst->rsp.m2.reserved0); - ISP_IOXGET_32(isp, &src->rsp.m2.ct_datalen, - dst->rsp.m2.ct_datalen); - ISP_IOXGET_32(isp, &src->rsp.m2.reserved1, - dst->rsp.m2.reserved1); - ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_base, - dst->rsp.m2.ct_fcp_rsp_iudata.ds_base); - ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_basehi, - dst->rsp.m2.ct_fcp_rsp_iudata.ds_basehi); - ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_count, - dst->rsp.m2.ct_fcp_rsp_iudata.ds_count); + ISP_IOXGET_32(isp, &src->rsp.m2.reserved0, dst->rsp.m2.reserved0); + ISP_IOXGET_32(isp, &src->rsp.m2.ct_datalen, dst->rsp.m2.ct_datalen); + ISP_IOXGET_32(isp, &src->rsp.m2.reserved1, dst->rsp.m2.reserved1); + ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_base, dst->rsp.m2.ct_fcp_rsp_iudata.ds_base); + ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_basehi, dst->rsp.m2.ct_fcp_rsp_iudata.ds_basehi); + ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_count, dst->rsp.m2.ct_fcp_rsp_iudata.ds_count); } } void isp_put_enable_lun(ispsoftc_t *isp, lun_entry_t *lesrc, lun_entry_t *ledst) { 
int i; isp_put_hdr(isp, &lesrc->le_header, &ledst->le_header); ISP_IOXPUT_32(isp, lesrc->le_reserved, &ledst->le_reserved); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, lesrc->le_lun, &ledst->le_rsvd); ISP_IOXPUT_8(isp, lesrc->le_rsvd, &ledst->le_lun); ISP_IOXPUT_8(isp, lesrc->le_ops, &ledst->le_tgt); ISP_IOXPUT_8(isp, lesrc->le_tgt, &ledst->le_ops); ISP_IOXPUT_8(isp, lesrc->le_status, &ledst->le_reserved2); ISP_IOXPUT_8(isp, lesrc->le_reserved2, &ledst->le_status); ISP_IOXPUT_8(isp, lesrc->le_cmd_count, &ledst->le_in_count); ISP_IOXPUT_8(isp, lesrc->le_in_count, &ledst->le_cmd_count); ISP_IOXPUT_8(isp, lesrc->le_cdb6len, &ledst->le_cdb7len); ISP_IOXPUT_8(isp, lesrc->le_cdb7len, &ledst->le_cdb6len); } else { ISP_IOXPUT_8(isp, lesrc->le_lun, &ledst->le_lun); ISP_IOXPUT_8(isp, lesrc->le_rsvd, &ledst->le_rsvd); ISP_IOXPUT_8(isp, lesrc->le_ops, &ledst->le_ops); ISP_IOXPUT_8(isp, lesrc->le_tgt, &ledst->le_tgt); ISP_IOXPUT_8(isp, lesrc->le_status, &ledst->le_status); ISP_IOXPUT_8(isp, lesrc->le_reserved2, &ledst->le_reserved2); ISP_IOXPUT_8(isp, lesrc->le_cmd_count, &ledst->le_cmd_count); ISP_IOXPUT_8(isp, lesrc->le_in_count, &ledst->le_in_count); ISP_IOXPUT_8(isp, lesrc->le_cdb6len, &ledst->le_cdb6len); ISP_IOXPUT_8(isp, lesrc->le_cdb7len, &ledst->le_cdb7len); } ISP_IOXPUT_32(isp, lesrc->le_flags, &ledst->le_flags); ISP_IOXPUT_16(isp, lesrc->le_timeout, &ledst->le_timeout); for (i = 0; i < 20; i++) { - ISP_IOXPUT_8(isp, lesrc->le_reserved3[i], - &ledst->le_reserved3[i]); + ISP_IOXPUT_8(isp, lesrc->le_reserved3[i], &ledst->le_reserved3[i]); } } void isp_get_enable_lun(ispsoftc_t *isp, lun_entry_t *lesrc, lun_entry_t *ledst) { int i; isp_get_hdr(isp, &lesrc->le_header, &ledst->le_header); ISP_IOXGET_32(isp, &lesrc->le_reserved, ledst->le_reserved); if (ISP_IS_SBUS(isp)) { ISP_IOXGET_8(isp, &lesrc->le_lun, ledst->le_rsvd); ISP_IOXGET_8(isp, &lesrc->le_rsvd, ledst->le_lun); ISP_IOXGET_8(isp, &lesrc->le_ops, ledst->le_tgt); ISP_IOXGET_8(isp, &lesrc->le_tgt, ledst->le_ops); 
ISP_IOXGET_8(isp, &lesrc->le_status, ledst->le_reserved2); ISP_IOXGET_8(isp, &lesrc->le_reserved2, ledst->le_status); ISP_IOXGET_8(isp, &lesrc->le_cmd_count, ledst->le_in_count); ISP_IOXGET_8(isp, &lesrc->le_in_count, ledst->le_cmd_count); ISP_IOXGET_8(isp, &lesrc->le_cdb6len, ledst->le_cdb7len); ISP_IOXGET_8(isp, &lesrc->le_cdb7len, ledst->le_cdb6len); } else { ISP_IOXGET_8(isp, &lesrc->le_lun, ledst->le_lun); ISP_IOXGET_8(isp, &lesrc->le_rsvd, ledst->le_rsvd); ISP_IOXGET_8(isp, &lesrc->le_ops, ledst->le_ops); ISP_IOXGET_8(isp, &lesrc->le_tgt, ledst->le_tgt); ISP_IOXGET_8(isp, &lesrc->le_status, ledst->le_status); ISP_IOXGET_8(isp, &lesrc->le_reserved2, ledst->le_reserved2); ISP_IOXGET_8(isp, &lesrc->le_cmd_count, ledst->le_cmd_count); ISP_IOXGET_8(isp, &lesrc->le_in_count, ledst->le_in_count); ISP_IOXGET_8(isp, &lesrc->le_cdb6len, ledst->le_cdb6len); ISP_IOXGET_8(isp, &lesrc->le_cdb7len, ledst->le_cdb7len); } ISP_IOXGET_32(isp, &lesrc->le_flags, ledst->le_flags); ISP_IOXGET_16(isp, &lesrc->le_timeout, ledst->le_timeout); for (i = 0; i < 20; i++) { - ISP_IOXGET_8(isp, &lesrc->le_reserved3[i], - ledst->le_reserved3[i]); + ISP_IOXGET_8(isp, &lesrc->le_reserved3[i], ledst->le_reserved3[i]); } } void isp_put_notify(ispsoftc_t *isp, in_entry_t *src, in_entry_t *dst) { int i; isp_put_hdr(isp, &src->in_header, &dst->in_header); ISP_IOXPUT_32(isp, src->in_reserved, &dst->in_reserved); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->in_lun, &dst->in_iid); ISP_IOXPUT_8(isp, src->in_iid, &dst->in_lun); ISP_IOXPUT_8(isp, src->in_reserved2, &dst->in_tgt); ISP_IOXPUT_8(isp, src->in_tgt, &dst->in_reserved2); ISP_IOXPUT_8(isp, src->in_status, &dst->in_rsvd2); ISP_IOXPUT_8(isp, src->in_rsvd2, &dst->in_status); ISP_IOXPUT_8(isp, src->in_tag_val, &dst->in_tag_type); ISP_IOXPUT_8(isp, src->in_tag_type, &dst->in_tag_val); } else { ISP_IOXPUT_8(isp, src->in_lun, &dst->in_lun); ISP_IOXPUT_8(isp, src->in_iid, &dst->in_iid); ISP_IOXPUT_8(isp, src->in_reserved2, &dst->in_reserved2); 
ISP_IOXPUT_8(isp, src->in_tgt, &dst->in_tgt); ISP_IOXPUT_8(isp, src->in_status, &dst->in_status); ISP_IOXPUT_8(isp, src->in_rsvd2, &dst->in_rsvd2); ISP_IOXPUT_8(isp, src->in_tag_val, &dst->in_tag_val); ISP_IOXPUT_8(isp, src->in_tag_type, &dst->in_tag_type); } ISP_IOXPUT_32(isp, src->in_flags, &dst->in_flags); ISP_IOXPUT_16(isp, src->in_seqid, &dst->in_seqid); for (i = 0; i < IN_MSGLEN; i++) { ISP_IOXPUT_8(isp, src->in_msg[i], &dst->in_msg[i]); } for (i = 0; i < IN_RSVDLEN; i++) { - ISP_IOXPUT_8(isp, src->in_reserved3[i], - &dst->in_reserved3[i]); + ISP_IOXPUT_8(isp, src->in_reserved3[i], &dst->in_reserved3[i]); } for (i = 0; i < QLTM_SENSELEN; i++) { - ISP_IOXPUT_8(isp, src->in_sense[i], - &dst->in_sense[i]); + ISP_IOXPUT_8(isp, src->in_sense[i], &dst->in_sense[i]); } } void isp_get_notify(ispsoftc_t *isp, in_entry_t *src, in_entry_t *dst) { int i; isp_get_hdr(isp, &src->in_header, &dst->in_header); ISP_IOXGET_32(isp, &src->in_reserved, dst->in_reserved); if (ISP_IS_SBUS(isp)) { ISP_IOXGET_8(isp, &src->in_lun, dst->in_iid); ISP_IOXGET_8(isp, &src->in_iid, dst->in_lun); ISP_IOXGET_8(isp, &src->in_reserved2, dst->in_tgt); ISP_IOXGET_8(isp, &src->in_tgt, dst->in_reserved2); ISP_IOXGET_8(isp, &src->in_status, dst->in_rsvd2); ISP_IOXGET_8(isp, &src->in_rsvd2, dst->in_status); ISP_IOXGET_8(isp, &src->in_tag_val, dst->in_tag_type); ISP_IOXGET_8(isp, &src->in_tag_type, dst->in_tag_val); } else { ISP_IOXGET_8(isp, &src->in_lun, dst->in_lun); ISP_IOXGET_8(isp, &src->in_iid, dst->in_iid); ISP_IOXGET_8(isp, &src->in_reserved2, dst->in_reserved2); ISP_IOXGET_8(isp, &src->in_tgt, dst->in_tgt); ISP_IOXGET_8(isp, &src->in_status, dst->in_status); ISP_IOXGET_8(isp, &src->in_rsvd2, dst->in_rsvd2); ISP_IOXGET_8(isp, &src->in_tag_val, dst->in_tag_val); ISP_IOXGET_8(isp, &src->in_tag_type, dst->in_tag_type); } ISP_IOXGET_32(isp, &src->in_flags, dst->in_flags); ISP_IOXGET_16(isp, &src->in_seqid, dst->in_seqid); for (i = 0; i < IN_MSGLEN; i++) { ISP_IOXGET_8(isp, &src->in_msg[i], 
dst->in_msg[i]); } for (i = 0; i < IN_RSVDLEN; i++) { - ISP_IOXGET_8(isp, &src->in_reserved3[i], - dst->in_reserved3[i]); + ISP_IOXGET_8(isp, &src->in_reserved3[i], dst->in_reserved3[i]); } for (i = 0; i < QLTM_SENSELEN; i++) { - ISP_IOXGET_8(isp, &src->in_sense[i], - dst->in_sense[i]); + ISP_IOXGET_8(isp, &src->in_sense[i], dst->in_sense[i]); } } void -isp_put_notify_fc(ispsoftc_t *isp, in_fcentry_t *src, - in_fcentry_t *dst) +isp_put_notify_fc(ispsoftc_t *isp, in_fcentry_t *src, in_fcentry_t *dst) { isp_put_hdr(isp, &src->in_header, &dst->in_header); ISP_IOXPUT_32(isp, src->in_reserved, &dst->in_reserved); ISP_IOXPUT_8(isp, src->in_lun, &dst->in_lun); ISP_IOXPUT_8(isp, src->in_iid, &dst->in_iid); ISP_IOXPUT_16(isp, src->in_scclun, &dst->in_scclun); ISP_IOXPUT_32(isp, src->in_reserved2, &dst->in_reserved2); ISP_IOXPUT_16(isp, src->in_status, &dst->in_status); ISP_IOXPUT_16(isp, src->in_task_flags, &dst->in_task_flags); ISP_IOXPUT_16(isp, src->in_seqid, &dst->in_seqid); } void -isp_put_notify_fc_e(ispsoftc_t *isp, in_fcentry_e_t *src, - in_fcentry_e_t *dst) +isp_put_notify_fc_e(ispsoftc_t *isp, in_fcentry_e_t *src, in_fcentry_e_t *dst) { isp_put_hdr(isp, &src->in_header, &dst->in_header); ISP_IOXPUT_32(isp, src->in_reserved, &dst->in_reserved); ISP_IOXPUT_16(isp, src->in_iid, &dst->in_iid); ISP_IOXPUT_16(isp, src->in_scclun, &dst->in_scclun); ISP_IOXPUT_32(isp, src->in_reserved2, &dst->in_reserved2); ISP_IOXPUT_16(isp, src->in_status, &dst->in_status); ISP_IOXPUT_16(isp, src->in_task_flags, &dst->in_task_flags); ISP_IOXPUT_16(isp, src->in_seqid, &dst->in_seqid); } void -isp_put_notify_24xx(ispsoftc_t *isp, in_fcentry_24xx_t *src, - in_fcentry_24xx_t *dst) +isp_put_notify_24xx(ispsoftc_t *isp, in_fcentry_24xx_t *src, in_fcentry_24xx_t *dst) { int i; isp_put_hdr(isp, &src->in_header, &dst->in_header); ISP_IOXPUT_32(isp, src->in_reserved, &dst->in_reserved); ISP_IOXPUT_16(isp, src->in_nphdl, &dst->in_nphdl); ISP_IOXPUT_16(isp, src->in_reserved1, &dst->in_reserved1); 
ISP_IOXPUT_16(isp, src->in_flags, &dst->in_flags); ISP_IOXPUT_16(isp, src->in_srr_rxid, &dst->in_srr_rxid); ISP_IOXPUT_16(isp, src->in_status, &dst->in_status); ISP_IOXPUT_8(isp, src->in_status_subcode, &dst->in_status_subcode); ISP_IOXPUT_16(isp, src->in_reserved2, &dst->in_reserved2); ISP_IOXPUT_32(isp, src->in_rxid, &dst->in_rxid); ISP_IOXPUT_16(isp, src->in_srr_reloff_hi, &dst->in_srr_reloff_hi); ISP_IOXPUT_16(isp, src->in_srr_reloff_lo, &dst->in_srr_reloff_lo); ISP_IOXPUT_16(isp, src->in_srr_iu, &dst->in_srr_iu); ISP_IOXPUT_16(isp, src->in_srr_oxid, &dst->in_srr_oxid); - for (i = 0; i < 18; i++) { - ISP_IOXPUT_8(isp, src->in_reserved3[i], &dst->in_reserved3[i]); + ISP_IOXPUT_16(isp, src->in_nport_id_hi, &dst->in_nport_id_hi); + ISP_IOXPUT_8(isp, src->in_nport_id_lo, &dst->in_nport_id_lo); + ISP_IOXPUT_8(isp, src->in_reserved3, &dst->in_reserved3); + ISP_IOXPUT_16(isp, src->in_np_handle, &dst->in_np_handle); + for (i = 0; i < ASIZE(src->in_reserved4); i++) { + ISP_IOXPUT_8(isp, src->in_reserved4[i], &dst->in_reserved4[i]); } - ISP_IOXPUT_8(isp, src->in_reserved4, &dst->in_reserved4); - ISP_IOXPUT_8(isp, src->in_vpindex, &dst->in_vpindex); - ISP_IOXPUT_32(isp, src->in_reserved5, &dst->in_reserved5); + ISP_IOXPUT_8(isp, src->in_reserved5, &dst->in_reserved5); + ISP_IOXPUT_8(isp, src->in_vpidx, &dst->in_vpidx); + ISP_IOXPUT_32(isp, src->in_reserved6, &dst->in_reserved6); ISP_IOXPUT_16(isp, src->in_portid_lo, &dst->in_portid_lo); ISP_IOXPUT_8(isp, src->in_portid_hi, &dst->in_portid_hi); - ISP_IOXPUT_8(isp, src->in_reserved6, &dst->in_reserved6); - ISP_IOXPUT_16(isp, src->in_reserved7, &dst->in_reserved7); + ISP_IOXPUT_8(isp, src->in_reserved7, &dst->in_reserved7); + ISP_IOXPUT_16(isp, src->in_reserved8, &dst->in_reserved8); ISP_IOXPUT_16(isp, src->in_oxid, &dst->in_oxid); } void -isp_get_notify_fc(ispsoftc_t *isp, in_fcentry_t *src, - in_fcentry_t *dst) +isp_get_notify_fc(ispsoftc_t *isp, in_fcentry_t *src, in_fcentry_t *dst) { isp_get_hdr(isp, &src->in_header, 
&dst->in_header); ISP_IOXGET_32(isp, &src->in_reserved, dst->in_reserved); ISP_IOXGET_8(isp, &src->in_lun, dst->in_lun); ISP_IOXGET_8(isp, &src->in_iid, dst->in_iid); ISP_IOXGET_16(isp, &src->in_scclun, dst->in_scclun); ISP_IOXGET_32(isp, &src->in_reserved2, dst->in_reserved2); ISP_IOXGET_16(isp, &src->in_status, dst->in_status); ISP_IOXGET_16(isp, &src->in_task_flags, dst->in_task_flags); ISP_IOXGET_16(isp, &src->in_seqid, dst->in_seqid); } void -isp_get_notify_fc_e(ispsoftc_t *isp, in_fcentry_e_t *src, - in_fcentry_e_t *dst) +isp_get_notify_fc_e(ispsoftc_t *isp, in_fcentry_e_t *src, in_fcentry_e_t *dst) { isp_get_hdr(isp, &src->in_header, &dst->in_header); ISP_IOXGET_32(isp, &src->in_reserved, dst->in_reserved); ISP_IOXGET_16(isp, &src->in_iid, dst->in_iid); ISP_IOXGET_16(isp, &src->in_scclun, dst->in_scclun); ISP_IOXGET_32(isp, &src->in_reserved2, dst->in_reserved2); ISP_IOXGET_16(isp, &src->in_status, dst->in_status); ISP_IOXGET_16(isp, &src->in_task_flags, dst->in_task_flags); ISP_IOXGET_16(isp, &src->in_seqid, dst->in_seqid); } void -isp_get_notify_24xx(ispsoftc_t *isp, in_fcentry_24xx_t *src, - in_fcentry_24xx_t *dst) +isp_get_notify_24xx(ispsoftc_t *isp, in_fcentry_24xx_t *src, in_fcentry_24xx_t *dst) { int i; isp_get_hdr(isp, &src->in_header, &dst->in_header); ISP_IOXGET_32(isp, &src->in_reserved, dst->in_reserved); ISP_IOXGET_16(isp, &src->in_nphdl, dst->in_nphdl); ISP_IOXGET_16(isp, &src->in_reserved1, dst->in_reserved1); ISP_IOXGET_16(isp, &src->in_flags, dst->in_flags); ISP_IOXGET_16(isp, &src->in_srr_rxid, dst->in_srr_rxid); ISP_IOXGET_16(isp, &src->in_status, dst->in_status); ISP_IOXGET_8(isp, &src->in_status_subcode, dst->in_status_subcode); ISP_IOXGET_16(isp, &src->in_reserved2, dst->in_reserved2); ISP_IOXGET_32(isp, &src->in_rxid, dst->in_rxid); ISP_IOXGET_16(isp, &src->in_srr_reloff_hi, dst->in_srr_reloff_hi); ISP_IOXGET_16(isp, &src->in_srr_reloff_lo, dst->in_srr_reloff_lo); ISP_IOXGET_16(isp, &src->in_srr_iu, dst->in_srr_iu); ISP_IOXGET_16(isp, 
&src->in_srr_oxid, dst->in_srr_oxid); - for (i = 0; i < 18; i++) { - ISP_IOXGET_8(isp, &src->in_reserved3[i], dst->in_reserved3[i]); + ISP_IOXGET_16(isp, &src->in_nport_id_hi, dst->in_nport_id_hi); + ISP_IOXGET_8(isp, &src->in_nport_id_lo, dst->in_nport_id_lo); + ISP_IOXGET_8(isp, &src->in_reserved3, dst->in_reserved3); + ISP_IOXGET_16(isp, &src->in_np_handle, dst->in_np_handle); + for (i = 0; i < ASIZE(src->in_reserved4); i++) { + ISP_IOXGET_8(isp, &src->in_reserved4[i], dst->in_reserved4[i]); } - ISP_IOXGET_8(isp, &src->in_reserved4, dst->in_reserved4); - ISP_IOXGET_8(isp, &src->in_vpindex, dst->in_vpindex); - ISP_IOXGET_32(isp, &src->in_reserved5, dst->in_reserved5); + ISP_IOXGET_8(isp, &src->in_reserved5, dst->in_reserved5); + ISP_IOXGET_8(isp, &src->in_vpidx, dst->in_vpidx); + ISP_IOXGET_32(isp, &src->in_reserved6, dst->in_reserved6); ISP_IOXGET_16(isp, &src->in_portid_lo, dst->in_portid_lo); ISP_IOXGET_8(isp, &src->in_portid_hi, dst->in_portid_hi); - ISP_IOXGET_8(isp, &src->in_reserved6, dst->in_reserved6); - ISP_IOXGET_16(isp, &src->in_reserved7, dst->in_reserved7); + ISP_IOXGET_8(isp, &src->in_reserved7, dst->in_reserved7); + ISP_IOXGET_16(isp, &src->in_reserved8, dst->in_reserved8); ISP_IOXGET_16(isp, &src->in_oxid, dst->in_oxid); } void isp_put_notify_ack(ispsoftc_t *isp, na_entry_t *src, na_entry_t *dst) { int i; isp_put_hdr(isp, &src->na_header, &dst->na_header); ISP_IOXPUT_32(isp, src->na_reserved, &dst->na_reserved); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->na_lun, &dst->na_iid); ISP_IOXPUT_8(isp, src->na_iid, &dst->na_lun); ISP_IOXPUT_8(isp, src->na_status, &dst->na_event); ISP_IOXPUT_8(isp, src->na_event, &dst->na_status); } else { ISP_IOXPUT_8(isp, src->na_lun, &dst->na_lun); ISP_IOXPUT_8(isp, src->na_iid, &dst->na_iid); ISP_IOXPUT_8(isp, src->na_status, &dst->na_status); ISP_IOXPUT_8(isp, src->na_event, &dst->na_event); } ISP_IOXPUT_32(isp, src->na_flags, &dst->na_flags); for (i = 0; i < NA_RSVDLEN; i++) { - ISP_IOXPUT_16(isp, 
src->na_reserved3[i], - &dst->na_reserved3[i]); + ISP_IOXPUT_16(isp, src->na_reserved3[i], &dst->na_reserved3[i]); } } void isp_get_notify_ack(ispsoftc_t *isp, na_entry_t *src, na_entry_t *dst) { int i; isp_get_hdr(isp, &src->na_header, &dst->na_header); ISP_IOXGET_32(isp, &src->na_reserved, dst->na_reserved); if (ISP_IS_SBUS(isp)) { ISP_IOXGET_8(isp, &src->na_lun, dst->na_iid); ISP_IOXGET_8(isp, &src->na_iid, dst->na_lun); ISP_IOXGET_8(isp, &src->na_status, dst->na_event); ISP_IOXGET_8(isp, &src->na_event, dst->na_status); } else { ISP_IOXGET_8(isp, &src->na_lun, dst->na_lun); ISP_IOXGET_8(isp, &src->na_iid, dst->na_iid); ISP_IOXGET_8(isp, &src->na_status, dst->na_status); ISP_IOXGET_8(isp, &src->na_event, dst->na_event); } ISP_IOXGET_32(isp, &src->na_flags, dst->na_flags); for (i = 0; i < NA_RSVDLEN; i++) { - ISP_IOXGET_16(isp, &src->na_reserved3[i], - dst->na_reserved3[i]); + ISP_IOXGET_16(isp, &src->na_reserved3[i], dst->na_reserved3[i]); } } void -isp_put_notify_ack_fc(ispsoftc_t *isp, na_fcentry_t *src, - na_fcentry_t *dst) +isp_put_notify_ack_fc(ispsoftc_t *isp, na_fcentry_t *src, na_fcentry_t *dst) { int i; isp_put_hdr(isp, &src->na_header, &dst->na_header); ISP_IOXPUT_32(isp, src->na_reserved, &dst->na_reserved); ISP_IOXPUT_8(isp, src->na_reserved1, &dst->na_reserved1); ISP_IOXPUT_8(isp, src->na_iid, &dst->na_iid); ISP_IOXPUT_16(isp, src->na_response, &dst->na_response); ISP_IOXPUT_16(isp, src->na_flags, &dst->na_flags); ISP_IOXPUT_16(isp, src->na_reserved2, &dst->na_reserved2); ISP_IOXPUT_16(isp, src->na_status, &dst->na_status); ISP_IOXPUT_16(isp, src->na_task_flags, &dst->na_task_flags); ISP_IOXPUT_16(isp, src->na_seqid, &dst->na_seqid); for (i = 0; i < NA2_RSVDLEN; i++) { - ISP_IOXPUT_16(isp, src->na_reserved3[i], - &dst->na_reserved3[i]); + ISP_IOXPUT_16(isp, src->na_reserved3[i], &dst->na_reserved3[i]); } } void -isp_put_notify_ack_fc_e(ispsoftc_t *isp, na_fcentry_e_t *src, - na_fcentry_e_t *dst) +isp_put_notify_ack_fc_e(ispsoftc_t *isp, 
na_fcentry_e_t *src, na_fcentry_e_t *dst) { int i; isp_put_hdr(isp, &src->na_header, &dst->na_header); ISP_IOXPUT_32(isp, src->na_reserved, &dst->na_reserved); ISP_IOXPUT_16(isp, src->na_iid, &dst->na_iid); ISP_IOXPUT_16(isp, src->na_response, &dst->na_response); ISP_IOXPUT_16(isp, src->na_flags, &dst->na_flags); ISP_IOXPUT_16(isp, src->na_reserved2, &dst->na_reserved2); ISP_IOXPUT_16(isp, src->na_status, &dst->na_status); ISP_IOXPUT_16(isp, src->na_task_flags, &dst->na_task_flags); ISP_IOXPUT_16(isp, src->na_seqid, &dst->na_seqid); for (i = 0; i < NA2_RSVDLEN; i++) { - ISP_IOXPUT_16(isp, src->na_reserved3[i], - &dst->na_reserved3[i]); + ISP_IOXPUT_16(isp, src->na_reserved3[i], &dst->na_reserved3[i]); } } void -isp_put_notify_24xx_ack(ispsoftc_t *isp, na_fcentry_24xx_t *src, - na_fcentry_24xx_t *dst) +isp_put_notify_24xx_ack(ispsoftc_t *isp, na_fcentry_24xx_t *src, na_fcentry_24xx_t *dst) { int i; isp_put_hdr(isp, &src->na_header, &dst->na_header); ISP_IOXPUT_32(isp, src->na_handle, &dst->na_handle); ISP_IOXPUT_16(isp, src->na_nphdl, &dst->na_nphdl); ISP_IOXPUT_16(isp, src->na_reserved1, &dst->na_reserved1); ISP_IOXPUT_16(isp, src->na_flags, &dst->na_flags); ISP_IOXPUT_16(isp, src->na_srr_rxid, &dst->na_srr_rxid); ISP_IOXPUT_16(isp, src->na_status, &dst->na_status); ISP_IOXPUT_8(isp, src->na_status_subcode, &dst->na_status_subcode); ISP_IOXPUT_16(isp, src->na_reserved2, &dst->na_reserved2); ISP_IOXPUT_32(isp, src->na_rxid, &dst->na_rxid); ISP_IOXPUT_16(isp, src->na_srr_reloff_hi, &dst->na_srr_reloff_hi); ISP_IOXPUT_16(isp, src->na_srr_reloff_lo, &dst->na_srr_reloff_lo); ISP_IOXPUT_16(isp, src->na_srr_iu, &dst->na_srr_iu); ISP_IOXPUT_16(isp, src->na_srr_flags, &dst->na_srr_flags); for (i = 0; i < 18; i++) { ISP_IOXPUT_8(isp, src->na_reserved3[i], &dst->na_reserved3[i]); } ISP_IOXPUT_8(isp, src->na_reserved4, &dst->na_reserved4); - ISP_IOXPUT_8(isp, src->na_vpindex, &dst->na_vpindex); - ISP_IOXPUT_8(isp, src->na_srr_reject_vunique, - &dst->na_srr_reject_vunique); - 
ISP_IOXPUT_8(isp, src->na_srr_reject_explanation, - &dst->na_srr_reject_explanation); + ISP_IOXPUT_8(isp, src->na_vpidx, &dst->na_vpidx); + ISP_IOXPUT_8(isp, src->na_srr_reject_vunique, &dst->na_srr_reject_vunique); + ISP_IOXPUT_8(isp, src->na_srr_reject_explanation, &dst->na_srr_reject_explanation); ISP_IOXPUT_8(isp, src->na_srr_reject_code, &dst->na_srr_reject_code); ISP_IOXPUT_8(isp, src->na_reserved5, &dst->na_reserved5); for (i = 0; i < 6; i++) { ISP_IOXPUT_8(isp, src->na_reserved6[i], &dst->na_reserved6[i]); } ISP_IOXPUT_16(isp, src->na_oxid, &dst->na_oxid); } void -isp_get_notify_ack_fc(ispsoftc_t *isp, na_fcentry_t *src, - na_fcentry_t *dst) +isp_get_notify_ack_fc(ispsoftc_t *isp, na_fcentry_t *src, na_fcentry_t *dst) { int i; isp_get_hdr(isp, &src->na_header, &dst->na_header); ISP_IOXGET_32(isp, &src->na_reserved, dst->na_reserved); ISP_IOXGET_8(isp, &src->na_reserved1, dst->na_reserved1); ISP_IOXGET_8(isp, &src->na_iid, dst->na_iid); ISP_IOXGET_16(isp, &src->na_response, dst->na_response); ISP_IOXGET_16(isp, &src->na_flags, dst->na_flags); ISP_IOXGET_16(isp, &src->na_reserved2, dst->na_reserved2); ISP_IOXGET_16(isp, &src->na_status, dst->na_status); ISP_IOXGET_16(isp, &src->na_task_flags, dst->na_task_flags); ISP_IOXGET_16(isp, &src->na_seqid, dst->na_seqid); for (i = 0; i < NA2_RSVDLEN; i++) { - ISP_IOXGET_16(isp, &src->na_reserved3[i], - dst->na_reserved3[i]); + ISP_IOXGET_16(isp, &src->na_reserved3[i], dst->na_reserved3[i]); } } void -isp_get_notify_ack_fc_e(ispsoftc_t *isp, na_fcentry_e_t *src, - na_fcentry_e_t *dst) +isp_get_notify_ack_fc_e(ispsoftc_t *isp, na_fcentry_e_t *src, na_fcentry_e_t *dst) { int i; isp_get_hdr(isp, &src->na_header, &dst->na_header); ISP_IOXGET_32(isp, &src->na_reserved, dst->na_reserved); ISP_IOXGET_16(isp, &src->na_iid, dst->na_iid); ISP_IOXGET_16(isp, &src->na_response, dst->na_response); ISP_IOXGET_16(isp, &src->na_flags, dst->na_flags); ISP_IOXGET_16(isp, &src->na_reserved2, dst->na_reserved2); ISP_IOXGET_16(isp, 
&src->na_status, dst->na_status); ISP_IOXGET_16(isp, &src->na_task_flags, dst->na_task_flags); ISP_IOXGET_16(isp, &src->na_seqid, dst->na_seqid); for (i = 0; i < NA2_RSVDLEN; i++) { - ISP_IOXGET_16(isp, &src->na_reserved3[i], - dst->na_reserved3[i]); + ISP_IOXGET_16(isp, &src->na_reserved3[i], dst->na_reserved3[i]); } } void -isp_get_notify_ack_24xx(ispsoftc_t *isp, na_fcentry_24xx_t *src, - na_fcentry_24xx_t *dst) +isp_get_notify_ack_24xx(ispsoftc_t *isp, na_fcentry_24xx_t *src, na_fcentry_24xx_t *dst) { int i; isp_get_hdr(isp, &src->na_header, &dst->na_header); ISP_IOXGET_32(isp, &src->na_handle, dst->na_handle); ISP_IOXGET_16(isp, &src->na_nphdl, dst->na_nphdl); ISP_IOXGET_16(isp, &src->na_reserved1, dst->na_reserved1); ISP_IOXGET_16(isp, &src->na_flags, dst->na_flags); ISP_IOXGET_16(isp, &src->na_srr_rxid, dst->na_srr_rxid); ISP_IOXGET_16(isp, &src->na_status, dst->na_status); ISP_IOXGET_8(isp, &src->na_status_subcode, dst->na_status_subcode); ISP_IOXGET_16(isp, &src->na_reserved2, dst->na_reserved2); ISP_IOXGET_32(isp, &src->na_rxid, dst->na_rxid); ISP_IOXGET_16(isp, &src->na_srr_reloff_hi, dst->na_srr_reloff_hi); ISP_IOXGET_16(isp, &src->na_srr_reloff_lo, dst->na_srr_reloff_lo); ISP_IOXGET_16(isp, &src->na_srr_iu, dst->na_srr_iu); ISP_IOXGET_16(isp, &src->na_srr_flags, dst->na_srr_flags); for (i = 0; i < 18; i++) { ISP_IOXGET_8(isp, &src->na_reserved3[i], dst->na_reserved3[i]); } ISP_IOXGET_8(isp, &src->na_reserved4, dst->na_reserved4); - ISP_IOXGET_8(isp, &src->na_vpindex, dst->na_vpindex); - ISP_IOXGET_8(isp, &src->na_srr_reject_vunique, - dst->na_srr_reject_vunique); - ISP_IOXGET_8(isp, &src->na_srr_reject_explanation, - dst->na_srr_reject_explanation); + ISP_IOXGET_8(isp, &src->na_vpidx, dst->na_vpidx); + ISP_IOXGET_8(isp, &src->na_srr_reject_vunique, dst->na_srr_reject_vunique); + ISP_IOXGET_8(isp, &src->na_srr_reject_explanation, dst->na_srr_reject_explanation); ISP_IOXGET_8(isp, &src->na_srr_reject_code, dst->na_srr_reject_code); ISP_IOXGET_8(isp, 
&src->na_reserved5, dst->na_reserved5); for (i = 0; i < 6; i++) { ISP_IOXGET_8(isp, &src->na_reserved6[i], dst->na_reserved6[i]); } ISP_IOXGET_16(isp, &src->na_oxid, dst->na_oxid); } void isp_get_abts(ispsoftc_t *isp, abts_t *src, abts_t *dst) { int i; isp_get_hdr(isp, &src->abts_header, &dst->abts_header); for (i = 0; i < 6; i++) { - ISP_IOXGET_8(isp, &src->abts_reserved0[i], - dst->abts_reserved0[i]); + ISP_IOXGET_8(isp, &src->abts_reserved0[i], dst->abts_reserved0[i]); } ISP_IOXGET_16(isp, &src->abts_nphdl, dst->abts_nphdl); ISP_IOXGET_16(isp, &src->abts_reserved1, dst->abts_reserved1); ISP_IOXGET_16(isp, &src->abts_sof, dst->abts_sof); ISP_IOXGET_32(isp, &src->abts_rxid_abts, dst->abts_rxid_abts); ISP_IOXGET_16(isp, &src->abts_did_lo, dst->abts_did_lo); ISP_IOXGET_8(isp, &src->abts_did_hi, dst->abts_did_hi); ISP_IOXGET_8(isp, &src->abts_r_ctl, dst->abts_r_ctl); ISP_IOXGET_16(isp, &src->abts_sid_lo, dst->abts_sid_lo); ISP_IOXGET_8(isp, &src->abts_sid_hi, dst->abts_sid_hi); ISP_IOXGET_8(isp, &src->abts_cs_ctl, dst->abts_cs_ctl); ISP_IOXGET_16(isp, &src->abts_fs_ctl, dst->abts_fs_ctl); ISP_IOXGET_8(isp, &src->abts_f_ctl, dst->abts_f_ctl); ISP_IOXGET_8(isp, &src->abts_type, dst->abts_type); ISP_IOXGET_16(isp, &src->abts_seq_cnt, dst->abts_seq_cnt); ISP_IOXGET_8(isp, &src->abts_df_ctl, dst->abts_df_ctl); ISP_IOXGET_8(isp, &src->abts_seq_id, dst->abts_seq_id); ISP_IOXGET_16(isp, &src->abts_rx_id, dst->abts_rx_id); ISP_IOXGET_16(isp, &src->abts_ox_id, dst->abts_ox_id); ISP_IOXGET_32(isp, &src->abts_param, dst->abts_param); for (i = 0; i < 16; i++) { - ISP_IOXGET_8(isp, &src->abts_reserved2[i], - dst->abts_reserved2[i]); + ISP_IOXGET_8(isp, &src->abts_reserved2[i], dst->abts_reserved2[i]); } ISP_IOXGET_32(isp, &src->abts_rxid_task, dst->abts_rxid_task); } void isp_put_abts_rsp(ispsoftc_t *isp, abts_rsp_t *src, abts_rsp_t *dst) { int i; isp_put_hdr(isp, &src->abts_rsp_header, &dst->abts_rsp_header); ISP_IOXPUT_32(isp, src->abts_rsp_handle, &dst->abts_rsp_handle); 
ISP_IOXPUT_16(isp, src->abts_rsp_status, &dst->abts_rsp_status); ISP_IOXPUT_16(isp, src->abts_rsp_nphdl, &dst->abts_rsp_nphdl); ISP_IOXPUT_16(isp, src->abts_rsp_ctl_flags, &dst->abts_rsp_ctl_flags); ISP_IOXPUT_16(isp, src->abts_rsp_sof, &dst->abts_rsp_sof); ISP_IOXPUT_32(isp, src->abts_rsp_rxid_abts, &dst->abts_rsp_rxid_abts); ISP_IOXPUT_16(isp, src->abts_rsp_did_lo, &dst->abts_rsp_did_lo); ISP_IOXPUT_8(isp, src->abts_rsp_did_hi, &dst->abts_rsp_did_hi); ISP_IOXPUT_8(isp, src->abts_rsp_r_ctl, &dst->abts_rsp_r_ctl); ISP_IOXPUT_16(isp, src->abts_rsp_sid_lo, &dst->abts_rsp_sid_lo); ISP_IOXPUT_8(isp, src->abts_rsp_sid_hi, &dst->abts_rsp_sid_hi); ISP_IOXPUT_8(isp, src->abts_rsp_cs_ctl, &dst->abts_rsp_cs_ctl); ISP_IOXPUT_16(isp, src->abts_rsp_f_ctl_lo, &dst->abts_rsp_f_ctl_lo); ISP_IOXPUT_8(isp, src->abts_rsp_f_ctl_hi, &dst->abts_rsp_f_ctl_hi); ISP_IOXPUT_8(isp, src->abts_rsp_type, &dst->abts_rsp_type); ISP_IOXPUT_16(isp, src->abts_rsp_seq_cnt, &dst->abts_rsp_seq_cnt); ISP_IOXPUT_8(isp, src->abts_rsp_df_ctl, &dst->abts_rsp_df_ctl); ISP_IOXPUT_8(isp, src->abts_rsp_seq_id, &dst->abts_rsp_seq_id); ISP_IOXPUT_16(isp, src->abts_rsp_rx_id, &dst->abts_rsp_rx_id); ISP_IOXPUT_16(isp, src->abts_rsp_ox_id, &dst->abts_rsp_ox_id); ISP_IOXPUT_32(isp, src->abts_rsp_param, &dst->abts_rsp_param); if (src->abts_rsp_r_ctl == BA_ACC) { - ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.reserved, - &dst->abts_rsp_payload.ba_acc.reserved); - ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_acc.last_seq_id, - &dst->abts_rsp_payload.ba_acc.last_seq_id); - ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_acc.seq_id_valid, - &dst->abts_rsp_payload.ba_acc.seq_id_valid); - ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.aborted_rx_id, - &dst->abts_rsp_payload.ba_acc.aborted_rx_id); - ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.aborted_ox_id, - &dst->abts_rsp_payload.ba_acc.aborted_ox_id); - ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.high_seq_cnt, - &dst->abts_rsp_payload.ba_acc.high_seq_cnt); - 
ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.low_seq_cnt, - &dst->abts_rsp_payload.ba_acc.low_seq_cnt); + ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.reserved, &dst->abts_rsp_payload.ba_acc.reserved); + ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_acc.last_seq_id, &dst->abts_rsp_payload.ba_acc.last_seq_id); + ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_acc.seq_id_valid, &dst->abts_rsp_payload.ba_acc.seq_id_valid); + ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.aborted_rx_id, &dst->abts_rsp_payload.ba_acc.aborted_rx_id); + ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.aborted_ox_id, &dst->abts_rsp_payload.ba_acc.aborted_ox_id); + ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.high_seq_cnt, &dst->abts_rsp_payload.ba_acc.high_seq_cnt); + ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.low_seq_cnt, &dst->abts_rsp_payload.ba_acc.low_seq_cnt); for (i = 0; i < 4; i++) { - ISP_IOXPUT_16(isp, - src->abts_rsp_payload.ba_acc.reserved2[i], - &dst->abts_rsp_payload.ba_acc.reserved2[i]); + ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.reserved2[i], &dst->abts_rsp_payload.ba_acc.reserved2[i]); } } else if (src->abts_rsp_r_ctl == BA_RJT) { - ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_rjt.vendor_unique, - &dst->abts_rsp_payload.ba_rjt.vendor_unique); - ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_rjt.explanation, - &dst->abts_rsp_payload.ba_rjt.explanation); - ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_rjt.reason, - &dst->abts_rsp_payload.ba_rjt.reason); - ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_rjt.reserved, - &dst->abts_rsp_payload.ba_rjt.reserved); + ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_rjt.vendor_unique, &dst->abts_rsp_payload.ba_rjt.vendor_unique); + ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_rjt.explanation, &dst->abts_rsp_payload.ba_rjt.explanation); + ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_rjt.reason, &dst->abts_rsp_payload.ba_rjt.reason); + ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_rjt.reserved, &dst->abts_rsp_payload.ba_rjt.reserved); for (i = 0; i 
< 12; i++) { - ISP_IOXPUT_16(isp, - src->abts_rsp_payload.ba_rjt.reserved2[i], - &dst->abts_rsp_payload.ba_rjt.reserved2[i]); + ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_rjt.reserved2[i], &dst->abts_rsp_payload.ba_rjt.reserved2[i]); } } else { for (i = 0; i < 16; i++) { - ISP_IOXPUT_8(isp, src->abts_rsp_payload.reserved[i], - &dst->abts_rsp_payload.reserved[i]); + ISP_IOXPUT_8(isp, src->abts_rsp_payload.reserved[i], &dst->abts_rsp_payload.reserved[i]); } } ISP_IOXPUT_32(isp, src->abts_rsp_rxid_task, &dst->abts_rsp_rxid_task); } void isp_get_abts_rsp(ispsoftc_t *isp, abts_rsp_t *src, abts_rsp_t *dst) { int i; isp_get_hdr(isp, &src->abts_rsp_header, &dst->abts_rsp_header); ISP_IOXGET_32(isp, &src->abts_rsp_handle, dst->abts_rsp_handle); ISP_IOXGET_16(isp, &src->abts_rsp_status, dst->abts_rsp_status); ISP_IOXGET_16(isp, &src->abts_rsp_nphdl, dst->abts_rsp_nphdl); ISP_IOXGET_16(isp, &src->abts_rsp_ctl_flags, dst->abts_rsp_ctl_flags); ISP_IOXGET_16(isp, &src->abts_rsp_sof, dst->abts_rsp_sof); ISP_IOXGET_32(isp, &src->abts_rsp_rxid_abts, dst->abts_rsp_rxid_abts); ISP_IOXGET_16(isp, &src->abts_rsp_did_lo, dst->abts_rsp_did_lo); ISP_IOXGET_8(isp, &src->abts_rsp_did_hi, dst->abts_rsp_did_hi); ISP_IOXGET_8(isp, &src->abts_rsp_r_ctl, dst->abts_rsp_r_ctl); ISP_IOXGET_16(isp, &src->abts_rsp_sid_lo, dst->abts_rsp_sid_lo); ISP_IOXGET_8(isp, &src->abts_rsp_sid_hi, dst->abts_rsp_sid_hi); ISP_IOXGET_8(isp, &src->abts_rsp_cs_ctl, dst->abts_rsp_cs_ctl); ISP_IOXGET_16(isp, &src->abts_rsp_f_ctl_lo, dst->abts_rsp_f_ctl_lo); ISP_IOXGET_8(isp, &src->abts_rsp_f_ctl_hi, dst->abts_rsp_f_ctl_hi); ISP_IOXGET_8(isp, &src->abts_rsp_type, dst->abts_rsp_type); ISP_IOXGET_16(isp, &src->abts_rsp_seq_cnt, dst->abts_rsp_seq_cnt); ISP_IOXGET_8(isp, &src->abts_rsp_df_ctl, dst->abts_rsp_df_ctl); ISP_IOXGET_8(isp, &src->abts_rsp_seq_id, dst->abts_rsp_seq_id); ISP_IOXGET_16(isp, &src->abts_rsp_rx_id, dst->abts_rsp_rx_id); ISP_IOXGET_16(isp, &src->abts_rsp_ox_id, dst->abts_rsp_ox_id); ISP_IOXGET_32(isp, 
&src->abts_rsp_param, dst->abts_rsp_param); for (i = 0; i < 8; i++) { - ISP_IOXGET_8(isp, &src->abts_rsp_payload.rsp.reserved[i], - dst->abts_rsp_payload.rsp.reserved[i]); + ISP_IOXGET_8(isp, &src->abts_rsp_payload.rsp.reserved[i], dst->abts_rsp_payload.rsp.reserved[i]); } - ISP_IOXGET_32(isp, &src->abts_rsp_payload.rsp.subcode1, - dst->abts_rsp_payload.rsp.subcode1); - ISP_IOXGET_32(isp, &src->abts_rsp_payload.rsp.subcode2, - dst->abts_rsp_payload.rsp.subcode2); + ISP_IOXGET_32(isp, &src->abts_rsp_payload.rsp.subcode1, dst->abts_rsp_payload.rsp.subcode1); + ISP_IOXGET_32(isp, &src->abts_rsp_payload.rsp.subcode2, dst->abts_rsp_payload.rsp.subcode2); ISP_IOXGET_32(isp, &src->abts_rsp_rxid_task, dst->abts_rsp_rxid_task); } #endif /* ISP_TARGET_MODE */ /* * vim:ts=8:sw=8 */ Index: head/sys/dev/isp/isp_library.h =================================================================== --- head/sys/dev/isp/isp_library.h (revision 196007) +++ head/sys/dev/isp/isp_library.h (revision 196008) @@ -1,224 +1,213 @@ /* $FreeBSD$ */ /*- - * Copyright (c) 1997-2007 by Matthew Jacob + * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * */ -#ifndef _ISP_LIBRARY_H -#define _ISP_LIBRARY_H +#ifndef _ISP_LIBRARY_H +#define _ISP_LIBRARY_H -extern int isp_save_xs(ispsoftc_t *, XS_T *, uint32_t *); -extern XS_T *isp_find_xs(ispsoftc_t *, uint32_t); -extern uint32_t isp_find_handle(ispsoftc_t *, XS_T *); -extern uint32_t isp_handle_index(uint32_t); -extern void isp_destroy_handle(ispsoftc_t *, uint32_t); -extern int isp_getrqentry(ispsoftc_t *, uint32_t *, uint32_t *, void **); -extern void isp_print_qentry (ispsoftc_t *, char *, int, void *); -extern void isp_print_bytes(ispsoftc_t *, const char *, int, void *); -extern int isp_fc_runstate(ispsoftc_t *, int); -extern void isp_dump_portdb(ispsoftc_t *); -extern void isp_shutdown(ispsoftc_t *); -extern void isp_put_hdr(ispsoftc_t *, isphdr_t *, isphdr_t *); -extern void isp_get_hdr(ispsoftc_t *, isphdr_t *, isphdr_t *); -extern int isp_get_response_type(ispsoftc_t *, isphdr_t *); -extern void -isp_put_request(ispsoftc_t *, ispreq_t *, ispreq_t *); -extern void -isp_put_marker(ispsoftc_t *, isp_marker_t *, isp_marker_t *); -extern void -isp_put_marker_24xx(ispsoftc_t *, isp_marker_24xx_t *, isp_marker_24xx_t *); -extern void -isp_put_request_t2(ispsoftc_t *, ispreqt2_t *, ispreqt2_t *); -extern void -isp_put_request_t2e(ispsoftc_t *, ispreqt2e_t *, ispreqt2e_t *); -extern void -isp_put_request_t3(ispsoftc_t *, ispreqt3_t *, ispreqt3_t *); -extern void -isp_put_request_t3e(ispsoftc_t *, ispreqt3e_t *, ispreqt3e_t *); -extern void 
-isp_put_extended_request(ispsoftc_t *, ispextreq_t *, ispextreq_t *); -extern void -isp_put_request_t7(ispsoftc_t *, ispreqt7_t *, ispreqt7_t *); -extern void -isp_put_24xx_abrt(ispsoftc_t *, isp24xx_abrt_t *, isp24xx_abrt_t *); -extern void -isp_put_cont_req(ispsoftc_t *, ispcontreq_t *, ispcontreq_t *); -extern void -isp_put_cont64_req(ispsoftc_t *, ispcontreq64_t *, ispcontreq64_t *); -extern void -isp_get_response(ispsoftc_t *, ispstatusreq_t *, ispstatusreq_t *); -extern void isp_get_24xx_response(ispsoftc_t *, isp24xx_statusreq_t *, - isp24xx_statusreq_t *); -void -isp_get_24xx_abrt(ispsoftc_t *, isp24xx_abrt_t *, isp24xx_abrt_t *); -extern void -isp_get_rio2(ispsoftc_t *, isp_rio2_t *, isp_rio2_t *); -extern void -isp_put_icb(ispsoftc_t *, isp_icb_t *, isp_icb_t *); -extern void -isp_put_icb_2400(ispsoftc_t *, isp_icb_2400_t *, isp_icb_2400_t *); -extern void -isp_get_pdb_21xx(ispsoftc_t *, isp_pdb_21xx_t *, isp_pdb_21xx_t *); -extern void -isp_get_pdb_24xx(ispsoftc_t *, isp_pdb_24xx_t *, isp_pdb_24xx_t *); -extern void -isp_get_plogx(ispsoftc_t *, isp_plogx_t *, isp_plogx_t *); -extern void -isp_put_plogx(ispsoftc_t *, isp_plogx_t *, isp_plogx_t *); -extern void -isp_get_ct_pt(ispsoftc_t *isp, isp_ct_pt_t *, isp_ct_pt_t *); -extern void -isp_get_ms(ispsoftc_t *isp, isp_ms_t *, isp_ms_t *); -extern void -isp_put_ct_pt(ispsoftc_t *isp, isp_ct_pt_t *, isp_ct_pt_t *); -extern void -isp_put_ms(ispsoftc_t *isp, isp_ms_t *, isp_ms_t *); -extern void -isp_put_sns_request(ispsoftc_t *, sns_screq_t *, sns_screq_t *); -extern void -isp_put_gid_ft_request(ispsoftc_t *, sns_gid_ft_req_t *, - sns_gid_ft_req_t *); -extern void -isp_put_gxn_id_request(ispsoftc_t *, sns_gxn_id_req_t *, - sns_gxn_id_req_t *); -extern void -isp_get_sns_response(ispsoftc_t *, sns_scrsp_t *, sns_scrsp_t *, int); -extern void -isp_get_gid_ft_response(ispsoftc_t *, sns_gid_ft_rsp_t *, - sns_gid_ft_rsp_t *, int); -extern void -isp_get_gxn_id_response(ispsoftc_t *, sns_gxn_id_rsp_t *, - 
sns_gxn_id_rsp_t *); -extern void -isp_get_gff_id_response(ispsoftc_t *, sns_gff_id_rsp_t *, - sns_gff_id_rsp_t *); -extern void -isp_get_ga_nxt_response(ispsoftc_t *, sns_ga_nxt_rsp_t *, - sns_ga_nxt_rsp_t *); -extern void -isp_get_els(ispsoftc_t *, els_t *, els_t *); -extern void -isp_put_els(ispsoftc_t *, els_t *, els_t *); -extern void -isp_get_fc_hdr(ispsoftc_t *, fc_hdr_t *, fc_hdr_t *); -extern void -isp_get_fcp_cmnd_iu(ispsoftc_t *, fcp_cmnd_iu_t *, fcp_cmnd_iu_t *); -extern void isp_put_rft_id(ispsoftc_t *, rft_id_t *, rft_id_t *); -extern void isp_get_ct_hdr(ispsoftc_t *isp, ct_hdr_t *, ct_hdr_t *); -extern void isp_put_ct_hdr(ispsoftc_t *isp, ct_hdr_t *, ct_hdr_t *); +/* + * Common command shipping routine. + * + * This used to be platform specific, but basically once you get the segment + * stuff figured out, you can make all the code in one spot. + */ +typedef enum { ISP_TO_DEVICE, ISP_FROM_DEVICE, ISP_NOXFR} isp_ddir_t; +int isp_send_cmd(ispsoftc_t *, void *, void *, uint32_t, uint32_t, isp_ddir_t); -#define ISP_HANDLE_MASK 0x7fff +/* + * Handle management functions. + * + * These handles are associate with a command. + */ +int isp_save_xs(ispsoftc_t *, XS_T *, uint32_t *); +XS_T * isp_find_xs(ispsoftc_t *, uint32_t); +uint32_t isp_find_handle(ispsoftc_t *, XS_T *); +uint32_t isp_handle_index(uint32_t); +void isp_destroy_handle(ispsoftc_t *, uint32_t); -#ifdef ISP_TARGET_MODE -#if defined(__NetBSD__) || defined(__OpenBSD__) +/* + * Request Queue allocation + */ +void *isp_getrqentry(ispsoftc_t *); + +/* + * Queue Entry debug functions + */ +void isp_print_qentry (ispsoftc_t *, const char *, int, void *); +void isp_print_bytes(ispsoftc_t *, const char *, int, void *); + +/* + * Fibre Channel specific routines and data. 
+ */ +extern const char *isp_class3_roles[4]; +int isp_fc_runstate(ispsoftc_t *, int, int); +void isp_dump_portdb(ispsoftc_t *, int); + +const char *isp_fc_fw_statename(int); +const char *isp_fc_loop_statename(int); +const char *isp_fc_toponame(fcparam *); + +int isp_fc_change_role(ispsoftc_t *, int, int); + + +/* + * Cleanup + */ +void isp_clear_commands(ispsoftc_t *); + +/* + * Common chip shutdown function + */ +void isp_shutdown(ispsoftc_t *); + +/* + * Put/Get routines to push from CPU view to device view + * or to pull from device view to CPU view for various + * data structures (IOCB) + */ +void isp_put_hdr(ispsoftc_t *, isphdr_t *, isphdr_t *); +void isp_get_hdr(ispsoftc_t *, isphdr_t *, isphdr_t *); +int isp_get_response_type(ispsoftc_t *, isphdr_t *); +void isp_put_request(ispsoftc_t *, ispreq_t *, ispreq_t *); +void isp_put_marker(ispsoftc_t *, isp_marker_t *, isp_marker_t *); +void isp_put_marker_24xx(ispsoftc_t *, isp_marker_24xx_t *, isp_marker_24xx_t *); +void isp_put_request_t2(ispsoftc_t *, ispreqt2_t *, ispreqt2_t *); +void isp_put_request_t2e(ispsoftc_t *, ispreqt2e_t *, ispreqt2e_t *); +void isp_put_request_t3(ispsoftc_t *, ispreqt3_t *, ispreqt3_t *); +void isp_put_request_t3e(ispsoftc_t *, ispreqt3e_t *, ispreqt3e_t *); +void isp_put_extended_request(ispsoftc_t *, ispextreq_t *, ispextreq_t *); +void isp_put_request_t7(ispsoftc_t *, ispreqt7_t *, ispreqt7_t *); +void isp_put_24xx_tmf(ispsoftc_t *, isp24xx_tmf_t *, isp24xx_tmf_t *); +void isp_put_24xx_abrt(ispsoftc_t *, isp24xx_abrt_t *, isp24xx_abrt_t *); +void isp_put_cont_req(ispsoftc_t *, ispcontreq_t *, ispcontreq_t *); +void isp_put_cont64_req(ispsoftc_t *, ispcontreq64_t *, ispcontreq64_t *); +void isp_get_response(ispsoftc_t *, ispstatusreq_t *, ispstatusreq_t *); +void isp_get_24xx_response(ispsoftc_t *, isp24xx_statusreq_t *, isp24xx_statusreq_t *); +void isp_get_24xx_abrt(ispsoftc_t *, isp24xx_abrt_t *, isp24xx_abrt_t *); +void isp_get_rio2(ispsoftc_t *, isp_rio2_t *, isp_rio2_t *); 
+void isp_put_icb(ispsoftc_t *, isp_icb_t *, isp_icb_t *); +void isp_put_icb_2400(ispsoftc_t *, isp_icb_2400_t *, isp_icb_2400_t *); +void isp_put_icb_2400_vpinfo(ispsoftc_t *, isp_icb_2400_vpinfo_t *, isp_icb_2400_vpinfo_t *); +void isp_put_vp_port_info(ispsoftc_t *, vp_port_info_t *, vp_port_info_t *); +void isp_get_vp_port_info(ispsoftc_t *, vp_port_info_t *, vp_port_info_t *); +void isp_put_vp_ctrl_info(ispsoftc_t *, vp_ctrl_info_t *, vp_ctrl_info_t *); +void isp_get_vp_ctrl_info(ispsoftc_t *, vp_ctrl_info_t *, vp_ctrl_info_t *); +void isp_put_vp_modify(ispsoftc_t *, vp_modify_t *, vp_modify_t *); +void isp_get_vp_modify(ispsoftc_t *, vp_modify_t *, vp_modify_t *); +void isp_get_pdb_21xx(ispsoftc_t *, isp_pdb_21xx_t *, isp_pdb_21xx_t *); +void isp_get_pdb_24xx(ispsoftc_t *, isp_pdb_24xx_t *, isp_pdb_24xx_t *); +void isp_get_ridacq(ispsoftc_t *, isp_ridacq_t *, isp_ridacq_t *); +void isp_get_plogx(ispsoftc_t *, isp_plogx_t *, isp_plogx_t *); +void isp_put_plogx(ispsoftc_t *, isp_plogx_t *, isp_plogx_t *); +void isp_get_ct_pt(ispsoftc_t *isp, isp_ct_pt_t *, isp_ct_pt_t *); +void isp_get_ms(ispsoftc_t *isp, isp_ms_t *, isp_ms_t *); +void isp_put_ct_pt(ispsoftc_t *isp, isp_ct_pt_t *, isp_ct_pt_t *); +void isp_put_ms(ispsoftc_t *isp, isp_ms_t *, isp_ms_t *); +void isp_put_sns_request(ispsoftc_t *, sns_screq_t *, sns_screq_t *); +void isp_put_gid_ft_request(ispsoftc_t *, sns_gid_ft_req_t *, sns_gid_ft_req_t *); +void isp_put_gxn_id_request(ispsoftc_t *, sns_gxn_id_req_t *, sns_gxn_id_req_t *); +void isp_get_sns_response(ispsoftc_t *, sns_scrsp_t *, sns_scrsp_t *, int); +void isp_get_gid_ft_response(ispsoftc_t *, sns_gid_ft_rsp_t *, sns_gid_ft_rsp_t *, int); +void isp_get_gxn_id_response(ispsoftc_t *, sns_gxn_id_rsp_t *, sns_gxn_id_rsp_t *); +void isp_get_gff_id_response(ispsoftc_t *, sns_gff_id_rsp_t *, sns_gff_id_rsp_t *); +void isp_get_ga_nxt_response(ispsoftc_t *, sns_ga_nxt_rsp_t *, sns_ga_nxt_rsp_t *); +void isp_get_els(ispsoftc_t *, els_t *, els_t *); +void 
isp_put_els(ispsoftc_t *, els_t *, els_t *); +void isp_get_fc_hdr(ispsoftc_t *, fc_hdr_t *, fc_hdr_t *); +void isp_get_fcp_cmnd_iu(ispsoftc_t *, fcp_cmnd_iu_t *, fcp_cmnd_iu_t *); +void isp_put_rft_id(ispsoftc_t *, rft_id_t *, rft_id_t *); +void isp_get_ct_hdr(ispsoftc_t *isp, ct_hdr_t *, ct_hdr_t *); +void isp_put_ct_hdr(ispsoftc_t *isp, ct_hdr_t *, ct_hdr_t *); + +#define ISP_HANDLE_MASK 0x7fff + +#ifdef ISP_TARGET_MODE +#if defined(__NetBSD__) || defined(__OpenBSD__) #include -#elif defined(__FreeBSD__) +#elif defined(__FreeBSD__) #include #else #include "isp_target.h" #endif -#define IS_TARGET_HANDLE(x) ((x) & 0x8000) +int isp_send_tgt_cmd(ispsoftc_t *, void *, void *, uint32_t, uint32_t, isp_ddir_t, void *, uint32_t); -extern int isp_save_xs_tgt(ispsoftc_t *, void *, uint32_t *); -extern void *isp_find_xs_tgt(ispsoftc_t *, uint32_t); -extern uint32_t isp_find_tgt_handle(ispsoftc_t *, void *); -extern void isp_destroy_tgt_handle(ispsoftc_t *, uint32_t); +#define IS_TARGET_HANDLE(x) ((x) & 0x8000) -extern void -isp_put_atio(ispsoftc_t *, at_entry_t *, at_entry_t *); -extern void -isp_get_atio(ispsoftc_t *, at_entry_t *, at_entry_t *); -extern void -isp_put_atio2(ispsoftc_t *, at2_entry_t *, at2_entry_t *); -extern void -isp_put_atio2e(ispsoftc_t *, at2e_entry_t *, at2e_entry_t *); -extern void -isp_get_atio2(ispsoftc_t *, at2_entry_t *, at2_entry_t *); -extern void -isp_get_atio2e(ispsoftc_t *, at2e_entry_t *, at2e_entry_t *); -extern void -isp_get_atio7(ispsoftc_t *isp, at7_entry_t *, at7_entry_t *); -extern void -isp_put_ctio(ispsoftc_t *, ct_entry_t *, ct_entry_t *); -extern void -isp_get_ctio(ispsoftc_t *, ct_entry_t *, ct_entry_t *); -extern void -isp_put_ctio2(ispsoftc_t *, ct2_entry_t *, ct2_entry_t *); -extern void -isp_put_ctio2e(ispsoftc_t *, ct2e_entry_t *, ct2e_entry_t *); -extern void -isp_put_ctio7(ispsoftc_t *, ct7_entry_t *, ct7_entry_t *); -extern void -isp_get_ctio2(ispsoftc_t *, ct2_entry_t *, ct2_entry_t *); -extern void 
-isp_get_ctio2e(ispsoftc_t *, ct2e_entry_t *, ct2e_entry_t *); -extern void -isp_get_ctio7(ispsoftc_t *, ct7_entry_t *, ct7_entry_t *); -extern void -isp_put_enable_lun(ispsoftc_t *, lun_entry_t *, lun_entry_t *); -extern void -isp_get_enable_lun(ispsoftc_t *, lun_entry_t *, lun_entry_t *); -extern void -isp_put_notify(ispsoftc_t *, in_entry_t *, in_entry_t *); -extern void -isp_get_notify(ispsoftc_t *, in_entry_t *, in_entry_t *); -extern void -isp_put_notify_fc(ispsoftc_t *, in_fcentry_t *, in_fcentry_t *); -extern void -isp_put_notify_fc_e(ispsoftc_t *, in_fcentry_e_t *, in_fcentry_e_t *); -extern void -isp_put_notify_24xx(ispsoftc_t *, in_fcentry_24xx_t *, in_fcentry_24xx_t *); -extern void -isp_get_notify_fc(ispsoftc_t *, in_fcentry_t *, in_fcentry_t *); -extern void -isp_get_notify_fc_e(ispsoftc_t *, in_fcentry_e_t *, in_fcentry_e_t *); -extern void -isp_get_notify_24xx(ispsoftc_t *, in_fcentry_24xx_t *, in_fcentry_24xx_t *); -extern void -isp_put_notify_ack(ispsoftc_t *, na_entry_t *, na_entry_t *); -extern void -isp_get_notify_ack(ispsoftc_t *, na_entry_t *, na_entry_t *); -extern void -isp_put_notify_24xx_ack(ispsoftc_t *, na_fcentry_24xx_t *, na_fcentry_24xx_t *); -extern void -isp_put_notify_ack_fc(ispsoftc_t *, na_fcentry_t *, na_fcentry_t *); -extern void -isp_put_notify_ack_fc_e(ispsoftc_t *, na_fcentry_e_t *, na_fcentry_e_t *); -extern void isp_put_notify_ack_24xx(ispsoftc_t *, na_fcentry_24xx_t *, - na_fcentry_24xx_t *); -extern void -isp_get_notify_ack_fc(ispsoftc_t *, na_fcentry_t *, na_fcentry_t *); -extern void -isp_get_notify_ack_fc_e(ispsoftc_t *, na_fcentry_e_t *, na_fcentry_e_t *); -extern void isp_get_notify_ack_24xx(ispsoftc_t *, na_fcentry_24xx_t *, - na_fcentry_24xx_t *); -extern void -isp_get_abts(ispsoftc_t *, abts_t *, abts_t *); -extern void -isp_put_abts_rsp(ispsoftc_t *, abts_rsp_t *, abts_rsp_t *); -extern void -isp_get_abts_rsp(ispsoftc_t *, abts_rsp_t *, abts_rsp_t *); -#endif /* ISP_TARGET_MODE */ -#endif /* _ISP_LIBRARY_H */ 
+int isp_save_xs_tgt(ispsoftc_t *, void *, uint32_t *); +void *isp_find_xs_tgt(ispsoftc_t *, uint32_t); +uint32_t isp_find_tgt_handle(ispsoftc_t *, void *); +void isp_destroy_tgt_handle(ispsoftc_t *, uint32_t); + +int isp_find_pdb_by_wwn(ispsoftc_t *, int, uint64_t, fcportdb_t **); +int isp_find_pdb_by_loopid(ispsoftc_t *, int, uint32_t, fcportdb_t **); +int isp_find_pdb_by_sid(ispsoftc_t *, int, uint32_t, fcportdb_t **); +void isp_find_chan_by_did(ispsoftc_t *, uint32_t, uint16_t *); +void isp_add_wwn_entry(ispsoftc_t *, int, uint64_t, uint16_t, uint32_t); +void isp_del_wwn_entry(ispsoftc_t *, int, uint64_t, uint16_t, uint32_t); +void isp_del_all_wwn_entries(ispsoftc_t *, int); +void isp_del_wwn_entries(ispsoftc_t *, isp_notify_t *); + +void isp_put_atio(ispsoftc_t *, at_entry_t *, at_entry_t *); +void isp_get_atio(ispsoftc_t *, at_entry_t *, at_entry_t *); +void isp_put_atio2(ispsoftc_t *, at2_entry_t *, at2_entry_t *); +void isp_put_atio2e(ispsoftc_t *, at2e_entry_t *, at2e_entry_t *); +void isp_get_atio2(ispsoftc_t *, at2_entry_t *, at2_entry_t *); +void isp_get_atio2e(ispsoftc_t *, at2e_entry_t *, at2e_entry_t *); +void isp_get_atio7(ispsoftc_t *isp, at7_entry_t *, at7_entry_t *); +void isp_put_ctio(ispsoftc_t *, ct_entry_t *, ct_entry_t *); +void isp_get_ctio(ispsoftc_t *, ct_entry_t *, ct_entry_t *); +void isp_put_ctio2(ispsoftc_t *, ct2_entry_t *, ct2_entry_t *); +void isp_put_ctio2e(ispsoftc_t *, ct2e_entry_t *, ct2e_entry_t *); +void isp_put_ctio7(ispsoftc_t *, ct7_entry_t *, ct7_entry_t *); +void isp_get_ctio2(ispsoftc_t *, ct2_entry_t *, ct2_entry_t *); +void isp_get_ctio2e(ispsoftc_t *, ct2e_entry_t *, ct2e_entry_t *); +void isp_get_ctio7(ispsoftc_t *, ct7_entry_t *, ct7_entry_t *); +void isp_put_enable_lun(ispsoftc_t *, lun_entry_t *, lun_entry_t *); +void isp_get_enable_lun(ispsoftc_t *, lun_entry_t *, lun_entry_t *); +void isp_put_notify(ispsoftc_t *, in_entry_t *, in_entry_t *); +void isp_get_notify(ispsoftc_t *, in_entry_t *, in_entry_t *); +void 
isp_put_notify_fc(ispsoftc_t *, in_fcentry_t *, in_fcentry_t *); +void isp_put_notify_fc_e(ispsoftc_t *, in_fcentry_e_t *, in_fcentry_e_t *); +void isp_put_notify_24xx(ispsoftc_t *, in_fcentry_24xx_t *, in_fcentry_24xx_t *); +void isp_get_notify_fc(ispsoftc_t *, in_fcentry_t *, in_fcentry_t *); +void isp_get_notify_fc_e(ispsoftc_t *, in_fcentry_e_t *, in_fcentry_e_t *); +void isp_get_notify_24xx(ispsoftc_t *, in_fcentry_24xx_t *, in_fcentry_24xx_t *); +void isp_put_notify_ack(ispsoftc_t *, na_entry_t *, na_entry_t *); +void isp_get_notify_ack(ispsoftc_t *, na_entry_t *, na_entry_t *); +void isp_put_notify_24xx_ack(ispsoftc_t *, na_fcentry_24xx_t *, na_fcentry_24xx_t *); +void isp_put_notify_ack_fc(ispsoftc_t *, na_fcentry_t *, na_fcentry_t *); +void isp_put_notify_ack_fc_e(ispsoftc_t *, na_fcentry_e_t *, na_fcentry_e_t *); +void isp_put_notify_ack_24xx(ispsoftc_t *, na_fcentry_24xx_t *, na_fcentry_24xx_t *); +void isp_get_notify_ack_fc(ispsoftc_t *, na_fcentry_t *, na_fcentry_t *); +void isp_get_notify_ack_fc_e(ispsoftc_t *, na_fcentry_e_t *, na_fcentry_e_t *); +void isp_get_notify_ack_24xx(ispsoftc_t *, na_fcentry_24xx_t *, na_fcentry_24xx_t *); +void isp_get_abts(ispsoftc_t *, abts_t *, abts_t *); +void isp_put_abts_rsp(ispsoftc_t *, abts_rsp_t *, abts_rsp_t *); +void isp_get_abts_rsp(ispsoftc_t *, abts_rsp_t *, abts_rsp_t *); +#endif /* ISP_TARGET_MODE */ +#endif /* _ISP_LIBRARY_H */ Index: head/sys/dev/isp/isp_pci.c =================================================================== --- head/sys/dev/isp/isp_pci.c (revision 196007) +++ head/sys/dev/isp/isp_pci.c (revision 196008) @@ -1,2929 +1,1960 @@ /*- - * Copyright (c) 1997-2006 by Matthew Jacob + * Copyright (c) 1997-2008 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. * FreeBSD Version. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include -#if __FreeBSD_version >= 700000 #include #include -#endif #include -#if __FreeBSD_version < 500000 -#include -#include -#include -#include -#else #include #include #include -#endif #include #include #include #include +#include #include -#if __FreeBSD_version < 500000 -#define BUS_PROBE_DEFAULT 0 -#endif - static uint32_t isp_pci_rd_reg(ispsoftc_t *, int); static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t); static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int); static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t); static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int); static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t); -static int -isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); -static int -isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); -static int -isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); +static int isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); +static int isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); +static int isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); static int isp_pci_mbxdma(ispsoftc_t *); -static int -isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *, uint32_t); +static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *); static void isp_pci_reset0(ispsoftc_t *); static void isp_pci_reset1(ispsoftc_t *); static void isp_pci_dumpregs(ispsoftc_t *, const char *); static struct ispmdvec mdvec = { isp_pci_rd_isr, isp_pci_rd_reg, isp_pci_wr_reg, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs, NULL, BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 }; static struct ispmdvec mdvec_1080 = { isp_pci_rd_isr, isp_pci_rd_reg_1080, isp_pci_wr_reg_1080, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs, NULL, 
BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 }; static struct ispmdvec mdvec_12160 = { isp_pci_rd_isr, isp_pci_rd_reg_1080, isp_pci_wr_reg_1080, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs, NULL, BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 }; static struct ispmdvec mdvec_2100 = { isp_pci_rd_isr, isp_pci_rd_reg, isp_pci_wr_reg, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs }; static struct ispmdvec mdvec_2200 = { isp_pci_rd_isr, isp_pci_rd_reg, isp_pci_wr_reg, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs }; static struct ispmdvec mdvec_2300 = { isp_pci_rd_isr_2300, isp_pci_rd_reg, isp_pci_wr_reg, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs }; static struct ispmdvec mdvec_2400 = { isp_pci_rd_isr_2400, isp_pci_rd_reg_2400, isp_pci_wr_reg_2400, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, NULL }; +static struct ispmdvec mdvec_2500 = { + isp_pci_rd_isr_2400, + isp_pci_rd_reg_2400, + isp_pci_wr_reg_2400, + isp_pci_mbxdma, + isp_pci_dmasetup, + isp_common_dmateardown, + isp_pci_reset0, + isp_pci_reset1, + NULL +}; + #ifndef PCIM_CMD_INVEN #define PCIM_CMD_INVEN 0x10 #endif #ifndef PCIM_CMD_BUSMASTEREN #define PCIM_CMD_BUSMASTEREN 0x0004 #endif #ifndef PCIM_CMD_PERRESPEN #define PCIM_CMD_PERRESPEN 0x0040 #endif #ifndef PCIM_CMD_SEREN #define PCIM_CMD_SEREN 0x0100 #endif #ifndef PCIM_CMD_INTX_DISABLE #define PCIM_CMD_INTX_DISABLE 0x0400 #endif #ifndef PCIR_COMMAND #define PCIR_COMMAND 0x04 #endif #ifndef PCIR_CACHELNSZ #define PCIR_CACHELNSZ 0x0c #endif #ifndef PCIR_LATTIMER #define PCIR_LATTIMER 0x0d #endif #ifndef PCIR_ROMADDR #define PCIR_ROMADDR 0x30 #endif #ifndef PCI_VENDOR_QLOGIC #define PCI_VENDOR_QLOGIC 0x1077 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP1020 #define PCI_PRODUCT_QLOGIC_ISP1020 
0x1020 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP1080 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP10160 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP12160 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP1240 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP1280 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2100 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2200 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2300 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2312 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2322 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2422 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2432 #define PCI_PRODUCT_QLOGIC_ISP2432 0x2432 #endif +#ifndef PCI_PRODUCT_QLOGIC_ISP2532 +#define PCI_PRODUCT_QLOGIC_ISP2532 0x2532 +#endif + #ifndef PCI_PRODUCT_QLOGIC_ISP6312 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP6322 #define PCI_PRODUCT_QLOGIC_ISP6322 0x6322 #endif #define PCI_QLOGIC_ISP1020 \ ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP1080 \ ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP10160 \ ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP12160 \ ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP1240 \ ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP1280 \ ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2100 \ ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2200 \ ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) #define 
PCI_QLOGIC_ISP2300 \ ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2312 \ ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2322 \ ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2422 \ ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2432 \ ((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC) +#define PCI_QLOGIC_ISP2532 \ + ((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC) + #define PCI_QLOGIC_ISP6312 \ ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP6322 \ ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC) /* * Odd case for some AMI raid cards... We need to *not* attach to this. */ #define AMI_RAID_SUBVENDOR_ID 0x101e #define IO_MAP_REG 0x10 #define MEM_MAP_REG 0x14 #define PCI_DFLT_LTNCY 0x40 #define PCI_DFLT_LNSZ 0x10 static int isp_pci_probe (device_t); static int isp_pci_attach (device_t); static int isp_pci_detach (device_t); #define ISP_PCD(isp) ((struct isp_pcisoftc *)isp)->pci_dev struct isp_pcisoftc { ispsoftc_t pci_isp; device_t pci_dev; struct resource * pci_reg; void * ih; int16_t pci_poff[_NREG_BLKS]; bus_dma_tag_t dmat; -#if __FreeBSD_version > 700025 int msicount; -#endif }; static device_method_t isp_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, isp_pci_probe), DEVMETHOD(device_attach, isp_pci_attach), DEVMETHOD(device_detach, isp_pci_detach), { 0, 0 } }; static driver_t isp_pci_driver = { "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) }; static devclass_t isp_devclass; DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); -#if __FreeBSD_version < 700000 -extern ispfwfunc *isp_get_firmware_p; -#endif static int isp_pci_probe(device_t dev) { - switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { + switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { case PCI_QLOGIC_ISP1020: device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI 
Adapter"); break; case PCI_QLOGIC_ISP1080: device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP1240: device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP1280: device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP10160: device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP12160: if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { return (ENXIO); } device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP2100: device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2200: device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2300: device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2312: device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2322: device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2422: device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2432: device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter"); break; + case PCI_QLOGIC_ISP2532: + device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter"); + break; case PCI_QLOGIC_ISP6312: device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP6322: device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter"); break; default: return (ENXIO); } if (isp_announced == 0 && bootverbose) { printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " "Core Version %d.%d\n", ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); isp_announced++; } /* * XXXX: Here is where we might load the f/w module * XXXX: (or increase a reference count to it). 
*/ return (BUS_PROBE_DEFAULT); } -#if __FreeBSD_version < 500000 static void -isp_get_generic_options(device_t dev, ispsoftc_t *isp) +isp_get_generic_options(device_t dev, ispsoftc_t *isp, int *nvp) { - int bitmap, unit; - - unit = device_get_unit(dev); - if (getenv_int("isp_disable", &bitmap)) { - if (bitmap & (1 << unit)) { - isp->isp_osinfo.disabled = 1; - return; - } - } - if (getenv_int("isp_no_fwload", &bitmap)) { - if (bitmap & (1 << unit)) - isp->isp_confopts |= ISP_CFG_NORELOAD; - } - if (getenv_int("isp_fwload", &bitmap)) { - if (bitmap & (1 << unit)) - isp->isp_confopts &= ~ISP_CFG_NORELOAD; - } - if (getenv_int("isp_no_nvram", &bitmap)) { - if (bitmap & (1 << unit)) - isp->isp_confopts |= ISP_CFG_NONVRAM; - } - if (getenv_int("isp_nvram", &bitmap)) { - if (bitmap & (1 << unit)) - isp->isp_confopts &= ~ISP_CFG_NONVRAM; - } - - bitmap = 0; - (void) getenv_int("isp_debug", &bitmap); - if (bitmap) { - isp->isp_dblev = bitmap; - } else { - isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; - } - if (bootverbose) { - isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; - } - - bitmap = 0; - if (getenv_int("role", &bitmap)) { - isp->isp_role = bitmap; - } else { - isp->isp_role = ISP_DEFAULT_ROLES; - } - -} - -static void -isp_get_pci_options(device_t dev, int *m1, int *m2) -{ - int bitmap; - int unit = device_get_unit(dev); - - *m1 = PCIM_CMD_MEMEN; - *m2 = PCIM_CMD_PORTEN; - if (getenv_int("isp_mem_map", &bitmap)) { - if (bitmap & (1 << unit)) { - *m1 = PCIM_CMD_MEMEN; - *m2 = PCIM_CMD_PORTEN; - } - } - bitmap = 0; - if (getenv_int("isp_io_map", &bitmap)) { - if (bitmap & (1 << unit)) { - *m1 = PCIM_CMD_PORTEN; - *m2 = PCIM_CMD_MEMEN; - } - } -} - -static void -isp_get_specific_options(device_t dev, ispsoftc_t *isp) -{ - uint64_t wwn; - int bitmap; - int unit = device_get_unit(dev); - - - if (IS_SCSI(isp)) { - return; - } - - if (getenv_int("isp_fcduplex", &bitmap)) { - if (bitmap & (1 << unit)) - isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; - } - if 
(getenv_int("isp_no_fcduplex", &bitmap)) { - if (bitmap & (1 << unit)) - isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX; - } - if (getenv_int("isp_nport", &bitmap)) { - if (bitmap & (1 << unit)) - isp->isp_confopts |= ISP_CFG_NPORT; - } - - /* - * Because the resource_*_value functions can neither return - * 64 bit integer values, nor can they be directly coerced - * to interpret the right hand side of the assignment as - * you want them to interpret it, we have to force WWN - * hint replacement to specify WWN strings with a leading - * 'w' (e..g w50000000aaaa0001). Sigh. - */ - if (getenv_quad("isp_portwwn", &wwn)) { - isp->isp_osinfo.default_port_wwn = wwn; - isp->isp_confopts |= ISP_CFG_OWNWWPN; - } - if (isp->isp_osinfo.default_port_wwn == 0) { - isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; - } - - if (getenv_quad("isp_nodewwn", &wwn)) { - isp->isp_osinfo.default_node_wwn = wwn; - isp->isp_confopts |= ISP_CFG_OWNWWNN; - } - if (isp->isp_osinfo.default_node_wwn == 0) { - isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; - } - - bitmap = 0; - (void) getenv_int("isp_fabric_hysteresis", &bitmap); - if (bitmap >= 0 && bitmap < 256) { - isp->isp_osinfo.hysteresis = bitmap; - } else { - isp->isp_osinfo.hysteresis = isp_fabric_hysteresis; - } - - bitmap = 0; - (void) getenv_int("isp_loop_down_limit", &bitmap); - if (bitmap >= 0 && bitmap < 0xffff) { - isp->isp_osinfo.loop_down_limit = bitmap; - } else { - isp->isp_osinfo.loop_down_limit = isp_loop_down_limit; - } - - bitmap = 0; - (void) getenv_int("isp_gone_device_time", &bitmap); - if (bitmap >= 0 && bitmap < 0xffff) { - isp->isp_osinfo.gone_device_time = bitmap; - } else { - isp->isp_osinfo.gone_device_time = isp_gone_device_time; - } -#ifdef ISP_FW_CRASH_DUMP - bitmap = 0; - if (getenv_int("isp_fw_dump_enable", &bitmap)) { - if (bitmap & (1 << unit) { - size_t amt = 0; - if (IS_2200(isp)) { - amt = QLA2200_RISC_IMAGE_DUMP_SIZE; - } else if (IS_23XX(isp)) { - amt = QLA2300_RISC_IMAGE_DUMP_SIZE; - } - 
if (amt) { - FCPARAM(isp)->isp_dump_data = - malloc(amt, M_DEVBUF, M_WAITOK); - memset(FCPARAM(isp)->isp_dump_data, 0, amt); - } else { - device_printf(dev, - "f/w crash dumps not supported for card\n"); - } - } - } -#endif -} -#else -static void -isp_get_generic_options(device_t dev, ispsoftc_t *isp) -{ int tval; /* * Figure out if we're supposed to skip this one. */ tval = 0; - if (resource_int_value(device_get_name(dev), device_get_unit(dev), - "disable", &tval) == 0 && tval) { + if (resource_int_value(device_get_name(dev), device_get_unit(dev), "disable", &tval) == 0 && tval) { device_printf(dev, "disabled at user request\n"); isp->isp_osinfo.disabled = 1; return; } - tval = -1; - if (resource_int_value(device_get_name(dev), device_get_unit(dev), - "role", &tval) == 0 && tval != -1) { - tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET); - isp->isp_role = tval; - device_printf(dev, "setting role to 0x%x\n", isp->isp_role); - } else { -#ifdef ISP_TARGET_MODE - isp->isp_role = ISP_ROLE_TARGET; -#else - isp->isp_role = ISP_DEFAULT_ROLES; -#endif - } - tval = 0; - if (resource_int_value(device_get_name(dev), device_get_unit(dev), - "fwload_disable", &tval) == 0 && tval != 0) { + if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) { isp->isp_confopts |= ISP_CFG_NORELOAD; } tval = 0; - if (resource_int_value(device_get_name(dev), device_get_unit(dev), - "ignore_nvram", &tval) == 0 && tval != 0) { + if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) { isp->isp_confopts |= ISP_CFG_NONVRAM; } - tval = 0; - (void) resource_int_value(device_get_name(dev), device_get_unit(dev), - "debug", &tval); + (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval); if (tval) { isp->isp_dblev = tval; } else { isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; } if (bootverbose) { isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; } + (void) 
resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval); + if (tval > 0 && tval < 127) { + *nvp = tval; + } else { + *nvp = 0; + } + tval = 1; + (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "autoconfig", &tval); + isp_autoconfig = tval; + tval = 7; + (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval); + isp_quickboot_time = tval; + tval = 0; + if (resource_int_value(device_get_name(dev), device_get_unit(dev), "forcemulti", &tval) == 0 && tval != 0) { + isp->isp_osinfo.forcemulti = 1; + } } static void isp_get_pci_options(device_t dev, int *m1, int *m2) { int tval; /* * Which we should try first - memory mapping or i/o mapping? * * We used to try memory first followed by i/o on alpha, otherwise * the reverse, but we should just try memory first all the time now. */ *m1 = PCIM_CMD_MEMEN; *m2 = PCIM_CMD_PORTEN; tval = 0; - if (resource_int_value(device_get_name(dev), device_get_unit(dev), - "prefer_iomap", &tval) == 0 && tval != 0) { + if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_iomap", &tval) == 0 && tval != 0) { *m1 = PCIM_CMD_PORTEN; *m2 = PCIM_CMD_MEMEN; } tval = 0; - if (resource_int_value(device_get_name(dev), device_get_unit(dev), - "prefer_memmap", &tval) == 0 && tval != 0) { + if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_memmap", &tval) == 0 && tval != 0) { *m1 = PCIM_CMD_MEMEN; *m2 = PCIM_CMD_PORTEN; } } static void -isp_get_specific_options(device_t dev, ispsoftc_t *isp) +isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp) { const char *sptr; int tval; - isp->isp_osinfo.default_id = -1; - if (resource_int_value(device_get_name(dev), device_get_unit(dev), - "iid", &tval) == 0) { - isp->isp_osinfo.default_id = tval; - isp->isp_confopts |= ISP_CFG_OWNLOOPID; - } - if (isp->isp_osinfo.default_id == -1) { + if (resource_int_value(device_get_name(dev), device_get_unit(dev), "iid", &tval)) { if 
(IS_FC(isp)) { - isp->isp_osinfo.default_id = 109; + ISP_FC_PC(isp, chan)->default_id = 109 - chan; } else { - isp->isp_osinfo.default_id = 7; + ISP_SPI_PC(isp, chan)->iid = 7; } + } else { + if (IS_FC(isp)) { + ISP_FC_PC(isp, chan)->default_id = tval - chan; + } else { + ISP_SPI_PC(isp, chan)->iid = tval; + } + isp->isp_confopts |= ISP_CFG_OWNLOOPID; } + tval = -1; + if (resource_int_value(device_get_name(dev), device_get_unit(dev), "role", &tval) == 0) { + switch (tval) { + case ISP_ROLE_NONE: + case ISP_ROLE_INITIATOR: + case ISP_ROLE_TARGET: + case ISP_ROLE_INITIATOR|ISP_ROLE_TARGET: + device_printf(dev, "setting role to 0x%x\n", tval); + break; + default: + tval = -1; + break; + } + } + if (tval == -1) { + tval = ISP_DEFAULT_ROLES; + } + if (IS_SCSI(isp)) { + ISP_SPI_PC(isp, chan)->role = tval; return; } + ISP_FC_PC(isp, chan)->role = tval; tval = 0; - if (resource_int_value(device_get_name(dev), device_get_unit(dev), - "fullduplex", &tval) == 0 && tval != 0) { + if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fullduplex", &tval) == 0 && tval != 0) { isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; } -#ifdef ISP_FW_CRASH_DUMP - tval = 0; - if (resource_int_value(device_get_name(dev), device_get_unit(dev), - "fw_dump_enable", &tval) == 0 && tval != 0) { - size_t amt = 0; - if (IS_2200(isp)) { - amt = QLA2200_RISC_IMAGE_DUMP_SIZE; - } else if (IS_23XX(isp)) { - amt = QLA2300_RISC_IMAGE_DUMP_SIZE; - } - if (amt) { - FCPARAM(isp)->isp_dump_data = - malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO); - } else { - device_printf(dev, - "f/w crash dumps not supported for this model\n"); - } - } -#endif sptr = 0; - if (resource_string_value(device_get_name(dev), device_get_unit(dev), - "topology", (const char **) &sptr) == 0 && sptr != 0) { + if (resource_string_value(device_get_name(dev), device_get_unit(dev), "topology", (const char **) &sptr) == 0 && sptr != 0) { if (strcmp(sptr, "lport") == 0) { isp->isp_confopts |= ISP_CFG_LPORT; } else if (strcmp(sptr, 
"nport") == 0) { isp->isp_confopts |= ISP_CFG_NPORT; } else if (strcmp(sptr, "lport-only") == 0) { isp->isp_confopts |= ISP_CFG_LPORT_ONLY; } else if (strcmp(sptr, "nport-only") == 0) { isp->isp_confopts |= ISP_CFG_NPORT_ONLY; } } /* * Because the resource_*_value functions can neither return * 64 bit integer values, nor can they be directly coerced * to interpret the right hand side of the assignment as * you want them to interpret it, we have to force WWN * hint replacement to specify WWN strings with a leading * 'w' (e..g w50000000aaaa0001). Sigh. */ sptr = 0; - tval = resource_string_value(device_get_name(dev), device_get_unit(dev), - "portwwn", (const char **) &sptr); + tval = resource_string_value(device_get_name(dev), device_get_unit(dev), "portwwn", (const char **) &sptr); if (tval == 0 && sptr != 0 && *sptr++ == 'w') { char *eptr = 0; - isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); - if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { + ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16); + if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) { device_printf(dev, "mangled portwwn hint '%s'\n", sptr); - isp->isp_osinfo.default_port_wwn = 0; - } else { - isp->isp_confopts |= ISP_CFG_OWNWWPN; + ISP_FC_PC(isp, chan)->def_wwpn = 0; } } - if (isp->isp_osinfo.default_port_wwn == 0) { - isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; - } sptr = 0; - tval = resource_string_value(device_get_name(dev), device_get_unit(dev), - "nodewwn", (const char **) &sptr); + tval = resource_string_value(device_get_name(dev), device_get_unit(dev), "nodewwn", (const char **) &sptr); if (tval == 0 && sptr != 0 && *sptr++ == 'w') { char *eptr = 0; - isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); - if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { + ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16); + if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) { device_printf(dev, "mangled 
nodewwn hint '%s'\n", sptr); - isp->isp_osinfo.default_node_wwn = 0; - } else { - isp->isp_confopts |= ISP_CFG_OWNWWNN; + ISP_FC_PC(isp, chan)->def_wwnn = 0; } } - if (isp->isp_osinfo.default_node_wwn == 0) { - isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; - } - tval = 0; - (void) resource_int_value(device_get_name(dev), device_get_unit(dev), - "hysteresis", &tval); + (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "hysteresis", &tval); if (tval >= 0 && tval < 256) { - isp->isp_osinfo.hysteresis = tval; + ISP_FC_PC(isp, chan)->hysteresis = tval; } else { - isp->isp_osinfo.hysteresis = isp_fabric_hysteresis; + ISP_FC_PC(isp, chan)->hysteresis = isp_fabric_hysteresis; } tval = -1; - (void) resource_int_value(device_get_name(dev), device_get_unit(dev), - "loop_down_limit", &tval); + (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "loop_down_limit", &tval); if (tval >= 0 && tval < 0xffff) { - isp->isp_osinfo.loop_down_limit = tval; + ISP_FC_PC(isp, chan)->loop_down_limit = tval; } else { - isp->isp_osinfo.loop_down_limit = isp_loop_down_limit; + ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit; } tval = -1; - (void) resource_int_value(device_get_name(dev), device_get_unit(dev), - "gone_device_time", &tval); + (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "gone_device_time", &tval); if (tval >= 0 && tval < 0xffff) { - isp->isp_osinfo.gone_device_time = tval; + ISP_FC_PC(isp, chan)->gone_device_time = tval; } else { - isp->isp_osinfo.gone_device_time = isp_gone_device_time; + ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time; } } -#endif static int isp_pci_attach(device_t dev) { struct resource *regs, *irq; - int rtp, rgd, iqd, m1, m2; - uint32_t data, cmd, linesz, psize, basetype; + int rtp, rgd, iqd, i, m1, m2, locksetup = 0; + int isp_nvports = 0; + uint32_t data, cmd, linesz, did; struct isp_pcisoftc *pcs; ispsoftc_t *isp = NULL; - struct ispmdvec *mdvp; -#if 
__FreeBSD_version >= 500000 - int locksetup = 0; -#endif + size_t psize, xsize; + char fwname[32]; pcs = device_get_softc(dev); if (pcs == NULL) { device_printf(dev, "cannot get softc\n"); return (ENOMEM); } memset(pcs, 0, sizeof (*pcs)); + pcs->pci_dev = dev; isp = &pcs->pci_isp; + isp->isp_dev = dev; + isp->isp_nchan = 1; /* * Get Generic Options */ - isp_get_generic_options(dev, isp); + isp_get_generic_options(dev, isp, &isp_nvports); /* * Check to see if options have us disabled */ if (isp->isp_osinfo.disabled) { /* * But return zero to preserve unit numbering */ return (0); } /* * Get PCI options- which in this case are just mapping preferences. */ isp_get_pci_options(dev, &m1, &m2); linesz = PCI_DFLT_LNSZ; irq = regs = NULL; rgd = rtp = iqd = 0; cmd = pci_read_config(dev, PCIR_COMMAND, 2); if (cmd & m1) { rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); } if (regs == NULL && (cmd & m2)) { rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); } if (regs == NULL) { device_printf(dev, "unable to map any ports\n"); goto bad; } if (bootverbose) { - device_printf(dev, "using %s space register mapping\n", - (rgd == IO_MAP_REG)? "I/O" : "Memory"); + device_printf(dev, "using %s space register mapping\n", (rgd == IO_MAP_REG)? 
"I/O" : "Memory"); } - pcs->pci_dev = dev; - pcs->pci_reg = regs; isp->isp_bus_tag = rman_get_bustag(regs); isp->isp_bus_handle = rman_get_bushandle(regs); + pcs->pci_dev = dev; + pcs->pci_reg = regs; pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; - mdvp = &mdvec; - basetype = ISP_HA_SCSI_UNKNOWN; - psize = sizeof (sdparam); - if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { - mdvp = &mdvec; - basetype = ISP_HA_SCSI_UNKNOWN; - psize = sizeof (sdparam); - } - if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { - mdvp = &mdvec_1080; - basetype = ISP_HA_SCSI_1080; - psize = sizeof (sdparam); - pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = - ISP1080_DMA_REGS_OFF; - } - if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { - mdvp = &mdvec_1080; - basetype = ISP_HA_SCSI_1240; - psize = 2 * sizeof (sdparam); - pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = - ISP1080_DMA_REGS_OFF; - } - if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { - mdvp = &mdvec_1080; - basetype = ISP_HA_SCSI_1280; - psize = 2 * sizeof (sdparam); - pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = - ISP1080_DMA_REGS_OFF; - } - if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) { - mdvp = &mdvec_12160; - basetype = ISP_HA_SCSI_10160; - psize = sizeof (sdparam); - pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = - ISP1080_DMA_REGS_OFF; - } - if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { - mdvp = &mdvec_12160; - basetype = ISP_HA_SCSI_12160; - psize = 2 * sizeof (sdparam); - pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = - ISP1080_DMA_REGS_OFF; - } - if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { - mdvp = &mdvec_2100; - basetype = ISP_HA_FC_2100; - psize = sizeof (fcparam); - pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = - PCI_MBOX_REGS2100_OFF; + + switch (pci_get_devid(dev)) { + 
case PCI_QLOGIC_ISP1020: + did = 0x1040; + isp->isp_mdvec = &mdvec; + isp->isp_type = ISP_HA_SCSI_UNKNOWN; + break; + case PCI_QLOGIC_ISP1080: + did = 0x1080; + isp->isp_mdvec = &mdvec_1080; + isp->isp_type = ISP_HA_SCSI_1080; + pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; + break; + case PCI_QLOGIC_ISP1240: + did = 0x1080; + isp->isp_mdvec = &mdvec_1080; + isp->isp_type = ISP_HA_SCSI_1240; + isp->isp_nchan = 2; + pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; + break; + case PCI_QLOGIC_ISP1280: + did = 0x1080; + isp->isp_mdvec = &mdvec_1080; + isp->isp_type = ISP_HA_SCSI_1280; + pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; + break; + case PCI_QLOGIC_ISP10160: + did = 0x12160; + isp->isp_mdvec = &mdvec_12160; + isp->isp_type = ISP_HA_SCSI_10160; + pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; + break; + case PCI_QLOGIC_ISP12160: + did = 0x12160; + isp->isp_nchan = 2; + isp->isp_mdvec = &mdvec_12160; + isp->isp_type = ISP_HA_SCSI_12160; + pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; + break; + case PCI_QLOGIC_ISP2100: + did = 0x2100; + isp->isp_mdvec = &mdvec_2100; + isp->isp_type = ISP_HA_FC_2100; + pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF; if (pci_get_revid(dev) < 3) { /* * XXX: Need to get the actual revision * XXX: number of the 2100 FB. At any rate, * XXX: lower cache line size for early revision * XXX; boards. 
*/ linesz = 1; } + break; + case PCI_QLOGIC_ISP2200: + did = 0x2200; + isp->isp_mdvec = &mdvec_2200; + isp->isp_type = ISP_HA_FC_2200; + pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF; + break; + case PCI_QLOGIC_ISP2300: + did = 0x2300; + isp->isp_mdvec = &mdvec_2300; + isp->isp_type = ISP_HA_FC_2300; + pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF; + break; + case PCI_QLOGIC_ISP2312: + case PCI_QLOGIC_ISP6312: + did = 0x2300; + isp->isp_mdvec = &mdvec_2300; + isp->isp_type = ISP_HA_FC_2312; + pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF; + break; + case PCI_QLOGIC_ISP2322: + case PCI_QLOGIC_ISP6322: + did = 0x2322; + isp->isp_mdvec = &mdvec_2300; + isp->isp_type = ISP_HA_FC_2322; + pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF; + break; + case PCI_QLOGIC_ISP2422: + case PCI_QLOGIC_ISP2432: + did = 0x2400; + isp->isp_nchan += isp_nvports; + isp->isp_mdvec = &mdvec_2400; + isp->isp_type = ISP_HA_FC_2400; + pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF; + break; + case PCI_QLOGIC_ISP2532: + did = 0x2500; + isp->isp_nchan += isp_nvports; + isp->isp_mdvec = &mdvec_2500; + isp->isp_type = ISP_HA_FC_2500; + pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF; + break; + default: + device_printf(dev, "unknown device type\n"); + goto bad; + break; } - if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { - mdvp = &mdvec_2200; - basetype = ISP_HA_FC_2200; + isp->isp_revision = pci_get_revid(dev); + + if (IS_FC(isp)) { psize = sizeof (fcparam); - pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = - PCI_MBOX_REGS2100_OFF; + xsize = sizeof (struct isp_fc); + } else { + psize = sizeof (sdparam); + xsize = sizeof (struct isp_spi); } - if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { - mdvp = &mdvec_2300; - basetype = ISP_HA_FC_2300; - psize = sizeof (fcparam); - pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = - PCI_MBOX_REGS2300_OFF; - } - if (pci_get_devid(dev) == 
PCI_QLOGIC_ISP2312 || - pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { - mdvp = &mdvec_2300; - basetype = ISP_HA_FC_2312; - psize = sizeof (fcparam); - pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = - PCI_MBOX_REGS2300_OFF; - } - if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 || - pci_get_devid(dev) == PCI_QLOGIC_ISP6322) { - mdvp = &mdvec_2300; - basetype = ISP_HA_FC_2322; - psize = sizeof (fcparam); - pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = - PCI_MBOX_REGS2300_OFF; - } - if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422 || - pci_get_devid(dev) == PCI_QLOGIC_ISP2432) { - mdvp = &mdvec_2400; - basetype = ISP_HA_FC_2400; - psize = sizeof (fcparam); - pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = - PCI_MBOX_REGS2400_OFF; - } - isp = &pcs->pci_isp; + psize *= isp->isp_nchan; + xsize *= isp->isp_nchan; isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); if (isp->isp_param == NULL) { device_printf(dev, "cannot allocate parameter data\n"); goto bad; } - isp->isp_mdvec = mdvp; - isp->isp_type = basetype; - isp->isp_revision = pci_get_revid(dev); - isp->isp_dev = dev; + isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO); + if (isp->isp_osinfo.pc.ptr == NULL) { + device_printf(dev, "cannot allocate parameter data\n"); + goto bad; + } /* * Now that we know who we are (roughly) get/set specific options */ - isp_get_specific_options(dev, isp); + for (i = 0; i < isp->isp_nchan; i++) { + isp_get_specific_options(dev, i, isp); + } -#if __FreeBSD_version >= 700000 /* - * Try and find firmware for this device. + * The 'it' suffix really only matters for SCSI cards in target mode. 
*/ - { - char fwname[32]; - unsigned int did = pci_get_device(dev); - - /* - * Map a few pci ids to fw names - */ - switch (did) { - case PCI_PRODUCT_QLOGIC_ISP1020: - did = 0x1040; - break; - case PCI_PRODUCT_QLOGIC_ISP1240: - did = 0x1080; - break; - case PCI_PRODUCT_QLOGIC_ISP10160: - case PCI_PRODUCT_QLOGIC_ISP12160: - did = 0x12160; - break; - case PCI_PRODUCT_QLOGIC_ISP6312: - case PCI_PRODUCT_QLOGIC_ISP2312: - did = 0x2300; - break; - case PCI_PRODUCT_QLOGIC_ISP6322: - did = 0x2322; - break; - case PCI_PRODUCT_QLOGIC_ISP2422: - case PCI_PRODUCT_QLOGIC_ISP2432: - did = 0x2400; - break; - default: - break; - } - - isp->isp_osinfo.fw = NULL; - if (isp->isp_role & ISP_ROLE_TARGET) { - snprintf(fwname, sizeof (fwname), "isp_%04x_it", did); - isp->isp_osinfo.fw = firmware_get(fwname); - } - if (isp->isp_osinfo.fw == NULL) { - snprintf(fwname, sizeof (fwname), "isp_%04x", did); - isp->isp_osinfo.fw = firmware_get(fwname); - } - if (isp->isp_osinfo.fw != NULL) { - isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data; - } + isp->isp_osinfo.fw = NULL; + if (IS_SCSI(isp) && (ISP_SPI_PC(isp, 0)->role & ISP_ROLE_TARGET)) { + snprintf(fwname, sizeof (fwname), "isp_%04x_it", did); + isp->isp_osinfo.fw = firmware_get(fwname); + } else if (IS_24XX(isp) && (isp->isp_nchan > 1 || isp->isp_osinfo.forcemulti)) { + snprintf(fwname, sizeof (fwname), "isp_%04x_multi", did); + isp->isp_osinfo.fw = firmware_get(fwname); } -#else - if (isp_get_firmware_p) { - int device = (int) pci_get_device(dev); -#ifdef ISP_TARGET_MODE - (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); -#else - (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); -#endif + if (isp->isp_osinfo.fw == NULL) { + snprintf(fwname, sizeof (fwname), "isp_%04x", did); + isp->isp_osinfo.fw = firmware_get(fwname); } -#endif + if (isp->isp_osinfo.fw != NULL) { + isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data; + } /* * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER * are set. 
*/ cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; if (IS_2300(isp)) { /* per QLogic errata */ cmd &= ~PCIM_CMD_INVEN; } if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { cmd &= ~PCIM_CMD_INTX_DISABLE; } -#ifdef WE_KNEW_WHAT_WE_WERE_DOING if (IS_24XX(isp)) { - int reg; - cmd &= ~PCIM_CMD_INTX_DISABLE; - - /* - * Is this a PCI-X card? If so, set max read byte count. - */ - if (pci_find_extcap(dev, PCIY_PCIX, ®) == 0) { - uint16_t pxcmd; - reg += 2; - - pxcmd = pci_read_config(dev, reg, 2); - pxcmd &= ~0xc; - pxcmd |= 0x8; - pci_write_config(dev, reg, 2, pxcmd); - } - - /* - * Is this a PCI Express card? If so, set max read byte count. - */ - if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { - uint16_t pectl; - - reg += 0x8; - pectl = pci_read_config(dev, reg, 2); - pectl &= ~0x7000; - pectl |= 0x4000; - pci_write_config(dev, reg, 2, pectl); - } } -#else - if (IS_24XX(isp)) { - cmd &= ~PCIM_CMD_INTX_DISABLE; - } -#endif pci_write_config(dev, PCIR_COMMAND, cmd, 2); /* * Make sure the Cache Line Size register is set sensibly. */ data = pci_read_config(dev, PCIR_CACHELNSZ, 1); if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) { - isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d from %d", - linesz, data); + isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d from %d", linesz, data); data = linesz; pci_write_config(dev, PCIR_CACHELNSZ, data, 1); } /* * Make sure the Latency Timer is sane. */ data = pci_read_config(dev, PCIR_LATTIMER, 1); if (data < PCI_DFLT_LTNCY) { data = PCI_DFLT_LTNCY; isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); pci_write_config(dev, PCIR_LATTIMER, data, 1); } /* * Make sure we've disabled the ROM. 
*/ data = pci_read_config(dev, PCIR_ROMADDR, 4); data &= ~1; pci_write_config(dev, PCIR_ROMADDR, data, 4); -#if __FreeBSD_version > 700025 + + /* + * Do MSI + * + * NB: MSI-X needs to be disabled for the 2432 (PCI-Express) + */ if (IS_24XX(isp) || IS_2322(isp)) { pcs->msicount = pci_msi_count(dev); if (pcs->msicount > 1) { pcs->msicount = 1; } if (pci_alloc_msi(dev, &pcs->msicount) == 0) { iqd = 1; } else { iqd = 0; } } -#else - iqd = 0; -#endif - irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, - RF_ACTIVE | RF_SHAREABLE); + irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, RF_ACTIVE | RF_SHAREABLE); if (irq == NULL) { device_printf(dev, "could not allocate interrupt\n"); goto bad; } -#if __FreeBSD_version >= 500000 /* Make sure the lock is set up. */ mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); locksetup++; -#endif - if (isp_setup_intr(dev, irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, - &pcs->ih)) { + if (isp_setup_intr(dev, irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, &pcs->ih)) { device_printf(dev, "could not setup interrupt\n"); goto bad; } /* * Last minute checks... */ if (IS_23XX(isp) || IS_24XX(isp)) { isp->isp_port = pci_get_function(dev); } - if (IS_23XX(isp)) { - /* - * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command. - */ - isp->isp_touched = 1; - } - /* * Make sure we're in reset state. 
*/ ISP_LOCK(isp); - isp_reset(isp); + isp_reset(isp, 1); if (isp->isp_state != ISP_RESETSTATE) { ISP_UNLOCK(isp); goto bad; } isp_init(isp); - if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { - isp_uninit(isp); - ISP_UNLOCK(isp); - goto bad; + if (isp->isp_state == ISP_INITSTATE) { + isp->isp_state = ISP_RUNSTATE; } - isp_attach(isp); - if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { + ISP_UNLOCK(isp); + if (isp_attach(isp)) { + ISP_LOCK(isp); isp_uninit(isp); ISP_UNLOCK(isp); goto bad; } - ISP_UNLOCK(isp); return (0); bad: if (pcs && pcs->ih) { (void) bus_teardown_intr(dev, irq, pcs->ih); } -#if __FreeBSD_version >= 500000 if (locksetup && isp) { mtx_destroy(&isp->isp_osinfo.lock); } -#endif if (irq) { (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); } -#if __FreeBSD_version > 700025 if (pcs && pcs->msicount) { pci_release_msi(dev); } -#endif if (regs) { (void) bus_release_resource(dev, rtp, rgd, regs); } if (pcs) { if (pcs->pci_isp.isp_param) { -#ifdef ISP_FW_CRASH_DUMP - if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) { - free(FCPARAM(isp)->isp_dump_data, M_DEVBUF); - } -#endif free(pcs->pci_isp.isp_param, M_DEVBUF); + pcs->pci_isp.isp_param = NULL; } + if (pcs->pci_isp.isp_osinfo.pc.ptr) { + free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF); + pcs->pci_isp.isp_osinfo.pc.ptr = NULL; + } } return (ENXIO); } static int isp_pci_detach(device_t dev) { struct isp_pcisoftc *pcs; ispsoftc_t *isp; pcs = device_get_softc(dev); if (pcs == NULL) { return (ENXIO); } isp = (ispsoftc_t *) pcs; ISP_DISABLE_INTS(isp); + mtx_destroy(&isp->isp_osinfo.lock); return (0); } #define IspVirt2Off(a, x) \ (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ _BLK_REG_SHFT] + ((x) & 0xfff)) #define BXR2(isp, off) \ bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off) #define BXW2(isp, off, v) \ bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, off, v) #define BXR4(isp, off) \ bus_space_read_4(isp->isp_bus_tag, 
isp->isp_bus_handle, off) #define BXW4(isp, off, v) \ bus_space_write_4(isp->isp_bus_tag, isp->isp_bus_handle, off, v) -static __inline int +static ISP_INLINE int isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp) { uint32_t val0, val1; int i = 0; do { val0 = BXR2(isp, IspVirt2Off(isp, off)); val1 = BXR2(isp, IspVirt2Off(isp, off)); } while (val0 != val1 && ++i < 1000); if (val0 != val1) { return (1); } *rp = val0; return (0); } static int isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbp) { uint16_t isr, sema; if (IS_2100(isp)) { if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { return (0); } if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { return (0); } } else { isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR)); sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA)); } isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); isr &= INT_PENDING_MASK(isp); sema &= BIU_SEMA_LOCK; if (isr == 0 && sema == 0) { return (0); } *isrp = isr; if ((*semap = sema) != 0) { if (IS_2100(isp)) { if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { return (0); } } else { *mbp = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0)); } } return (1); } static int isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbox0p) { uint32_t hccr; uint32_t r2hisr; if (!(BXR2(isp, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { *isrp = 0; return (0); } r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO)); isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); if ((r2hisr & BIU_R2HST_INTR) == 0) { *isrp = 0; return (0); } switch (r2hisr & BIU_R2HST_ISTAT_MASK) { case ISPR2HST_ROM_MBX_OK: case ISPR2HST_ROM_MBX_FAIL: case ISPR2HST_MBX_OK: case ISPR2HST_MBX_FAIL: case ISPR2HST_ASYNC_EVENT: *isrp = r2hisr & 0xffff; *mbox0p = (r2hisr >> 16); *semap = 1; return (1); case ISPR2HST_RIO_16: *isrp = r2hisr & 0xffff; *mbox0p = ASYNC_RIO1; *semap = 1; return (1); case ISPR2HST_FPOST: *isrp = r2hisr & 0xffff; *mbox0p = ASYNC_CMD_CMPLT; *semap = 1; return (1); 
case ISPR2HST_FPOST_CTIO: *isrp = r2hisr & 0xffff; *mbox0p = ASYNC_CTIO_DONE; *semap = 1; return (1); case ISPR2HST_RSPQ_UPDATE: *isrp = r2hisr & 0xffff; *mbox0p = 0; *semap = 0; return (1); default: hccr = ISP_READ(isp, HCCR); if (hccr & HCCR_PAUSE) { ISP_WRITE(isp, HCCR, HCCR_RESET); isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR)); ISP_WRITE(isp, BIU_ICR, 0); } else { isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr); } return (0); } } static int isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbox0p) { uint32_t r2hisr; r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO)); isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); if ((r2hisr & BIU2400_R2HST_INTR) == 0) { *isrp = 0; return (0); } switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) { case ISP2400R2HST_ROM_MBX_OK: case ISP2400R2HST_ROM_MBX_FAIL: case ISP2400R2HST_MBX_OK: case ISP2400R2HST_MBX_FAIL: case ISP2400R2HST_ASYNC_EVENT: *isrp = r2hisr & 0xffff; *mbox0p = (r2hisr >> 16); *semap = 1; return (1); case ISP2400R2HST_RSPQ_UPDATE: case ISP2400R2HST_ATIO_RSPQ_UPDATE: case ISP2400R2HST_ATIO_RQST_UPDATE: *isrp = r2hisr & 0xffff; *mbox0p = 0; *semap = 0; return (1); default: ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr); return (0); } } static uint32_t isp_pci_rd_reg(ispsoftc_t *isp, int regoff) { uint16_t rv; int oldconf = 0; if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { /* * We will assume that someone has paused the RISC processor. 
*/ oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); } rv = BXR2(isp, IspVirt2Off(isp, regoff)); if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); } return (rv); } static void isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val) { int oldconf = 0; if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { /* * We will assume that someone has paused the RISC processor. */ oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); } BXW2(isp, IspVirt2Off(isp, regoff), val); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2); if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); } } static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff) { uint32_t rv, oc = 0; if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { uint32_t tc; /* * We will assume that someone has paused the RISC processor. 
*/ oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); tc = oc & ~BIU_PCI1080_CONF1_DMA; if (regoff & SXP_BANK1_SELECT) tc |= BIU_PCI1080_CONF1_SXP1; else tc |= BIU_PCI1080_CONF1_SXP0; BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc | BIU_PCI1080_CONF1_DMA); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); } rv = BXR2(isp, IspVirt2Off(isp, regoff)); if (oc) { BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); } return (rv); } static void isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val) { int oc = 0; if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { uint32_t tc; /* * We will assume that someone has paused the RISC processor. */ oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); tc = oc & ~BIU_PCI1080_CONF1_DMA; if (regoff & SXP_BANK1_SELECT) tc |= BIU_PCI1080_CONF1_SXP1; else tc |= BIU_PCI1080_CONF1_SXP0; BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc | BIU_PCI1080_CONF1_DMA); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); } BXW2(isp, IspVirt2Off(isp, regoff), val); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2); if (oc) { BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); } } static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff) { uint32_t rv; int block = regoff & _BLK_REG_MASK; switch (block) { case BIU_BLOCK: break; case MBOX_BLOCK: return (BXR2(isp, IspVirt2Off(isp, regoff))); case SXP_BLOCK: isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff); return 
(0xffffffff); case RISC_BLOCK: isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff); return (0xffffffff); case DMA_BLOCK: isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff); return (0xffffffff); default: isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff); return (0xffffffff); } switch (regoff) { case BIU2400_FLASH_ADDR: case BIU2400_FLASH_DATA: case BIU2400_ICR: case BIU2400_ISR: case BIU2400_CSR: case BIU2400_REQINP: case BIU2400_REQOUTP: case BIU2400_RSPINP: case BIU2400_RSPOUTP: - case BIU2400_PRI_RQINP: - case BIU2400_PRI_RSPINP: + case BIU2400_PRI_REQINP: + case BIU2400_PRI_REQOUTP: case BIU2400_ATIO_RSPINP: - case BIU2400_ATIO_REQINP: + case BIU2400_ATIO_RSPOUTP: case BIU2400_HCCR: case BIU2400_GPIOD: case BIU2400_GPIOE: case BIU2400_HSEMA: rv = BXR4(isp, IspVirt2Off(isp, regoff)); break; case BIU2400_R2HSTSLO: rv = BXR4(isp, IspVirt2Off(isp, regoff)); break; case BIU2400_R2HSTSHI: rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16; break; default: isp_prt(isp, ISP_LOGERR, "isp_pci_rd_reg_2400: unknown offset %x", regoff); rv = 0xffffffff; break; } return (rv); } static void isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val) { int block = regoff & _BLK_REG_MASK; switch (block) { case BIU_BLOCK: break; case MBOX_BLOCK: BXW2(isp, IspVirt2Off(isp, regoff), val); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2); return; case SXP_BLOCK: isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff); return; case RISC_BLOCK: isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff); return; case DMA_BLOCK: isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff); return; default: isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x", regoff); break; } switch (regoff) { case BIU2400_FLASH_ADDR: case BIU2400_FLASH_DATA: case BIU2400_ICR: case BIU2400_ISR: case BIU2400_CSR: case BIU2400_REQINP: case BIU2400_REQOUTP: case BIU2400_RSPINP: case BIU2400_RSPOUTP: - case BIU2400_PRI_RQINP: - case BIU2400_PRI_RSPINP: + 
case BIU2400_PRI_REQINP: + case BIU2400_PRI_REQOUTP: case BIU2400_ATIO_RSPINP: - case BIU2400_ATIO_REQINP: + case BIU2400_ATIO_RSPOUTP: case BIU2400_HCCR: case BIU2400_GPIOD: case BIU2400_GPIOE: case BIU2400_HSEMA: BXW4(isp, IspVirt2Off(isp, regoff), val); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4); break; default: isp_prt(isp, ISP_LOGERR, "isp_pci_wr_reg_2400: bad offset 0x%x", regoff); break; } } struct imush { ispsoftc_t *isp; + caddr_t vbase; + int chan; int error; }; static void imc(void *, bus_dma_segment_t *, int, int); +static void imc1(void *, bus_dma_segment_t *, int, int); static void imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct imush *imushp = (struct imush *) arg; + if (error) { imushp->error = error; - } else { - ispsoftc_t *isp =imushp->isp; - bus_addr_t addr = segs->ds_addr; + return; + } + if (nseg != 1) { + imushp->error = EINVAL; + return; + } + imushp->isp->isp_rquest = imushp->vbase; + imushp->isp->isp_rquest_dma = segs->ds_addr; + segs->ds_addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp)); + imushp->vbase += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp)); + imushp->isp->isp_result_dma = segs->ds_addr; + imushp->isp->isp_result = imushp->vbase; - isp->isp_rquest_dma = addr; - addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); - isp->isp_result_dma = addr; - if (IS_FC(isp)) { - addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); - FCPARAM(isp)->isp_scdma = addr; - } +#ifdef ISP_TARGET_MODE + if (IS_24XX(imushp->isp)) { + segs->ds_addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp)); + imushp->vbase += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp)); + imushp->isp->isp_atioq_dma = segs->ds_addr; + imushp->isp->isp_atioq = imushp->vbase; } +#endif } +static void +imc1(void *arg, bus_dma_segment_t *segs, int nseg, int error) +{ + struct imush *imushp = (struct imush *) arg; + if (error) { + imushp->error = error; + return; + } + if (nseg != 1) { + imushp->error = EINVAL; + return; + } + FCPARAM(imushp->isp, 
imushp->chan)->isp_scdma = segs->ds_addr; + FCPARAM(imushp->isp, imushp->chan)->isp_scratch = imushp->vbase; +} + static int isp_pci_mbxdma(ispsoftc_t *isp) { caddr_t base; uint32_t len; - int i, error, ns; + int i, error, ns, cmap = 0; bus_size_t slim; /* segment size */ bus_addr_t llim; /* low limit of unavailable dma */ bus_addr_t hlim; /* high limit of unavailable dma */ struct imush im; /* * Already been here? If so, leave... */ if (isp->isp_rquest) { return (0); } ISP_UNLOCK(isp); if (isp->isp_maxcmds == 0) { isp_prt(isp, ISP_LOGERR, "maxcmds not set"); ISP_LOCK(isp); return (1); } hlim = BUS_SPACE_MAXADDR; if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { if (sizeof (bus_size_t) > 4) { slim = (bus_size_t) (1ULL << 32); } else { slim = (bus_size_t) (1UL << 31); } llim = BUS_SPACE_MAXADDR; } else { llim = BUS_SPACE_MAXADDR_32BIT; slim = (1UL << 24); } len = isp->isp_maxcmds * sizeof (struct isp_pcmd); - isp->isp_osinfo.pcmd_pool = - (struct isp_pcmd *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); + isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); if (isp->isp_osinfo.pcmd_pool == NULL) { isp_prt(isp, ISP_LOGERR, "cannot allocate pcmds"); ISP_LOCK(isp); return (1); } /* * XXX: We don't really support 64 bit target mode for parallel scsi yet */ #ifdef ISP_TARGET_MODE if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) { free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); - ISP_LOCK(isp); isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet"); + ISP_LOCK(isp); return (1); } #endif - if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1, - slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, - slim, 0, &isp->isp_osinfo.dmat)) { + if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1, slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &isp->isp_osinfo.dmat)) { free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); ISP_LOCK(isp); isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); return (1); } - len = sizeof 
(XS_T **) * isp->isp_maxcmds; isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); if (isp->isp_xflist == NULL) { free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); ISP_LOCK(isp); isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); return (1); } #ifdef ISP_TARGET_MODE len = sizeof (void **) * isp->isp_maxcmds; isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); if (isp->isp_tgtlist == NULL) { free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); free(isp->isp_xflist, M_DEVBUF); ISP_LOCK(isp); isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array"); return (1); } #endif /* - * Allocate and map the request, result queues, plus FC scratch area. + * Allocate and map the request and result queues (and ATIO queue + * if we're a 2400 supporting target mode). */ len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); - if (IS_FC(isp)) { - len += ISP2100_SCRLEN; +#ifdef ISP_TARGET_MODE + if (IS_24XX(isp)) { + len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); } +#endif ns = (len / PAGE_SIZE) + 1; + /* - * Create a tag for the control spaces- force it to within 32 bits. + * Create a tag for the control spaces. We don't always need this + * to be 32 bits, but we do this for simplicity and speed's sake. 
*/ - if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, - BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, - NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) { - isp_prt(isp, ISP_LOGERR, - "cannot create a dma tag for control spaces"); + if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, len, ns, slim, 0, &isp->isp_osinfo.cdmat)) { + isp_prt(isp, ISP_LOGERR, "cannot create a dma tag for control spaces"); free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); free(isp->isp_xflist, M_DEVBUF); #ifdef ISP_TARGET_MODE free(isp->isp_tgtlist, M_DEVBUF); #endif ISP_LOCK(isp); return (1); } - if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, - &isp->isp_cdmap) != 0) { - isp_prt(isp, ISP_LOGERR, - "cannot allocate %d bytes of CCB memory", len); - bus_dma_tag_destroy(isp->isp_cdmat); + if (bus_dmamem_alloc(isp->isp_osinfo.cdmat, (void **)&base, BUS_DMA_NOWAIT, &isp->isp_osinfo.cdmap) != 0) { + isp_prt(isp, ISP_LOGERR, "cannot allocate %d bytes of CCB memory", len); + bus_dma_tag_destroy(isp->isp_osinfo.cdmat); free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); free(isp->isp_xflist, M_DEVBUF); #ifdef ISP_TARGET_MODE free(isp->isp_tgtlist, M_DEVBUF); #endif ISP_LOCK(isp); return (1); } + im.isp = isp; + im.chan = 0; + im.vbase = base; + im.error = 0; + + bus_dmamap_load(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap, base, len, imc, &im, 0); + if (im.error) { + isp_prt(isp, ISP_LOGERR, "error %d loading dma map for control areas", im.error); + goto bad; + } + + if (IS_FC(isp)) { + for (cmap = 0; cmap < isp->isp_nchan; cmap++) { + struct isp_fc *fc = ISP_FC_PC(isp, cmap); + if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, ISP_FC_SCRLEN, 1, slim, 0, &fc->tdmat)) { + goto bad; + } + if (bus_dmamem_alloc(fc->tdmat, (void **)&base, BUS_DMA_NOWAIT, &fc->tdmap) != 0) { + bus_dma_tag_destroy(fc->tdmat); + goto bad; + } + im.isp = isp; + im.chan = cmap; + 
im.vbase = base; + im.error = 0; + bus_dmamap_load(fc->tdmat, fc->tdmap, base, ISP_FC_SCRLEN, imc1, &im, 0); + if (im.error) { + bus_dmamem_free(fc->tdmat, base, fc->tdmap); + bus_dma_tag_destroy(fc->tdmat); + goto bad; + } + } + } + for (i = 0; i < isp->isp_maxcmds; i++) { struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i]; error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap); if (error) { - isp_prt(isp, ISP_LOGERR, - "error %d creating per-cmd DMA maps", error); + isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error); while (--i >= 0) { - bus_dmamap_destroy(isp->isp_osinfo.dmat, - isp->isp_osinfo.pcmd_pool[i].dmap); + bus_dmamap_destroy(isp->isp_osinfo.dmat, isp->isp_osinfo.pcmd_pool[i].dmap); } goto bad; } - isp_callout_init(&pcmd->wdog); + callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0); if (i == isp->isp_maxcmds-1) { pcmd->next = NULL; } else { pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1]; } } isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0]; - - im.isp = isp; - im.error = 0; - bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); - if (im.error) { - isp_prt(isp, ISP_LOGERR, - "error %d loading dma map for control areas", im.error); - goto bad; - } - - isp->isp_rquest = base; - base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); - isp->isp_result = base; - if (IS_FC(isp)) { - base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); - FCPARAM(isp)->isp_scratch = base; - } ISP_LOCK(isp); return (0); bad: - bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); - bus_dma_tag_destroy(isp->isp_cdmat); + while (--cmap >= 0) { + struct isp_fc *fc = ISP_FC_PC(isp, cmap); + bus_dmamem_free(fc->tdmat, base, fc->tdmap); + bus_dma_tag_destroy(fc->tdmat); + } + bus_dmamem_free(isp->isp_osinfo.cdmat, base, isp->isp_osinfo.cdmap); + bus_dma_tag_destroy(isp->isp_osinfo.cdmat); free(isp->isp_xflist, M_DEVBUF); #ifdef ISP_TARGET_MODE free(isp->isp_tgtlist, M_DEVBUF); #endif free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); 
isp->isp_rquest = NULL; ISP_LOCK(isp); return (1); } typedef struct { ispsoftc_t *isp; void *cmd_token; - void *rq; - uint32_t *nxtip; - uint32_t optr; + void *rq; /* original request */ int error; + bus_size_t mapsize; } mush_t; #define MUSHERR_NOQENTRIES -2 #ifdef ISP_TARGET_MODE -/* - * We need to handle DMA for target mode differently from initiator mode. - * - * DMA mapping and construction and submission of CTIO Request Entries - * and rendevous for completion are very tightly coupled because we start - * out by knowing (per platform) how much data we have to move, but we - * don't know, up front, how many DMA mapping segments will have to be used - * cover that data, so we don't know how many CTIO Request Entries we - * will end up using. Further, for performance reasons we may want to - * (on the last CTIO for Fibre Channel), send status too (if all went well). - * - * The standard vector still goes through isp_pci_dmasetup, but the callback - * for the DMA mapping routines comes here instead with the whole transfer - * mapped and a pointer to a partially filled in already allocated request - * queue entry. We finish the job. 
- */ -static void tdma_mk(void *, bus_dma_segment_t *, int, int); -static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); +static void tdma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int); +static void tdma2(void *, bus_dma_segment_t *, int, int); -#define STATUS_WITH_DATA 1 +static void +tdma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error) +{ + mush_t *mp; + mp = (mush_t *)arg; + mp->mapsize = mapsize; + tdma2(arg, dm_segs, nseg, error); +} static void -tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) +tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { mush_t *mp; - struct ccb_scsiio *csio; ispsoftc_t *isp; - ct_entry_t *cto, *qe; - uint8_t scsi_status; - uint32_t curi, nxti, handle; - uint32_t sflags; - int32_t resid; - int nth_ctio, nctios, send_status; + struct ccb_scsiio *csio; + isp_ddir_t ddir; + ispreq_t *rq; mp = (mush_t *) arg; if (error) { mp->error = error; return; } - - isp = mp->isp; csio = mp->cmd_token; - cto = mp->rq; - curi = isp->isp_reqidx; - qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); - - cto->ct_xfrlen = 0; - cto->ct_seg_count = 0; - cto->ct_header.rqs_entry_count = 1; - MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); - - if (nseg == 0) { - cto->ct_header.rqs_seqno = 1; - isp_prt(isp, ISP_LOGTDEBUG1, - "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", - cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, - cto->ct_tag_val, cto->ct_flags, cto->ct_status, - cto->ct_scsi_status, cto->ct_resid); - ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); - isp_put_ctio(isp, cto, qe); - return; - } - - nctios = nseg / ISP_RQDSEG; - if (nseg % ISP_RQDSEG) { - nctios++; - } - - /* - * Save syshandle, and potentially any SCSI status, which we'll - * reinsert on the last CTIO we're going to send. 
- */ - - handle = cto->ct_syshandle; - cto->ct_syshandle = 0; - cto->ct_header.rqs_seqno = 0; - send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; - - if (send_status) { - sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); - cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); - /* - * Preserve residual. - */ - resid = cto->ct_resid; - - /* - * Save actual SCSI status. - */ - scsi_status = cto->ct_scsi_status; - -#ifndef STATUS_WITH_DATA - sflags |= CT_NO_DATA; - /* - * We can't do a status at the same time as a data CTIO, so - * we need to synthesize an extra CTIO at this level. - */ - nctios++; -#endif - } else { - sflags = scsi_status = resid = 0; - } - - cto->ct_resid = 0; - cto->ct_scsi_status = 0; - - if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { - bus_dmamap_sync(isp->isp_osinfo.dmat, - PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); - } else { - bus_dmamap_sync(isp->isp_osinfo.dmat, - PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); - } - - nxti = *mp->nxtip; - - for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { - int seglim; - - seglim = nseg; - if (seglim) { - int seg; - - if (seglim > ISP_RQDSEG) - seglim = ISP_RQDSEG; - - for (seg = 0; seg < seglim; seg++, nseg--) { - /* - * Unlike normal initiator commands, we don't - * do any swizzling here. - */ - cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; - cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; - cto->ct_xfrlen += dm_segs->ds_len; - dm_segs++; - } - cto->ct_seg_count = seg; - } else { - /* - * This case should only happen when we're sending an - * extra CTIO with final status. 
- */ - if (send_status == 0) { - isp_prt(isp, ISP_LOGWARN, - "tdma_mk ran out of segments"); - mp->error = EINVAL; + isp = mp->isp; + rq = mp->rq; + if (nseg) { + if (sizeof (bus_addr_t) > 4) { + if (nseg >= ISP_NSEG64_MAX) { + isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX); + mp->error = EFAULT; return; } - } - - /* - * At this point, the fields ct_lun, ct_iid, ct_tagval, - * ct_tagtype, and ct_timeout have been carried over - * unchanged from what our caller had set. - * - * The dataseg fields and the seg_count fields we just got - * through setting. The data direction we've preserved all - * along and only clear it if we're now sending status. - */ - - if (nth_ctio == nctios - 1) { - /* - * We're the last in a sequence of CTIOs, so mark - * this CTIO and save the handle to the CCB such that - * when this CTIO completes we can free dma resources - * and do whatever else we need to do to finish the - * rest of the command. We *don't* give this to the - * firmware to work on- the caller will do that. 
- */ - - cto->ct_syshandle = handle; - cto->ct_header.rqs_seqno = 1; - - if (send_status) { - cto->ct_scsi_status = scsi_status; - cto->ct_flags |= sflags; - cto->ct_resid = resid; + if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) { + rq->req_header.rqs_entry_type = RQSTYPE_CTIO3; } - if (send_status) { - isp_prt(isp, ISP_LOGTDEBUG1, - "CTIO[%x] lun%d iid %d tag %x ct_flags %x " - "scsi status %x resid %d", - cto->ct_fwhandle, csio->ccb_h.target_lun, - cto->ct_iid, cto->ct_tag_val, cto->ct_flags, - cto->ct_scsi_status, cto->ct_resid); - } else { - isp_prt(isp, ISP_LOGTDEBUG1, - "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", - cto->ct_fwhandle, csio->ccb_h.target_lun, - cto->ct_iid, cto->ct_tag_val, - cto->ct_flags); - } - isp_put_ctio(isp, cto, qe); - ISP_TDQE(isp, "last tdma_mk", curi, cto); - if (nctios > 1) { - MEMORYBARRIER(isp, SYNC_REQUEST, - curi, QENTRY_LEN); - } } else { - ct_entry_t *oqe = qe; - - /* - * Make sure syshandle fields are clean - */ - cto->ct_syshandle = 0; - cto->ct_header.rqs_seqno = 0; - - isp_prt(isp, ISP_LOGTDEBUG1, - "CTIO[%x] lun%d for ID%d ct_flags 0x%x", - cto->ct_fwhandle, csio->ccb_h.target_lun, - cto->ct_iid, cto->ct_flags); - - /* - * Get a new CTIO - */ - qe = (ct_entry_t *) - ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); - nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp)); - if (nxti == mp->optr) { - isp_prt(isp, ISP_LOGTDEBUG0, - "Queue Overflow in tdma_mk"); - mp->error = MUSHERR_NOQENTRIES; + if (nseg >= ISP_NSEG_MAX) { + isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX); + mp->error = EFAULT; return; } - - /* - * Now that we're done with the old CTIO, - * flush it out to the request queue. 
- */ - ISP_TDQE(isp, "dma_tgt_fc", curi, cto); - isp_put_ctio(isp, cto, oqe); - if (nth_ctio != 0) { - MEMORYBARRIER(isp, SYNC_REQUEST, curi, - QENTRY_LEN); - } - curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); - - /* - * Reset some fields in the CTIO so we can reuse - * for the next one we'll flush to the request - * queue. - */ - cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; - cto->ct_header.rqs_entry_count = 1; - cto->ct_header.rqs_flags = 0; - cto->ct_status = 0; - cto->ct_scsi_status = 0; - cto->ct_xfrlen = 0; - cto->ct_resid = 0; - cto->ct_seg_count = 0; - MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); } - } - *mp->nxtip = nxti; -} - -/* - * We don't have to do multiple CTIOs here. Instead, we can just do - * continuation segments as needed. This greatly simplifies the code - * improves performance. - */ - -static void -tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) -{ - mush_t *mp; - struct ccb_scsiio *csio; - ispsoftc_t *isp; - ct2_entry_t *cto, *qe; - uint32_t curi, nxti; - ispds_t *ds; - ispds64_t *ds64; - int segcnt, seglim; - - mp = (mush_t *) arg; - if (error) { - mp->error = error; - return; - } - - isp = mp->isp; - csio = mp->cmd_token; - cto = mp->rq; - - curi = isp->isp_reqidx; - qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); - - if (nseg == 0) { - if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { - isp_prt(isp, ISP_LOGWARN, - "dma2_tgt_fc, a status CTIO2 without MODE1 " - "set (0x%x)", cto->ct_flags); - mp->error = EINVAL; - return; - } - /* - * We preserve ct_lun, ct_iid, ct_rxid. We set the data - * flags to NO DATA and clear relative offset flags. - * We preserve the ct_resid and the response area. 
- */ - cto->ct_header.rqs_seqno = 1; - cto->ct_seg_count = 0; - cto->ct_reloff = 0; - isp_prt(isp, ISP_LOGTDEBUG1, - "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " - "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, - cto->ct_iid, cto->ct_flags, cto->ct_status, - cto->rsp.m1.ct_scsi_status, cto->ct_resid); - if (FCPARAM(isp)->isp_2klogin) { - isp_put_ctio2e(isp, - (ct2e_entry_t *)cto, (ct2e_entry_t *)qe); + if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { + bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); + ddir = ISP_TO_DEVICE; + } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { + bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); + ddir = ISP_FROM_DEVICE; } else { - isp_put_ctio2(isp, cto, qe); + ddir = ISP_NOXFR; } - ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe); - return; - } - - if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { - isp_prt(isp, ISP_LOGERR, - "dma2_tgt_fc, a data CTIO2 without MODE0 set " - "(0x%x)", cto->ct_flags); - mp->error = EINVAL; - return; - } - - - nxti = *mp->nxtip; - - /* - * Check to see if we need to DAC addressing or not. - * - * Any address that's over the 4GB boundary causes this - * to happen. - */ - segcnt = nseg; - if (sizeof (bus_addr_t) > 4) { - for (segcnt = 0; segcnt < nseg; segcnt++) { - uint64_t addr = dm_segs[segcnt].ds_addr; - if (addr >= 0x100000000LL) { - break; - } - } - } - if (segcnt != nseg) { - cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3; - seglim = ISP_RQDSEG_T3; - ds64 = &cto->rsp.m0.u.ct_dataseg64[0]; - ds = NULL; } else { - seglim = ISP_RQDSEG_T2; - ds64 = NULL; - ds = &cto->rsp.m0.u.ct_dataseg[0]; + dm_segs = NULL; + nseg = 0; + ddir = ISP_NOXFR; } - cto->ct_seg_count = 0; - /* - * Set up the CTIO2 data segments. 
- */ - for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg; - cto->ct_seg_count++, segcnt++) { - if (ds64) { - ds64->ds_basehi = - ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32); - ds64->ds_base = dm_segs[segcnt].ds_addr; - ds64->ds_count = dm_segs[segcnt].ds_len; - ds64++; - } else { - ds->ds_base = dm_segs[segcnt].ds_addr; - ds->ds_count = dm_segs[segcnt].ds_len; - ds++; - } - cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; -#if __FreeBSD_version < 500000 - isp_prt(isp, ISP_LOGTDEBUG1, - "isp_send_ctio2: ent0[%d]0x%llx:%llu", - cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr, - (uint64_t)dm_segs[segcnt].ds_len); -#else - isp_prt(isp, ISP_LOGTDEBUG1, - "isp_send_ctio2: ent0[%d]0x%jx:%ju", - cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr, - (uintmax_t)dm_segs[segcnt].ds_len); -#endif + if (isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len) != CMD_QUEUED) { + mp->error = MUSHERR_NOQENTRIES; } - - while (segcnt < nseg) { - uint32_t curip; - int seg; - ispcontreq_t local, *crq = &local, *qep; - - qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); - curip = nxti; - nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp)); - if (nxti == mp->optr) { - isp_prt(isp, ISP_LOGTDEBUG0, - "tdma_mkfc: request queue overflow"); - mp->error = MUSHERR_NOQENTRIES; - return; - } - cto->ct_header.rqs_entry_count++; - MEMZERO((void *)crq, sizeof (*crq)); - crq->req_header.rqs_entry_count = 1; - if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) { - seglim = ISP_CDSEG64; - ds = NULL; - ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0]; - crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; - } else { - seglim = ISP_CDSEG; - ds = &crq->req_dataseg[0]; - ds64 = NULL; - crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; - } - for (seg = 0; segcnt < nseg && seg < seglim; - segcnt++, seg++) { - if (ds64) { - ds64->ds_basehi = - ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32); - ds64->ds_base = dm_segs[segcnt].ds_addr; - 
ds64->ds_count = dm_segs[segcnt].ds_len; - ds64++; - } else { - ds->ds_base = dm_segs[segcnt].ds_addr; - ds->ds_count = dm_segs[segcnt].ds_len; - ds++; - } -#if __FreeBSD_version < 500000 - isp_prt(isp, ISP_LOGTDEBUG1, - "isp_send_ctio2: ent%d[%d]%llx:%llu", - cto->ct_header.rqs_entry_count-1, seg, - (uint64_t)dm_segs[segcnt].ds_addr, - (uint64_t)dm_segs[segcnt].ds_len); -#else - isp_prt(isp, ISP_LOGTDEBUG1, - "isp_send_ctio2: ent%d[%d]%jx:%ju", - cto->ct_header.rqs_entry_count-1, seg, - (uintmax_t)dm_segs[segcnt].ds_addr, - (uintmax_t)dm_segs[segcnt].ds_len); -#endif - cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; - cto->ct_seg_count++; - } - MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN); - isp_put_cont_req(isp, crq, qep); - ISP_TDQE(isp, "cont entry", curi, qep); - } - - /* - * No do final twiddling for the CTIO itself. - */ - cto->ct_header.rqs_seqno = 1; - isp_prt(isp, ISP_LOGTDEBUG1, - "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d", - cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid, - cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status, - cto->ct_resid); - if (FCPARAM(isp)->isp_2klogin) { - isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe); - } else { - isp_put_ctio2(isp, cto, qe); - } - ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe); - *mp->nxtip = nxti; } #endif -static void dma_2400(void *, bus_dma_segment_t *, int, int); -static void dma2_a64(void *, bus_dma_segment_t *, int, int); +static void dma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int); static void dma2(void *, bus_dma_segment_t *, int, int); static void -dma_2400(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) +dma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error) { mush_t *mp; - ispsoftc_t *isp; - struct ccb_scsiio *csio; - bus_dma_segment_t *eseg; - ispreqt7_t *rq; - int seglim, datalen; - uint32_t nxti; - - mp = (mush_t *) arg; - if (error) { - mp->error = error; - return; - } - - if (nseg < 1) 
{ - isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); - mp->error = EFAULT; - return; - } - - csio = mp->cmd_token; - isp = mp->isp; - rq = mp->rq; - nxti = *mp->nxtip; - - if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { - bus_dmamap_sync(isp->isp_osinfo.dmat, - PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); - } else { - bus_dmamap_sync(isp->isp_osinfo.dmat, - PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); - } - datalen = XS_XFRLEN(csio); - - /* - * We're passed an initial partially filled in entry that - * has most fields filled in except for data transfer - * related values. - * - * Our job is to fill in the initial request queue entry and - * then to start allocating and filling in continuation entries - * until we've covered the entire transfer. - */ - - rq->req_header.rqs_entry_type = RQSTYPE_T7RQS; - rq->req_dl = datalen; - if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { - rq->req_alen_datadir = 0x2; - } else { - rq->req_alen_datadir = 0x1; - } - - eseg = dm_segs + nseg; - - rq->req_dataseg.ds_base = DMA_LO32(dm_segs->ds_addr); - rq->req_dataseg.ds_basehi = DMA_HI32(dm_segs->ds_addr); - rq->req_dataseg.ds_count = dm_segs->ds_len; - - datalen -= dm_segs->ds_len; - - dm_segs++; - rq->req_seg_count++; - - while (datalen > 0 && dm_segs != eseg) { - uint32_t onxti; - ispcontreq64_t local, *crq = &local, *cqe; - - cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); - onxti = nxti; - nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); - if (nxti == mp->optr) { - isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); - mp->error = MUSHERR_NOQENTRIES; - return; - } - rq->req_header.rqs_entry_count++; - MEMZERO((void *)crq, sizeof (*crq)); - crq->req_header.rqs_entry_count = 1; - crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; - - seglim = 0; - while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { - crq->req_dataseg[seglim].ds_base = - DMA_LO32(dm_segs->ds_addr); - crq->req_dataseg[seglim].ds_basehi = - 
DMA_HI32(dm_segs->ds_addr); - crq->req_dataseg[seglim].ds_count = - dm_segs->ds_len; - rq->req_seg_count++; - dm_segs++; - seglim++; - datalen -= dm_segs->ds_len; - } - if (isp->isp_dblev & ISP_LOGDEBUG1) { - isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); - } - isp_put_cont64_req(isp, crq, cqe); - MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); - } - *mp->nxtip = nxti; + mp = (mush_t *)arg; + mp->mapsize = mapsize; + dma2(arg, dm_segs, nseg, error); } static void -dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) -{ - mush_t *mp; - ispsoftc_t *isp; - struct ccb_scsiio *csio; - bus_dma_segment_t *eseg; - ispreq64_t *rq; - int seglim, datalen; - uint32_t nxti; - - mp = (mush_t *) arg; - if (error) { - mp->error = error; - return; - } - - if (nseg < 1) { - isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); - mp->error = EFAULT; - return; - } - csio = mp->cmd_token; - isp = mp->isp; - rq = mp->rq; - nxti = *mp->nxtip; - - if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { - bus_dmamap_sync(isp->isp_osinfo.dmat, - PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); - } else { - bus_dmamap_sync(isp->isp_osinfo.dmat, - PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); - } - datalen = XS_XFRLEN(csio); - - /* - * We're passed an initial partially filled in entry that - * has most fields filled in except for data transfer - * related values. - * - * Our job is to fill in the initial request queue entry and - * then to start allocating and filling in continuation entries - * until we've covered the entire transfer. 
- */ - - if (IS_FC(isp)) { - rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; - seglim = ISP_RQDSEG_T3; - ((ispreqt3_t *)rq)->req_totalcnt = datalen; - if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { - ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN; - } else { - ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT; - } - } else { - rq->req_header.rqs_entry_type = RQSTYPE_A64; - if (csio->cdb_len > 12) { - seglim = 0; - } else { - seglim = ISP_RQDSEG_A64; - } - if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { - rq->req_flags |= REQFLAG_DATA_IN; - } else { - rq->req_flags |= REQFLAG_DATA_OUT; - } - } - - eseg = dm_segs + nseg; - - while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { - if (IS_FC(isp)) { - ispreqt3_t *rq3 = (ispreqt3_t *)rq; - rq3->req_dataseg[rq3->req_seg_count].ds_base = - DMA_LO32(dm_segs->ds_addr); - rq3->req_dataseg[rq3->req_seg_count].ds_basehi = - DMA_HI32(dm_segs->ds_addr); - rq3->req_dataseg[rq3->req_seg_count].ds_count = - dm_segs->ds_len; - } else { - rq->req_dataseg[rq->req_seg_count].ds_base = - DMA_LO32(dm_segs->ds_addr); - rq->req_dataseg[rq->req_seg_count].ds_basehi = - DMA_HI32(dm_segs->ds_addr); - rq->req_dataseg[rq->req_seg_count].ds_count = - dm_segs->ds_len; - } - datalen -= dm_segs->ds_len; - rq->req_seg_count++; - dm_segs++; - } - - while (datalen > 0 && dm_segs != eseg) { - uint32_t onxti; - ispcontreq64_t local, *crq = &local, *cqe; - - cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); - onxti = nxti; - nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); - if (nxti == mp->optr) { - isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); - mp->error = MUSHERR_NOQENTRIES; - return; - } - rq->req_header.rqs_entry_count++; - MEMZERO((void *)crq, sizeof (*crq)); - crq->req_header.rqs_entry_count = 1; - crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; - - seglim = 0; - while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { - crq->req_dataseg[seglim].ds_base = - 
DMA_LO32(dm_segs->ds_addr); - crq->req_dataseg[seglim].ds_basehi = - DMA_HI32(dm_segs->ds_addr); - crq->req_dataseg[seglim].ds_count = - dm_segs->ds_len; - rq->req_seg_count++; - dm_segs++; - seglim++; - datalen -= dm_segs->ds_len; - } - if (isp->isp_dblev & ISP_LOGDEBUG1) { - isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); - } - isp_put_cont64_req(isp, crq, cqe); - MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); - } - *mp->nxtip = nxti; -} - -static void dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { mush_t *mp; ispsoftc_t *isp; struct ccb_scsiio *csio; - bus_dma_segment_t *eseg; + isp_ddir_t ddir; ispreq_t *rq; - int seglim, datalen; - uint32_t nxti; mp = (mush_t *) arg; if (error) { mp->error = error; return; } - - if (nseg < 1) { - isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); - mp->error = EFAULT; - return; - } csio = mp->cmd_token; isp = mp->isp; rq = mp->rq; - nxti = *mp->nxtip; - - if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { - bus_dmamap_sync(isp->isp_osinfo.dmat, - PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); - } else { - bus_dmamap_sync(isp->isp_osinfo.dmat, - PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); - } - - datalen = XS_XFRLEN(csio); - - /* - * We're passed an initial partially filled in entry that - * has most fields filled in except for data transfer - * related values. - * - * Our job is to fill in the initial request queue entry and - * then to start allocating and filling in continuation entries - * until we've covered the entire transfer. 
- */ - - if (IS_FC(isp)) { - seglim = ISP_RQDSEG_T2; - ((ispreqt2_t *)rq)->req_totalcnt = datalen; - if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { - ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; + if (nseg) { + if (sizeof (bus_addr_t) > 4) { + if (nseg >= ISP_NSEG64_MAX) { + isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX); + mp->error = EFAULT; + return; + } + if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) { + rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; + } else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) { + rq->req_header.rqs_entry_type = RQSTYPE_A64; + } } else { - ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; + if (nseg >= ISP_NSEG_MAX) { + isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX); + mp->error = EFAULT; + return; + } } - } else { - if (csio->cdb_len > 12) { - seglim = 0; - } else { - seglim = ISP_RQDSEG; - } if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { - rq->req_flags |= REQFLAG_DATA_IN; + bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); + ddir = ISP_FROM_DEVICE; + } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { + bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); + ddir = ISP_TO_DEVICE; } else { - rq->req_flags |= REQFLAG_DATA_OUT; + ddir = ISP_NOXFR; } + } else { + dm_segs = NULL; + nseg = 0; + ddir = ISP_NOXFR; } - eseg = dm_segs + nseg; - - while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { - if (IS_FC(isp)) { - ispreqt2_t *rq2 = (ispreqt2_t *)rq; - rq2->req_dataseg[rq2->req_seg_count].ds_base = - DMA_LO32(dm_segs->ds_addr); - rq2->req_dataseg[rq2->req_seg_count].ds_count = - dm_segs->ds_len; - } else { - rq->req_dataseg[rq->req_seg_count].ds_base = - DMA_LO32(dm_segs->ds_addr); - rq->req_dataseg[rq->req_seg_count].ds_count = - dm_segs->ds_len; - } - datalen -= dm_segs->ds_len; - 
rq->req_seg_count++; - dm_segs++; + if (isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir) != CMD_QUEUED) { + mp->error = MUSHERR_NOQENTRIES; } - - while (datalen > 0 && dm_segs != eseg) { - uint32_t onxti; - ispcontreq_t local, *crq = &local, *cqe; - - cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); - onxti = nxti; - nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); - if (nxti == mp->optr) { - isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); - mp->error = MUSHERR_NOQENTRIES; - return; - } - rq->req_header.rqs_entry_count++; - MEMZERO((void *)crq, sizeof (*crq)); - crq->req_header.rqs_entry_count = 1; - crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; - - seglim = 0; - while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { - crq->req_dataseg[seglim].ds_base = - DMA_LO32(dm_segs->ds_addr); - crq->req_dataseg[seglim].ds_count = - dm_segs->ds_len; - rq->req_seg_count++; - dm_segs++; - seglim++; - datalen -= dm_segs->ds_len; - } - if (isp->isp_dblev & ISP_LOGDEBUG1) { - isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); - } - isp_put_cont_req(isp, crq, cqe); - MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); - } - *mp->nxtip = nxti; } -/* - */ static int -isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq, - uint32_t *nxtip, uint32_t optr) +isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff) { - ispreq_t *qep; mush_t mush, *mp; void (*eptr)(void *, bus_dma_segment_t *, int, int); + void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int); - qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); + mp = &mush; + mp->isp = isp; + mp->cmd_token = csio; + mp->rq = ff; + mp->error = 0; + mp->mapsize = 0; + #ifdef ISP_TARGET_MODE if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { - if (IS_FC(isp)) { - eptr = tdma_mkfc; - } else { - eptr = tdma_mk; - } - if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || - (csio->dxfer_len == 0)) { - mp = &mush; - mp->isp = 
isp; - mp->cmd_token = csio; - mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ - mp->nxtip = nxtip; - mp->optr = optr; - mp->error = 0; - (*eptr)(mp, NULL, 0, 0); - goto mbxsync; - } + eptr = tdma2; + eptr2 = tdma2_2; } else #endif - if (IS_24XX(isp)) { - eptr = dma_2400; - } else if (sizeof (bus_addr_t) > 4) { - eptr = dma2_a64; - } else { + { eptr = dma2; + eptr2 = dma2_2; } - if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || - (csio->dxfer_len == 0)) { - rq->req_seg_count = 1; - goto mbxsync; - } - - /* - * Do a virtual grapevine step to collect info for - * the callback dma allocation that we have to use... - */ - mp = &mush; - mp->isp = isp; - mp->cmd_token = csio; - mp->rq = rq; - mp->nxtip = nxtip; - mp->optr = optr; - mp->error = 0; - - if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { + if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || (csio->dxfer_len == 0)) { + (*eptr)(mp, NULL, 0, 0); + } else if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { int error; -#if __FreeBSD_version < 500000 - int s = splsoftvm(); + error = bus_dmamap_load(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, csio->data_ptr, csio->dxfer_len, eptr, mp, 0); +#if 0 + xpt_print(csio->ccb_h.path, "%s: bus_dmamap_load " "ptr %p len %d returned %d\n", __func__, csio->data_ptr, csio->dxfer_len, error); #endif - error = bus_dmamap_load(isp->isp_osinfo.dmat, - PISP_PCMD(csio)->dmap, csio->data_ptr, - csio->dxfer_len, eptr, mp, 0); -#if __FreeBSD_version < 500000 - splx(s); -#endif + if (error == EINPROGRESS) { - bus_dmamap_unload(isp->isp_osinfo.dmat, - PISP_PCMD(csio)->dmap); + bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); mp->error = EINVAL; - isp_prt(isp, ISP_LOGERR, - "deferred dma allocation not supported"); + isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported"); } else if (error && mp->error == 0) { #ifdef DIAGNOSTIC - isp_prt(isp, ISP_LOGERR, - "error %d in dma mapping code", error); + 
isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error); #endif mp->error = error; } } else { /* Pointer to physical buffer */ struct bus_dma_segment seg; seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; seg.ds_len = csio->dxfer_len; (*eptr)(mp, &seg, 1, 0); } } else { struct bus_dma_segment *segs; if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { - isp_prt(isp, ISP_LOGERR, - "Physical segment pointers unsupported"); + isp_prt(isp, ISP_LOGERR, "Physical segment pointers unsupported"); mp->error = EINVAL; } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { - isp_prt(isp, ISP_LOGERR, - "Virtual segment addresses unsupported"); - mp->error = EINVAL; + struct uio sguio; + int error; + + /* + * We're taking advantage of the fact that + * the pointer/length sizes and layout of the iovec + * structure are the same as the bus_dma_segment + * structure. This might be a little dangerous, + * but only if they change the structures, which + * seems unlikely. + */ + KASSERT((sizeof (sguio.uio_iov) == sizeof (csio->data_ptr) && + sizeof (sguio.uio_iovcnt) >= sizeof (csio->sglist_cnt) && + sizeof (sguio.uio_resid) >= sizeof (csio->dxfer_len)), ("Ken's assumption failed")); + sguio.uio_iov = (struct iovec *)csio->data_ptr; + sguio.uio_iovcnt = csio->sglist_cnt; + sguio.uio_resid = csio->dxfer_len; + sguio.uio_segflg = UIO_SYSSPACE; + + error = bus_dmamap_load_uio(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, &sguio, eptr2, mp, 0); + + if (error != 0 && mp->error == 0) { + isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error); + mp->error = error; + } } else { /* Just use the segments provided */ segs = (struct bus_dma_segment *) csio->data_ptr; (*eptr)(mp, segs, csio->sglist_cnt, 0); } } if (mp->error) { int retval = CMD_COMPLETE; if (mp->error == MUSHERR_NOQENTRIES) { retval = CMD_EAGAIN; } else if (mp->error == EFBIG) { XS_SETERR(csio, CAM_REQ_TOO_BIG); } else if (mp->error == EINVAL) { XS_SETERR(csio, CAM_REQ_INVALID); } else { XS_SETERR(csio, 
CAM_UNREC_HBA_ERROR); } return (retval); - } -mbxsync: - if (isp->isp_dblev & ISP_LOGDEBUG1) { - isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, rq); - } - switch (rq->req_header.rqs_entry_type) { - case RQSTYPE_REQUEST: - isp_put_request(isp, rq, qep); - break; - case RQSTYPE_CMDONLY: - isp_put_extended_request(isp, (ispextreq_t *)rq, - (ispextreq_t *)qep); - break; - case RQSTYPE_T2RQS: - if (FCPARAM(isp)->isp_2klogin) { - isp_put_request_t2e(isp, - (ispreqt2e_t *) rq, (ispreqt2e_t *) qep); - } else { - isp_put_request_t2(isp, - (ispreqt2_t *) rq, (ispreqt2_t *) qep); - } - break; - case RQSTYPE_T3RQS: - if (FCPARAM(isp)->isp_2klogin) { - isp_put_request_t3e(isp, - (ispreqt3e_t *) rq, (ispreqt3e_t *) qep); - break; - } - /* FALLTHROUGH */ - case RQSTYPE_A64: - isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep); - break; - case RQSTYPE_T7RQS: - isp_put_request_t7(isp, (ispreqt7_t *) rq, (ispreqt7_t *) qep); - break; } return (CMD_QUEUED); } static void isp_pci_reset0(ispsoftc_t *isp) { ISP_DISABLE_INTS(isp); } static void isp_pci_reset1(ispsoftc_t *isp) { if (!IS_24XX(isp)) { /* Make sure the BIOS is disabled */ isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); } /* and enable interrupts */ ISP_ENABLE_INTS(isp); } static void isp_pci_dumpregs(ispsoftc_t *isp, const char *msg) { struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; if (msg) printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); else printf("%s:\n", device_get_nameunit(isp->isp_dev)); if (IS_SCSI(isp)) printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); else printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); if (IS_SCSI(isp)) { ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), ISP_READ(isp, CDMA_FIFO_STS)); printf(" 
ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), ISP_READ(isp, DDMA_FIFO_STS)); printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", ISP_READ(isp, SXP_INTERRUPT), ISP_READ(isp, SXP_GROSS_ERR), ISP_READ(isp, SXP_PINS_CTRL)); ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); } printf(" mbox regs: %x %x %x %x %x\n", ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), ISP_READ(isp, OUTMAILBOX4)); printf(" PCI Status Command/Status=%x\n", pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); } Index: head/sys/dev/isp/isp_sbus.c =================================================================== --- head/sys/dev/isp/isp_sbus.c (revision 196007) +++ head/sys/dev/isp/isp_sbus.c (revision 196008) @@ -1,834 +1,728 @@ /*- * Copyright (c) 1997-2006 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * SBus specific probe and attach routines for Qlogic ISP SCSI adapters. * FreeBSD Version. */ #include __FBSDID("$FreeBSD$"); #include #include -#if __FreeBSD_version >= 700000 #include #include -#endif #include #include #include #include #include #include #include #include #include #include -static uint32_t -isp_sbus_rd_reg(ispsoftc_t *, int); -static void -isp_sbus_wr_reg(ispsoftc_t *, int, uint32_t); -static int -isp_sbus_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); +static uint32_t isp_sbus_rd_reg(ispsoftc_t *, int); +static void isp_sbus_wr_reg(ispsoftc_t *, int, uint32_t); +static int isp_sbus_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); static int isp_sbus_mbxdma(ispsoftc_t *); -static int -isp_sbus_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *, uint32_t); +static int isp_sbus_dmasetup(ispsoftc_t *, XS_T *, void *); + static void isp_sbus_reset0(ispsoftc_t *); static void isp_sbus_reset1(ispsoftc_t *); static void isp_sbus_dumpregs(ispsoftc_t *, const char *); static struct ispmdvec mdvec = { isp_sbus_rd_isr, isp_sbus_rd_reg, isp_sbus_wr_reg, isp_sbus_mbxdma, isp_sbus_dmasetup, isp_common_dmateardown, isp_sbus_reset0, isp_sbus_reset1, isp_sbus_dumpregs, NULL, BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 }; static int isp_sbus_probe (device_t); static int isp_sbus_attach (device_t); #define ISP_SBD(isp) ((struct isp_sbussoftc *)isp)->sbus_dev struct isp_sbussoftc { ispsoftc_t sbus_isp; device_t sbus_dev; struct 
resource * sbus_reg; void * ih; int16_t sbus_poff[_NREG_BLKS]; sdparam sbus_param; + struct isp_spi sbus_spi; struct ispmdvec sbus_mdvec; struct resource * sbus_ires; }; static device_method_t isp_sbus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, isp_sbus_probe), DEVMETHOD(device_attach, isp_sbus_attach), { 0, 0 } }; static driver_t isp_sbus_driver = { "isp", isp_sbus_methods, sizeof (struct isp_sbussoftc) }; static devclass_t isp_devclass; DRIVER_MODULE(isp, sbus, isp_sbus_driver, isp_devclass, 0, 0); -#if __FreeBSD_version < 700000 -extern ispfwfunc *isp_get_firmware_p; -#endif static int isp_sbus_probe(device_t dev) { int found = 0; const char *name = ofw_bus_get_name(dev); if (strcmp(name, "SUNW,isp") == 0 || strcmp(name, "QLGC,isp") == 0 || strcmp(name, "ptisp") == 0 || strcmp(name, "PTI,ptisp") == 0) { found++; } if (!found) return (ENXIO); if (isp_announced == 0 && bootverbose) { printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " "Core Version %d.%d\n", ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); isp_announced++; } return (0); } static int isp_sbus_attach(device_t dev) { struct resource *regs; - int tval, iqd, isp_debug, role, rid, ispburst; + int tval, iqd, isp_debug, role, rid, ispburst, default_id; struct isp_sbussoftc *sbs; ispsoftc_t *isp = NULL; int locksetup = 0; int ints_setup = 0; /* * Figure out if we're supposed to skip this one. * If we are, we actually go to ISP_ROLE_NONE. 
*/ tval = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "disable", &tval) == 0 && tval) { device_printf(dev, "device is disabled\n"); /* but return 0 so the !$)$)*!$*) unit isn't reused */ return (0); } role = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "role", &role) == 0 && ((role & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) == 0)) { device_printf(dev, "setting role to 0x%x\n", role); } else { -#ifdef ISP_TARGET_MODE - role = ISP_ROLE_INITIATOR|ISP_ROLE_TARGET; -#else role = ISP_DEFAULT_ROLES; -#endif } sbs = malloc(sizeof (*sbs), M_DEVBUF, M_NOWAIT | M_ZERO); if (sbs == NULL) { device_printf(dev, "cannot allocate softc\n"); return (ENOMEM); } regs = NULL; iqd = 0; rid = 0; regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (regs == 0) { device_printf(dev, "unable to map registers\n"); goto bad; } sbs->sbus_dev = dev; sbs->sbus_reg = regs; sbs->sbus_mdvec = mdvec; sbs->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; sbs->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF; sbs->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF; sbs->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF; sbs->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; isp = &sbs->sbus_isp; isp->isp_bus_tag = rman_get_bustag(regs); isp->isp_bus_handle = rman_get_bushandle(regs); isp->isp_mdvec = &sbs->sbus_mdvec; isp->isp_bustype = ISP_BT_SBUS; isp->isp_type = ISP_HA_SCSI_UNKNOWN; isp->isp_param = &sbs->sbus_param; + isp->isp_osinfo.pc.ptr = &sbs->sbus_spi; isp->isp_revision = 0; /* XXX */ - isp->isp_role = role; - isp->isp_dev = dev; + ISP_SET_PC(isp, 0, role, role); /* * Get the clock frequency and convert it from HZ to MHz, * rounding up. This defaults to 25MHz if there isn't a * device specific one in the OFW device tree. */ sbs->sbus_mdvec.dv_clock = (sbus_get_clockfreq(dev) + 500000)/1000000; /* * Now figure out what the proper burst sizes, etc., to use. 
* Unfortunately, there is no ddi_dma_burstsizes here which * walks up the tree finding the limiting burst size node (if * any). We just use what's here for isp. */ ispburst = sbus_get_burstsz(dev); if (ispburst == 0) { ispburst = SBUS_BURST_32 - 1; } sbs->sbus_mdvec.dv_conf1 = 0; if (ispburst & (1 << 5)) { sbs->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_32; } else if (ispburst & (1 << 4)) { sbs->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_16; } else if (ispburst & (1 << 3)) { sbs->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_BURST8 | BIU_SBUS_CONF1_FIFO_8; } if (sbs->sbus_mdvec.dv_conf1) { sbs->sbus_mdvec.dv_conf1 |= BIU_BURST_ENABLE; } /* * We don't trust NVRAM on SBus cards */ isp->isp_confopts |= ISP_CFG_NONVRAM; /* * Mark things if we're a PTI SBus adapter. */ if (strcmp("PTI,ptisp", ofw_bus_get_name(dev)) == 0 || strcmp("ptisp", ofw_bus_get_name(dev)) == 0) { - SDPARAM(isp)->isp_ptisp = 1; + SDPARAM(isp, 0)->isp_ptisp = 1; } -#if __FreeBSD_version >= 700000 isp->isp_osinfo.fw = firmware_get("isp_1000"); if (isp->isp_osinfo.fw) { union { const void *cp; uint16_t *sp; } stupid; stupid.cp = isp->isp_osinfo.fw->data; isp->isp_mdvec->dv_ispfw = stupid.sp; } -#else - /* - * Try and find firmware for this device. 
- */ - if (isp_get_firmware_p) { - (*isp_get_firmware_p)(0, 0, 0x1000, &sbs->sbus_mdvec.dv_ispfw); - } -#endif tval = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) { isp->isp_confopts |= ISP_CFG_NORELOAD; } - isp->isp_osinfo.default_id = -1; + default_id = -1; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "iid", &tval) == 0) { - isp->isp_osinfo.default_id = tval; + default_id = tval; isp->isp_confopts |= ISP_CFG_OWNLOOPID; } - if (isp->isp_osinfo.default_id == -1) { + if (default_id == -1) { /* * XXX: should be a way to get properties w/o having * XXX: to call OF_xxx functions */ - isp->isp_osinfo.default_id = 7; + default_id = 7; } + ISP_SPI_PC(isp, 0)->iid = default_id; isp_debug = 0; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &isp_debug); /* Make sure the lock is set up. */ mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); locksetup++; iqd = 0; sbs->sbus_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, RF_ACTIVE | RF_SHAREABLE); if (sbs->sbus_ires == NULL) { device_printf(dev, "could not allocate interrupt\n"); goto bad; } if (isp_setup_intr(dev, sbs->sbus_ires, ISP_IFLAGS, NULL, isp_platform_intr, isp, &sbs->ih)) { device_printf(dev, "could not setup interrupt\n"); goto bad; } ints_setup++; /* * Set up logging levels. */ if (isp_debug) { isp->isp_dblev = isp_debug; } else { isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; } if (bootverbose) { isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; } /* * Make sure we're in reset state. 
*/ ISP_LOCK(isp); - isp_reset(isp); + isp_reset(isp, 1); if (isp->isp_state != ISP_RESETSTATE) { isp_uninit(isp); ISP_UNLOCK(isp); goto bad; } isp_init(isp); - if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { + if (role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { isp_uninit(isp); ISP_UNLOCK(isp); goto bad; } isp_attach(isp); - if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { + if (role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { isp_uninit(isp); ISP_UNLOCK(isp); goto bad; } ISP_UNLOCK(isp); return (0); bad: if (sbs && ints_setup) { (void) bus_teardown_intr(dev, sbs->sbus_ires, sbs->ih); } if (sbs && sbs->sbus_ires) { bus_release_resource(dev, SYS_RES_IRQ, iqd, sbs->sbus_ires); } if (locksetup && isp) { mtx_destroy(&isp->isp_osinfo.lock); } if (regs) { (void) bus_release_resource(dev, 0, 0, regs); } if (sbs) { if (sbs->sbus_isp.isp_param) { free(sbs->sbus_isp.isp_param, M_DEVBUF); } free(sbs, M_DEVBUF); } return (ENXIO); } #define IspVirt2Off(a, x) \ (((struct isp_sbussoftc *)a)->sbus_poff[((x) & _BLK_REG_MASK) >> \ _BLK_REG_SHFT] + ((x) & 0xff)) #define BXR2(sbc, off) \ bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off) static int isp_sbus_rd_isr(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbp) { uint16_t isr, sema; isr = BXR2(sbc, IspVirt2Off(isp, BIU_ISR)); sema = BXR2(sbc, IspVirt2Off(isp, BIU_SEMA)); isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); isr &= INT_PENDING_MASK(isp); sema &= BIU_SEMA_LOCK; if (isr == 0 && sema == 0) { return (0); } *isrp = isr; if ((*semap = sema) != 0) { *mbp = BXR2(sbc, IspVirt2Off(isp, OUTMAILBOX0)); } return (1); } static uint32_t isp_sbus_rd_reg(ispsoftc_t *isp, int regoff) { uint16_t rval; struct isp_sbussoftc *sbs = (struct isp_sbussoftc *) isp; int offset = sbs->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT]; offset += (regoff & 0xff); rval = bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, offset); 
isp_prt(isp, ISP_LOGDEBUG3, "isp_sbus_rd_reg(off %x) = %x", regoff, rval); return (rval); } static void isp_sbus_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val) { struct isp_sbussoftc *sbs = (struct isp_sbussoftc *) isp; int offset = sbs->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT]; offset += (regoff & 0xff); isp_prt(isp, ISP_LOGDEBUG3, "isp_sbus_wr_reg(off %x) = %x", regoff, val); bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, offset, val); MEMORYBARRIER(isp, SYNC_REG, offset, 2); } struct imush { ispsoftc_t *isp; int error; }; static void imc(void *, bus_dma_segment_t *, int, int); static void imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct imush *imushp = (struct imush *) arg; if (error) { imushp->error = error; } else { ispsoftc_t *isp =imushp->isp; bus_addr_t addr = segs->ds_addr; isp->isp_rquest_dma = addr; addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); isp->isp_result_dma = addr; } } static int isp_sbus_mbxdma(ispsoftc_t *isp) { caddr_t base; uint32_t len; int i, error, ns; struct imush im; /* * Already been here? If so, leave... 
*/ if (isp->isp_rquest) { return (0); } ISP_UNLOCK(isp); len = sizeof (struct isp_pcmd) * isp->isp_maxcmds; isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); if (isp->isp_osinfo.pcmd_pool == NULL) { isp_prt(isp, ISP_LOGERR, "cannot alloc pcmd pool"); ISP_LOCK(isp); return (1); } len = sizeof (XS_T **) * isp->isp_maxcmds; isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); if (isp->isp_xflist == NULL) { isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); ISP_LOCK(isp); return (1); } len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_SBD(isp)), 1, BUS_SPACE_MAXADDR_24BIT+1, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR_32BIT, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, ISP_NSEGS, BUS_SPACE_MAXADDR_24BIT, 0, &isp->isp_osinfo.dmat)) { isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); free(isp->isp_xflist, M_DEVBUF); ISP_LOCK(isp); return(1); } /* * Allocate and map the request, result queues, plus FC scratch area. 
*/ len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); ns = (len / PAGE_SIZE) + 1; if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, BUS_SPACE_MAXADDR_24BIT+1, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR_32BIT, NULL, NULL, len, ns, - BUS_SPACE_MAXADDR_24BIT, 0, &isp->isp_cdmat)) { + BUS_SPACE_MAXADDR_24BIT, 0, &isp->isp_osinfo.cdmat)) { isp_prt(isp, ISP_LOGERR, "cannot create a dma tag for control spaces"); free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); free(isp->isp_xflist, M_DEVBUF); ISP_LOCK(isp); return (1); } - if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, - &isp->isp_cdmap) != 0) { + if (bus_dmamem_alloc(isp->isp_osinfo.cdmat, (void **)&base, BUS_DMA_NOWAIT, + &isp->isp_osinfo.cdmap) != 0) { isp_prt(isp, ISP_LOGERR, "cannot allocate %d bytes of CCB memory", len); - bus_dma_tag_destroy(isp->isp_cdmat); + bus_dma_tag_destroy(isp->isp_osinfo.cdmat); free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); free(isp->isp_xflist, M_DEVBUF); ISP_LOCK(isp); return (1); } for (i = 0; i < isp->isp_maxcmds; i++) { struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i]; error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap); if (error) { isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error); while (--i >= 0) { bus_dmamap_destroy(isp->isp_osinfo.dmat, isp->isp_osinfo.pcmd_pool[i].dmap); } goto bad; } - isp_callout_init(&pcmd->wdog); + callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0); if (i == isp->isp_maxcmds-1) { pcmd->next = NULL; } else { pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1]; } } isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0]; im.isp = isp; im.error = 0; - bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); + bus_dmamap_load(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap, base, len, imc, &im, 0); if (im.error) { isp_prt(isp, ISP_LOGERR, "error %d loading dma map for control areas", im.error); goto bad; } isp->isp_rquest = base; base += 
ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); isp->isp_result = base; ISP_LOCK(isp); return (0); bad: - bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); - bus_dma_tag_destroy(isp->isp_cdmat); + bus_dmamem_free(isp->isp_osinfo.cdmat, base, isp->isp_osinfo.cdmap); + bus_dma_tag_destroy(isp->isp_osinfo.cdmat); free(isp->isp_xflist, M_DEVBUF); free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); isp->isp_rquest = NULL; ISP_LOCK(isp); return (1); } typedef struct { ispsoftc_t *isp; void *cmd_token; - void *rq; - uint32_t *nxtip; - uint32_t optr; + void *rq; /* original request */ int error; + bus_size_t mapsize; } mush_t; #define MUSHERR_NOQENTRIES -2 - static void dma2(void *, bus_dma_segment_t *, int, int); static void dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { mush_t *mp; ispsoftc_t *isp; struct ccb_scsiio *csio; - bus_dma_segment_t *eseg; + isp_ddir_t ddir; ispreq_t *rq; - int seglim, datalen; - uint16_t nxti; mp = (mush_t *) arg; if (error) { mp->error = error; return; } - - if (nseg < 1) { - isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); - mp->error = EFAULT; - return; - } csio = mp->cmd_token; isp = mp->isp; rq = mp->rq; - nxti = *mp->nxtip; - - if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { - bus_dmamap_sync(isp->isp_osinfo.dmat, - PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); + if (nseg) { + if (sizeof (bus_addr_t) > 4) { + if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) { + rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; + } else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) { + rq->req_header.rqs_entry_type = RQSTYPE_A64; + } + } + if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { + bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); + ddir = ISP_FROM_DEVICE; + } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { + bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); + ddir = ISP_TO_DEVICE; + } else { + ddir = ISP_NOXFR; + } } else { - 
bus_dmamap_sync(isp->isp_osinfo.dmat, - PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); + dm_segs = NULL; + nseg = 0; + ddir = ISP_NOXFR; } - datalen = XS_XFRLEN(csio); - - /* - * We're passed an initial partially filled in entry that - * has most fields filled in except for data transfer - * related values. - * - * Our job is to fill in the initial request queue entry and - * then to start allocating and filling in continuation entries - * until we've covered the entire transfer. - */ - - if (csio->cdb_len > 12) { - seglim = 0; - } else { - seglim = ISP_RQDSEG; + if (isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir) != CMD_QUEUED) { + mp->error = MUSHERR_NOQENTRIES; } - if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { - rq->req_flags |= REQFLAG_DATA_IN; - } else { - rq->req_flags |= REQFLAG_DATA_OUT; - } - - eseg = dm_segs + nseg; - - while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { - rq->req_dataseg[rq->req_seg_count].ds_base = dm_segs->ds_addr; - rq->req_dataseg[rq->req_seg_count].ds_count = dm_segs->ds_len; - datalen -= dm_segs->ds_len; - rq->req_seg_count++; - dm_segs++; - } - - while (datalen > 0 && dm_segs != eseg) { - uint16_t onxti; - ispcontreq_t local, *crq = &local, *cqe; - - cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); - onxti = nxti; - nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); - if (nxti == mp->optr) { - isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); - mp->error = MUSHERR_NOQENTRIES; - return; - } - rq->req_header.rqs_entry_count++; - MEMZERO((void *)crq, sizeof (*crq)); - crq->req_header.rqs_entry_count = 1; - crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; - - seglim = 0; - while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { - crq->req_dataseg[seglim].ds_base = - dm_segs->ds_addr; - crq->req_dataseg[seglim].ds_count = - dm_segs->ds_len; - rq->req_seg_count++; - dm_segs++; - seglim++; - datalen -= dm_segs->ds_len; - } - isp_put_cont_req(isp, crq, cqe); - 
MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); - } - *mp->nxtip = nxti; } static int -isp_sbus_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq, - uint32_t *nxtip, uint32_t optr) +isp_sbus_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff) { - ispreq_t *qep; mush_t mush, *mp; void (*eptr)(void *, bus_dma_segment_t *, int, int); - qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); - eptr = dma2; - - - if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || - (csio->dxfer_len == 0)) { - rq->req_seg_count = 1; - goto mbxsync; - } - - /* - * Do a virtual grapevine step to collect info for - * the callback dma allocation that we have to use... - */ mp = &mush; mp->isp = isp; mp->cmd_token = csio; - mp->rq = rq; - mp->nxtip = nxtip; - mp->optr = optr; + mp->rq = ff; mp->error = 0; + mp->mapsize = 0; - if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { + eptr = dma2; + + if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || (csio->dxfer_len == 0)) { + (*eptr)(mp, NULL, 0, 0); + } else if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { - int error = bus_dmamap_load(isp->isp_osinfo.dmat, - PISP_PCMD(csio)->dmap, csio->data_ptr, - csio->dxfer_len, eptr, mp, 0); + int error; + error = bus_dmamap_load(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, csio->data_ptr, csio->dxfer_len, eptr, mp, 0); +#if 0 + xpt_print(csio->ccb_h.path, "%s: bus_dmamap_load " "ptr %p len %d returned %d\n", __func__, csio->data_ptr, csio->dxfer_len, error); +#endif + if (error == EINPROGRESS) { - bus_dmamap_unload(isp->isp_osinfo.dmat, - PISP_PCMD(csio)->dmap); + bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); mp->error = EINVAL; - isp_prt(isp, ISP_LOGERR, - "deferred dma allocation not supported"); + isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported"); } else if (error && mp->error == 0) { #ifdef DIAGNOSTIC - isp_prt(isp, ISP_LOGERR, - "error %d in dma mapping code", 
error); + isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error); #endif mp->error = error; } } else { /* Pointer to physical buffer */ struct bus_dma_segment seg; - seg.ds_addr = (bus_addr_t)csio->data_ptr; + seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; seg.ds_len = csio->dxfer_len; (*eptr)(mp, &seg, 1, 0); } } else { struct bus_dma_segment *segs; if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { - isp_prt(isp, ISP_LOGERR, - "Physical segment pointers unsupported"); + isp_prt(isp, ISP_LOGERR, "Physical segment pointers unsupported"); mp->error = EINVAL; } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { - isp_prt(isp, ISP_LOGERR, - "Virtual segment addresses unsupported"); + isp_prt(isp, ISP_LOGERR, "Physical SG/LIST Phys segment pointers unsupported"); mp->error = EINVAL; } else { /* Just use the segments provided */ segs = (struct bus_dma_segment *) csio->data_ptr; (*eptr)(mp, segs, csio->sglist_cnt, 0); } } if (mp->error) { int retval = CMD_COMPLETE; if (mp->error == MUSHERR_NOQENTRIES) { retval = CMD_EAGAIN; } else if (mp->error == EFBIG) { XS_SETERR(csio, CAM_REQ_TOO_BIG); } else if (mp->error == EINVAL) { XS_SETERR(csio, CAM_REQ_INVALID); } else { XS_SETERR(csio, CAM_UNREC_HBA_ERROR); } return (retval); - } -mbxsync: - if (isp->isp_dblev & ISP_LOGDEBUG1) { - isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, rq); - } - switch (rq->req_header.rqs_entry_type) { - case RQSTYPE_REQUEST: - isp_put_request(isp, rq, qep); - break; - case RQSTYPE_CMDONLY: - isp_put_extended_request(isp, (ispextreq_t *)rq, - (ispextreq_t *)qep); - break; } return (CMD_QUEUED); } static void isp_sbus_reset0(ispsoftc_t *isp) { ISP_DISABLE_INTS(isp); } static void isp_sbus_reset1(ispsoftc_t *isp) { ISP_ENABLE_INTS(isp); } static void isp_sbus_dumpregs(ispsoftc_t *isp, const char *msg) { if (msg) printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); else printf("%s:\n", device_get_nameunit(isp->isp_dev)); printf(" biu_conf1=%x", ISP_READ(isp, 
BIU_CONF1)); printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), ISP_READ(isp, CDMA_FIFO_STS)); printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), ISP_READ(isp, DDMA_FIFO_STS)); printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", ISP_READ(isp, SXP_INTERRUPT), ISP_READ(isp, SXP_GROSS_ERR), ISP_READ(isp, SXP_PINS_CTRL)); ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); printf(" mbox regs: %x %x %x %x %x\n", ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), ISP_READ(isp, OUTMAILBOX4)); } Index: head/sys/dev/isp/isp_stds.h =================================================================== --- head/sys/dev/isp/isp_stds.h (revision 196007) +++ head/sys/dev/isp/isp_stds.h (revision 196008) @@ -1,211 +1,225 @@ /* $FreeBSD$ */ /*- - * Copyright (c) 1997-2007 by Matthew Jacob + * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * */ /* * Structures that derive directly from public standards. */ #ifndef _ISP_STDS_H #define _ISP_STDS_H /* * FC Frame Header * * Source: dpANS-X3.xxx-199x, section 18 (AKA FC-PH-2) * */ typedef struct { uint8_t r_ctl; uint8_t d_id[3]; uint8_t cs_ctl; uint8_t s_id[3]; uint8_t type; - uint8_t f_ctl; + uint8_t f_ctl[3]; uint8_t seq_id; uint8_t df_ctl; uint16_t seq_cnt; uint16_t ox_id; uint16_t rx_id; uint32_t parameter; } fc_hdr_t; /* * FCP_CMND_IU Payload * * Source: NICTS T10, Project 1144D, Revision 07a, Section 9 (AKA fcp2-r07a) * * Notes: * When additional cdb length is defined in fcp_cmnd_alen_datadir, * bits 2..7, the actual cdb length is 16 + ((fcp_cmnd_alen_datadir>>2)*4), * with the datalength following in MSB format just after. 
*/ typedef struct { uint8_t fcp_cmnd_lun[8]; uint8_t fcp_cmnd_crn; uint8_t fcp_cmnd_task_attribute; uint8_t fcp_cmnd_task_management; uint8_t fcp_cmnd_alen_datadir; union { struct { uint8_t fcp_cmnd_cdb[16]; uint32_t fcp_cmnd_dl; } sf; struct { uint8_t fcp_cmnd_cdb[1]; } lf; } cdb_dl; } fcp_cmnd_iu_t; #define FCP_CMND_TASK_ATTR_SIMPLE 0x00 #define FCP_CMND_TASK_ATTR_HEAD 0x01 #define FCP_CMND_TASK_ATTR_ORDERED 0x02 #define FCP_CMND_TASK_ATTR_ACA 0x04 #define FCP_CMND_TASK_ATTR_UNTAGGED 0x05 #define FCP_CMND_TASK_ATTR_MASK 0x07 #define FCP_CMND_ADDTL_CDBLEN_SHIFT 2 #define FCP_CMND_DATA_WRITE 0x01 #define FCP_CMND_DATA_READ 0x02 #define FCP_CMND_DATA_DIR_MASK 0x03 #define FCP_CMND_TMF_CLEAR_ACA 0x40 #define FCP_CMND_TMF_TGT_RESET 0x20 #define FCP_CMND_TMF_LUN_RESET 0x10 #define FCP_CMND_TMF_CLEAR_TASK_SET 0x04 #define FCP_CMND_TMF_ABORT_TASK_SET 0x02 /* * Basic CT IU Header * * Source: X3.288-199x Generic Services 2 Rev 5.3 (FC-GS-2) Section 4.3.1 */ typedef struct { uint8_t ct_revision; uint8_t ct_in_id[3]; uint8_t ct_fcs_type; uint8_t ct_fcs_subtype; uint8_t ct_options; uint8_t ct_reserved0; uint16_t ct_cmd_resp; uint16_t ct_bcnt_resid; uint8_t ct_reserved1; uint8_t ct_reason; uint8_t ct_explanation; uint8_t ct_vunique; } ct_hdr_t; #define CT_REVISION 1 #define CT_FC_TYPE_FC 0xFC #define CT_FC_SUBTYPE_NS 0x02 /* * RFT_ID Requet CT_IU * * Source: NCITS xxx-200x Generic Services- 5 Rev 8.5 Section 5.2.5.30 */ typedef struct { ct_hdr_t rftid_hdr; uint8_t rftid_reserved; uint8_t rftid_portid[3]; uint32_t rftid_fc4types[8]; } rft_id_t; /* + * FCP Response IU Bits of interest + * Source: NCITS T10, Project 1144D, Revision 08 (aka FCP2r08) + */ +#define FCP_CONF_REQ 0x10 +#define FCP_RESID_UNDERFLOW 0x08 +#define FCP_RESID_OVERFLOW 0x04 +#define FCP_SNSLEN_VALID 0x02 +#define FCP_RSPLEN_VALID 0x01 + +/* * FCP Response Code Definitions - * Source: NCITS T10, Project 1144D, Revision 07a (aka FCP2r07a) + * Source: NCITS T10, Project 1144D, Revision 08 (aka FCP2r08) */ 
#define FCP_RSPNS_CODE_OFFSET 3 #define FCP_RSPNS_TMF_DONE 0 #define FCP_RSPNS_DLBRSTX 1 #define FCP_RSPNS_BADCMND 2 #define FCP_RSPNS_EROFS 3 #define FCP_RSPNS_TMF_REJECT 4 #define FCP_RSPNS_TMF_FAILED 5 /* unconverted miscellany */ /* * Basic FC Link Service defines */ /* * These are in the R_CTL field. */ #define ABTS 0x81 #define BA_ACC 0x84 /* of ABORT SEQUENCE */ #define BA_RJT 0x85 /* of ABORT SEQUENCE */ /* * Link Service Accept/Reject */ #define LS_ACC 0x8002 #define LS_RJT 0x8001 /* * FC ELS Codes- bits 31-24 of the first payload word of an ELS frame. */ #define PLOGI 0x03 #define FLOGI 0x04 #define LOGO 0x05 #define ABTX 0x06 #define PRLI 0x20 #define PRLO 0x21 +#define SCN 0x22 #define TPRLO 0x24 +#define PDISC 0x50 +#define ADISC 0x52 #define RNC 0x53 /* * FC4 defines */ #define FC4_IP 5 /* ISO/EEC 8802-2 LLC/SNAP */ #define FC4_SCSI 8 /* SCSI-3 via Fibre Channel Protocol (FCP) */ #define FC4_FC_SVC 0x20 /* Fibre Channel Services */ #ifndef MSG_ABORT #define MSG_ABORT 0x06 #endif #ifndef MSG_BUS_DEV_RESET #define MSG_BUS_DEV_RESET 0x0c #endif #ifndef MSG_ABORT_TAG #define MSG_ABORT_TAG 0x0d #endif #ifndef MSG_CLEAR_QUEUE #define MSG_CLEAR_QUEUE 0x0e #endif #ifndef MSG_REL_RECOVERY #define MSG_REL_RECOVERY 0x10 #endif #ifndef MSG_TERM_IO_PROC #define MSG_TERM_IO_PROC 0x11 #endif #ifndef MSG_LUN_RESET #define MSG_LUN_RESET 0x17 #endif #endif /* _ISP_STDS_H */ Index: head/sys/dev/isp/isp_target.c =================================================================== --- head/sys/dev/isp/isp_target.c (revision 196007) +++ head/sys/dev/isp/isp_target.c (revision 196008) @@ -1,1767 +1,1872 @@ /*- - * Copyright (c) 1997-2007 by Matthew Jacob + * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * + * * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * + * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * */ /* * Machine and OS Independent Target Mode Code for the Qlogic SCSI/FC adapters. */ /* * Bug fixes gratefully acknowledged from: * Oded Kedem */ /* * Include header file appropriate for platform we're building on. 
*/ #ifdef __NetBSD__ #include #endif #ifdef __FreeBSD__ #include __FBSDID("$FreeBSD$"); #include #endif #ifdef __OpenBSD__ #include #endif #ifdef __linux__ #include "isp_linux.h" #endif #ifdef ISP_TARGET_MODE -static const char atiocope[] = - "ATIO returned for lun %d because it was in the middle of Bus Device Reset " - "on bus %d"; -static const char atior[] = - "ATIO returned on for lun %d on from loopid %d because a Bus Reset " - "occurred on bus %d"; +static const char atiocope[] = "ATIO returned for lun %d because it was in the middle of Bus Device Reset on bus %d"; +static const char atior[] = "ATIO returned on for lun %d on from loopid %d because a Bus Reset occurred on bus %d"; +static const char rqo[] = "%s: Request Queue Overflow"; static void isp_got_msg(ispsoftc_t *, in_entry_t *); static void isp_got_msg_fc(ispsoftc_t *, in_fcentry_t *); static void isp_got_tmf_24xx(ispsoftc_t *, at7_entry_t *); static void isp_handle_atio(ispsoftc_t *, at_entry_t *); static void isp_handle_atio2(ispsoftc_t *, at2_entry_t *); static void isp_handle_ctio(ispsoftc_t *, ct_entry_t *); static void isp_handle_ctio2(ispsoftc_t *, ct2_entry_t *); static void isp_handle_ctio7(ispsoftc_t *, ct7_entry_t *); +static void isp_handle_24xx_inotify(ispsoftc_t *, in_fcentry_24xx_t *); /* * The Qlogic driver gets an interrupt to look at response queue entries. * Some of these are status completions for initiatior mode commands, but * if target mode is enabled, we get a whole wad of response queue entries * to be handled here. * * Basically the split into 3 main groups: Lun Enable/Modification responses, * SCSI Command processing, and Immediate Notification events. * * You start by writing a request queue entry to enable target mode (and * establish some resource limitations which you can modify later). * The f/w responds with a LUN ENABLE or LUN MODIFY response with * the status of this action. If the enable was successful, you can expect... 
* * Response queue entries with SCSI commands encapsulate show up in an ATIO * (Accept Target IO) type- sometimes with enough info to stop the command at * this level. Ultimately the driver has to feed back to the f/w's request * queue a sequence of CTIOs (continue target I/O) that describe data to * be moved and/or status to be sent) and finally finishing with sending * to the f/w's response queue an ATIO which then completes the handshake * with the f/w for that command. There's a lot of variations on this theme, * including flags you can set in the CTIO for the Qlogic 2X00 fibre channel * cards that 'auto-replenish' the f/w's ATIO count, but this is the basic * gist of it. * * The third group that can show up in the response queue are Immediate * Notification events. These include things like notifications of SCSI bus * resets, or Bus Device Reset messages or other messages received. This * a classic oddbins area. It can get a little weird because you then turn * around and acknowledge the Immediate Notify by writing an entry onto the * request queue and then the f/w turns around and gives you an acknowledgement * to *your* acknowledgement on the response queue (the idea being to let * the f/w tell you when the event is *really* over I guess). * */ /* * A new response queue entry has arrived. The interrupt service code * has already swizzled it into the platform dependent from canonical form. * * Because of the way this driver is designed, unfortunately most of the * actual synchronization work has to be done in the platform specific * code- we have no synchroniation primitives in the common code. 
*/ int isp_target_notify(ispsoftc_t *isp, void *vptr, uint32_t *optrp) { uint16_t status; uint32_t seqid; union { at_entry_t *atiop; at2_entry_t *at2iop; at2e_entry_t *at2eiop; at7_entry_t *at7iop; ct_entry_t *ctiop; ct2_entry_t *ct2iop; ct2e_entry_t *ct2eiop; ct7_entry_t *ct7iop; lun_entry_t *lunenp; in_entry_t *inotp; in_fcentry_t *inot_fcp; in_fcentry_e_t *inote_fcp; in_fcentry_24xx_t *inot_24xx; na_entry_t *nackp; na_fcentry_t *nack_fcp; na_fcentry_e_t *nacke_fcp; na_fcentry_24xx_t *nack_24xx; isphdr_t *hp; abts_t *abts; abts_rsp_t *abts_rsp; els_t *els; void * *vp; #define atiop unp.atiop #define at2iop unp.at2iop #define at2eiop unp.at2eiop #define at7iop unp.at7iop #define ctiop unp.ctiop #define ct2iop unp.ct2iop #define ct2eiop unp.ct2eiop #define ct7iop unp.ct7iop #define lunenp unp.lunenp #define inotp unp.inotp #define inot_fcp unp.inot_fcp #define inote_fcp unp.inote_fcp #define inot_24xx unp.inot_24xx #define nackp unp.nackp #define nack_fcp unp.nack_fcp #define nacke_fcp unp.nacke_fcp #define nack_24xx unp.nack_24xx #define abts unp.abts #define abts_rsp unp.abts_rsp #define els unp.els #define hdrp unp.hp } unp; uint8_t local[QENTRY_LEN]; + uint16_t iid; int bus, type, level, rval = 1; + isp_notify_t notify; type = isp_get_response_type(isp, (isphdr_t *)vptr); unp.vp = vptr; ISP_TDQE(isp, "isp_target_notify", (int) *optrp, vptr); - switch(type) { + switch (type) { case RQSTYPE_ATIO: if (IS_24XX(isp)) { int len; isp_get_atio7(isp, at7iop, (at7_entry_t *) local); at7iop = (at7_entry_t *) local; /* - * Check for and do something with commands whose IULEN - * extends past a singel queue entry. + * Check for and do something with commands whose + * IULEN extends past a single queue entry. 
*/ len = at7iop->at_ta_len & 0xfffff; if (len > (QENTRY_LEN - 8)) { len -= (QENTRY_LEN - 8); - isp_prt(isp, ISP_LOGINFO, - "long IU length (%d) ignored", len); + isp_prt(isp, ISP_LOGINFO, "long IU length (%d) ignored", len); while (len > 0) { - *optrp = ISP_NXT_QENTRY(*optrp, - RESULT_QUEUE_LEN(isp)); + *optrp = ISP_NXT_QENTRY(*optrp, RESULT_QUEUE_LEN(isp)); len -= QENTRY_LEN; } } /* * Check for a task management function */ if (at7iop->at_cmnd.fcp_cmnd_task_management) { isp_got_tmf_24xx(isp, at7iop); break; } /* * Just go straight to outer layer for this one. */ - (void) isp_async(isp, ISPASYNC_TARGET_ACTION, local); + isp_async(isp, ISPASYNC_TARGET_ACTION, local); } else { isp_get_atio(isp, atiop, (at_entry_t *) local); isp_handle_atio(isp, (at_entry_t *) local); } break; case RQSTYPE_CTIO: isp_get_ctio(isp, ctiop, (ct_entry_t *) local); isp_handle_ctio(isp, (ct_entry_t *) local); break; case RQSTYPE_ATIO2: - if (FCPARAM(isp)->isp_2klogin) { + if (ISP_CAP_2KLOGIN(isp)) { isp_get_atio2e(isp, at2eiop, (at2e_entry_t *) local); } else { isp_get_atio2(isp, at2iop, (at2_entry_t *) local); } isp_handle_atio2(isp, (at2_entry_t *) local); break; case RQSTYPE_CTIO3: case RQSTYPE_CTIO2: - if (FCPARAM(isp)->isp_2klogin) { + if (ISP_CAP_2KLOGIN(isp)) { isp_get_ctio2e(isp, ct2eiop, (ct2e_entry_t *) local); } else { isp_get_ctio2(isp, ct2iop, (ct2_entry_t *) local); } isp_handle_ctio2(isp, (ct2_entry_t *) local); break; case RQSTYPE_CTIO7: isp_get_ctio7(isp, ct7iop, (ct7_entry_t *) local); isp_handle_ctio7(isp, (ct7_entry_t *) local); break; case RQSTYPE_ENABLE_LUN: case RQSTYPE_MODIFY_LUN: isp_get_enable_lun(isp, lunenp, (lun_entry_t *) local); - (void) isp_async(isp, ISPASYNC_TARGET_ACTION, local); + isp_async(isp, ISPASYNC_TARGET_ACTION, local); break; case RQSTYPE_NOTIFY: - /* - * Either the ISP received a SCSI message it can't - * handle, or it's returning an Immed. Notify entry - * we sent. We can send Immed. 
Notify entries to - * increment the firmware's resource count for them - * (we set this initially in the Enable Lun entry). - */ bus = 0; if (IS_24XX(isp)) { - isp_get_notify_24xx(isp, inot_24xx, - (in_fcentry_24xx_t *)local); + isp_get_notify_24xx(isp, inot_24xx, (in_fcentry_24xx_t *)local); inot_24xx = (in_fcentry_24xx_t *) local; - status = inot_24xx->in_status; - seqid = inot_24xx->in_rxid; - isp_prt(isp, ISP_LOGTDEBUG0, - "Immediate Notify status=0x%x seqid=0x%x", - status, seqid); - switch (status) { - case IN24XX_LIP_RESET: - case IN24XX_LINK_RESET: - case IN24XX_PORT_LOGOUT: - case IN24XX_PORT_CHANGED: - case IN24XX_LINK_FAILED: - case IN24XX_SRR_RCVD: - case IN24XX_ELS_RCVD: - (void) isp_async(isp, ISPASYNC_TARGET_ACTION, - &local); - break; - default: - isp_prt(isp, ISP_LOGINFO, - "isp_target_notify: unknown status (0x%x)", - status); - isp_notify_ack(isp, local); - break; - } + isp_handle_24xx_inotify(isp, inot_24xx); break; - } else if (IS_FC(isp)) { - if (FCPARAM(isp)->isp_2klogin) { - isp_get_notify_fc_e(isp, inote_fcp, - (in_fcentry_e_t *)local); + } + if (IS_FC(isp)) { + if (ISP_CAP_2KLOGIN(isp)) { + in_fcentry_e_t *ecp = (in_fcentry_e_t *)local; + isp_get_notify_fc_e(isp, inote_fcp, ecp); + iid = ecp->in_iid; + status = ecp->in_status; + seqid = ecp->in_seqid; } else { - isp_get_notify_fc(isp, inot_fcp, - (in_fcentry_t *)local); + in_fcentry_t *fcp = (in_fcentry_t *)local; + isp_get_notify_fc(isp, inot_fcp, fcp); + iid = fcp->in_iid; + status = fcp->in_status; + seqid = fcp->in_seqid; } - inot_fcp = (in_fcentry_t *) local; - status = inot_fcp->in_status; - seqid = inot_fcp->in_seqid; } else { - isp_get_notify(isp, inotp, (in_entry_t *)local); - inotp = (in_entry_t *) local; - status = inotp->in_status & 0xff; - seqid = inotp->in_seqid; + in_entry_t *inp = (in_entry_t *)local; + isp_get_notify(isp, inotp, inp); + status = inp->in_status & 0xff; + seqid = inp->in_seqid; + iid = inp->in_iid; if (IS_DUALBUS(isp)) { - bus = GET_BUS_VAL(inotp->in_iid); - 
SET_BUS_VAL(inotp->in_iid, 0); + bus = GET_BUS_VAL(inp->in_iid); + SET_BUS_VAL(inp->in_iid, 0); } } - isp_prt(isp, ISP_LOGTDEBUG0, - "Immediate Notify On Bus %d, status=0x%x seqid=0x%x", - bus, status, seqid); + isp_prt(isp, ISP_LOGTDEBUG0, "Immediate Notify On Bus %d, status=0x%x seqid=0x%x", bus, status, seqid); switch (status) { case IN_MSG_RECEIVED: case IN_IDE_RECEIVED: if (IS_FC(isp)) { isp_got_msg_fc(isp, (in_fcentry_t *)local); } else { isp_got_msg(isp, (in_entry_t *)local); } break; case IN_RSRC_UNAVAIL: isp_prt(isp, ISP_LOGINFO, "Firmware out of ATIOs"); - isp_notify_ack(isp, local); + (void) isp_notify_ack(isp, local); break; - case IN_RESET: - { - /* - * We form the notify structure here because we need - * to mark it as needing a NOTIFY ACK on return. - */ - tmd_notify_t notify; - MEMZERO(¬ify, sizeof (tmd_notify_t)); + case IN_RESET: + ISP_MEMZERO(¬ify, sizeof (isp_notify_t)); notify.nt_hba = isp; - notify.nt_iid = INI_ANY; - /* nt_tgt set in outer layers */ + notify.nt_wwn = INI_ANY; + notify.nt_tgt = TGT_ANY; + notify.nt_nphdl = iid; + notify.nt_sid = PORT_ANY; + notify.nt_did = PORT_ANY; notify.nt_lun = LUN_ANY; notify.nt_tagval = TAG_ANY; + notify.nt_tagval |= (((uint64_t)(isp->isp_serno++)) << 32); notify.nt_ncode = NT_BUS_RESET; notify.nt_need_ack = 1; - (void) isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); + notify.nt_lreserved = local; + isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); break; - } + case IN_PORT_LOGOUT: + ISP_MEMZERO(¬ify, sizeof (isp_notify_t)); + notify.nt_hba = isp; + notify.nt_wwn = INI_ANY; + notify.nt_nphdl = iid; + notify.nt_sid = PORT_ANY; + notify.nt_did = PORT_ANY; + notify.nt_ncode = NT_LOGOUT; + notify.nt_need_ack = 1; + notify.nt_lreserved = local; + isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); + break; + case IN_ABORT_TASK: - case IN_PORT_CHANGED: + ISP_MEMZERO(¬ify, sizeof (isp_notify_t)); + notify.nt_hba = isp; + notify.nt_wwn = INI_ANY; + notify.nt_nphdl = iid; + notify.nt_sid = PORT_ANY; + notify.nt_did = 
PORT_ANY; + notify.nt_ncode = NT_ABORT_TASK; + notify.nt_need_ack = 1; + notify.nt_lreserved = local; + isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); + break; + case IN_GLOBAL_LOGO: - (void) isp_async(isp, ISPASYNC_TARGET_ACTION, &local); + isp_prt(isp, ISP_LOGTINFO, "%s: all ports logged out", __func__); + ISP_MEMZERO(¬ify, sizeof (isp_notify_t)); + notify.nt_hba = isp; + notify.nt_wwn = INI_ANY; + notify.nt_nphdl = NIL_HANDLE; + notify.nt_sid = PORT_ANY; + notify.nt_did = PORT_ANY; + notify.nt_ncode = NT_GLOBAL_LOGOUT; + isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); + (void) isp_notify_ack(isp, local); break; + + case IN_PORT_CHANGED: + isp_prt(isp, ISP_LOGTINFO, "%s: port changed", __func__); + (void) isp_notify_ack(isp, local); + break; + default: - isp_prt(isp, ISP_LOGINFO, - "isp_target_notify: unknown status (0x%x)", - status); - isp_notify_ack(isp, local); + ISP_SNPRINTF(local, sizeof local, "%s: unknown status to RQSTYPE_NOTIFY (0x%x)", __func__, status); + isp_print_bytes(isp, local, QENTRY_LEN, vptr); + (void) isp_notify_ack(isp, local); break; } break; case RQSTYPE_NOTIFY_ACK: /* * The ISP is acknowledging our acknowledgement of an * Immediate Notify entry for some asynchronous event. 
*/ if (IS_24XX(isp)) { - isp_get_notify_ack_24xx(isp, nack_24xx, - (na_fcentry_24xx_t *) local); + isp_get_notify_ack_24xx(isp, nack_24xx, (na_fcentry_24xx_t *) local); nack_24xx = (na_fcentry_24xx_t *) local; if (nack_24xx->na_status != NA_OK) { level = ISP_LOGINFO; } else { level = ISP_LOGTDEBUG1; } - isp_prt(isp, level, - "Notify Ack Status=0x%x; Subcode 0x%x seqid=0x%x", - nack_24xx->na_status, nack_24xx->na_status_subcode, - nack_24xx->na_rxid); + isp_prt(isp, level, "Notify Ack Status=0x%x; Subcode 0x%x seqid=0x%x", nack_24xx->na_status, nack_24xx->na_status_subcode, nack_24xx->na_rxid); } else if (IS_FC(isp)) { - if (FCPARAM(isp)->isp_2klogin) { - isp_get_notify_ack_fc_e(isp, nacke_fcp, - (na_fcentry_e_t *)local); + if (ISP_CAP_2KLOGIN(isp)) { + isp_get_notify_ack_fc_e(isp, nacke_fcp, (na_fcentry_e_t *)local); } else { - isp_get_notify_ack_fc(isp, nack_fcp, - (na_fcentry_t *)local); + isp_get_notify_ack_fc(isp, nack_fcp, (na_fcentry_t *)local); } nack_fcp = (na_fcentry_t *)local; if (nack_fcp->na_status != NA_OK) { level = ISP_LOGINFO; } else { level = ISP_LOGTDEBUG1; } - isp_prt(isp, level, - "Notify Ack Status=0x%x seqid 0x%x", - nack_fcp->na_status, nack_fcp->na_seqid); + isp_prt(isp, level, "Notify Ack Status=0x%x seqid 0x%x", nack_fcp->na_status, nack_fcp->na_seqid); } else { isp_get_notify_ack(isp, nackp, (na_entry_t *)local); nackp = (na_entry_t *)local; if (nackp->na_status != NA_OK) { level = ISP_LOGINFO; } else { level = ISP_LOGTDEBUG1; } - isp_prt(isp, level, - "Notify Ack event 0x%x status=0x%x seqid 0x%x", - nackp->na_event, nackp->na_status, nackp->na_seqid); + isp_prt(isp, level, "Notify Ack event 0x%x status=0x%x seqid 0x%x", nackp->na_event, nackp->na_status, nackp->na_seqid); } break; case RQSTYPE_ABTS_RCVD: isp_get_abts(isp, abts, (abts_t *)local); - (void) isp_async(isp, ISPASYNC_TARGET_ACTION, &local); + isp_async(isp, ISPASYNC_TARGET_ACTION, &local); break; case RQSTYPE_ABTS_RSP: isp_get_abts_rsp(isp, abts_rsp, (abts_rsp_t *)local); 
abts_rsp = (abts_rsp_t *) local; if (abts_rsp->abts_rsp_status) { level = ISP_LOGINFO; } else { level = ISP_LOGTDEBUG0; } - isp_prt(isp, level, - "ABTS RSP response[0x%x]: status=0x%x sub=(0x%x 0x%x)", - abts_rsp->abts_rsp_rxid_task, abts_rsp->abts_rsp_status, - abts_rsp->abts_rsp_payload.rsp.subcode1, - abts_rsp->abts_rsp_payload.rsp.subcode2); + isp_prt(isp, level, "ABTS RSP response[0x%x]: status=0x%x sub=(0x%x 0x%x)", abts_rsp->abts_rsp_rxid_task, abts_rsp->abts_rsp_status, + abts_rsp->abts_rsp_payload.rsp.subcode1, abts_rsp->abts_rsp_payload.rsp.subcode2); break; default: - isp_prt(isp, ISP_LOGERR, - "Unknown entry type 0x%x in isp_target_notify", type); + isp_prt(isp, ISP_LOGERR, "%s: unknown entry type 0x%x", __func__, type); rval = 0; break; } #undef atiop #undef at2iop #undef at2eiop #undef at7iop #undef ctiop #undef ct2iop #undef ct2eiop #undef ct7iop #undef lunenp #undef inotp #undef inot_fcp #undef inote_fcp #undef inot_24xx #undef nackp #undef nack_fcp #undef nacke_fcp #undef hack_24xx #undef abts #undef abts_rsp #undef els #undef hdrp return (rval); } - + /* - * Toggle (on/off) target mode for bus/target/lun + * Toggle (on/off) target mode for bus/target/lun. * * The caller has checked for overlap and legality. * * Note that not all of bus, target or lun can be paid attention to. * Note also that this action will not be complete until the f/w writes - * response entry. The caller is responsible for synchronizing this. + * a response entry. The caller is responsible for synchronizing with this. 
*/ int -isp_lun_cmd(ispsoftc_t *isp, int cmd, int bus, int tgt, int lun, - int cmd_cnt, int inot_cnt, uint32_t opaque) +isp_lun_cmd(ispsoftc_t *isp, int cmd, int bus, int lun, int cmd_cnt, int inot_cnt) { lun_entry_t el; - uint32_t nxti, optr; void *outp; - - MEMZERO(&el, sizeof (el)); + ISP_MEMZERO(&el, sizeof (el)); if (IS_DUALBUS(isp)) { el.le_rsvd = (bus & 0x1) << 7; } el.le_cmd_count = cmd_cnt; el.le_in_count = inot_cnt; if (cmd == RQSTYPE_ENABLE_LUN) { if (IS_SCSI(isp)) { el.le_flags = LUN_TQAE|LUN_DISAD; el.le_cdb6len = 12; el.le_cdb7len = 12; } } else if (cmd == -RQSTYPE_ENABLE_LUN) { cmd = RQSTYPE_ENABLE_LUN; el.le_cmd_count = 0; el.le_in_count = 0; } else if (cmd == -RQSTYPE_MODIFY_LUN) { cmd = RQSTYPE_MODIFY_LUN; el.le_ops = LUN_CCDECR | LUN_INDECR; } else { el.le_ops = LUN_CCINCR | LUN_ININCR; } el.le_header.rqs_entry_type = cmd; el.le_header.rqs_entry_count = 1; - el.le_reserved = opaque; if (IS_SCSI(isp)) { - el.le_tgt = tgt; + el.le_tgt = SDPARAM(isp, bus)->isp_initiator_id; el.le_lun = lun; - } else if (FCPARAM(isp)->isp_sccfw == 0) { + } else if (ISP_CAP_SCCFW(isp) == 0) { el.le_lun = lun; } el.le_timeout = 30; - if (isp_getrqentry(isp, &nxti, &optr, &outp)) { - isp_prt(isp, ISP_LOGERR, - "Request Queue Overflow in isp_lun_cmd"); + outp = isp_getrqentry(isp); + if (outp == NULL) { + isp_prt(isp, ISP_LOGERR, rqo, __func__); return (-1); } - ISP_TDQE(isp, "isp_lun_cmd", (int) optr, &el); isp_put_enable_lun(isp, &el, outp); - ISP_ADD_REQUEST(isp, nxti); + ISP_TDQE(isp, "isp_lun_cmd", isp->isp_reqidx, &el); + ISP_SYNC_REQUEST(isp); return (0); } - int isp_target_put_entry(ispsoftc_t *isp, void *ap) { void *outp; - uint32_t nxti, optr; uint8_t etype = ((isphdr_t *) ap)->rqs_entry_type; - if (isp_getrqentry(isp, &nxti, &optr, &outp)) { - isp_prt(isp, ISP_LOGWARN, - "Request Queue Overflow in isp_target_put_entry"); + outp = isp_getrqentry(isp); + if (outp == NULL) { + isp_prt(isp, ISP_LOGWARN, rqo, __func__); return (-1); } switch (etype) { case 
RQSTYPE_ATIO: isp_put_atio(isp, (at_entry_t *) ap, (at_entry_t *) outp); break; case RQSTYPE_ATIO2: - if (FCPARAM(isp)->isp_2klogin) { - isp_put_atio2e(isp, (at2e_entry_t *) ap, - (at2e_entry_t *) outp); + if (ISP_CAP_2KLOGIN(isp)) { + isp_put_atio2e(isp, (at2e_entry_t *) ap, (at2e_entry_t *) outp); } else { - isp_put_atio2(isp, (at2_entry_t *) ap, - (at2_entry_t *) outp); + isp_put_atio2(isp, (at2_entry_t *) ap, (at2_entry_t *) outp); } break; case RQSTYPE_CTIO: isp_put_ctio(isp, (ct_entry_t *) ap, (ct_entry_t *) outp); break; case RQSTYPE_CTIO2: - if (FCPARAM(isp)->isp_2klogin) { - isp_put_ctio2e(isp, (ct2e_entry_t *) ap, - (ct2e_entry_t *) outp); + if (ISP_CAP_2KLOGIN(isp)) { + isp_put_ctio2e(isp, (ct2e_entry_t *) ap, (ct2e_entry_t *) outp); } else { - isp_put_ctio2(isp, (ct2_entry_t *) ap, - (ct2_entry_t *) outp); + isp_put_ctio2(isp, (ct2_entry_t *) ap, (ct2_entry_t *) outp); } break; case RQSTYPE_CTIO7: isp_put_ctio7(isp, (ct7_entry_t *) ap, (ct7_entry_t *) outp); break; default: - isp_prt(isp, ISP_LOGERR, - "Unknown type 0x%x in isp_put_entry", etype); + isp_prt(isp, ISP_LOGERR, "%s: Unknown type 0x%x", __func__, etype); return (-1); } - ISP_TDQE(isp, "isp_target_put_entry", (int) optr, ap); - ISP_ADD_REQUEST(isp, nxti); + ISP_TDQE(isp, __func__, isp->isp_reqidx, ap); + ISP_SYNC_REQUEST(isp); return (0); } int isp_target_put_atio(ispsoftc_t *isp, void *arg) { union { at_entry_t _atio; at2_entry_t _atio2; at2e_entry_t _atio2e; } atun; - MEMZERO(&atun, sizeof atun); + ISP_MEMZERO(&atun, sizeof atun); if (IS_FC(isp)) { at2_entry_t *aep = arg; atun._atio2.at_header.rqs_entry_type = RQSTYPE_ATIO2; atun._atio2.at_header.rqs_entry_count = 1; - if (FCPARAM(isp)->isp_sccfw) { + if (ISP_CAP_SCCFW(isp)) { atun._atio2.at_scclun = aep->at_scclun; } else { atun._atio2.at_lun = (uint8_t) aep->at_lun; } - if (FCPARAM(isp)->isp_2klogin) { + if (ISP_CAP_2KLOGIN(isp)) { atun._atio2e.at_iid = ((at2e_entry_t *)aep)->at_iid; } else { atun._atio2.at_iid = aep->at_iid; } 
atun._atio2.at_rxid = aep->at_rxid; atun._atio2.at_status = CT_OK; } else { at_entry_t *aep = arg; atun._atio.at_header.rqs_entry_type = RQSTYPE_ATIO; atun._atio.at_header.rqs_entry_count = 1; atun._atio.at_handle = aep->at_handle; atun._atio.at_iid = aep->at_iid; atun._atio.at_tgt = aep->at_tgt; atun._atio.at_lun = aep->at_lun; atun._atio.at_tag_type = aep->at_tag_type; atun._atio.at_tag_val = aep->at_tag_val; atun._atio.at_status = (aep->at_flags & AT_TQAE); atun._atio.at_status |= CT_OK; } return (isp_target_put_entry(isp, &atun)); } /* * Command completion- both for handling cases of no resources or * no blackhole driver, or other cases where we have to, inline, * finish the command sanely, or for normal command completion. * * The 'completion' code value has the scsi status byte in the low 8 bits. * If status is a CHECK CONDITION and bit 8 is nonzero, then bits 12..15 have * the sense key and bits 16..23 have the ASCQ and bits 24..31 have the ASC * values. * * NB: the key, asc, ascq, cannot be used for parallel SCSI as it doesn't * NB: inline SCSI sense reporting. As such, we lose this information. XXX. * * For both parallel && fibre channel, we use the feature that does * an automatic resource autoreplenish so we don't have then later do * put of an atio to replenish the f/w's resource count. */ int -isp_endcmd(ispsoftc_t *isp, void *arg, uint32_t code, uint32_t hdl) +isp_endcmd(ispsoftc_t *isp, ...) 
{ - int sts; + uint32_t code, hdl; + uint8_t sts; union { ct_entry_t _ctio; ct2_entry_t _ctio2; ct2e_entry_t _ctio2e; ct7_entry_t _ctio7; } un; + va_list ap; - MEMZERO(&un, sizeof un); - sts = code & 0xff; + ISP_MEMZERO(&un, sizeof un); if (IS_24XX(isp)) { - at7_entry_t *aep = arg; + int vpidx, nphdl; + at7_entry_t *aep; ct7_entry_t *cto = &un._ctio7; + va_start(ap, isp); + aep = va_arg(ap, at7_entry_t *); + nphdl = va_arg(ap, int); + /* + * Note that vpidx may equal 0xff (unknown) here + */ + vpidx = va_arg(ap, int); + code = va_arg(ap, uint32_t); + hdl = va_arg(ap, uint32_t); + va_end(ap); + isp_prt(isp, ISP_LOGTDEBUG0, "%s: [RX_ID 0x%x] chan %d code %x", __func__, aep->at_rxid, vpidx, code); + + sts = code & 0xff; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; cto->ct_header.rqs_entry_count = 1; -/* XXXX */ cto->ct_nphdl = aep->at_hdr.seq_id; + cto->ct_nphdl = nphdl; cto->ct_rxid = aep->at_rxid; - cto->ct_iid_lo = (aep->at_hdr.s_id[1] << 8) | - aep->at_hdr.s_id[2]; + cto->ct_iid_lo = (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2]; cto->ct_iid_hi = aep->at_hdr.s_id[0]; cto->ct_oxid = aep->at_hdr.ox_id; cto->ct_scsi_status = sts; - cto->ct_flags = CT7_FLAG_MODE1 | CT7_NO_DATA | CT7_SENDSTATUS; - if (sts == SCSI_CHECK && (code & ECMD_SVALID)) { - cto->rsp.m1.ct_resplen = 16; + cto->ct_vpidx = vpidx; + cto->ct_flags = CT7_NOACK; + if (code & ECMD_TERMINATE) { + cto->ct_flags |= CT7_TERMINATE; + } else if (code & ECMD_SVALID) { + cto->ct_flags |= CT7_FLAG_MODE1 | CT7_SENDSTATUS; + cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8); + cto->rsp.m1.ct_resplen = cto->ct_senselen = min(16, MAXRESPLEN_24XX); + ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp)); cto->rsp.m1.ct_resp[0] = 0xf0; cto->rsp.m1.ct_resp[2] = (code >> 12) & 0xf; cto->rsp.m1.ct_resp[7] = 8; cto->rsp.m1.ct_resp[12] = (code >> 24) & 0xff; cto->rsp.m1.ct_resp[13] = (code >> 16) & 0xff; + } else { + cto->ct_flags |= CT7_FLAG_MODE1 | CT7_SENDSTATUS; } if (aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl) { 
cto->ct_resid = aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl; - cto->ct_scsi_status |= CT2_DATA_UNDER; + if (cto->ct_resid < 0) { + cto->ct_scsi_status |= (FCP_RESID_OVERFLOW << 8); + } else if (cto->ct_resid > 0) { + cto->ct_scsi_status |= (FCP_RESID_UNDERFLOW << 8); + } } cto->ct_syshandle = hdl; } else if (IS_FC(isp)) { - at2_entry_t *aep = arg; + at2_entry_t *aep; ct2_entry_t *cto = &un._ctio2; + va_start(ap, isp); + aep = va_arg(ap, at2_entry_t *); + code = va_arg(ap, uint32_t); + hdl = va_arg(ap, uint32_t); + va_end(ap); + + isp_prt(isp, ISP_LOGTDEBUG0, "%s: [RX_ID 0x%x] code %x", __func__, aep->at_rxid, code); + + sts = code & 0xff; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; cto->ct_header.rqs_entry_count = 1; - if (FCPARAM(isp)->isp_sccfw == 0) { + if (ISP_CAP_SCCFW(isp) == 0) { cto->ct_lun = aep->at_lun; } - if (FCPARAM(isp)->isp_2klogin) { + if (ISP_CAP_2KLOGIN(isp)) { un._ctio2e.ct_iid = ((at2e_entry_t *)aep)->at_iid; } else { cto->ct_iid = aep->at_iid; } cto->ct_rxid = aep->at_rxid; cto->rsp.m1.ct_scsi_status = sts; cto->ct_flags = CT2_SENDSTATUS | CT2_NO_DATA | CT2_FLAG_MODE1; if (hdl == 0) { cto->ct_flags |= CT2_CCINCR; } if (aep->at_datalen) { cto->ct_resid = aep->at_datalen; cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER; } if (sts == SCSI_CHECK && (code & ECMD_SVALID)) { cto->rsp.m1.ct_resp[0] = 0xf0; cto->rsp.m1.ct_resp[2] = (code >> 12) & 0xf; cto->rsp.m1.ct_resp[7] = 8; cto->rsp.m1.ct_resp[12] = (code >> 24) & 0xff; cto->rsp.m1.ct_resp[13] = (code >> 16) & 0xff; cto->rsp.m1.ct_senselen = 16; cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; } cto->ct_syshandle = hdl; } else { - at_entry_t *aep = arg; + at_entry_t *aep; ct_entry_t *cto = &un._ctio; + va_start(ap, isp); + aep = va_arg(ap, at_entry_t *); + code = va_arg(ap, uint32_t); + hdl = va_arg(ap, uint32_t); + va_end(ap); + isp_prt(isp, ISP_LOGTDEBUG0, "%s: [IID %d] code %x", __func__, aep->at_iid, code); + sts = code; + cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; cto->ct_header.rqs_entry_count = 1; 
cto->ct_fwhandle = aep->at_handle; cto->ct_iid = aep->at_iid; cto->ct_tgt = aep->at_tgt; cto->ct_lun = aep->at_lun; cto->ct_tag_type = aep->at_tag_type; cto->ct_tag_val = aep->at_tag_val; if (aep->at_flags & AT_TQAE) { cto->ct_flags |= CT_TQAE; } cto->ct_flags = CT_SENDSTATUS | CT_NO_DATA; if (hdl == 0) { cto->ct_flags |= CT_CCINCR; } cto->ct_scsi_status = sts; cto->ct_syshandle = hdl; } return (isp_target_put_entry(isp, &un)); } /* * These are either broadcast events or specifically CTIO fast completion */ + int isp_target_async(ispsoftc_t *isp, int bus, int event) { - tmd_notify_t notify; + isp_notify_t notify; - MEMZERO(¬ify, sizeof (tmd_notify_t)); + ISP_MEMZERO(¬ify, sizeof (isp_notify_t)); notify.nt_hba = isp; - notify.nt_iid = INI_ANY; - /* nt_tgt set in outer layers */ + notify.nt_wwn = INI_ANY; + notify.nt_nphdl = NIL_HANDLE; + notify.nt_sid = PORT_ANY; + notify.nt_did = PORT_ANY; + notify.nt_tgt = TGT_ANY; + notify.nt_channel = bus; notify.nt_lun = LUN_ANY; notify.nt_tagval = TAG_ANY; + notify.nt_tagval |= (((uint64_t)(isp->isp_serno++)) << 32); - if (IS_SCSI(isp)) { - TAG_INSERT_BUS(notify.nt_tagval, bus); - } - switch (event) { case ASYNC_LOOP_UP: case ASYNC_PTPMODE: + isp_prt(isp, ISP_LOGTDEBUG0, "%s: LOOP UP", __func__); notify.nt_ncode = NT_LINK_UP; - (void) isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); + isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); break; case ASYNC_LOOP_DOWN: + isp_prt(isp, ISP_LOGTDEBUG0, "%s: LOOP DOWN", __func__); notify.nt_ncode = NT_LINK_DOWN; - (void) isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); + isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); break; case ASYNC_LIP_ERROR: case ASYNC_LIP_F8: case ASYNC_LIP_OCCURRED: case ASYNC_LOOP_RESET: + isp_prt(isp, ISP_LOGTDEBUG0, "%s: LIP RESET", __func__); notify.nt_ncode = NT_LIP_RESET; - (void) isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); + isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); break; case ASYNC_BUS_RESET: case ASYNC_TIMEOUT_RESET: /* XXX: where does this come from ? 
*/ + isp_prt(isp, ISP_LOGTDEBUG0, "%s: BUS RESET", __func__); notify.nt_ncode = NT_BUS_RESET; - (void) isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); + isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); break; case ASYNC_DEVICE_RESET: + isp_prt(isp, ISP_LOGTDEBUG0, "%s: DEVICE RESET", __func__); notify.nt_ncode = NT_TARGET_RESET; - (void) isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); + isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); break; case ASYNC_CTIO_DONE: { uint8_t storage[QENTRY_LEN]; + isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO DONE", __func__); memset(storage, 0, QENTRY_LEN); if (IS_24XX(isp)) { ct7_entry_t *ct = (ct7_entry_t *) storage; ct->ct_header.rqs_entry_type = RQSTYPE_CTIO7; ct->ct_nphdl = CT7_OK; ct->ct_syshandle = bus; - ct->ct_flags = CT7_SENDSTATUS|CT7_FASTPOST; + ct->ct_flags = CT7_SENDSTATUS; } else if (IS_FC(isp)) { /* This should also suffice for 2K login code */ ct2_entry_t *ct = (ct2_entry_t *) storage; ct->ct_header.rqs_entry_type = RQSTYPE_CTIO2; ct->ct_status = CT_OK; ct->ct_syshandle = bus; ct->ct_flags = CT2_SENDSTATUS|CT2_FASTPOST; } else { ct_entry_t *ct = (ct_entry_t *) storage; ct->ct_header.rqs_entry_type = RQSTYPE_CTIO; ct->ct_status = CT_OK; ct->ct_fwhandle = bus; ct->ct_flags = CT_SENDSTATUS; } - (void) isp_async(isp, ISPASYNC_TARGET_ACTION, storage); + isp_async(isp, ISPASYNC_TARGET_ACTION, storage); break; } default: - isp_prt(isp, ISP_LOGERR, - "isp_target_async: unknown event 0x%x", event); + isp_prt(isp, ISP_LOGERR, "%s: unknown event 0x%x", __func__, event); if (isp->isp_state == ISP_RUNSTATE) { - isp_notify_ack(isp, NULL); + (void) isp_notify_ack(isp, NULL); } break; } return (0); } /* * Process a received message. 
* The ISP firmware can handle most messages, there are only * a few that we need to deal with: * - abort: clean up the current command * - abort tag and clear queue */ static void isp_got_msg(ispsoftc_t *isp, in_entry_t *inp) { - tmd_notify_t nt; + isp_notify_t notify; uint8_t status = inp->in_status & ~QLTM_SVALID; - MEMZERO(&nt, sizeof (nt)); - nt.nt_hba = isp; - nt.nt_iid = GET_IID_VAL(inp->in_iid); - nt.nt_tgt = inp->in_tgt; - nt.nt_lun = inp->in_lun; - IN_MAKE_TAGID(nt.nt_tagval, GET_BUS_VAL(inp->in_iid), 0, inp); - nt.nt_lreserved = inp; + ISP_MEMZERO(¬ify, sizeof (notify)); + notify.nt_hba = isp; + notify.nt_wwn = INI_ANY; + notify.nt_nphdl = GET_IID_VAL(inp->in_iid); + notify.nt_sid = PORT_ANY; + notify.nt_did = PORT_ANY; + notify.nt_channel = GET_BUS_VAL(inp->in_iid); + notify.nt_tgt = inp->in_tgt; + notify.nt_lun = inp->in_lun; + IN_MAKE_TAGID(notify.nt_tagval, inp); + notify.nt_tagval |= (((uint64_t)(isp->isp_serno++)) << 32); + notify.nt_lreserved = inp; if (status == IN_IDE_RECEIVED || status == IN_MSG_RECEIVED) { switch (inp->in_msg[0]) { case MSG_ABORT: - nt.nt_ncode = NT_ABORT_TASK_SET; + notify.nt_ncode = NT_ABORT_TASK_SET; break; case MSG_BUS_DEV_RESET: - nt.nt_ncode = NT_TARGET_RESET; + notify.nt_ncode = NT_TARGET_RESET; break; case MSG_ABORT_TAG: - nt.nt_ncode = NT_ABORT_TASK; + notify.nt_ncode = NT_ABORT_TASK; break; case MSG_CLEAR_QUEUE: - nt.nt_ncode = NT_CLEAR_TASK_SET; + notify.nt_ncode = NT_CLEAR_TASK_SET; break; case MSG_REL_RECOVERY: - nt.nt_ncode = NT_CLEAR_ACA; + notify.nt_ncode = NT_CLEAR_ACA; break; case MSG_TERM_IO_PROC: - nt.nt_ncode = NT_ABORT_TASK; + notify.nt_ncode = NT_ABORT_TASK; break; case MSG_LUN_RESET: - nt.nt_ncode = NT_LUN_RESET; + notify.nt_ncode = NT_LUN_RESET; break; default: - isp_prt(isp, ISP_LOGERR, - "unhandled message 0x%x", inp->in_msg[0]); - isp_notify_ack(isp, inp); + isp_prt(isp, ISP_LOGERR, "%s: unhandled message 0x%x", __func__, inp->in_msg[0]); + (void) isp_notify_ack(isp, inp); return; } - (void) 
isp_async(isp, ISPASYNC_TARGET_NOTIFY, &nt); + isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); } else { - isp_prt(isp, ISP_LOGERR, - "unknown immediate notify status 0x%x", inp->in_status); - isp_notify_ack(isp, inp); + isp_prt(isp, ISP_LOGERR, "%s: unknown immediate notify status 0x%x", __func__, inp->in_status); + (void) isp_notify_ack(isp, inp); } } /* * Synthesize a message from the task management flags in a FCP_CMND_IU. */ static void isp_got_msg_fc(ispsoftc_t *isp, in_fcentry_t *inp) { - tmd_notify_t nt; + isp_notify_t notify; static const char f1[] = "%s from N-port handle 0x%x lun %d seq 0x%x"; - static const char f2[] = "unknown %s 0x%x lun %d N-Port handle 0x%x " - "task flags 0x%x seq 0x%x\n"; + static const char f2[] = "unknown %s 0x%x lun %d N-Port handle 0x%x task flags 0x%x seq 0x%x\n"; uint16_t seqid, loopid; - MEMZERO(&nt, sizeof (tmd_notify_t)); - nt.nt_hba = isp; - if (FCPARAM(isp)->isp_2klogin) { - nt.nt_iid = ((in_fcentry_e_t *)inp)->in_iid; + ISP_MEMZERO(¬ify, sizeof (isp_notify_t)); + notify.nt_hba = isp; + notify.nt_wwn = INI_ANY; + if (ISP_CAP_2KLOGIN(isp)) { + notify.nt_nphdl = ((in_fcentry_e_t *)inp)->in_iid; loopid = ((in_fcentry_e_t *)inp)->in_iid; seqid = ((in_fcentry_e_t *)inp)->in_seqid; } else { - nt.nt_iid = inp->in_iid; + notify.nt_nphdl = inp->in_iid; loopid = inp->in_iid; seqid = inp->in_seqid; } + notify.nt_sid = PORT_ANY; + notify.nt_did = PORT_ANY; + /* nt_tgt set in outer layers */ - if (FCPARAM(isp)->isp_sccfw) { - nt.nt_lun = inp->in_scclun; + if (ISP_CAP_SCCFW(isp)) { + notify.nt_lun = inp->in_scclun; } else { - nt.nt_lun = inp->in_lun; + notify.nt_lun = inp->in_lun; } - IN_FC_MAKE_TAGID(nt.nt_tagval, 0, 0, seqid); - nt.nt_need_ack = 1; - nt.nt_lreserved = inp; + notify.nt_tagval = seqid; + notify.nt_tagval |= (((uint64_t)(isp->isp_serno++)) << 32); + notify.nt_need_ack = 1; + notify.nt_lreserved = inp; if (inp->in_status != IN_MSG_RECEIVED) { - isp_prt(isp, ISP_LOGINFO, f2, "immediate notify status", - inp->in_status, 
nt.nt_lun, loopid, inp->in_task_flags, - inp->in_seqid); - isp_notify_ack(isp, inp); + isp_prt(isp, ISP_LOGINFO, f2, "immediate notify status", inp->in_status, notify.nt_lun, loopid, inp->in_task_flags, inp->in_seqid); + (void) isp_notify_ack(isp, inp); return; } if (inp->in_task_flags & TASK_FLAGS_ABORT_TASK_SET) { - isp_prt(isp, ISP_LOGINFO, f1, "ABORT TASK SET", - loopid, nt.nt_lun, inp->in_seqid); - nt.nt_ncode = NT_ABORT_TASK_SET; + isp_prt(isp, ISP_LOGINFO, f1, "ABORT TASK SET", loopid, notify.nt_lun, inp->in_seqid); + notify.nt_ncode = NT_ABORT_TASK_SET; } else if (inp->in_task_flags & TASK_FLAGS_CLEAR_TASK_SET) { - isp_prt(isp, ISP_LOGINFO, f1, "CLEAR TASK SET", - loopid, nt.nt_lun, inp->in_seqid); - nt.nt_ncode = NT_CLEAR_TASK_SET; + isp_prt(isp, ISP_LOGINFO, f1, "CLEAR TASK SET", loopid, notify.nt_lun, inp->in_seqid); + notify.nt_ncode = NT_CLEAR_TASK_SET; } else if (inp->in_task_flags & TASK_FLAGS_LUN_RESET) { - isp_prt(isp, ISP_LOGINFO, f1, "LUN RESET", - loopid, nt.nt_lun, inp->in_seqid); - nt.nt_ncode = NT_LUN_RESET; + isp_prt(isp, ISP_LOGINFO, f1, "LUN RESET", loopid, notify.nt_lun, inp->in_seqid); + notify.nt_ncode = NT_LUN_RESET; } else if (inp->in_task_flags & TASK_FLAGS_TARGET_RESET) { - isp_prt(isp, ISP_LOGINFO, f1, "TARGET RESET", - loopid, nt.nt_lun, inp->in_seqid); - nt.nt_ncode = NT_TARGET_RESET; + isp_prt(isp, ISP_LOGINFO, f1, "TARGET RESET", loopid, notify.nt_lun, inp->in_seqid); + notify.nt_ncode = NT_TARGET_RESET; } else if (inp->in_task_flags & TASK_FLAGS_CLEAR_ACA) { - isp_prt(isp, ISP_LOGINFO, f1, "CLEAR ACA", - loopid, nt.nt_lun, inp->in_seqid); - nt.nt_ncode = NT_CLEAR_ACA; + isp_prt(isp, ISP_LOGINFO, f1, "CLEAR ACA", loopid, notify.nt_lun, inp->in_seqid); + notify.nt_ncode = NT_CLEAR_ACA; } else { - isp_prt(isp, ISP_LOGWARN, f2, "task flag", inp->in_status, - nt.nt_lun, loopid, inp->in_task_flags, inp->in_seqid); - isp_notify_ack(isp, inp); + isp_prt(isp, ISP_LOGWARN, f2, "task flag", inp->in_status, notify.nt_lun, loopid, 
inp->in_task_flags, inp->in_seqid); + (void) isp_notify_ack(isp, inp); return; } - (void) isp_async(isp, ISPASYNC_TARGET_NOTIFY, &nt); + isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); } -#define HILO(x) (uint32_t) (x >> 32), (uint32_t) x static void isp_got_tmf_24xx(ispsoftc_t *isp, at7_entry_t *aep) { - tmd_notify_t nt; - static const char f1[] = - "%s from PortID 0x%06x lun %d seq 0x%08x%08x"; - static const char f2[] = - "unknown Task Flag 0x%x lun %d PortID 0x%x tag 0x%08x%08x"; - uint32_t sid; + isp_notify_t notify; + static const char f1[] = "%s from PortID 0x%06x lun %d seq 0x%08x"; + static const char f2[] = "unknown Task Flag 0x%x lun %d PortID 0x%x tag 0x%08x"; + uint16_t chan; + uint32_t sid, did; - MEMZERO(&nt, sizeof (tmd_notify_t)); - nt.nt_hba = isp; - nt.nt_iid = INI_ANY; - nt.nt_lun = - (aep->at_cmnd.fcp_cmnd_lun[0] << 8) | - (aep->at_cmnd.fcp_cmnd_lun[1]); - /* - * XXX: VPIDX HAS TO BE DERIVED FROM DESTINATION PORT - */ - nt.nt_tagval = aep->at_rxid; - nt.nt_lreserved = aep; - sid = - (aep->at_hdr.s_id[0] << 16) | - (aep->at_hdr.s_id[1] << 8) | - (aep->at_hdr.s_id[2]); + ISP_MEMZERO(¬ify, sizeof (isp_notify_t)); + notify.nt_hba = isp; + notify.nt_wwn = INI_ANY; + notify.nt_lun = (aep->at_cmnd.fcp_cmnd_lun[0] << 8) | (aep->at_cmnd.fcp_cmnd_lun[1]); + notify.nt_tagval = aep->at_rxid; + notify.nt_tagval |= (((uint64_t)(isp->isp_serno++)) << 32); + notify.nt_lreserved = aep; + sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | (aep->at_hdr.s_id[2]); - if (aep->at_cmnd.fcp_cmnd_task_management & - FCP_CMND_TMF_ABORT_TASK_SET) { - isp_prt(isp, ISP_LOGINFO, f1, "ABORT TASK SET", - sid, nt.nt_lun, HILO(nt.nt_tagval)); - nt.nt_ncode = NT_ABORT_TASK_SET; - } else if (aep->at_cmnd.fcp_cmnd_task_management & - FCP_CMND_TMF_CLEAR_TASK_SET) { - isp_prt(isp, ISP_LOGINFO, f1, "CLEAR TASK SET", - sid, nt.nt_lun, HILO(nt.nt_tagval)); - nt.nt_ncode = NT_CLEAR_TASK_SET; - } else if (aep->at_cmnd.fcp_cmnd_task_management & - FCP_CMND_TMF_LUN_RESET) { - 
isp_prt(isp, ISP_LOGINFO, f1, "LUN RESET", - sid, nt.nt_lun, HILO(nt.nt_tagval)); - nt.nt_ncode = NT_LUN_RESET; - } else if (aep->at_cmnd.fcp_cmnd_task_management & - FCP_CMND_TMF_TGT_RESET) { - isp_prt(isp, ISP_LOGINFO, f1, "TARGET RESET", - sid, nt.nt_lun, HILO(nt.nt_tagval)); - nt.nt_ncode = NT_TARGET_RESET; - nt.nt_lun = LUN_ANY; - } else if (aep->at_cmnd.fcp_cmnd_task_management & - FCP_CMND_TMF_CLEAR_ACA) { - isp_prt(isp, ISP_LOGINFO, f1, "CLEAR ACA", - sid, nt.nt_lun, HILO(nt.nt_tagval)); - nt.nt_ncode = NT_CLEAR_ACA; + /* Channel has to be derived from D_ID */ + did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2]; + for (chan = 0; chan < isp->isp_nchan; chan++) { + if (FCPARAM(isp, chan)->isp_portid == did) { + break; + } + } + if (chan == isp->isp_nchan) { + isp_prt(isp, ISP_LOGWARN, "%s: D_ID 0x%x not found on any channel", __func__, did); + /* just drop on the floor */ + return; + } + notify.nt_nphdl = NIL_HANDLE; /* unknown here */ + notify.nt_sid = sid; + notify.nt_did = did; + notify.nt_channel = chan; + if (aep->at_cmnd.fcp_cmnd_task_management & FCP_CMND_TMF_ABORT_TASK_SET) { + isp_prt(isp, ISP_LOGINFO, f1, "ABORT TASK SET", sid, notify.nt_lun, aep->at_rxid); + notify.nt_ncode = NT_ABORT_TASK_SET; + } else if (aep->at_cmnd.fcp_cmnd_task_management & FCP_CMND_TMF_CLEAR_TASK_SET) { + isp_prt(isp, ISP_LOGINFO, f1, "CLEAR TASK SET", sid, notify.nt_lun, aep->at_rxid); + notify.nt_ncode = NT_CLEAR_TASK_SET; + } else if (aep->at_cmnd.fcp_cmnd_task_management & FCP_CMND_TMF_LUN_RESET) { + isp_prt(isp, ISP_LOGINFO, f1, "LUN RESET", sid, notify.nt_lun, aep->at_rxid); + notify.nt_ncode = NT_LUN_RESET; + } else if (aep->at_cmnd.fcp_cmnd_task_management & FCP_CMND_TMF_TGT_RESET) { + isp_prt(isp, ISP_LOGINFO, f1, "TARGET RESET", sid, notify.nt_lun, aep->at_rxid); + notify.nt_ncode = NT_TARGET_RESET; + } else if (aep->at_cmnd.fcp_cmnd_task_management & FCP_CMND_TMF_CLEAR_ACA) { + isp_prt(isp, ISP_LOGINFO, f1, "CLEAR ACA", sid,
notify.nt_lun, aep->at_rxid); + notify.nt_ncode = NT_CLEAR_ACA; } else { - isp_prt(isp, ISP_LOGWARN, f2, - aep->at_cmnd.fcp_cmnd_task_management, - nt.nt_lun, sid, HILO(nt.nt_tagval)); - isp_endcmd(isp, aep, 0, 0); + isp_prt(isp, ISP_LOGWARN, f2, aep->at_cmnd.fcp_cmnd_task_management, notify.nt_lun, sid, aep->at_rxid); + notify.nt_ncode = NT_UNKNOWN; return; } - (void) isp_async(isp, ISPASYNC_TARGET_NOTIFY, &nt); + isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); } -void +int isp_notify_ack(ispsoftc_t *isp, void *arg) { char storage[QENTRY_LEN]; - uint32_t nxti, optr; void *outp; - if (isp_getrqentry(isp, &nxti, &optr, &outp)) { - isp_prt(isp, ISP_LOGWARN, - "Request Queue Overflow For isp_notify_ack"); - return; + /* + * This is in case a Task Management Function ends up here. + */ + if (IS_24XX(isp) && arg != NULL && (((isphdr_t *)arg)->rqs_entry_type == RQSTYPE_ATIO)) { + at7_entry_t *aep = arg; + return (isp_endcmd(isp, aep, NIL_HANDLE, 0, 0, 0)); } - MEMZERO(storage, QENTRY_LEN); + outp = isp_getrqentry(isp); + if (outp == NULL) { + isp_prt(isp, ISP_LOGWARN, rqo, __func__); + return (1); + } - if (IS_24XX(isp) && arg != NULL && (((isphdr_t *)arg)->rqs_entry_type == RQSTYPE_ATIO)) { - at7_entry_t *aep = arg; - isp_endcmd(isp, aep, 0, 0); - return; - } else if (IS_24XX(isp) && arg != NULL && (((isphdr_t *)arg)->rqs_entry_type == RQSTYPE_ABTS_RSP)) { - abts_rsp_t *abts_rsp = (abts_rsp_t *) storage; - /* - * The caller will have set response values as appropriate - * in the ABTS structure just before calling us. 
- */ - MEMCPY(abts_rsp, arg, QENTRY_LEN); - isp_put_abts_rsp(isp, abts_rsp, (abts_rsp_t *)outp); - } else if (IS_24XX(isp)) { + ISP_MEMZERO(storage, QENTRY_LEN); + + if (IS_24XX(isp)) { na_fcentry_24xx_t *na = (na_fcentry_24xx_t *) storage; if (arg) { in_fcentry_24xx_t *in = arg; na->na_nphdl = in->in_nphdl; + na->na_flags = in->in_flags & IN24XX_FLAG_PUREX_IOCB; na->na_status = in->in_status; na->na_status_subcode = in->in_status_subcode; na->na_rxid = in->in_rxid; na->na_oxid = in->in_oxid; + na->na_vpidx = in->in_vpidx; if (in->in_status == IN24XX_SRR_RCVD) { na->na_srr_rxid = in->in_srr_rxid; na->na_srr_reloff_hi = in->in_srr_reloff_hi; na->na_srr_reloff_lo = in->in_srr_reloff_lo; na->na_srr_iu = in->in_srr_iu; na->na_srr_flags = 1; na->na_srr_reject_vunique = 0; na->na_srr_reject_explanation = 1; na->na_srr_reject_code = 1; } } na->na_header.rqs_entry_type = RQSTYPE_NOTIFY_ACK; na->na_header.rqs_entry_count = 1; isp_put_notify_24xx_ack(isp, na, (na_fcentry_24xx_t *)outp); } else if (IS_FC(isp)) { na_fcentry_t *na = (na_fcentry_t *) storage; int iid = 0; if (arg) { in_fcentry_t *inp = arg; - MEMCPY(storage, arg, sizeof (isphdr_t)); - if (FCPARAM(isp)->isp_2klogin) { - ((na_fcentry_e_t *)na)->na_iid = - ((in_fcentry_e_t *)inp)->in_iid; + ISP_MEMCPY(storage, arg, sizeof (isphdr_t)); + if (ISP_CAP_2KLOGIN(isp)) { + ((na_fcentry_e_t *)na)->na_iid = ((in_fcentry_e_t *)inp)->in_iid; iid = ((na_fcentry_e_t *)na)->na_iid; } else { na->na_iid = inp->in_iid; iid = na->na_iid; } - na->na_task_flags = - inp->in_task_flags & TASK_FLAGS_RESERVED_MASK; + na->na_task_flags = inp->in_task_flags & TASK_FLAGS_RESERVED_MASK; na->na_seqid = inp->in_seqid; na->na_flags = NAFC_RCOUNT; na->na_status = inp->in_status; if (inp->in_status == IN_RESET) { na->na_flags |= NAFC_RST_CLRD; } if (inp->in_status == IN_MSG_RECEIVED) { na->na_flags |= NAFC_TVALID; na->na_response = 0; /* XXX SUCCEEDED XXX */ } } else { na->na_flags = NAFC_RST_CLRD; } na->na_header.rqs_entry_type = 
RQSTYPE_NOTIFY_ACK; na->na_header.rqs_entry_count = 1; - if (FCPARAM(isp)->isp_2klogin) { - isp_put_notify_ack_fc_e(isp, (na_fcentry_e_t *) na, - (na_fcentry_e_t *)outp); + if (ISP_CAP_2KLOGIN(isp)) { + isp_put_notify_ack_fc_e(isp, (na_fcentry_e_t *) na, (na_fcentry_e_t *)outp); } else { isp_put_notify_ack_fc(isp, na, (na_fcentry_t *)outp); } - isp_prt(isp, ISP_LOGTDEBUG0, "notify ack loopid %u seqid %x " - "flags %x tflags %x response %x", iid, na->na_seqid, + isp_prt(isp, ISP_LOGTDEBUG0, "notify ack loopid %u seqid %x flags %x tflags %x response %x", iid, na->na_seqid, na->na_flags, na->na_task_flags, na->na_response); } else { na_entry_t *na = (na_entry_t *) storage; if (arg) { in_entry_t *inp = arg; - MEMCPY(storage, arg, sizeof (isphdr_t)); + ISP_MEMCPY(storage, arg, sizeof (isphdr_t)); na->na_iid = inp->in_iid; na->na_lun = inp->in_lun; na->na_tgt = inp->in_tgt; na->na_seqid = inp->in_seqid; if (inp->in_status == IN_RESET) { na->na_event = NA_RST_CLRD; } } else { na->na_event = NA_RST_CLRD; } na->na_header.rqs_entry_type = RQSTYPE_NOTIFY_ACK; na->na_header.rqs_entry_count = 1; isp_put_notify_ack(isp, na, (na_entry_t *)outp); - isp_prt(isp, ISP_LOGTDEBUG0, "notify ack loopid %u lun %u tgt " - "%u seqid %x event %x", na->na_iid, na->na_lun, na->na_tgt, - na->na_seqid, na->na_event); + isp_prt(isp, ISP_LOGTDEBUG0, "notify ack loopid %u lun %u tgt %u seqid %x event %x", na->na_iid, na->na_lun, na->na_tgt, na->na_seqid, na->na_event); } - ISP_TDQE(isp, "isp_notify_ack", (int) optr, storage); - ISP_ADD_REQUEST(isp, nxti); + ISP_TDQE(isp, "isp_notify_ack", isp->isp_reqidx, storage); + ISP_SYNC_REQUEST(isp); + return (0); } +int +isp_acknak_abts(ispsoftc_t *isp, void *arg, int errno) +{ + char storage[QENTRY_LEN]; + uint16_t tmpw; + uint8_t tmpb; + abts_t *abts = arg; + abts_rsp_t *rsp = (abts_rsp_t *) storage; + void *outp; + + if (!IS_24XX(isp)) { + isp_prt(isp, ISP_LOGERR, "%s: called for non-24XX card", __func__); + return (0); + } + + if 
(abts->abts_header.rqs_entry_type != RQSTYPE_ABTS_RCVD) { + isp_prt(isp, ISP_LOGERR, "%s: called for non-ABTS entry (0x%x)", __func__, abts->abts_header.rqs_entry_type); + return (0); + } + + outp = isp_getrqentry(isp); + if (outp == NULL) { + isp_prt(isp, ISP_LOGWARN, rqo, __func__); + return (1); + } + + ISP_MEMCPY(rsp, abts, QENTRY_LEN); + rsp->abts_rsp_header.rqs_entry_type = RQSTYPE_ABTS_RSP; + + /* + * Swap destination and source for response. + */ + rsp->abts_rsp_r_ctl = BA_ACC; + tmpw = rsp->abts_rsp_did_lo; + tmpb = rsp->abts_rsp_did_hi; + rsp->abts_rsp_did_lo = rsp->abts_rsp_sid_lo; + rsp->abts_rsp_did_hi = rsp->abts_rsp_sid_hi; + rsp->abts_rsp_sid_lo = tmpw; + rsp->abts_rsp_sid_hi = tmpb; + + rsp->abts_rsp_f_ctl_hi ^= 0x80; /* invert Exchange Context */ + rsp->abts_rsp_f_ctl_hi &= ~0x7f; /* clear Sequence Initiator and other bits */ + rsp->abts_rsp_f_ctl_hi |= 0x10; /* abort the whole exchange */ + rsp->abts_rsp_f_ctl_hi |= 0x8; /* last data frame of sequence */ + rsp->abts_rsp_f_ctl_hi |= 0x1; /* transfer Sequence Initiative */ + rsp->abts_rsp_f_ctl_lo = 0; + + if (errno == 0) { + uint16_t rx_id, ox_id; + + rx_id = rsp->abts_rsp_rx_id; + ox_id = rsp->abts_rsp_ox_id; + ISP_MEMZERO(&rsp->abts_rsp_payload.ba_acc, sizeof (rsp->abts_rsp_payload.ba_acc)); + isp_prt(isp, ISP_LOGTINFO, "[0x%x] ABTS of 0x%x being BA_ACC'd", rsp->abts_rsp_rxid_abts, rsp->abts_rsp_rxid_task); + rsp->abts_rsp_payload.ba_acc.aborted_rx_id = rx_id; + rsp->abts_rsp_payload.ba_acc.aborted_ox_id = ox_id; + rsp->abts_rsp_payload.ba_acc.high_seq_cnt = 0xffff; + } else { + ISP_MEMZERO(&rsp->abts_rsp_payload.ba_rjt, sizeof (rsp->abts_rsp_payload.ba_acc)); + switch (errno) { + case ENOMEM: + rsp->abts_rsp_payload.ba_rjt.reason = 5; /* Logical Busy */ + break; + default: + rsp->abts_rsp_payload.ba_rjt.reason = 9; /* Unable to perform command request */ + break; + } + } + + /* + * The caller will have set response values as appropriate + * in the ABTS structure just before calling us. 
+ */ + isp_put_abts_rsp(isp, rsp, (abts_rsp_t *)outp); + ISP_TDQE(isp, "isp_acknak_abts", isp->isp_reqidx, storage); + ISP_SYNC_REQUEST(isp); + return (0); +} + static void isp_handle_atio(ispsoftc_t *isp, at_entry_t *aep) { int lun; lun = aep->at_lun; /* * The firmware status (except for the QLTM_SVALID bit) indicates * why this ATIO was sent to us. * * If QLTM_SVALID is set, the firware has recommended Sense Data. * * If the DISCONNECTS DISABLED bit is set in the flags field, * we're still connected on the SCSI bus - i.e. the initiator * did not set DiscPriv in the identify message. We don't care * about this so it's ignored. */ - switch(aep->at_status & ~QLTM_SVALID) { + switch (aep->at_status & ~QLTM_SVALID) { case AT_PATH_INVALID: /* * ATIO rejected by the firmware due to disabled lun. */ - isp_prt(isp, ISP_LOGERR, - "rejected ATIO for disabled lun %d", lun); + isp_prt(isp, ISP_LOGERR, "rejected ATIO for disabled lun %d", lun); break; case AT_NOCAP: /* * Requested Capability not available * We sent an ATIO that overflowed the firmware's * command resource count. */ - isp_prt(isp, ISP_LOGERR, - "rejected ATIO for lun %d because of command count" - " overflow", lun); + isp_prt(isp, ISP_LOGERR, "rejected ATIO for lun %d because of command count overflow", lun); break; case AT_BDR_MSG: /* * If we send an ATIO to the firmware to increment * its command resource count, and the firmware is * recovering from a Bus Device Reset, it returns * the ATIO with this status. We set the command * resource count in the Enable Lun entry and do * not increment it. Therefore we should never get * this status here. */ - isp_prt(isp, ISP_LOGERR, atiocope, lun, - GET_BUS_VAL(aep->at_iid)); + isp_prt(isp, ISP_LOGERR, atiocope, lun, GET_BUS_VAL(aep->at_iid)); break; case AT_CDB: /* Got a CDB */ case AT_PHASE_ERROR: /* Bus Phase Sequence Error */ /* * Punt to platform specific layer. 
*/ - (void) isp_async(isp, ISPASYNC_TARGET_ACTION, aep); + isp_async(isp, ISPASYNC_TARGET_ACTION, aep); break; case AT_RESET: /* * A bus reset came along and blew away this command. Why * they do this in addition the async event code stuff, * I dunno. * * Ignore it because the async event will clear things * up for us. */ - isp_prt(isp, ISP_LOGWARN, atior, lun, - GET_IID_VAL(aep->at_iid), GET_BUS_VAL(aep->at_iid)); + isp_prt(isp, ISP_LOGWARN, atior, lun, GET_IID_VAL(aep->at_iid), GET_BUS_VAL(aep->at_iid)); break; default: - isp_prt(isp, ISP_LOGERR, - "Unknown ATIO status 0x%x from loopid %d for lun %d", - aep->at_status, aep->at_iid, lun); + isp_prt(isp, ISP_LOGERR, "Unknown ATIO status 0x%x from loopid %d for lun %d", aep->at_status, aep->at_iid, lun); (void) isp_target_put_atio(isp, aep); break; } } static void isp_handle_atio2(ispsoftc_t *isp, at2_entry_t *aep) { int lun, iid; - if (FCPARAM(isp)->isp_sccfw) { + if (ISP_CAP_SCCFW(isp)) { lun = aep->at_scclun; } else { lun = aep->at_lun; } - if (FCPARAM(isp)->isp_2klogin) { + if (ISP_CAP_2KLOGIN(isp)) { iid = ((at2e_entry_t *)aep)->at_iid; } else { iid = aep->at_iid; } /* * The firmware status (except for the QLTM_SVALID bit) indicates * why this ATIO was sent to us. * * If QLTM_SVALID is set, the firware has recommended Sense Data. * * If the DISCONNECTS DISABLED bit is set in the flags field, * we're still connected on the SCSI bus - i.e. the initiator * did not set DiscPriv in the identify message. We don't care * about this so it's ignored. */ - switch(aep->at_status & ~QLTM_SVALID) { + switch (aep->at_status & ~QLTM_SVALID) { case AT_PATH_INVALID: /* * ATIO rejected by the firmware due to disabled lun. */ - isp_prt(isp, ISP_LOGERR, - "rejected ATIO2 for disabled lun %d", lun); + isp_prt(isp, ISP_LOGERR, "rejected ATIO2 for disabled lun %d", lun); break; case AT_NOCAP: /* * Requested Capability not available * We sent an ATIO that overflowed the firmware's * command resource count. 
*/ - isp_prt(isp, ISP_LOGERR, - "rejected ATIO2 for lun %d- command count overflow", lun); + isp_prt(isp, ISP_LOGERR, "rejected ATIO2 for lun %d- command count overflow", lun); break; case AT_BDR_MSG: /* * If we send an ATIO to the firmware to increment * its command resource count, and the firmware is * recovering from a Bus Device Reset, it returns * the ATIO with this status. We set the command * resource count in the Enable Lun entry and no * not increment it. Therefore we should never get * this status here. */ isp_prt(isp, ISP_LOGERR, atiocope, lun, 0); break; case AT_CDB: /* Got a CDB */ /* * Punt to platform specific layer. */ - (void) isp_async(isp, ISPASYNC_TARGET_ACTION, aep); + isp_async(isp, ISPASYNC_TARGET_ACTION, aep); break; case AT_RESET: /* * A bus reset came along an blew away this command. Why * they do this in addition the async event code stuff, * I dunno. * * Ignore it because the async event will clear things * up for us. */ isp_prt(isp, ISP_LOGERR, atior, lun, iid, 0); break; default: - isp_prt(isp, ISP_LOGERR, - "Unknown ATIO2 status 0x%x from loopid %d for lun %d", - aep->at_status, iid, lun); + isp_prt(isp, ISP_LOGERR, "Unknown ATIO2 status 0x%x from loopid %d for lun %d", aep->at_status, iid, lun); (void) isp_target_put_atio(isp, aep); break; } } static void isp_handle_ctio(ispsoftc_t *isp, ct_entry_t *ct) { void *xs; int pl = ISP_LOGTDEBUG2; char *fmsg = NULL; if (ct->ct_syshandle) { xs = isp_find_xs_tgt(isp, ct->ct_syshandle); if (xs == NULL) { pl = ISP_LOGALL; } } else { xs = NULL; } - switch(ct->ct_status & ~QLTM_SVALID) { + switch (ct->ct_status & ~QLTM_SVALID) { case CT_OK: /* * There are generally 3 possibilities as to why we'd get * this condition: * We disconnected after receiving a CDB. * We sent or received data. * We sent status & command complete. */ if (ct->ct_flags & CT_SENDSTATUS) { break; } else if ((ct->ct_flags & CT_DATAMASK) == CT_NO_DATA) { /* * Nothing to do in this case. 
*/ - isp_prt(isp, pl, "CTIO- iid %d disconnected OK", - ct->ct_iid); + isp_prt(isp, pl, "CTIO- iid %d disconnected OK", ct->ct_iid); return; } break; case CT_BDR_MSG: /* * Bus Device Reset message received or the SCSI Bus has * been Reset; the firmware has gone to Bus Free. * * The firmware generates an async mailbox interrupt to * notify us of this and returns outstanding CTIOs with this * status. These CTIOs are handled in that same way as * CT_ABORTED ones, so just fall through here. */ fmsg = "Bus Device Reset"; /*FALLTHROUGH*/ case CT_RESET: if (fmsg == NULL) fmsg = "Bus Reset"; /*FALLTHROUGH*/ case CT_ABORTED: /* * When an Abort message is received the firmware goes to * Bus Free and returns all outstanding CTIOs with the status * set, then sends us an Immediate Notify entry. */ if (fmsg == NULL) fmsg = "ABORT TAG message sent by Initiator"; - isp_prt(isp, ISP_LOGTDEBUG0, "CTIO destroyed by %s", fmsg); break; case CT_INVAL: /* * CTIO rejected by the firmware due to disabled lun. * "Cannot Happen". */ - isp_prt(isp, ISP_LOGERR, - "Firmware rejected CTIO for disabled lun %d", - ct->ct_lun); + isp_prt(isp, ISP_LOGERR, "Firmware rejected CTIO for disabled lun %d", ct->ct_lun); break; case CT_NOPATH: /* * CTIO rejected by the firmware due "no path for the * nondisconnecting nexus specified". This means that * we tried to access the bus while a non-disconnecting * command is in process. 
*/ - isp_prt(isp, ISP_LOGERR, - "Firmware rejected CTIO for bad nexus %d/%d/%d", - ct->ct_iid, ct->ct_tgt, ct->ct_lun); + isp_prt(isp, ISP_LOGERR, "Firmware rejected CTIO for bad nexus %d/%d/%d", ct->ct_iid, ct->ct_tgt, ct->ct_lun); break; case CT_RSELTMO: fmsg = "Reselection"; /*FALLTHROUGH*/ case CT_TIMEOUT: if (fmsg == NULL) fmsg = "Command"; - isp_prt(isp, ISP_LOGERR, "Firmware timed out on %s", fmsg); + isp_prt(isp, ISP_LOGWARN, "Firmware timed out on %s", fmsg); break; case CT_PANIC: if (fmsg == NULL) fmsg = "Unrecoverable Error"; /*FALLTHROUGH*/ case CT_ERR: if (fmsg == NULL) fmsg = "Completed with Error"; /*FALLTHROUGH*/ case CT_PHASE_ERROR: if (fmsg == NULL) fmsg = "Phase Sequence Error"; /*FALLTHROUGH*/ case CT_TERMINATED: if (fmsg == NULL) fmsg = "terminated by TERMINATE TRANSFER"; /*FALLTHROUGH*/ case CT_NOACK: if (fmsg == NULL) fmsg = "unacknowledged Immediate Notify pending"; isp_prt(isp, ISP_LOGERR, "CTIO returned by f/w- %s", fmsg); break; default: - isp_prt(isp, ISP_LOGERR, "Unknown CTIO status 0x%x", - ct->ct_status & ~QLTM_SVALID); + isp_prt(isp, ISP_LOGERR, "Unknown CTIO status 0x%x", ct->ct_status & ~QLTM_SVALID); break; } if (xs == NULL) { /* * There may be more than one CTIO for a data transfer, * or this may be a status CTIO we're not monitoring. * * The assumption is that they'll all be returned in the * order we got them. */ if (ct->ct_syshandle == 0) { if ((ct->ct_flags & CT_SENDSTATUS) == 0) { - isp_prt(isp, pl, - "intermediate CTIO completed ok"); + isp_prt(isp, pl, "intermediate CTIO completed ok"); } else { - isp_prt(isp, pl, - "unmonitored CTIO completed ok"); + isp_prt(isp, pl, "unmonitored CTIO completed ok"); } } else { - isp_prt(isp, pl, - "NO xs for CTIO (handle 0x%x) status 0x%x", - ct->ct_syshandle, ct->ct_status & ~QLTM_SVALID); + isp_prt(isp, pl, "NO xs for CTIO (handle 0x%x) status 0x%x", ct->ct_syshandle, ct->ct_status & ~QLTM_SVALID); } } else { /* * Final CTIO completed. 
Release DMA resources and * notify platform dependent layers. */ if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { ISP_DMAFREE(isp, xs, ct->ct_syshandle); } isp_prt(isp, pl, "final CTIO complete"); /* * The platform layer will destroy the handle if appropriate. */ - (void) isp_async(isp, ISPASYNC_TARGET_ACTION, ct); + isp_async(isp, ISPASYNC_TARGET_ACTION, ct); } } static void isp_handle_ctio2(ispsoftc_t *isp, ct2_entry_t *ct) { void *xs; int pl = ISP_LOGTDEBUG2; char *fmsg = NULL; if (ct->ct_syshandle) { xs = isp_find_xs_tgt(isp, ct->ct_syshandle); if (xs == NULL) { pl = ISP_LOGALL; } } else { xs = NULL; } - switch(ct->ct_status & ~QLTM_SVALID) { + switch (ct->ct_status & ~QLTM_SVALID) { case CT_BUS_ERROR: isp_prt(isp, ISP_LOGERR, "PCI DMA Bus Error"); /* FALL Through */ case CT_DATA_OVER: case CT_DATA_UNDER: case CT_OK: /* * There are generally 2 possibilities as to why we'd get * this condition: * We sent or received data. * We sent status & command complete. */ break; case CT_BDR_MSG: /* * Target Reset function received. * * The firmware generates an async mailbox interrupt to * notify us of this and returns outstanding CTIOs with this * status. These CTIOs are handled in that same way as * CT_ABORTED ones, so just fall through here. */ fmsg = "TARGET RESET"; /*FALLTHROUGH*/ case CT_RESET: if (fmsg == NULL) fmsg = "LIP Reset"; /*FALLTHROUGH*/ case CT_ABORTED: /* * When an Abort message is received the firmware goes to * Bus Free and returns all outstanding CTIOs with the status * set, then sends us an Immediate Notify entry. */ if (fmsg == NULL) { fmsg = "ABORT"; } - isp_prt(isp, ISP_LOGTDEBUG0, - "CTIO2 destroyed by %s: RX_ID=0x%x", fmsg, ct->ct_rxid); + isp_prt(isp, ISP_LOGTDEBUG0, "CTIO2 destroyed by %s: RX_ID=0x%x", fmsg, ct->ct_rxid); break; case CT_INVAL: /* * CTIO rejected by the firmware - invalid data direction. 
*/ isp_prt(isp, ISP_LOGERR, "CTIO2 had wrong data direction"); break; case CT_RSELTMO: fmsg = "failure to reconnect to initiator"; /*FALLTHROUGH*/ case CT_TIMEOUT: if (fmsg == NULL) fmsg = "command"; - isp_prt(isp, ISP_LOGERR, "Firmware timed out on %s", fmsg); + isp_prt(isp, ISP_LOGWARN, "Firmware timed out on %s", fmsg); break; case CT_ERR: fmsg = "Completed with Error"; /*FALLTHROUGH*/ case CT_LOGOUT: if (fmsg == NULL) fmsg = "Port Logout"; /*FALLTHROUGH*/ case CT_PORTUNAVAIL: if (fmsg == NULL) fmsg = "Port not available"; /*FALLTHROUGH*/ case CT_PORTCHANGED: if (fmsg == NULL) fmsg = "Port Changed"; /*FALLTHROUGH*/ case CT_NOACK: if (fmsg == NULL) fmsg = "unacknowledged Immediate Notify pending"; isp_prt(isp, ISP_LOGWARN, "CTIO returned by f/w- %s", fmsg); break; case CT_INVRXID: /* * CTIO rejected by the firmware because an invalid RX_ID. * Just print a message. */ - isp_prt(isp, ISP_LOGWARN, - "CTIO2 completed with Invalid RX_ID 0x%x", ct->ct_rxid); + isp_prt(isp, ISP_LOGWARN, "CTIO2 completed with Invalid RX_ID 0x%x", ct->ct_rxid); break; default: - isp_prt(isp, ISP_LOGERR, "Unknown CTIO2 status 0x%x", - ct->ct_status & ~QLTM_SVALID); + isp_prt(isp, ISP_LOGERR, "Unknown CTIO2 status 0x%x", ct->ct_status & ~QLTM_SVALID); break; } if (xs == NULL) { /* * There may be more than one CTIO for a data transfer, * or this may be a status CTIO we're not monitoring. * * The assumption is that they'll all be returned in the * order we got them. 
*/ if (ct->ct_syshandle == 0) { if ((ct->ct_flags & CT2_SENDSTATUS) == 0) { - isp_prt(isp, pl, - "intermediate CTIO completed ok"); + isp_prt(isp, pl, "intermediate CTIO completed ok"); } else { - isp_prt(isp, pl, - "unmonitored CTIO completed ok"); + isp_prt(isp, pl, "unmonitored CTIO completed ok"); } } else { - isp_prt(isp, pl, - "NO xs for CTIO (handle 0x%x) status 0x%x", - ct->ct_syshandle, ct->ct_status & ~QLTM_SVALID); + isp_prt(isp, pl, "NO xs for CTIO (handle 0x%x) status 0x%x", ct->ct_syshandle, ct->ct_status & ~QLTM_SVALID); } } else { if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) { ISP_DMAFREE(isp, xs, ct->ct_syshandle); } if (ct->ct_flags & CT2_SENDSTATUS) { /* * Sent status and command complete. * * We're now really done with this command, so we * punt to the platform dependent layers because * only there can we do the appropriate command * complete thread synchronization. */ isp_prt(isp, pl, "status CTIO complete"); } else { /* * Final CTIO completed. Release DMA resources and * notify platform dependent layers. */ isp_prt(isp, pl, "data CTIO complete"); } - (void) isp_async(isp, ISPASYNC_TARGET_ACTION, ct); + isp_async(isp, ISPASYNC_TARGET_ACTION, ct); /* * The platform layer will destroy the handle if appropriate. */ } } static void isp_handle_ctio7(ispsoftc_t *isp, ct7_entry_t *ct) { void *xs; int pl = ISP_LOGTDEBUG2; char *fmsg = NULL; if (ct->ct_syshandle) { xs = isp_find_xs_tgt(isp, ct->ct_syshandle); if (xs == NULL) { pl = ISP_LOGALL; } } else { xs = NULL; } - switch(ct->ct_nphdl) { + switch (ct->ct_nphdl) { case CT7_BUS_ERROR: isp_prt(isp, ISP_LOGERR, "PCI DMA Bus Error"); /* FALL Through */ case CT7_DATA_OVER: case CT7_DATA_UNDER: case CT7_OK: /* * There are generally 2 possibilities as to why we'd get * this condition: * We sent or received data. * We sent status & command complete. 
*/ break; case CT7_RESET: if (fmsg == NULL) { fmsg = "LIP Reset"; } /*FALLTHROUGH*/ case CT7_ABORTED: /* * When an Abort message is received the firmware goes to * Bus Free and returns all outstanding CTIOs with the status * set, then sends us an Immediate Notify entry. */ if (fmsg == NULL) { fmsg = "ABORT"; } - isp_prt(isp, ISP_LOGTDEBUG0, - "CTIO7 destroyed by %s: RX_ID=0x%x", fmsg, ct->ct_rxid); + isp_prt(isp, ISP_LOGTDEBUG0, "CTIO7 destroyed by %s: RX_ID=0x%x", fmsg, ct->ct_rxid); break; case CT7_TIMEOUT: if (fmsg == NULL) { fmsg = "command"; } - isp_prt(isp, ISP_LOGERR, "Firmware timed out on %s", fmsg); + isp_prt(isp, ISP_LOGWARN, "Firmware timed out on %s", fmsg); break; case CT7_ERR: fmsg = "Completed with Error"; /*FALLTHROUGH*/ case CT7_LOGOUT: if (fmsg == NULL) { fmsg = "Port Logout"; } /*FALLTHROUGH*/ case CT7_PORTUNAVAIL: if (fmsg == NULL) { fmsg = "Port not available"; } /*FALLTHROUGH*/ case CT7_PORTCHANGED: if (fmsg == NULL) { fmsg = "Port Changed"; } isp_prt(isp, ISP_LOGWARN, "CTIO returned by f/w- %s", fmsg); break; case CT7_INVRXID: /* * CTIO rejected by the firmware because an invalid RX_ID. * Just print a message. */ - isp_prt(isp, ISP_LOGWARN, - "CTIO7 completed with Invalid RX_ID 0x%x", ct->ct_rxid); + isp_prt(isp, ISP_LOGWARN, "CTIO7 completed with Invalid RX_ID 0x%x", ct->ct_rxid); break; case CT7_REASSY_ERR: isp_prt(isp, ISP_LOGWARN, "reassembly error"); break; case CT7_SRR: isp_prt(isp, ISP_LOGWARN, "SRR received"); break; default: - isp_prt(isp, ISP_LOGERR, "Unknown CTIO7 status 0x%x", - ct->ct_nphdl); + isp_prt(isp, ISP_LOGERR, "Unknown CTIO7 status 0x%x", ct->ct_nphdl); break; } if (xs == NULL) { /* * There may be more than one CTIO for a data transfer, * or this may be a status CTIO we're not monitoring. * * The assumption is that they'll all be returned in the * order we got them. 
*/ if (ct->ct_syshandle == 0) { if (ct->ct_flags & CT7_TERMINATE) { - isp_prt(isp, ISP_LOGINFO, - "termination of 0x%x complete", - ct->ct_rxid); + isp_prt(isp, ISP_LOGINFO, "termination of 0x%x complete", ct->ct_rxid); } else if ((ct->ct_flags & CT7_SENDSTATUS) == 0) { - isp_prt(isp, pl, - "intermediate CTIO completed ok"); + isp_prt(isp, pl, "intermediate CTIO completed ok"); } else { - isp_prt(isp, pl, - "unmonitored CTIO completed ok"); + isp_prt(isp, pl, "unmonitored CTIO completed ok"); } } else { - isp_prt(isp, pl, - "NO xs for CTIO (handle 0x%x) status 0x%x", - ct->ct_syshandle, ct->ct_nphdl); + isp_prt(isp, pl, "NO xs for CTIO (handle 0x%x) status 0x%x", ct->ct_syshandle, ct->ct_nphdl); } } else { - if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) { + if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA) { ISP_DMAFREE(isp, xs, ct->ct_syshandle); } - if (ct->ct_flags & CT2_SENDSTATUS) { + if (ct->ct_flags & CT7_SENDSTATUS) { /* * Sent status and command complete. * * We're now really done with this command, so we * punt to the platform dependent layers because * only there can we do the appropriate command * complete thread synchronization. */ isp_prt(isp, pl, "status CTIO complete"); } else { /* * Final CTIO completed. Release DMA resources and * notify platform dependent layers. */ isp_prt(isp, pl, "data CTIO complete"); } - (void) isp_async(isp, ISPASYNC_TARGET_ACTION, ct); + isp_async(isp, ISPASYNC_TARGET_ACTION, ct); /* * The platform layer will destroy the handle if appropriate. */ } +} + +static void +isp_handle_24xx_inotify(ispsoftc_t *isp, in_fcentry_24xx_t *inot_24xx) +{ + uint8_t ochan, chan, lochan, hichan; + + /* + * Check to see whether we got a wildcard channel. + * If so, we have to iterate over all channels. 
+ */ + ochan = chan = ISP_GET_VPIDX(isp, inot_24xx->in_vpidx); + if (chan == 0xff) { + lochan = 0; + hichan = isp->isp_nchan; + } else { + if (chan >= isp->isp_nchan) { + char buf[64]; + ISP_SNPRINTF(buf, sizeof buf, "%s: bad channel %d for status 0x%x", __func__, chan, inot_24xx->in_status); + isp_print_bytes(isp, buf, QENTRY_LEN, inot_24xx); + (void) isp_notify_ack(isp, inot_24xx); + return; + } + lochan = chan; + hichan = chan + 1; + } + isp_prt(isp, ISP_LOGTDEBUG1, "%s: Immediate Notify Channels %d..%d status=0x%x seqid=0x%x", __func__, lochan, hichan-1, inot_24xx->in_status, inot_24xx->in_rxid); + for (chan = lochan; chan < hichan; chan++) { + switch (inot_24xx->in_status) { + case IN24XX_LIP_RESET: + case IN24XX_LINK_RESET: + case IN24XX_PORT_LOGOUT: + case IN24XX_PORT_CHANGED: + case IN24XX_LINK_FAILED: + case IN24XX_SRR_RCVD: + case IN24XX_ELS_RCVD: + inot_24xx->in_vpidx = chan; + isp_async(isp, ISPASYNC_TARGET_ACTION, inot_24xx); + break; + default: + isp_prt(isp, ISP_LOGINFO, "%s: unhandled status (0x%x) for chan %d", __func__, inot_24xx->in_status, chan); + (void) isp_notify_ack(isp, inot_24xx); + break; + } + } + inot_24xx->in_vpidx = ochan; } #endif Index: head/sys/dev/isp/isp_target.h =================================================================== --- head/sys/dev/isp/isp_target.h (revision 196007) +++ head/sys/dev/isp/isp_target.h (revision 196008) @@ -1,945 +1,100 @@ /* $FreeBSD$ */ /*- - * Copyright (c) 1997-2007 by Matthew Jacob + * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * */ /* * Qlogic Target Mode Structure and Flag Definitions */ #ifndef _ISP_TARGET_H #define _ISP_TARGET_H -#define QLTM_SENSELEN 18 /* non-FC cards only */ -#define QLTM_SVALID 0x80 - /* - * Structure for Enable Lun and Modify Lun queue entries + * Notify structure- these are for asynchronous events that need to be sent + * as notifications to the outer layer. It should be pretty self-explanatory. 
*/ -typedef struct { - isphdr_t le_header; - uint32_t le_reserved; - uint8_t le_lun; - uint8_t le_rsvd; - uint8_t le_ops; /* Modify LUN only */ - uint8_t le_tgt; /* Not for FC */ - uint32_t le_flags; /* Not for FC */ - uint8_t le_status; - uint8_t le_reserved2; - uint8_t le_cmd_count; - uint8_t le_in_count; - uint8_t le_cdb6len; /* Not for FC */ - uint8_t le_cdb7len; /* Not for FC */ - uint16_t le_timeout; - uint16_t le_reserved3[20]; -} lun_entry_t; +typedef enum { + NT_UNKNOWN=0x999, + NT_ABORT_TASK=0x1000, + NT_ABORT_TASK_SET, + NT_CLEAR_ACA, + NT_CLEAR_TASK_SET, + NT_LUN_RESET, + NT_TARGET_RESET, + NT_BUS_RESET, + NT_LIP_RESET, + NT_LINK_UP, + NT_LINK_DOWN, + NT_LOGOUT, + NT_GLOBAL_LOGOUT, + NT_ARRIVED, + NT_DEPARTED, + NT_HBA_RESET +} isp_ncode_t; +typedef struct isp_notify { + void * nt_hba; /* HBA tag */ + void * nt_tmd; + void * nt_lreserved; + void * nt_hreserved; + uint64_t nt_wwn; /* source (wwn) */ + uint64_t nt_tgt; /* destination (wwn) */ + uint64_t nt_tagval; /* tag value */ + uint32_t + nt_sid : 24; /* source port id */ + uint32_t + nt_failed : 1, /* notify operation failed */ + nt_need_ack : 1, /* this notify needs an ACK */ + nt_did : 24; /* destination port id */ + uint32_t + nt_lun : 16, /* logical unit */ + nt_nphdl : 16; /* n-port handle */ + uint8_t nt_channel; /* channel id */ + isp_ncode_t nt_ncode; /* action */ +} isp_notify_t; +#define MATCH_TMD(tmd, iid, lun, tag) \ + ( \ + (tmd) && \ + (iid == INI_ANY || iid == tmd->cd_iid) && \ + (lun == LUN_ANY || lun == tmd->cd_lun) && \ + (tag == TAG_ANY || tag == tmd->cd_tagval) \ + ) /* - * le_flags values - */ -#define LUN_TQAE 0x00000002 /* bit1 Tagged Queue Action Enable */ -#define LUN_DSSM 0x01000000 /* bit24 Disable Sending SDP Message */ -#define LUN_DISAD 0x02000000 /* bit25 Disable autodisconnect */ -#define LUN_DM 0x40000000 /* bit30 Disconnects Mandatory */ - -/* - * le_ops values - */ -#define LUN_CCINCR 0x01 /* increment command count */ -#define LUN_CCDECR 0x02 /* decrement command 
count */ -#define LUN_ININCR 0x40 /* increment immed. notify count */ -#define LUN_INDECR 0x80 /* decrement immed. notify count */ - -/* - * le_status values - */ -#define LUN_OK 0x01 /* we be rockin' */ -#define LUN_ERR 0x04 /* request completed with error */ -#define LUN_INVAL 0x06 /* invalid request */ -#define LUN_NOCAP 0x16 /* can't provide requested capability */ -#define LUN_ENABLED 0x3E /* LUN already enabled */ - -/* - * Immediate Notify Entry structure - */ -#define IN_MSGLEN 8 /* 8 bytes */ -#define IN_RSVDLEN 8 /* 8 words */ -typedef struct { - isphdr_t in_header; - uint32_t in_reserved; - uint8_t in_lun; /* lun */ - uint8_t in_iid; /* initiator */ - uint8_t in_reserved2; - uint8_t in_tgt; /* target */ - uint32_t in_flags; - uint8_t in_status; - uint8_t in_rsvd2; - uint8_t in_tag_val; /* tag value */ - uint8_t in_tag_type; /* tag type */ - uint16_t in_seqid; /* sequence id */ - uint8_t in_msg[IN_MSGLEN]; /* SCSI message bytes */ - uint16_t in_reserved3[IN_RSVDLEN]; - uint8_t in_sense[QLTM_SENSELEN];/* suggested sense data */ -} in_entry_t; - -typedef struct { - isphdr_t in_header; - uint32_t in_reserved; - uint8_t in_lun; /* lun */ - uint8_t in_iid; /* initiator */ - uint16_t in_scclun; - uint32_t in_reserved2; - uint16_t in_status; - uint16_t in_task_flags; - uint16_t in_seqid; /* sequence id */ -} in_fcentry_t; - -typedef struct { - isphdr_t in_header; - uint32_t in_reserved; - uint16_t in_iid; /* initiator */ - uint16_t in_scclun; - uint32_t in_reserved2; - uint16_t in_status; - uint16_t in_task_flags; - uint16_t in_seqid; /* sequence id */ -} in_fcentry_e_t; - -/* - * Values for the in_status field - */ -#define IN_REJECT 0x0D /* Message Reject message received */ -#define IN_RESET 0x0E /* Bus Reset occurred */ -#define IN_NO_RCAP 0x16 /* requested capability not available */ -#define IN_IDE_RECEIVED 0x33 /* Initiator Detected Error msg received */ -#define IN_RSRC_UNAVAIL 0x34 /* resource unavailable */ -#define IN_MSG_RECEIVED 0x36 /* SCSI message 
received */ -#define IN_ABORT_TASK 0x20 /* task named in RX_ID is being aborted (FC) */ -#define IN_PORT_LOGOUT 0x29 /* port has logged out (FC) */ -#define IN_PORT_CHANGED 0x2A /* port changed */ -#define IN_GLOBAL_LOGO 0x2E /* all ports logged out */ -#define IN_NO_NEXUS 0x3B /* Nexus not established */ - -/* - * Values for the in_task_flags field- should only get one at a time! - */ -#define TASK_FLAGS_RESERVED_MASK (0xe700) -#define TASK_FLAGS_CLEAR_ACA (1<<14) -#define TASK_FLAGS_TARGET_RESET (1<<13) -#define TASK_FLAGS_LUN_RESET (1<<12) -#define TASK_FLAGS_CLEAR_TASK_SET (1<<10) -#define TASK_FLAGS_ABORT_TASK_SET (1<<9) - -/* - * ISP24XX Immediate Notify - */ -typedef struct { - isphdr_t in_header; - uint32_t in_reserved; - uint16_t in_nphdl; - uint16_t in_reserved1; - uint16_t in_flags; - uint16_t in_srr_rxid; - uint16_t in_status; - uint8_t in_status_subcode; - uint8_t in_reserved2; - uint32_t in_rxid; - uint16_t in_srr_reloff_lo; - uint16_t in_srr_reloff_hi; - uint16_t in_srr_iu; - uint16_t in_srr_oxid; - uint8_t in_reserved3[18]; - uint8_t in_reserved4; - uint8_t in_vpindex; - uint32_t in_reserved5; - uint16_t in_portid_lo; - uint8_t in_portid_hi; - uint8_t in_reserved6; - uint16_t in_reserved7; - uint16_t in_oxid; -} in_fcentry_24xx_t; - -#define IN24XX_FLAG_PUREX_IOCB 0x1 -#define IN24XX_FLAG_GLOBAL_LOGOUT 0x2 - -#define IN24XX_LIP_RESET 0x0E -#define IN24XX_LINK_RESET 0x0F -#define IN24XX_PORT_LOGOUT 0x29 -#define IN24XX_PORT_CHANGED 0x2A -#define IN24XX_LINK_FAILED 0x2E -#define IN24XX_SRR_RCVD 0x45 -#define IN24XX_ELS_RCVD 0x46 /* - * login-affectin ELS received- check - * subcode for specific opcode - */ -/* - * Notify Acknowledge Entry structure - */ -#define NA_RSVDLEN 22 -typedef struct { - isphdr_t na_header; - uint32_t na_reserved; - uint8_t na_lun; /* lun */ - uint8_t na_iid; /* initiator */ - uint8_t na_reserved2; - uint8_t na_tgt; /* target */ - uint32_t na_flags; - uint8_t na_status; - uint8_t na_event; - uint16_t na_seqid; /* sequence id 
*/ - uint16_t na_reserved3[NA_RSVDLEN]; -} na_entry_t; - -/* - * Value for the na_event field - */ -#define NA_RST_CLRD 0x80 /* Clear an async event notification */ -#define NA_OK 0x01 /* Notify Acknowledge Succeeded */ -#define NA_INVALID 0x06 /* Invalid Notify Acknowledge */ - -#define NA2_RSVDLEN 21 -typedef struct { - isphdr_t na_header; - uint32_t na_reserved; - uint8_t na_reserved1; - uint8_t na_iid; /* initiator loop id */ - uint16_t na_response; - uint16_t na_flags; - uint16_t na_reserved2; - uint16_t na_status; - uint16_t na_task_flags; - uint16_t na_seqid; /* sequence id */ - uint16_t na_reserved3[NA2_RSVDLEN]; -} na_fcentry_t; - -typedef struct { - isphdr_t na_header; - uint32_t na_reserved; - uint16_t na_iid; /* initiator loop id */ - uint16_t na_response; /* response code */ - uint16_t na_flags; - uint16_t na_reserved2; - uint16_t na_status; - uint16_t na_task_flags; - uint16_t na_seqid; /* sequence id */ - uint16_t na_reserved3[NA2_RSVDLEN]; -} na_fcentry_e_t; - -#define NAFC_RCOUNT 0x80 /* increment resource count */ -#define NAFC_RST_CLRD 0x20 /* Clear LIP Reset */ -#define NAFC_TVALID 0x10 /* task mangement response code is valid */ - -/* - * ISP24XX Notify Acknowledge - */ - -typedef struct { - isphdr_t na_header; - uint32_t na_handle; - uint16_t na_nphdl; - uint16_t na_reserved1; - uint16_t na_flags; - uint16_t na_srr_rxid; - uint16_t na_status; - uint8_t na_status_subcode; - uint8_t na_reserved2; - uint32_t na_rxid; - uint16_t na_srr_reloff_lo; - uint16_t na_srr_reloff_hi; - uint16_t na_srr_iu; - uint16_t na_srr_flags; - uint8_t na_reserved3[18]; - uint8_t na_reserved4; - uint8_t na_vpindex; - uint8_t na_srr_reject_vunique; - uint8_t na_srr_reject_explanation; - uint8_t na_srr_reject_code; - uint8_t na_reserved5; - uint8_t na_reserved6[6]; - uint16_t na_oxid; -} na_fcentry_24xx_t; - -/* - * Accept Target I/O Entry structure - */ -#define ATIO_CDBLEN 26 - -typedef struct { - isphdr_t at_header; - uint16_t at_reserved; - uint16_t at_handle; - 
uint8_t at_lun; /* lun */ - uint8_t at_iid; /* initiator */ - uint8_t at_cdblen; /* cdb length */ - uint8_t at_tgt; /* target */ - uint32_t at_flags; - uint8_t at_status; /* firmware status */ - uint8_t at_scsi_status; /* scsi status */ - uint8_t at_tag_val; /* tag value */ - uint8_t at_tag_type; /* tag type */ - uint8_t at_cdb[ATIO_CDBLEN]; /* received CDB */ - uint8_t at_sense[QLTM_SENSELEN];/* suggested sense data */ -} at_entry_t; - -/* - * at_flags values - */ -#define AT_NODISC 0x00008000 /* disconnect disabled */ -#define AT_TQAE 0x00000002 /* Tagged Queue Action enabled */ - -/* - * at_status values - */ -#define AT_PATH_INVALID 0x07 /* ATIO sent to firmware for disabled lun */ -#define AT_RESET 0x0E /* SCSI Bus Reset Occurred */ -#define AT_PHASE_ERROR 0x14 /* Bus phase sequence error */ -#define AT_NOCAP 0x16 /* Requested capability not available */ -#define AT_BDR_MSG 0x17 /* Bus Device Reset msg received */ -#define AT_CDB 0x3D /* CDB received */ -/* - * Macros to create and fetch and test concatenated handle and tag value macros - */ - -#define AT_MAKE_TAGID(tid, bus, inst, aep) \ - tid = aep->at_handle; \ - if (aep->at_flags & AT_TQAE) { \ - tid |= (aep->at_tag_val << 16); \ - tid |= (1 << 24); \ - } \ - tid |= (bus << 25); \ - tid |= (inst << 26) - -#define CT_MAKE_TAGID(tid, bus, inst, ct) \ - tid = ct->ct_fwhandle; \ - if (ct->ct_flags & CT_TQAE) { \ - tid |= (ct->ct_tag_val << 16); \ - tid |= (1 << 24); \ - } \ - tid |= ((bus & 0x1) << 25); \ - tid |= (inst << 26) - -#define AT_HAS_TAG(val) ((val) & (1 << 24)) -#define AT_GET_TAG(val) (((val) >> 16) & 0xff) -#define AT_GET_INST(val) (((val) >> 26) & 0x3f) -#define AT_GET_BUS(val) (((val) >> 25) & 0x1) -#define AT_GET_HANDLE(val) ((val) & 0xffff) - -#define IN_MAKE_TAGID(tid, bus, inst, inp) \ - tid = inp->in_seqid; \ - tid |= (inp->in_tag_val << 16); \ - tid |= (1 << 24); \ - tid |= (bus << 25); \ - tid |= (inst << 26) - -#define TAG_INSERT_INST(tid, inst) \ - tid &= ~(0x3ffffff); \ - tid |= (inst 
<< 26) - -#define TAG_INSERT_BUS(tid, bus) \ - tid &= ~(1 << 25); \ - tid |= (bus << 25) - -/* - * Accept Target I/O Entry structure, Type 2 - */ -#define ATIO2_CDBLEN 16 - -typedef struct { - isphdr_t at_header; - uint32_t at_reserved; - uint8_t at_lun; /* lun or reserved */ - uint8_t at_iid; /* initiator */ - uint16_t at_rxid; /* response ID */ - uint16_t at_flags; - uint16_t at_status; /* firmware status */ - uint8_t at_crn; /* command reference number */ - uint8_t at_taskcodes; - uint8_t at_taskflags; - uint8_t at_execodes; - uint8_t at_cdb[ATIO2_CDBLEN]; /* received CDB */ - uint32_t at_datalen; /* allocated data len */ - uint16_t at_scclun; /* SCC Lun or reserved */ - uint16_t at_wwpn[4]; /* WWPN of initiator */ - uint16_t at_reserved2[6]; - uint16_t at_oxid; -} at2_entry_t; - -typedef struct { - isphdr_t at_header; - uint32_t at_reserved; - uint16_t at_iid; /* initiator */ - uint16_t at_rxid; /* response ID */ - uint16_t at_flags; - uint16_t at_status; /* firmware status */ - uint8_t at_crn; /* command reference number */ - uint8_t at_taskcodes; - uint8_t at_taskflags; - uint8_t at_execodes; - uint8_t at_cdb[ATIO2_CDBLEN]; /* received CDB */ - uint32_t at_datalen; /* allocated data len */ - uint16_t at_scclun; /* SCC Lun or reserved */ - uint16_t at_wwpn[4]; /* WWPN of initiator */ - uint16_t at_reserved2[6]; - uint16_t at_oxid; -} at2e_entry_t; - -#define ATIO2_WWPN_OFFSET 0x2A -#define ATIO2_OXID_OFFSET 0x3E - -#define ATIO2_TC_ATTR_MASK 0x7 -#define ATIO2_TC_ATTR_SIMPLEQ 0 -#define ATIO2_TC_ATTR_HEADOFQ 1 -#define ATIO2_TC_ATTR_ORDERED 2 -#define ATIO2_TC_ATTR_ACAQ 4 -#define ATIO2_TC_ATTR_UNTAGGED 5 - -#define ATIO2_EX_WRITE 0x1 -#define ATIO2_EX_READ 0x2 -/* - * Macros to create and fetch and test concatenated handle and tag value macros - */ -#define AT2_MAKE_TAGID(tid, bus, inst, aep) \ - tid = aep->at_rxid; \ - tid |= (((uint64_t)inst) << 32); \ - tid |= (((uint64_t)bus) << 48) - -#define CT2_MAKE_TAGID(tid, bus, inst, ct) \ - tid = ct->ct_rxid; \ - 
tid |= (((uint64_t)inst) << 32); \ - tid |= (((uint64_t)(bus & 0xff)) << 48) - -#define AT2_HAS_TAG(val) 1 -#define AT2_GET_TAG(val) ((val) & 0xffffffff) -#define AT2_GET_INST(val) ((val) >> 32) -#define AT2_GET_HANDLE AT2_GET_TAG -#define AT2_GET_BUS(val) (((val) >> 48) & 0xff) - -#define FC_HAS_TAG AT2_HAS_TAG -#define FC_GET_TAG AT2_GET_TAG -#define FC_GET_INST AT2_GET_INST -#define FC_GET_HANDLE AT2_GET_HANDLE - -#define IN_FC_MAKE_TAGID(tid, bus, inst, seqid) \ - tid = seqid; \ - tid |= (((uint64_t)inst) << 32); \ - tid |= (((uint64_t)(bus & 0xff)) << 48) - -#define FC_TAG_INSERT_INST(tid, inst) \ - tid &= ~0xffff00000000ull; \ - tid |= (((uint64_t)inst) << 32) - -/* - * 24XX ATIO Definition - * - * This is *quite* different from other entry types. - * First of all, it has its own queue it comes in on. - * - * Secondly, it doesn't have a normal header. - * - * Thirdly, it's just a passthru of the FCP CMND IU - * which is recorded in big endian mode. - */ -typedef struct { - uint8_t at_type; - uint8_t at_count; - /* - * Task attribute in high four bits, - * the rest is the FCP CMND IU Length. - * NB: the command can extend past the - * length for a single queue entry. - */ - uint16_t at_ta_len; - uint32_t at_rxid; - fc_hdr_t at_hdr; - fcp_cmnd_iu_t at_cmnd; -} at7_entry_t; - - -/* - * Continue Target I/O Entry structure - * Request from driver. The response from the - * ISP firmware is the same except that the last 18 - * bytes are overwritten by suggested sense data if - * the 'autosense valid' bit is set in the status byte. 
- */ -typedef struct { - isphdr_t ct_header; - uint16_t ct_syshandle; - uint16_t ct_fwhandle; /* required by f/w */ - uint8_t ct_lun; /* lun */ - uint8_t ct_iid; /* initiator id */ - uint8_t ct_reserved2; - uint8_t ct_tgt; /* our target id */ - uint32_t ct_flags; - uint8_t ct_status; /* isp status */ - uint8_t ct_scsi_status; /* scsi status */ - uint8_t ct_tag_val; /* tag value */ - uint8_t ct_tag_type; /* tag type */ - uint32_t ct_xfrlen; /* transfer length */ - uint32_t ct_resid; /* residual length */ - uint16_t ct_timeout; - uint16_t ct_seg_count; - ispds_t ct_dataseg[ISP_RQDSEG]; -} ct_entry_t; - -/* - * For some of the dual port SCSI adapters, port (bus #) is reported - * in the MSbit of ct_iid. Bit fields are a bit too awkward here. - * - * Note that this does not apply to FC adapters at all which can and - * do report IIDs between 0x81 && 0xfe (or 0x7ff) which represent devices - * that have logged in across a SCSI fabric. - */ -#define GET_IID_VAL(x) (x & 0x3f) -#define GET_BUS_VAL(x) ((x >> 7) & 0x1) -#define SET_IID_VAL(y, x) y = ((y & ~0x3f) | (x & 0x3f)) -#define SET_BUS_VAL(y, x) y = ((y & 0x3f) | ((x & 0x1) << 7)) - -/* - * ct_flags values - */ -#define CT_TQAE 0x00000002 /* bit 1, Tagged Queue Action enable */ -#define CT_DATA_IN 0x00000040 /* bits 6&7, Data direction */ -#define CT_DATA_OUT 0x00000080 /* bits 6&7, Data direction */ -#define CT_NO_DATA 0x000000C0 /* bits 6&7, Data direction */ -#define CT_CCINCR 0x00000100 /* bit 8, autoincrement atio count */ -#define CT_DATAMASK 0x000000C0 /* bits 6&7, Data direction */ -#define CT_INISYNCWIDE 0x00004000 /* bit 14, Do Sync/Wide Negotiation */ -#define CT_NODISC 0x00008000 /* bit 15, Disconnects disabled */ -#define CT_DSDP 0x01000000 /* bit 24, Disable Save Data Pointers */ -#define CT_SENDRDP 0x04000000 /* bit 26, Send Restore Pointers msg */ -#define CT_SENDSTATUS 0x80000000 /* bit 31, Send SCSI status byte */ - -/* - * ct_status values - * - set by the firmware when it returns the CTIO - */ 
-#define CT_OK 0x01 /* completed without error */ -#define CT_ABORTED 0x02 /* aborted by host */ -#define CT_ERR 0x04 /* see sense data for error */ -#define CT_INVAL 0x06 /* request for disabled lun */ -#define CT_NOPATH 0x07 /* invalid ITL nexus */ -#define CT_INVRXID 0x08 /* (FC only) Invalid RX_ID */ -#define CT_DATA_OVER 0x09 /* (FC only) Data Overrun */ -#define CT_RSELTMO 0x0A /* reselection timeout after 2 tries */ -#define CT_TIMEOUT 0x0B /* timed out */ -#define CT_RESET 0x0E /* SCSI Bus Reset occurred */ -#define CT_PARITY 0x0F /* Uncorrectable Parity Error */ -#define CT_BUS_ERROR 0x10 /* (FC Only) DMA PCI Error */ -#define CT_PANIC 0x13 /* Unrecoverable Error */ -#define CT_PHASE_ERROR 0x14 /* Bus phase sequence error */ -#define CT_DATA_UNDER 0x15 /* (FC only) Data Underrun */ -#define CT_BDR_MSG 0x17 /* Bus Device Reset msg received */ -#define CT_TERMINATED 0x19 /* due to Terminate Transfer mbox cmd */ -#define CT_PORTUNAVAIL 0x28 /* port not available */ -#define CT_LOGOUT 0x29 /* port logout */ -#define CT_PORTCHANGED 0x2A /* port changed */ -#define CT_IDE 0x33 /* Initiator Detected Error */ -#define CT_NOACK 0x35 /* Outstanding Immed. Notify. entry */ -#define CT_SRR 0x45 /* SRR Received */ -#define CT_LUN_RESET 0x48 /* Lun Reset Received */ - -/* - * When the firmware returns a CTIO entry, it may overwrite the last - * part of the structure with sense data. This starts at offset 0x2E - * into the entry, which is in the middle of ct_dataseg[1]. Rather - * than define a new struct for this, I'm just using the sense data - * offset. - */ -#define CTIO_SENSE_OFFSET 0x2E - -/* - * Entry length in u_longs. All entries are the same size so - * any one will do as the numerator. 
- */ -#define UINT32_ENTRY_SIZE (sizeof(at_entry_t)/sizeof(uint32_t)) - -/* - * QLA2100 CTIO (type 2) entry - */ -#define MAXRESPLEN 26 -typedef struct { - isphdr_t ct_header; - uint32_t ct_syshandle; - uint8_t ct_lun; /* lun */ - uint8_t ct_iid; /* initiator id */ - uint16_t ct_rxid; /* response ID */ - uint16_t ct_flags; - uint16_t ct_status; /* isp status */ - uint16_t ct_timeout; - uint16_t ct_seg_count; - uint32_t ct_reloff; /* relative offset */ - int32_t ct_resid; /* residual length */ - union { - /* - * The three different modes that the target driver - * can set the CTIO{2,3,4} up as. - * - * The first is for sending FCP_DATA_IUs as well as - * (optionally) sending a terminal SCSI status FCP_RSP_IU. - * - * The second is for sending SCSI sense data in an FCP_RSP_IU. - * Note that no FCP_DATA_IUs will be sent. - * - * The third is for sending FCP_RSP_IUs as built specifically - * in system memory as located by the isp_dataseg. - */ - struct { - uint32_t _reserved; - uint16_t _reserved2; - uint16_t ct_scsi_status; - uint32_t ct_xfrlen; - union { - ispds_t ct_dataseg[ISP_RQDSEG_T2]; - ispds64_t ct_dataseg64[ISP_RQDSEG_T3]; - ispdslist_t ct_dslist; - } u; - } m0; - struct { - uint16_t _reserved; - uint16_t _reserved2; - uint16_t ct_senselen; - uint16_t ct_scsi_status; - uint16_t ct_resplen; - uint8_t ct_resp[MAXRESPLEN]; - } m1; - struct { - uint32_t _reserved; - uint16_t _reserved2; - uint16_t _reserved3; - uint32_t ct_datalen; - ispds_t ct_fcp_rsp_iudata; - } m2; - } rsp; -} ct2_entry_t; - -typedef struct { - isphdr_t ct_header; - uint32_t ct_syshandle; - uint16_t ct_iid; /* initiator id */ - uint16_t ct_rxid; /* response ID */ - uint16_t ct_flags; - uint16_t ct_status; /* isp status */ - uint16_t ct_timeout; - uint16_t ct_seg_count; - uint32_t ct_reloff; /* relative offset */ - int32_t ct_resid; /* residual length */ - union { - struct { - uint32_t _reserved; - uint16_t _reserved2; - uint16_t ct_scsi_status; - uint32_t ct_xfrlen; - union { - ispds_t 
ct_dataseg[ISP_RQDSEG_T2]; - ispds64_t ct_dataseg64[ISP_RQDSEG_T3]; - ispdslist_t ct_dslist; - } u; - } m0; - struct { - uint16_t _reserved; - uint16_t _reserved2; - uint16_t ct_senselen; - uint16_t ct_scsi_status; - uint16_t ct_resplen; - uint8_t ct_resp[MAXRESPLEN]; - } m1; - struct { - uint32_t _reserved; - uint16_t _reserved2; - uint16_t _reserved3; - uint32_t ct_datalen; - ispds_t ct_fcp_rsp_iudata; - } m2; - } rsp; -} ct2e_entry_t; - -/* - * ct_flags values for CTIO2 - */ -#define CT2_FLAG_MODE0 0x0000 -#define CT2_FLAG_MODE1 0x0001 -#define CT2_FLAG_MODE2 0x0002 -#define CT2_FLAG_MMASK 0x0003 -#define CT2_DATA_IN 0x0040 -#define CT2_DATA_OUT 0x0080 -#define CT2_NO_DATA 0x00C0 -#define CT2_DATAMASK 0x00C0 -#define CT2_CCINCR 0x0100 -#define CT2_FASTPOST 0x0200 -#define CT2_CONFIRM 0x2000 -#define CT2_TERMINATE 0x4000 -#define CT2_SENDSTATUS 0x8000 - -/* - * ct_status values are (mostly) the same as that for ct_entry. - */ - -/* - * ct_scsi_status values- the low 8 bits are the normal SCSI status - * we know and love. The upper 8 bits are validity markers for FCP_RSP_IU - * fields. 
- */ -#define CT2_RSPLEN_VALID 0x0100 -#define CT2_SNSLEN_VALID 0x0200 -#define CT2_DATA_OVER 0x0400 -#define CT2_DATA_UNDER 0x0800 - -/* - * ISP24XX CTIO - */ -#define MAXRESPLEN_24XX 24 -typedef struct { - isphdr_t ct_header; - uint32_t ct_syshandle; - uint16_t ct_nphdl; /* status on returned CTIOs */ - uint16_t ct_timeout; - uint16_t ct_seg_count; - uint8_t ct_vpindex; - uint8_t ct_xflags; - uint16_t ct_iid_lo; /* low 16 bits of portid */ - uint8_t ct_iid_hi; /* hi 8 bits of portid */ - uint8_t ct_reserved; - uint32_t ct_rxid; - uint16_t ct_senselen; /* mode 0 only */ - uint16_t ct_flags; - int32_t ct_resid; /* residual length */ - uint16_t ct_oxid; - uint16_t ct_scsi_status; /* modes 0 && 1 only */ - union { - struct { - uint32_t reloff; - uint32_t reserved0; - uint32_t ct_xfrlen; - uint32_t reserved1; - ispds64_t ds; - } m0; - struct { - uint16_t ct_resplen; - uint16_t reserved; - uint8_t ct_resp[MAXRESPLEN_24XX]; - } m1; - struct { - uint32_t reserved0; - uint32_t ct_datalen; - uint32_t reserved1; - ispds64_t ct_fcp_rsp_iudata; - } m2; - } rsp; -} ct7_entry_t; - -/* - * ct_flags values for CTIO7 - */ -#define CT7_DATA_IN 0x0002 -#define CT7_DATA_OUT 0x0001 -#define CT7_NO_DATA 0x0000 -#define CT7_DATAMASK 0x003 -#define CT7_DSD_ENABLE 0x0004 -#define CT7_CONF_STSFD 0x0010 -#define CT7_EXPLCT_CONF 0x0020 -#define CT7_FLAG_MODE0 0x0000 -#define CT7_FLAG_MODE1 0x0040 -#define CT7_FLAG_MODE7 0x0080 -#define CT7_FLAG_MMASK 0x00C0 -#define CT7_FASTPOST 0x0100 -#define CT7_ATTR_MASK 0x1e00 /* task attributes from atio7 */ -#define CT7_CONFIRM 0x2000 -#define CT7_TERMINATE 0x4000 -#define CT7_SENDSTATUS 0x8000 - -/* - * Type 7 CTIO status codes - */ -#define CT7_OK 0x01 /* completed without error */ -#define CT7_ABORTED 0x02 /* aborted by host */ -#define CT7_ERR 0x04 /* see sense data for error */ -#define CT7_INVAL 0x06 /* request for disabled lun */ -#define CT7_INVRXID 0x08 /* (FC only) Invalid RX_ID */ -#define CT7_DATA_OVER 0x09 /* (FC only) Data Overrun */ 
-#define CT7_TIMEOUT 0x0B /* timed out */ -#define CT7_RESET 0x0E /* LIP Rset Received */ -#define CT7_BUS_ERROR 0x10 /* DMA PCI Error */ -#define CT7_REASSY_ERR 0x11 /* DMA reassembly error */ -#define CT7_DATA_UNDER 0x15 /* (FC only) Data Underrun */ -#define CT7_PORTUNAVAIL 0x28 /* port not available */ -#define CT7_LOGOUT 0x29 /* port logout */ -#define CT7_PORTCHANGED 0x2A /* port changed */ -#define CT7_SRR 0x45 /* SRR Received */ - -/* - * Other 24XX related target IOCBs - */ - -/* - * ABTS Received - */ -typedef struct { - isphdr_t abts_header; - uint8_t abts_reserved0[6]; - uint16_t abts_nphdl; - uint16_t abts_reserved1; - uint16_t abts_sof; - uint32_t abts_rxid_abts; - uint16_t abts_did_lo; - uint8_t abts_did_hi; - uint8_t abts_r_ctl; - uint16_t abts_sid_lo; - uint8_t abts_sid_hi; - uint8_t abts_cs_ctl; - uint16_t abts_fs_ctl; - uint8_t abts_f_ctl; - uint8_t abts_type; - uint16_t abts_seq_cnt; - uint8_t abts_df_ctl; - uint8_t abts_seq_id; - uint16_t abts_rx_id; - uint16_t abts_ox_id; - uint32_t abts_param; - uint8_t abts_reserved2[16]; - uint32_t abts_rxid_task; -} abts_t; - -typedef struct { - isphdr_t abts_rsp_header; - uint32_t abts_rsp_handle; - uint16_t abts_rsp_status; - uint16_t abts_rsp_nphdl; - uint16_t abts_rsp_ctl_flags; - uint16_t abts_rsp_sof; - uint32_t abts_rsp_rxid_abts; - uint16_t abts_rsp_did_lo; - uint8_t abts_rsp_did_hi; - uint8_t abts_rsp_r_ctl; - uint16_t abts_rsp_sid_lo; - uint8_t abts_rsp_sid_hi; - uint8_t abts_rsp_cs_ctl; - uint16_t abts_rsp_f_ctl_lo; - uint8_t abts_rsp_f_ctl_hi; - uint8_t abts_rsp_type; - uint16_t abts_rsp_seq_cnt; - uint8_t abts_rsp_df_ctl; - uint8_t abts_rsp_seq_id; - uint16_t abts_rsp_rx_id; - uint16_t abts_rsp_ox_id; - uint32_t abts_rsp_param; - union { - struct { - uint16_t reserved; - uint8_t last_seq_id; - uint8_t seq_id_valid; - uint16_t aborted_rx_id; - uint16_t aborted_ox_id; - uint16_t high_seq_cnt; - uint16_t low_seq_cnt; - uint8_t reserved2[4]; - } ba_acc; - struct { - uint8_t vendor_unique; - 
uint8_t explanation; - uint8_t reason; - uint8_t reserved; - uint8_t reserved2[12]; - } ba_rjt; - struct { - uint8_t reserved[8]; - uint32_t subcode1; - uint32_t subcode2; - } rsp; - uint8_t reserved[16]; - } abts_rsp_payload; - uint32_t abts_rsp_rxid_task; -} abts_rsp_t; - -/* terminate this ABTS exchange */ -#define ISP24XX_ABTS_RSP_TERMINATE 0x01 - -#define ISP24XX_ABTS_RSP_COMPLETE 0x00 -#define ISP24XX_ABTS_RSP_RESET 0x04 -#define ISP24XX_ABTS_RSP_ABORTED 0x05 -#define ISP24XX_ABTS_RSP_TIMEOUT 0x06 -#define ISP24XX_ABTS_RSP_INVXID 0x08 -#define ISP24XX_ABTS_RSP_LOGOUT 0x29 -#define ISP24XX_ABTS_RSP_SUBCODE 0x31 - -/* * Debug macros */ #define ISP_TDQE(isp, msg, idx, arg) \ if (isp->isp_dblev & ISP_LOGTDEBUG2) isp_print_qentry(isp, msg, idx, arg) -#ifndef ISP_TOOLS /* - * The functions below are for the publicly available - * target mode functions that are internal to the Qlogic driver. + * Special Constatns */ - -/* - * This function handles new response queue entry appropriate for target mode. - */ -int isp_target_notify(ispsoftc_t *, void *, uint32_t *); - -/* - * This function externalizes the ability to acknowledge an Immediate Notify - * request. - */ -void isp_notify_ack(ispsoftc_t *, void *); - -/* - * Enable/Disable/Modify a logical unit. - * (softc, cmd, bus, tgt, lun, cmd_cnt, inotify_cnt, opaque) - */ -#define DFLT_CMND_CNT 0xfe /* unmonitored */ -#define DFLT_INOT_CNT 0xfe /* unmonitored */ -int isp_lun_cmd(ispsoftc_t *, int, int, int, int, int, int, uint32_t); - -/* - * General request queue 'put' routine for target mode entries. - */ -int isp_target_put_entry(ispsoftc_t *isp, void *); - -/* - * General routine to put back an ATIO entry- - * used for replenishing f/w resource counts. - * The argument is a pointer to a source ATIO - * or ATIO2. - */ -int isp_target_put_atio(ispsoftc_t *, void *); - -/* - * General routine to send a final CTIO for a command- used mostly for - * local responses. 
- */ -int isp_endcmd(ispsoftc_t *, void *, uint32_t, uint32_t); -#define ECMD_SVALID 0x100 - -/* - * Handle an asynchronous event - * - * Return nonzero if the interrupt that generated this event has been dismissed. - */ -int isp_target_async(ispsoftc_t *, int, int); -#endif +#define INI_ANY ((uint64_t) -1) +#define VALID_INI(ini) (ini != INI_NONE && ini != INI_ANY) +#define LUN_ANY 0xffff +#define TGT_ANY ((uint64_t) -1) +#define TAG_ANY ((uint64_t) 0) #endif /* _ISP_TARGET_H */ Index: head/sys/dev/isp/ispmbox.h =================================================================== --- head/sys/dev/isp/ispmbox.h (revision 196007) +++ head/sys/dev/isp/ispmbox.h (revision 196008) @@ -1,1412 +1,2544 @@ /* $FreeBSD$ */ /*- - * Copyright (c) 1997-2007 by Matthew Jacob + * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * */ /* * Mailbox and Queue Entry Definitions for for Qlogic ISP SCSI adapters. */ #ifndef _ISPMBOX_H #define _ISPMBOX_H /* * Mailbox Command Opcodes */ #define MBOX_NO_OP 0x0000 #define MBOX_LOAD_RAM 0x0001 #define MBOX_EXEC_FIRMWARE 0x0002 #define MBOX_DUMP_RAM 0x0003 #define MBOX_WRITE_RAM_WORD 0x0004 #define MBOX_READ_RAM_WORD 0x0005 #define MBOX_MAILBOX_REG_TEST 0x0006 #define MBOX_VERIFY_CHECKSUM 0x0007 #define MBOX_ABOUT_FIRMWARE 0x0008 #define MBOX_LOAD_RISC_RAM_2100 0x0009 /* a */ #define MBOX_LOAD_RISC_RAM 0x000b /* c */ #define MBOX_WRITE_RAM_WORD_EXTENDED 0x000d #define MBOX_CHECK_FIRMWARE 0x000e #define MBOX_READ_RAM_WORD_EXTENDED 0x000f #define MBOX_INIT_REQ_QUEUE 0x0010 #define MBOX_INIT_RES_QUEUE 0x0011 #define MBOX_EXECUTE_IOCB 0x0012 #define MBOX_WAKE_UP 0x0013 #define MBOX_STOP_FIRMWARE 0x0014 #define MBOX_ABORT 0x0015 #define MBOX_ABORT_DEVICE 0x0016 #define MBOX_ABORT_TARGET 0x0017 #define MBOX_BUS_RESET 0x0018 #define MBOX_STOP_QUEUE 0x0019 #define MBOX_START_QUEUE 0x001a #define MBOX_SINGLE_STEP_QUEUE 0x001b #define MBOX_ABORT_QUEUE 0x001c #define MBOX_GET_DEV_QUEUE_STATUS 0x001d /* 1e */ #define MBOX_GET_FIRMWARE_STATUS 0x001f #define MBOX_GET_INIT_SCSI_ID 0x0020 #define MBOX_GET_SELECT_TIMEOUT 0x0021 #define MBOX_GET_RETRY_COUNT 0x0022 #define MBOX_GET_TAG_AGE_LIMIT 0x0023 #define MBOX_GET_CLOCK_RATE 0x0024 #define MBOX_GET_ACT_NEG_STATE 0x0025 #define MBOX_GET_ASYNC_DATA_SETUP_TIME 0x0026 #define MBOX_GET_SBUS_PARAMS 0x0027 #define 
MBOX_GET_PCI_PARAMS MBOX_GET_SBUS_PARAMS #define MBOX_GET_TARGET_PARAMS 0x0028 #define MBOX_GET_DEV_QUEUE_PARAMS 0x0029 #define MBOX_GET_RESET_DELAY_PARAMS 0x002a /* 2b */ /* 2c */ /* 2d */ /* 2e */ /* 2f */ #define MBOX_SET_INIT_SCSI_ID 0x0030 #define MBOX_SET_SELECT_TIMEOUT 0x0031 #define MBOX_SET_RETRY_COUNT 0x0032 #define MBOX_SET_TAG_AGE_LIMIT 0x0033 #define MBOX_SET_CLOCK_RATE 0x0034 #define MBOX_SET_ACT_NEG_STATE 0x0035 #define MBOX_SET_ASYNC_DATA_SETUP_TIME 0x0036 #define MBOX_SET_SBUS_CONTROL_PARAMS 0x0037 #define MBOX_SET_PCI_PARAMETERS 0x0037 #define MBOX_SET_TARGET_PARAMS 0x0038 #define MBOX_SET_DEV_QUEUE_PARAMS 0x0039 #define MBOX_SET_RESET_DELAY_PARAMS 0x003a /* 3b */ /* 3c */ /* 3d */ /* 3e */ /* 3f */ #define MBOX_RETURN_BIOS_BLOCK_ADDR 0x0040 #define MBOX_WRITE_FOUR_RAM_WORDS 0x0041 #define MBOX_EXEC_BIOS_IOCB 0x0042 #define MBOX_SET_FW_FEATURES 0x004a #define MBOX_GET_FW_FEATURES 0x004b #define FW_FEATURE_FAST_POST 0x1 #define FW_FEATURE_LVD_NOTIFY 0x2 #define FW_FEATURE_RIO_32BIT 0x4 #define FW_FEATURE_RIO_16BIT 0x8 #define MBOX_INIT_REQ_QUEUE_A64 0x0052 #define MBOX_INIT_RES_QUEUE_A64 0x0053 #define MBOX_ENABLE_TARGET_MODE 0x0055 #define ENABLE_TARGET_FLAG 0x8000 #define ENABLE_TQING_FLAG 0x0004 #define ENABLE_MANDATORY_DISC 0x0002 #define MBOX_GET_TARGET_STATUS 0x0056 /* These are for the ISP2X00 FC cards */ #define MBOX_GET_LOOP_ID 0x0020 +/* for 24XX cards, outgoing mailbox 7 has these values for F or FL topologies */ +#define ISP24XX_INORDER 0x0100 +#define ISP24XX_NPIV_SAN 0x0400 +#define ISP24XX_VSAN_SAN 0x1000 +#define ISP24XX_FC_SP_SAN 0x2000 + #define MBOX_GET_FIRMWARE_OPTIONS 0x0028 #define MBOX_SET_FIRMWARE_OPTIONS 0x0038 #define MBOX_GET_RESOURCE_COUNT 0x0042 #define MBOX_REQUEST_OFFLINE_MODE 0x0043 #define MBOX_ENHANCED_GET_PDB 0x0047 +#define MBOX_INIT_FIRMWARE_MULTI_ID 0x0048 /* 2400 only */ +#define MBOX_GET_VP_DATABASE 0x0049 /* 2400 only */ +#define MBOX_GET_VP_DATABASE_ENTRY 0x004a /* 2400 only */ #define 
MBOX_EXEC_COMMAND_IOCB_A64 0x0054 #define MBOX_INIT_FIRMWARE 0x0060 #define MBOX_GET_INIT_CONTROL_BLOCK 0x0061 #define MBOX_INIT_LIP 0x0062 #define MBOX_GET_FC_AL_POSITION_MAP 0x0063 #define MBOX_GET_PORT_DB 0x0064 #define MBOX_CLEAR_ACA 0x0065 #define MBOX_TARGET_RESET 0x0066 #define MBOX_CLEAR_TASK_SET 0x0067 #define MBOX_ABORT_TASK_SET 0x0068 #define MBOX_GET_FW_STATE 0x0069 #define MBOX_GET_PORT_NAME 0x006A #define MBOX_GET_LINK_STATUS 0x006B #define MBOX_INIT_LIP_RESET 0x006C #define MBOX_SEND_SNS 0x006E #define MBOX_FABRIC_LOGIN 0x006F #define MBOX_SEND_CHANGE_REQUEST 0x0070 #define MBOX_FABRIC_LOGOUT 0x0071 #define MBOX_INIT_LIP_LOGIN 0x0072 #define MBOX_LUN_RESET 0x007E #define MBOX_DRIVER_HEARTBEAT 0x005B #define MBOX_FW_HEARTBEAT 0x005C #define MBOX_GET_SET_DATA_RATE 0x005D /* 24XX/23XX only */ #define MBGSD_GET_RATE 0 #define MBGSD_SET_RATE 1 #define MBGSD_SET_RATE_NOW 2 /* 24XX only */ #define MBGSD_ONEGB 0 #define MBGSD_TWOGB 1 #define MBGSD_AUTO 2 #define MBGSD_FOURGB 3 /* 24XX only */ +#define MBGSD_EIGHTGB 4 /* 25XX only */ #define ISP2100_SET_PCI_PARAM 0x00ff #define MBOX_BUSY 0x04 /* * Mailbox Command Complete Status Codes */ #define MBOX_COMMAND_COMPLETE 0x4000 #define MBOX_INVALID_COMMAND 0x4001 #define MBOX_HOST_INTERFACE_ERROR 0x4002 #define MBOX_TEST_FAILED 0x4003 #define MBOX_COMMAND_ERROR 0x4005 #define MBOX_COMMAND_PARAM_ERROR 0x4006 #define MBOX_PORT_ID_USED 0x4007 #define MBOX_LOOP_ID_USED 0x4008 #define MBOX_ALL_IDS_USED 0x4009 #define MBOX_NOT_LOGGED_IN 0x400A /* pseudo mailbox completion codes */ #define MBOX_REGS_BUSY 0x6000 /* registers in use */ #define MBOX_TIMEOUT 0x6001 /* command timed out */ #define MBLOGALL 0x000f #define MBLOGNONE 0x0000 #define MBLOGMASK(x) ((x) & 0xf) /* * Asynchronous event status codes */ #define ASYNC_BUS_RESET 0x8001 #define ASYNC_SYSTEM_ERROR 0x8002 #define ASYNC_RQS_XFER_ERR 0x8003 #define ASYNC_RSP_XFER_ERR 0x8004 #define ASYNC_QWAKEUP 0x8005 #define ASYNC_TIMEOUT_RESET 0x8006 #define 
ASYNC_DEVICE_RESET 0x8007 #define ASYNC_EXTMSG_UNDERRUN 0x800A #define ASYNC_SCAM_INT 0x800B #define ASYNC_HUNG_SCSI 0x800C #define ASYNC_KILLED_BUS 0x800D #define ASYNC_BUS_TRANSIT 0x800E /* LVD -> HVD, eg. */ #define ASYNC_LIP_OCCURRED 0x8010 #define ASYNC_LOOP_UP 0x8011 #define ASYNC_LOOP_DOWN 0x8012 #define ASYNC_LOOP_RESET 0x8013 #define ASYNC_PDB_CHANGED 0x8014 #define ASYNC_CHANGE_NOTIFY 0x8015 #define ASYNC_LIP_F8 0x8016 #define ASYNC_LIP_ERROR 0x8017 #define ASYNC_SECURITY_UPDATE 0x801B #define ASYNC_CMD_CMPLT 0x8020 #define ASYNC_CTIO_DONE 0x8021 #define ASYNC_IP_XMIT_DONE 0x8022 #define ASYNC_IP_RECV_DONE 0x8023 #define ASYNC_IP_BROADCAST 0x8024 #define ASYNC_IP_RCVQ_LOW 0x8025 #define ASYNC_IP_RCVQ_EMPTY 0x8026 #define ASYNC_IP_RECV_DONE_ALIGNED 0x8027 #define ASYNC_PTPMODE 0x8030 #define ASYNC_RIO1 0x8031 #define ASYNC_RIO2 0x8032 #define ASYNC_RIO3 0x8033 #define ASYNC_RIO4 0x8034 #define ASYNC_RIO5 0x8035 #define ASYNC_CONNMODE 0x8036 #define ISP_CONN_LOOP 1 #define ISP_CONN_PTP 2 #define ISP_CONN_BADLIP 3 #define ISP_CONN_FATAL 4 #define ISP_CONN_LOOPBACK 5 #define ASYNC_RIO_RESP 0x8040 #define ASYNC_RIO_COMP 0x8042 #define ASYNC_RCV_ERR 0x8048 /* * 2.01.31 2200 Only. Need Bit 13 in Mailbox 1 for Set Firmware Options * mailbox command to enable this. */ #define ASYNC_QFULL_SENT 0x8049 /* * 24XX only */ #define ASYNC_RJT_SENT 0x8049 /* * All IOCB Queue entries are this size */ #define QENTRY_LEN 64 /* + * Special Internal Handle for IOCBs + */ +#define ISP_SPCL_HANDLE 0xa5dead5a + +/* * Command Structure Definitions */ typedef struct { uint32_t ds_base; uint32_t ds_count; } ispds_t; typedef struct { uint32_t ds_base; uint32_t ds_basehi; uint32_t ds_count; } ispds64_t; #define DSTYPE_32BIT 0 #define DSTYPE_64BIT 1 typedef struct { uint16_t ds_type; /* 0-> ispds_t, 1-> ispds64_t */ uint32_t ds_segment; /* unused */ uint32_t ds_base; /* 32 bit address of DSD list */ } ispdslist_t; -/* - * These elements get swizzled around for SBus instances. 
- */ -#define ISP_SWAP8(a, b) { \ - uint8_t tmp; \ - tmp = a; \ - a = b; \ - b = tmp; \ -} typedef struct { uint8_t rqs_entry_type; uint8_t rqs_entry_count; uint8_t rqs_seqno; uint8_t rqs_flags; } isphdr_t; /* RQS Flag definitions */ #define RQSFLAG_CONTINUATION 0x01 #define RQSFLAG_FULL 0x02 #define RQSFLAG_BADHEADER 0x04 #define RQSFLAG_BADPACKET 0x08 -#define RQSFLAG_MASK 0x0f +#define RQSFLAG_BADCOUNT 0x10 +#define RQSFLAG_BADORDER 0x20 +#define RQSFLAG_MASK 0x3f /* RQS entry_type definitions */ #define RQSTYPE_REQUEST 0x01 #define RQSTYPE_DATASEG 0x02 #define RQSTYPE_RESPONSE 0x03 #define RQSTYPE_MARKER 0x04 #define RQSTYPE_CMDONLY 0x05 #define RQSTYPE_ATIO 0x06 /* Target Mode */ #define RQSTYPE_CTIO 0x07 /* Target Mode */ #define RQSTYPE_SCAM 0x08 #define RQSTYPE_A64 0x09 #define RQSTYPE_A64_CONT 0x0a #define RQSTYPE_ENABLE_LUN 0x0b /* Target Mode */ #define RQSTYPE_MODIFY_LUN 0x0c /* Target Mode */ #define RQSTYPE_NOTIFY 0x0d /* Target Mode */ #define RQSTYPE_NOTIFY_ACK 0x0e /* Target Mode */ #define RQSTYPE_CTIO1 0x0f /* Target Mode */ #define RQSTYPE_STATUS_CONT 0x10 #define RQSTYPE_T2RQS 0x11 #define RQSTYPE_CTIO7 0x12 #define RQSTYPE_IP_XMIT 0x13 #define RQSTYPE_TSK_MGMT 0x14 #define RQSTYPE_T4RQS 0x15 #define RQSTYPE_ATIO2 0x16 /* Target Mode */ #define RQSTYPE_CTIO2 0x17 /* Target Mode */ #define RQSTYPE_T7RQS 0x18 #define RQSTYPE_T3RQS 0x19 #define RQSTYPE_IP_XMIT_64 0x1b #define RQSTYPE_CTIO4 0x1e /* Target Mode */ #define RQSTYPE_CTIO3 0x1f /* Target Mode */ #define RQSTYPE_RIO1 0x21 #define RQSTYPE_RIO2 0x22 #define RQSTYPE_IP_RECV 0x23 #define RQSTYPE_IP_RECV_CONT 0x24 #define RQSTYPE_CT_PASSTHRU 0x29 #define RQSTYPE_MS_PASSTHRU 0x29 +#define RQSTYPE_VP_CTRL 0x30 /* 24XX only */ +#define RQSTYPE_VP_MODIFY 0x31 /* 24XX only */ +#define RQSTYPE_RPT_ID_ACQ 0x32 /* 24XX only */ #define RQSTYPE_ABORT_IO 0x33 #define RQSTYPE_T6RQS 0x48 #define RQSTYPE_LOGIN 0x52 #define RQSTYPE_ABTS_RCVD 0x54 /* 24XX only */ #define RQSTYPE_ABTS_RSP 0x55 /* 24XX only */ 
#define ISP_RQDSEG 4 typedef struct { isphdr_t req_header; uint32_t req_handle; uint8_t req_lun_trn; uint8_t req_target; uint16_t req_cdblen; uint16_t req_flags; uint16_t req_reserved; uint16_t req_time; uint16_t req_seg_count; uint8_t req_cdb[12]; ispds_t req_dataseg[ISP_RQDSEG]; } ispreq_t; #define ISP_RQDSEG_A64 2 typedef struct { isphdr_t mrk_header; uint32_t mrk_handle; uint8_t mrk_reserved0; uint8_t mrk_target; uint16_t mrk_modifier; uint16_t mrk_flags; uint16_t mrk_lun; uint8_t mrk_reserved1[48]; } isp_marker_t; typedef struct { isphdr_t mrk_header; uint32_t mrk_handle; uint16_t mrk_nphdl; uint8_t mrk_modifier; uint8_t mrk_reserved0; uint8_t mrk_reserved1; uint8_t mrk_vphdl; uint16_t mrk_reserved2; uint8_t mrk_lun[8]; uint8_t mrk_reserved3[40]; } isp_marker_24xx_t; #define SYNC_DEVICE 0 #define SYNC_TARGET 1 #define SYNC_ALL 2 #define SYNC_LIP 3 #define ISP_RQDSEG_T2 3 typedef struct { isphdr_t req_header; uint32_t req_handle; uint8_t req_lun_trn; uint8_t req_target; uint16_t req_scclun; uint16_t req_flags; uint16_t req_reserved; uint16_t req_time; uint16_t req_seg_count; uint8_t req_cdb[16]; uint32_t req_totalcnt; ispds_t req_dataseg[ISP_RQDSEG_T2]; } ispreqt2_t; typedef struct { isphdr_t req_header; uint32_t req_handle; uint16_t req_target; uint16_t req_scclun; uint16_t req_flags; uint16_t req_reserved; uint16_t req_time; uint16_t req_seg_count; uint8_t req_cdb[16]; uint32_t req_totalcnt; ispds_t req_dataseg[ISP_RQDSEG_T2]; } ispreqt2e_t; #define ISP_RQDSEG_T3 2 typedef struct { isphdr_t req_header; uint32_t req_handle; uint8_t req_lun_trn; uint8_t req_target; uint16_t req_scclun; uint16_t req_flags; uint16_t req_reserved; uint16_t req_time; uint16_t req_seg_count; uint8_t req_cdb[16]; uint32_t req_totalcnt; ispds64_t req_dataseg[ISP_RQDSEG_T3]; } ispreqt3_t; #define ispreq64_t ispreqt3_t /* same as.... 
*/ typedef struct { isphdr_t req_header; uint32_t req_handle; uint16_t req_target; uint16_t req_scclun; uint16_t req_flags; uint16_t req_reserved; uint16_t req_time; uint16_t req_seg_count; uint8_t req_cdb[16]; uint32_t req_totalcnt; ispds64_t req_dataseg[ISP_RQDSEG_T3]; } ispreqt3e_t; /* req_flag values */ #define REQFLAG_NODISCON 0x0001 #define REQFLAG_HTAG 0x0002 #define REQFLAG_OTAG 0x0004 #define REQFLAG_STAG 0x0008 #define REQFLAG_TARGET_RTN 0x0010 #define REQFLAG_NODATA 0x0000 #define REQFLAG_DATA_IN 0x0020 #define REQFLAG_DATA_OUT 0x0040 #define REQFLAG_DATA_UNKNOWN 0x0060 #define REQFLAG_DISARQ 0x0100 #define REQFLAG_FRC_ASYNC 0x0200 #define REQFLAG_FRC_SYNC 0x0400 #define REQFLAG_FRC_WIDE 0x0800 #define REQFLAG_NOPARITY 0x1000 #define REQFLAG_STOPQ 0x2000 #define REQFLAG_XTRASNS 0x4000 #define REQFLAG_PRIORITY 0x8000 typedef struct { isphdr_t req_header; uint32_t req_handle; uint8_t req_lun_trn; uint8_t req_target; uint16_t req_cdblen; uint16_t req_flags; uint16_t req_reserved; uint16_t req_time; uint16_t req_seg_count; uint8_t req_cdb[44]; } ispextreq_t; /* 24XX only */ typedef struct { uint16_t fcd_length; uint16_t fcd_a1500; uint16_t fcd_a3116; uint16_t fcd_a4732; uint16_t fcd_a6348; } fcp_cmnd_ds_t; typedef struct { isphdr_t req_header; uint32_t req_handle; uint16_t req_nphdl; uint16_t req_time; uint16_t req_seg_count; uint16_t req_fc_rsp_dsd_length; uint8_t req_lun[8]; uint16_t req_flags; uint16_t req_fc_cmnd_dsd_length; uint16_t req_fc_cmnd_dsd_a1500; uint16_t req_fc_cmnd_dsd_a3116; uint16_t req_fc_cmnd_dsd_a4732; uint16_t req_fc_cmnd_dsd_a6348; uint16_t req_fc_rsp_dsd_a1500; uint16_t req_fc_rsp_dsd_a3116; uint16_t req_fc_rsp_dsd_a4732; uint16_t req_fc_rsp_dsd_a6348; uint32_t req_totalcnt; uint16_t req_tidlo; uint8_t req_tidhi; uint8_t req_vpidx; ispds64_t req_dataseg; } ispreqt6_t; typedef struct { isphdr_t req_header; uint32_t req_handle; uint16_t req_nphdl; uint16_t req_time; uint16_t req_seg_count; uint16_t req_reserved; uint8_t req_lun[8]; 
uint8_t req_alen_datadir; uint8_t req_task_management; uint8_t req_task_attribute; uint8_t req_crn; uint8_t req_cdb[16]; uint32_t req_dl; uint16_t req_tidlo; uint8_t req_tidhi; uint8_t req_vpidx; ispds64_t req_dataseg; } ispreqt7_t; +/* Task Management Request Function */ +typedef struct { + isphdr_t tmf_header; + uint32_t tmf_handle; + uint16_t tmf_nphdl; + uint8_t tmf_reserved0[2]; + uint16_t tmf_delay; + uint16_t tmf_timeout; + uint8_t tmf_lun[8]; + uint32_t tmf_flags; + uint8_t tmf_reserved1[20]; + uint16_t tmf_tidlo; + uint8_t tmf_tidhi; + uint8_t tmf_vpidx; + uint8_t tmf_reserved2[12]; +} isp24xx_tmf_t; + +#define ISP24XX_TMF_NOSEND 0x80000000 + +#define ISP24XX_TMF_LUN_RESET 0x00000010 +#define ISP24XX_TMF_ABORT_TASK_SET 0x00000008 +#define ISP24XX_TMF_CLEAR_TASK_SET 0x00000004 +#define ISP24XX_TMF_TARGET_RESET 0x00000002 +#define ISP24XX_TMF_CLEAR_ACA 0x00000001 + /* I/O Abort Structure */ typedef struct { isphdr_t abrt_header; uint32_t abrt_handle; uint16_t abrt_nphdl; uint16_t abrt_options; uint32_t abrt_cmd_handle; uint8_t abrt_reserved[32]; uint16_t abrt_tidlo; uint8_t abrt_tidhi; uint8_t abrt_vpidx; uint8_t abrt_reserved1[12]; } isp24xx_abrt_t; -#define ISP24XX_ABRT_NO_ABTS 0x01 /* don't actually send an ABTS */ + +#define ISP24XX_ABRT_NOSEND 0x01 /* don't actually send ABTS */ +#define ISP24XX_ABRT_OKAY 0x00 /* in nphdl on return */ #define ISP24XX_ABRT_ENXIO 0x31 /* in nphdl on return */ #define ISP_CDSEG 7 typedef struct { isphdr_t req_header; uint32_t req_reserved; ispds_t req_dataseg[ISP_CDSEG]; } ispcontreq_t; #define ISP_CDSEG64 5 typedef struct { isphdr_t req_header; ispds64_t req_dataseg[ISP_CDSEG64]; } ispcontreq64_t; typedef struct { isphdr_t req_header; uint32_t req_handle; uint16_t req_scsi_status; uint16_t req_completion_status; uint16_t req_state_flags; uint16_t req_status_flags; uint16_t req_time; #define req_response_len req_time /* FC only */ uint16_t req_sense_len; uint32_t req_resid; uint8_t req_response[8]; /* FC only */ uint8_t 
req_sense_data[32]; } ispstatusreq_t; /* * Status Continuation */ typedef struct { isphdr_t req_header; uint8_t req_sense_data[60]; } ispstatus_cont_t; /* * 24XX Type 0 status */ typedef struct { isphdr_t req_header; uint32_t req_handle; uint16_t req_completion_status; uint16_t req_oxid; uint32_t req_resid; uint16_t req_reserved0; uint16_t req_state_flags; uint16_t req_reserved1; uint16_t req_scsi_status; uint32_t req_fcp_residual; uint32_t req_sense_len; uint32_t req_response_len; uint8_t req_rsp_sense[28]; } isp24xx_statusreq_t; /* * For Qlogic 2X00, the high order byte of SCSI status has * additional meaning. */ #define RQCS_RU 0x800 /* Residual Under */ #define RQCS_RO 0x400 /* Residual Over */ #define RQCS_RESID (RQCS_RU|RQCS_RO) #define RQCS_SV 0x200 /* Sense Length Valid */ #define RQCS_RV 0x100 /* FCP Response Length Valid */ /* * CT Passthru IOCB */ typedef struct { isphdr_t ctp_header; uint32_t ctp_handle; uint16_t ctp_status; uint16_t ctp_nphdl; /* n-port handle */ uint16_t ctp_cmd_cnt; /* Command DSD count */ - uint16_t ctp_vpidx; /* low 8 bits */ + uint8_t ctp_vpidx; + uint8_t ctp_reserved0; uint16_t ctp_time; - uint16_t ctp_reserved0; + uint16_t ctp_reserved1; uint16_t ctp_rsp_cnt; /* Response DSD count */ - uint16_t ctp_reserved1[5]; + uint16_t ctp_reserved2[5]; uint32_t ctp_rsp_bcnt; /* Response byte count */ uint32_t ctp_cmd_bcnt; /* Command byte count */ ispds64_t ctp_dataseg[2]; } isp_ct_pt_t; /* * MS Passthru IOCB */ typedef struct { isphdr_t ms_header; uint32_t ms_handle; uint16_t ms_nphdl; /* handle in high byte for !2k f/w */ uint16_t ms_status; uint16_t ms_flags; uint16_t ms_reserved1; /* low 8 bits */ uint16_t ms_time; uint16_t ms_cmd_cnt; /* Command DSD count */ uint16_t ms_tot_cnt; /* Total DSD Count */ uint8_t ms_type; /* MS type */ uint8_t ms_r_ctl; /* R_CTL */ uint16_t ms_rxid; /* RX_ID */ uint16_t ms_reserved2; uint32_t ms_handle2; uint32_t ms_rsp_bcnt; /* Response byte count */ uint32_t ms_cmd_bcnt; /* Command byte count */ ispds64_t 
ms_dataseg[2]; } isp_ms_t; /* * Completion Status Codes. */ #define RQCS_COMPLETE 0x0000 #define RQCS_DMA_ERROR 0x0002 #define RQCS_RESET_OCCURRED 0x0004 #define RQCS_ABORTED 0x0005 #define RQCS_TIMEOUT 0x0006 #define RQCS_DATA_OVERRUN 0x0007 #define RQCS_DATA_UNDERRUN 0x0015 #define RQCS_QUEUE_FULL 0x001C /* 1X00 Only Completion Codes */ #define RQCS_INCOMPLETE 0x0001 #define RQCS_TRANSPORT_ERROR 0x0003 #define RQCS_COMMAND_OVERRUN 0x0008 #define RQCS_STATUS_OVERRUN 0x0009 #define RQCS_BAD_MESSAGE 0x000a #define RQCS_NO_MESSAGE_OUT 0x000b #define RQCS_EXT_ID_FAILED 0x000c #define RQCS_IDE_MSG_FAILED 0x000d #define RQCS_ABORT_MSG_FAILED 0x000e #define RQCS_REJECT_MSG_FAILED 0x000f #define RQCS_NOP_MSG_FAILED 0x0010 #define RQCS_PARITY_ERROR_MSG_FAILED 0x0011 #define RQCS_DEVICE_RESET_MSG_FAILED 0x0012 #define RQCS_ID_MSG_FAILED 0x0013 #define RQCS_UNEXP_BUS_FREE 0x0014 #define RQCS_XACT_ERR1 0x0018 #define RQCS_XACT_ERR2 0x0019 #define RQCS_XACT_ERR3 0x001A #define RQCS_BAD_ENTRY 0x001B #define RQCS_PHASE_SKIPPED 0x001D #define RQCS_ARQS_FAILED 0x001E #define RQCS_WIDE_FAILED 0x001F #define RQCS_SYNCXFER_FAILED 0x0020 #define RQCS_LVD_BUSERR 0x0021 /* 2X00 Only Completion Codes */ #define RQCS_PORT_UNAVAILABLE 0x0028 #define RQCS_PORT_LOGGED_OUT 0x0029 #define RQCS_PORT_CHANGED 0x002A #define RQCS_PORT_BUSY 0x002B /* 24XX Only Completion Codes */ #define RQCS_24XX_DRE 0x0011 /* data reassembly error */ #define RQCS_24XX_TABORT 0x0013 /* aborted by target */ #define RQCS_24XX_ENOMEM 0x002C /* f/w resource unavailable */ #define RQCS_24XX_TMO 0x0030 /* task management overrun */ /* * 1X00 specific State Flags */ #define RQSF_GOT_BUS 0x0100 #define RQSF_GOT_TARGET 0x0200 #define RQSF_SENT_CDB 0x0400 #define RQSF_XFRD_DATA 0x0800 #define RQSF_GOT_STATUS 0x1000 #define RQSF_GOT_SENSE 0x2000 #define RQSF_XFER_COMPLETE 0x4000 /* * 2X00 specific State Flags * (same as 1X00 except RQSF_GOT_BUS/RQSF_GOT_TARGET are not available) */ #define RQSF_DATA_IN 0x0020 #define 
RQSF_DATA_OUT 0x0040 #define RQSF_STAG 0x0008 #define RQSF_OTAG 0x0004 #define RQSF_HTAG 0x0002 /* * 1X00 Status Flags */ #define RQSTF_DISCONNECT 0x0001 #define RQSTF_SYNCHRONOUS 0x0002 #define RQSTF_PARITY_ERROR 0x0004 #define RQSTF_BUS_RESET 0x0008 #define RQSTF_DEVICE_RESET 0x0010 #define RQSTF_ABORTED 0x0020 #define RQSTF_TIMEOUT 0x0040 #define RQSTF_NEGOTIATION 0x0080 /* * 2X00 specific state flags */ /* RQSF_SENT_CDB */ /* RQSF_XFRD_DATA */ /* RQSF_GOT_STATUS */ /* RQSF_XFER_COMPLETE */ /* * 2X00 specific status flags */ /* RQSTF_ABORTED */ /* RQSTF_TIMEOUT */ #define RQSTF_DMA_ERROR 0x0080 #define RQSTF_LOGOUT 0x2000 /* * Miscellaneous */ #ifndef ISP_EXEC_THROTTLE #define ISP_EXEC_THROTTLE 16 #endif /* * About Firmware returns an 'attribute' word in mailbox 6. * These attributes are for 2200 and 2300. */ -#define ISP_FW_ATTR_TMODE 0x01 -#define ISP_FW_ATTR_SCCLUN 0x02 -#define ISP_FW_ATTR_FABRIC 0x04 -#define ISP_FW_ATTR_CLASS2 0x08 -#define ISP_FW_ATTR_FCTAPE 0x10 -#define ISP_FW_ATTR_IP 0x20 -#define ISP_FW_ATTR_VI 0x40 -#define ISP_FW_ATTR_VI_SOLARIS 0x80 -#define ISP_FW_ATTR_2KLOGINS 0x100 /* XXX: just a guess */ +#define ISP_FW_ATTR_TMODE 0x0001 +#define ISP_FW_ATTR_SCCLUN 0x0002 +#define ISP_FW_ATTR_FABRIC 0x0004 +#define ISP_FW_ATTR_CLASS2 0x0008 +#define ISP_FW_ATTR_FCTAPE 0x0010 +#define ISP_FW_ATTR_IP 0x0020 +#define ISP_FW_ATTR_VI 0x0040 +#define ISP_FW_ATTR_VI_SOLARIS 0x0080 +#define ISP_FW_ATTR_2KLOGINS 0x0100 /* just a guess... 
*/ /* and these are for the 2400 */ -#define ISP2400_FW_ATTR_CLASS2 (1 << 0) -#define ISP2400_FW_ATTR_IP (1 << 1) -#define ISP2400_FW_ATTR_MULTIID (1 << 2) -#define ISP2400_FW_ATTR_SB2 (1 << 3) -#define ISP2400_FW_ATTR_T10CRC (1 << 4) -#define ISP2400_FW_ATTR_VI (1 << 5) -#define ISP2400_FW_ATTR_EXPFW (1 << 13) +#define ISP2400_FW_ATTR_CLASS2 0x0001 +#define ISP2400_FW_ATTR_IP 0x0002 +#define ISP2400_FW_ATTR_MULTIID 0x0004 +#define ISP2400_FW_ATTR_SB2 0x0008 +#define ISP2400_FW_ATTR_T10CRC 0x0010 +#define ISP2400_FW_ATTR_VI 0x0020 +#define ISP2400_FW_ATTR_EXPFW 0x2000 +#define ISP_CAP_TMODE(isp) \ + (IS_24XX(isp)? 1 : (isp->isp_fwattr & ISP_FW_ATTR_TMODE)) +#define ISP_CAP_SCCFW(isp) \ + (IS_24XX(isp)? 1 : (isp->isp_fwattr & ISP_FW_ATTR_SCCLUN)) +#define ISP_CAP_2KLOGIN(isp) \ + (IS_24XX(isp)? 1 : (isp->isp_fwattr & ISP_FW_ATTR_2KLOGINS)) +#define ISP_CAP_MULTI_ID(isp) \ + (IS_24XX(isp)? (isp->isp_fwattr & ISP2400_FW_ATTR_MULTIID) : 0) + +#define ISP_GET_VPIDX(isp, tag) \ + (ISP_CAP_MULTI_ID(isp) ? tag : 0) + /* * Reduced Interrupt Operation Response Queue Entries */ typedef struct { isphdr_t req_header; uint32_t req_handles[15]; } isp_rio1_t; typedef struct { isphdr_t req_header; uint16_t req_handles[30]; } isp_rio2_t; /* * FC (ISP2100/ISP2200/ISP2300/ISP2400) specific data structures */ /* * Initialization Control Block * * Version One (prime) format. 
*/ typedef struct { uint8_t icb_version; uint8_t icb_reserved0; uint16_t icb_fwoptions; uint16_t icb_maxfrmlen; uint16_t icb_maxalloc; uint16_t icb_execthrottle; uint8_t icb_retry_count; uint8_t icb_retry_delay; uint8_t icb_portname[8]; uint16_t icb_hardaddr; uint8_t icb_iqdevtype; uint8_t icb_logintime; uint8_t icb_nodename[8]; uint16_t icb_rqstout; uint16_t icb_rspnsin; uint16_t icb_rqstqlen; uint16_t icb_rsltqlen; uint16_t icb_rqstaddr[4]; uint16_t icb_respaddr[4]; uint16_t icb_lunenables; uint8_t icb_ccnt; uint8_t icb_icnt; uint16_t icb_lunetimeout; uint16_t icb_reserved1; uint16_t icb_xfwoptions; uint8_t icb_racctimer; uint8_t icb_idelaytimer; uint16_t icb_zfwoptions; uint16_t icb_reserved2[13]; } isp_icb_t; #define ICB_VERSION1 1 #define ICBOPT_EXTENDED 0x8000 #define ICBOPT_BOTH_WWNS 0x4000 #define ICBOPT_FULL_LOGIN 0x2000 #define ICBOPT_STOP_ON_QFULL 0x1000 /* 2200/2100 only */ #define ICBOPT_PREVLOOP 0x0800 #define ICBOPT_SRCHDOWN 0x0400 #define ICBOPT_NOLIP 0x0200 #define ICBOPT_PDBCHANGE_AE 0x0100 #define ICBOPT_INI_TGTTYPE 0x0080 #define ICBOPT_INI_ADISC 0x0040 #define ICBOPT_INI_DISABLE 0x0020 #define ICBOPT_TGT_ENABLE 0x0010 #define ICBOPT_FAST_POST 0x0008 #define ICBOPT_FULL_DUPLEX 0x0004 #define ICBOPT_FAIRNESS 0x0002 #define ICBOPT_HARD_ADDRESS 0x0001 #define ICBXOPT_NO_LOGOUT 0x8000 /* no logout on link failure */ #define ICBXOPT_FCTAPE_CCQ 0x4000 /* FC-Tape Command Queueing */ #define ICBXOPT_FCTAPE_CONFIRM 0x2000 #define ICBXOPT_FCTAPE 0x1000 #define ICBXOPT_CLASS2_ACK0 0x0200 #define ICBXOPT_CLASS2 0x0100 #define ICBXOPT_NO_PLAY 0x0080 /* don't play if can't get hard addr */ #define ICBXOPT_TOPO_MASK 0x0070 #define ICBXOPT_LOOP_ONLY 0x0000 #define ICBXOPT_PTP_ONLY 0x0010 #define ICBXOPT_LOOP_2_PTP 0x0020 #define ICBXOPT_PTP_2_LOOP 0x0030 /* * The lower 4 bits of the xfwoptions field are the OPERATION MODE bits. 
* RIO is not defined for the 23XX cards (just 2200) */ #define ICBXOPT_RIO_OFF 0 #define ICBXOPT_RIO_16BIT 1 #define ICBXOPT_RIO_32BIT 2 #define ICBXOPT_RIO_16BIT_IOCB 3 #define ICBXOPT_RIO_32BIT_IOCB 4 #define ICBXOPT_ZIO 5 #define ICBXOPT_TIMER_MASK 0x7 #define ICBZOPT_RATE_MASK 0xC000 #define ICBZOPT_RATE_ONEGB 0x0000 #define ICBZOPT_RATE_AUTO 0x8000 #define ICBZOPT_RATE_TWOGB 0x4000 #define ICBZOPT_50_OHM 0x2000 #define ICBZOPT_ENA_OOF 0x0040 /* out of order frame handling */ #define ICBZOPT_RSPSZ_MASK 0x0030 #define ICBZOPT_RSPSZ_24 0x0000 #define ICBZOPT_RSPSZ_12 0x0010 #define ICBZOPT_RSPSZ_24A 0x0020 #define ICBZOPT_RSPSZ_32 0x0030 #define ICBZOPT_SOFTID 0x0002 #define ICBZOPT_ENA_RDXFR_RDY 0x0001 /* 2400 F/W options */ #define ICB2400_OPT1_BOTH_WWNS 0x00004000 #define ICB2400_OPT1_FULL_LOGIN 0x00002000 #define ICB2400_OPT1_PREVLOOP 0x00000800 #define ICB2400_OPT1_SRCHDOWN 0x00000400 #define ICB2400_OPT1_NOLIP 0x00000200 #define ICB2400_OPT1_INI_DISABLE 0x00000020 #define ICB2400_OPT1_TGT_ENABLE 0x00000010 #define ICB2400_OPT1_FULL_DUPLEX 0x00000004 #define ICB2400_OPT1_FAIRNESS 0x00000002 #define ICB2400_OPT1_HARD_ADDRESS 0x00000001 #define ICB2400_OPT2_FCTAPE 0x00001000 #define ICB2400_OPT2_CLASS2_ACK0 0x00000200 #define ICB2400_OPT2_CLASS2 0x00000100 #define ICB2400_OPT2_NO_PLAY 0x00000080 #define ICB2400_OPT2_TOPO_MASK 0x00000070 #define ICB2400_OPT2_LOOP_ONLY 0x00000000 #define ICB2400_OPT2_PTP_ONLY 0x00000010 #define ICB2400_OPT2_LOOP_2_PTP 0x00000020 #define ICB2400_OPT2_PTP_2_LOOP 0x00000030 #define ICB2400_OPT2_TIMER_MASK 0x00000007 #define ICB2400_OPT2_ZIO 0x00000005 #define ICB2400_OPT2_ZIO1 0x00000006 #define ICB2400_OPT3_75_OHM 0x00010000 #define ICB2400_OPT3_RATE_MASK 0x0000E000 #define ICB2400_OPT3_RATE_ONEGB 0x00000000 #define ICB2400_OPT3_RATE_TWOGB 0x00002000 #define ICB2400_OPT3_RATE_AUTO 0x00004000 #define ICB2400_OPT3_RATE_FOURGB 0x00006000 +#define ICB2400_OPT3_RATE_EIGHTGB 0x00008000 #define ICB2400_OPT3_ENA_OOF_XFRDY 0x00000200 
#define ICB2400_OPT3_NO_LOCAL_PLOGI 0x00000080 #define ICB2400_OPT3_ENA_OOF 0x00000040 /* note that a response size flag of zero is reserved! */ #define ICB2400_OPT3_RSPSZ_MASK 0x00000030 #define ICB2400_OPT3_RSPSZ_12 0x00000010 #define ICB2400_OPT3_RSPSZ_24 0x00000020 #define ICB2400_OPT3_RSPSZ_32 0x00000030 #define ICB2400_OPT3_SOFTID 0x00000002 #define ICB_MIN_FRMLEN 256 #define ICB_MAX_FRMLEN 2112 #define ICB_DFLT_FRMLEN 1024 #define ICB_DFLT_ALLOC 256 #define ICB_DFLT_THROTTLE 16 #define ICB_DFLT_RDELAY 5 #define ICB_DFLT_RCOUNT 3 #define ICB_LOGIN_TOV 30 #define ICB_LUN_ENABLE_TOV 180 /* * And somebody at QLogic had a great idea that you could just change * the structure *and* keep the version number the same as the other cards. */ typedef struct { uint16_t icb_version; uint16_t icb_reserved0; uint16_t icb_maxfrmlen; uint16_t icb_execthrottle; uint16_t icb_xchgcnt; uint16_t icb_hardaddr; uint8_t icb_portname[8]; uint8_t icb_nodename[8]; uint16_t icb_rspnsin; uint16_t icb_rqstout; uint16_t icb_retry_count; uint16_t icb_priout; uint16_t icb_rsltqlen; uint16_t icb_rqstqlen; uint16_t icb_ldn_nols; uint16_t icb_prqstqlen; uint16_t icb_rqstaddr[4]; uint16_t icb_respaddr[4]; uint16_t icb_priaddr[4]; uint16_t icb_reserved1[4]; uint16_t icb_atio_in; uint16_t icb_atioqlen; uint16_t icb_atioqaddr[4]; uint16_t icb_idelaytimer; uint16_t icb_logintime; uint32_t icb_fwoptions1; uint32_t icb_fwoptions2; uint32_t icb_fwoptions3; uint16_t icb_reserved2[12]; } isp_icb_2400_t; #define RQRSP_ADDR0015 0 #define RQRSP_ADDR1631 1 #define RQRSP_ADDR3247 2 #define RQRSP_ADDR4863 3 #define ICB_NNM0 7 #define ICB_NNM1 6 #define ICB_NNM2 5 #define ICB_NNM3 4 #define ICB_NNM4 3 #define ICB_NNM5 2 #define ICB_NNM6 1 #define ICB_NNM7 0 #define MAKE_NODE_NAME_FROM_WWN(array, wwn) \ array[ICB_NNM0] = (uint8_t) ((wwn >> 0) & 0xff), \ array[ICB_NNM1] = (uint8_t) ((wwn >> 8) & 0xff), \ array[ICB_NNM2] = (uint8_t) ((wwn >> 16) & 0xff), \ array[ICB_NNM3] = (uint8_t) ((wwn >> 24) & 0xff), \ 
array[ICB_NNM4] = (uint8_t) ((wwn >> 32) & 0xff), \ array[ICB_NNM5] = (uint8_t) ((wwn >> 40) & 0xff), \ array[ICB_NNM6] = (uint8_t) ((wwn >> 48) & 0xff), \ array[ICB_NNM7] = (uint8_t) ((wwn >> 56) & 0xff) #define MAKE_WWN_FROM_NODE_NAME(wwn, array) \ wwn = ((uint64_t) array[ICB_NNM0]) | \ ((uint64_t) array[ICB_NNM1] << 8) | \ ((uint64_t) array[ICB_NNM2] << 16) | \ ((uint64_t) array[ICB_NNM3] << 24) | \ ((uint64_t) array[ICB_NNM4] << 32) | \ ((uint64_t) array[ICB_NNM5] << 40) | \ ((uint64_t) array[ICB_NNM6] << 48) | \ ((uint64_t) array[ICB_NNM7] << 56) + /* + * For MULTI_ID firmware, this describes a + * virtual port entity for getting status. + */ +typedef struct { + uint16_t vp_port_status; + uint8_t vp_port_options; + uint8_t vp_port_loopid; + uint8_t vp_port_portname[8]; + uint8_t vp_port_nodename[8]; + uint16_t vp_port_portid_lo; /* not present when trailing icb */ + uint16_t vp_port_portid_hi; /* not present when trailing icb */ +} vp_port_info_t; + +#define ICB2400_VPOPT_TGT_DISABLE 0x00000020 /* disable target mode */ +#define ICB2400_VPOPT_INI_ENABLE 0x00000010 /* enable initiator mode */ +#define ICB2400_VPOPT_ENABLED 0x00000008 +#define ICB2400_VPOPT_NOPLAY 0x00000004 +#define ICB2400_VPOPT_PREVLOOP 0x00000002 +#define ICB2400_VPOPT_HARD_ADDRESS 0x00000001 + +#define ICB2400_VPOPT_WRITE_SIZE 20 + +/* + * For MULTI_ID firmware, we append this structure + * to the isp_icb_2400_t above, followed by a list of + * structures that are *most* of the vp_port_info_t. 
+ */ +typedef struct { + uint16_t vp_count; + uint16_t vp_global_options; +} isp_icb_2400_vpinfo_t; + +#define ICB2400_VPINFO_OFF 0x80 /* offset from start of ICB */ +#define ICB2400_VPINFO_PORT_OFF(chan) \ + ICB2400_VPINFO_OFF + \ + sizeof (isp_icb_2400_vpinfo_t) + ((chan - 1) * ICB2400_VPOPT_WRITE_SIZE) + +#define ICB2400_VPGOPT_MID_DISABLE 0x02 + +typedef struct { + isphdr_t vp_ctrl_hdr; + uint32_t vp_ctrl_handle; + uint16_t vp_ctrl_index_fail; + uint16_t vp_ctrl_status; + uint16_t vp_ctrl_command; + uint16_t vp_ctrl_vp_count; + uint16_t vp_ctrl_idmap[8]; + uint8_t vp_ctrl_reserved[32]; +} vp_ctrl_info_t; + +#define VP_CTRL_CMD_ENABLE_VP 0 +#define VP_CTRL_CMD_DISABLE_VP 8 +#define VP_CTRL_CMD_DISABLE_VP_REINIT_LINK 9 +#define VP_CTRL_CMD_DISABLE_VP_LOGO 0xA + +/* + * We can use this structure for modifying either one or two VP ports after initialization + */ +typedef struct { + isphdr_t vp_mod_hdr; + uint32_t vp_mod_hdl; + uint16_t vp_mod_reserved0; + uint16_t vp_mod_status; + uint8_t vp_mod_cmd; + uint8_t vp_mod_cnt; + uint8_t vp_mod_idx0; + uint8_t vp_mod_idx1; + struct { + uint8_t options; + uint8_t loopid; + uint16_t reserved1; + uint8_t wwpn[8]; + uint8_t wwnn[8]; + } vp_mod_ports[2]; + uint8_t vp_mod_reserved2[8]; +} vp_modify_t; + +#define VP_STS_OK 0x00 +#define VP_STS_ERR 0x01 +#define VP_CNT_ERR 0x02 +#define VP_GEN_ERR 0x03 +#define VP_IDX_ERR 0x04 +#define VP_STS_BSY 0x05 + +#define VP_MODIFY_VP 0x00 +#define VP_MODIFY_ENA 0x01 + +/* * Port Data Base Element */ typedef struct { uint16_t pdb_options; uint8_t pdb_mstate; uint8_t pdb_sstate; uint8_t pdb_hardaddr_bits[4]; uint8_t pdb_portid_bits[4]; uint8_t pdb_nodename[8]; uint8_t pdb_portname[8]; uint16_t pdb_execthrottle; uint16_t pdb_exec_count; uint8_t pdb_retry_count; uint8_t pdb_retry_delay; uint16_t pdb_resalloc; uint16_t pdb_curalloc; uint16_t pdb_qhead; uint16_t pdb_qtail; uint16_t pdb_tl_next; uint16_t pdb_tl_last; uint16_t pdb_features; /* PLOGI, Common Service */ uint16_t pdb_pconcurrnt; /* 
PLOGI, Common Service */ uint16_t pdb_roi; /* PLOGI, Common Service */ uint8_t pdb_target; uint8_t pdb_initiator; /* PLOGI, Class 3 Control Flags */ uint16_t pdb_rdsiz; /* PLOGI, Class 3 */ uint16_t pdb_ncseq; /* PLOGI, Class 3 */ uint16_t pdb_noseq; /* PLOGI, Class 3 */ uint16_t pdb_labrtflg; uint16_t pdb_lstopflg; uint16_t pdb_sqhead; uint16_t pdb_sqtail; uint16_t pdb_ptimer; uint16_t pdb_nxt_seqid; uint16_t pdb_fcount; uint16_t pdb_prli_len; uint16_t pdb_prli_svc0; uint16_t pdb_prli_svc3; uint16_t pdb_loopid; uint16_t pdb_il_ptr; uint16_t pdb_sl_ptr; } isp_pdb_21xx_t; #define PDB_OPTIONS_XMITTING (1<<11) #define PDB_OPTIONS_LNKXMIT (1<<10) #define PDB_OPTIONS_ABORTED (1<<9) #define PDB_OPTIONS_ADISC (1<<1) #define PDB_STATE_DISCOVERY 0 #define PDB_STATE_WDISC_ACK 1 #define PDB_STATE_PLOGI 2 #define PDB_STATE_PLOGI_ACK 3 #define PDB_STATE_PRLI 4 #define PDB_STATE_PRLI_ACK 5 #define PDB_STATE_LOGGED_IN 6 #define PDB_STATE_PORT_UNAVAIL 7 #define PDB_STATE_PRLO 8 #define PDB_STATE_PRLO_ACK 9 #define PDB_STATE_PLOGO 10 #define PDB_STATE_PLOG_ACK 11 #define SVC3_TGT_ROLE 0x10 #define SVC3_INI_ROLE 0x20 #define SVC3_ROLE_MASK 0x30 #define SVC3_ROLE_SHIFT 4 #define BITS2WORD(x) ((x)[0] << 16 | (x)[3] << 8 | (x)[2]) #define BITS2WORD_24XX(x) ((x)[0] << 16 | (x)[1] << 8 | (x)[2]) /* * Port Data Base Element- 24XX cards */ typedef struct { uint16_t pdb_flags; uint8_t pdb_curstate; uint8_t pdb_laststate; uint8_t pdb_hardaddr_bits[4]; uint8_t pdb_portid_bits[4]; #define pdb_nxt_seqid_2400 pdb_portid_bits[3] uint16_t pdb_retry_timer; uint16_t pdb_handle; uint16_t pdb_rcv_dsize; uint16_t pdb_reserved0; uint16_t pdb_prli_svc0; uint16_t pdb_prli_svc3; uint8_t pdb_portname[8]; uint8_t pdb_nodename[8]; uint8_t pdb_reserved1[24]; } isp_pdb_24xx_t; #define PDB2400_TID_SUPPORTED 0x4000 #define PDB2400_FC_TAPE 0x0080 #define PDB2400_CLASS2_ACK0 0x0040 #define PDB2400_FCP_CONF 0x0020 #define PDB2400_CLASS2 0x0010 #define PDB2400_ADDR_VALID 0x0002 +#define PDB2400_STATE_PLOGI_PEND 0x03 
+#define PDB2400_STATE_PLOGI_DONE 0x04 +#define PDB2400_STATE_PRLI_PEND 0x05 +#define PDB2400_STATE_LOGGED_IN 0x06 +#define PDB2400_STATE_PORT_UNAVAIL 0x07 +#define PDB2400_STATE_PRLO_PEND 0x09 +#define PDB2400_STATE_LOGO_PEND 0x0B + /* * Common elements from the above two structures that are actually useful to us. */ typedef struct { uint16_t handle; uint16_t reserved; uint32_t s3_role : 8, portid : 24; uint8_t portname[8]; uint8_t nodename[8]; } isp_pdb_t; /* + * Port Database Changed Async Event information for 24XX cards + */ +#define PDB24XX_AE_OK 0x00 +#define PDB24XX_AE_IMPL_LOGO_1 0x01 +#define PDB24XX_AE_IMPL_LOGO_2 0x02 +#define PDB24XX_AE_IMPL_LOGO_3 0x03 +#define PDB24XX_AE_PLOGI_RCVD 0x04 +#define PDB24XX_AE_PLOGI_RJT 0x05 +#define PDB24XX_AE_PRLI_RCVD 0x06 +#define PDB24XX_AE_PRLI_RJT 0x07 +#define PDB24XX_AE_TPRLO 0x08 +#define PDB24XX_AE_TPRLO_RJT 0x09 +#define PDB24XX_AE_PRLO_RCVD 0x0a +#define PDB24XX_AE_LOGO_RCVD 0x0b +#define PDB24XX_AE_TOPO_CHG 0x0c +#define PDB24XX_AE_NPORT_CHG 0x0d +#define PDB24XX_AE_FLOGI_RJT 0x0e +#define PDB24XX_AE_BAD_FANN 0x0f +#define PDB24XX_AE_FLOGI_TIMO 0x10 +#define PDB24XX_AE_ABX_LOGO 0x11 +#define PDB24XX_AE_PLOGI_DONE 0x12 +#define PDB24XX_AE_PRLI_DONJE 0x13 +#define PDB24XX_AE_OPN_1 0x14 +#define PDB24XX_AE_OPN_2 0x15 +#define PDB24XX_AE_TXERR 0x16 +#define PDB24XX_AE_FORCED_LOGO 0x17 +#define PDB24XX_AE_DISC_TIMO 0x18 + +/* * Genericized Port Login/Logout software structure */ typedef struct { uint16_t handle; + uint16_t channel; uint32_t flags : 8, portid : 24; } isp_plcmd_t; /* the flags to use are those for PLOGX_FLG_* below */ /* * ISP24XX- Login/Logout Port IOCB */ typedef struct { isphdr_t plogx_header; uint32_t plogx_handle; uint16_t plogx_status; uint16_t plogx_nphdl; uint16_t plogx_flags; uint16_t plogx_vphdl; /* low 8 bits */ uint16_t plogx_portlo; /* low 16 bits */ uint16_t plogx_rspsz_porthi; struct { uint16_t lo16; uint16_t hi16; } plogx_ioparm[11]; } isp_plogx_t; #define PLOGX_STATUS_OK 0x00 
#define PLOGX_STATUS_UNAVAIL 0x28 #define PLOGX_STATUS_LOGOUT 0x29 #define PLOGX_STATUS_IOCBERR 0x31 #define PLOGX_IOCBERR_NOLINK 0x01 #define PLOGX_IOCBERR_NOIOCB 0x02 #define PLOGX_IOCBERR_NOXGHG 0x03 #define PLOGX_IOCBERR_FAILED 0x04 /* further info in IOPARM 1 */ #define PLOGX_IOCBERR_NOFABRIC 0x05 #define PLOGX_IOCBERR_NOTREADY 0x07 #define PLOGX_IOCBERR_NOLOGIN 0x08 /* further info in IOPARM 1 */ #define PLOGX_IOCBERR_NOPCB 0x0a #define PLOGX_IOCBERR_REJECT 0x18 /* further info in IOPARM 1 */ #define PLOGX_IOCBERR_EINVAL 0x19 /* further info in IOPARM 1 */ #define PLOGX_IOCBERR_PORTUSED 0x1a /* further info in IOPARM 1 */ #define PLOGX_IOCBERR_HNDLUSED 0x1b /* further info in IOPARM 1 */ #define PLOGX_IOCBERR_NOHANDLE 0x1c #define PLOGX_IOCBERR_NOFLOGI 0x1f /* further info in IOPARM 1 */ #define PLOGX_FLG_CMD_MASK 0xf #define PLOGX_FLG_CMD_PLOGI 0 #define PLOGX_FLG_CMD_PRLI 1 #define PLOGX_FLG_CMD_PDISC 2 #define PLOGX_FLG_CMD_LOGO 8 #define PLOGX_FLG_CMD_PRLO 9 #define PLOGX_FLG_CMD_TPRLO 10 #define PLOGX_FLG_COND_PLOGI 0x10 /* if with PLOGI */ #define PLOGX_FLG_IMPLICIT 0x10 /* if with LOGO, PRLO, TPRLO */ #define PLOGX_FLG_SKIP_PRLI 0x20 /* if with PLOGI */ #define PLOGX_FLG_IMPLICIT_LOGO_ALL 0x20 /* if with LOGO */ #define PLOGX_FLG_EXPLICIT_LOGO 0x40 /* if with LOGO */ #define PLOGX_FLG_COMMON_FEATURES 0x80 /* if with PLOGI */ #define PLOGX_FLG_FREE_NPHDL 0x80 /* if with LOGO */ #define PLOGX_FLG_CLASS2 0x100 /* if with PLOGI */ #define PLOGX_FLG_FCP2_OVERRIDE 0x200 /* if with PRLO, PRLI */ /* + * Report ID Acquisition (24XX multi-id firmware) + */ +typedef struct { + isphdr_t ridacq_hdr; + uint32_t ridacq_handle; + union { + struct { + uint8_t ridacq_vp_acquired; + uint8_t ridacq_vp_setup; + uint16_t ridacq_reserved0; + } type0; /* type 0 */ + struct { + uint16_t ridacq_vp_count; + uint8_t ridacq_vp_index; + uint8_t ridacq_vp_status; + } type1; /* type 1 */ + } un; + uint16_t ridacq_vp_port_lo; + uint8_t ridacq_vp_port_hi; + uint8_t 
ridacq_format; /* 0 or 1 */ + uint16_t ridacq_map[8]; + uint8_t ridacq_reserved1[32]; +} isp_ridacq_t; + +#define RIDACQ_STS_COMPLETE 0 +#define RIDACQ_STS_UNACQUIRED 1 +#define RIDACQ_STS_CHANGED 20 + + +/* * Simple Name Server Data Structures */ #define SNS_GA_NXT 0x100 #define SNS_GPN_ID 0x112 #define SNS_GNN_ID 0x113 #define SNS_GFF_ID 0x11F #define SNS_GID_FT 0x171 #define SNS_RFT_ID 0x217 typedef struct { uint16_t snscb_rblen; /* response buffer length (words) */ uint16_t snscb_reserved0; uint16_t snscb_addr[4]; /* response buffer address */ uint16_t snscb_sblen; /* subcommand buffer length (words) */ uint16_t snscb_reserved1; uint16_t snscb_data[1]; /* variable data */ } sns_screq_t; /* Subcommand Request Structure */ typedef struct { uint16_t snscb_rblen; /* response buffer length (words) */ uint16_t snscb_reserved0; uint16_t snscb_addr[4]; /* response buffer address */ uint16_t snscb_sblen; /* subcommand buffer length (words) */ uint16_t snscb_reserved1; uint16_t snscb_cmd; uint16_t snscb_reserved2; uint32_t snscb_reserved3; uint32_t snscb_port; } sns_ga_nxt_req_t; #define SNS_GA_NXT_REQ_SIZE (sizeof (sns_ga_nxt_req_t)) typedef struct { uint16_t snscb_rblen; /* response buffer length (words) */ uint16_t snscb_reserved0; uint16_t snscb_addr[4]; /* response buffer address */ uint16_t snscb_sblen; /* subcommand buffer length (words) */ uint16_t snscb_reserved1; uint16_t snscb_cmd; uint16_t snscb_reserved2; uint32_t snscb_reserved3; uint32_t snscb_portid; } sns_gxn_id_req_t; #define SNS_GXN_ID_REQ_SIZE (sizeof (sns_gxn_id_req_t)) typedef struct { uint16_t snscb_rblen; /* response buffer length (words) */ uint16_t snscb_reserved0; uint16_t snscb_addr[4]; /* response buffer address */ uint16_t snscb_sblen; /* subcommand buffer length (words) */ uint16_t snscb_reserved1; uint16_t snscb_cmd; uint16_t snscb_mword_div_2; uint32_t snscb_reserved3; uint32_t snscb_fc4_type; } sns_gid_ft_req_t; #define SNS_GID_FT_REQ_SIZE (sizeof (sns_gid_ft_req_t)) typedef struct { 
uint16_t snscb_rblen; /* response buffer length (words) */ uint16_t snscb_reserved0; uint16_t snscb_addr[4]; /* response buffer address */ uint16_t snscb_sblen; /* subcommand buffer length (words) */ uint16_t snscb_reserved1; uint16_t snscb_cmd; uint16_t snscb_reserved2; uint32_t snscb_reserved3; uint32_t snscb_port; uint32_t snscb_fc4_types[8]; } sns_rft_id_req_t; #define SNS_RFT_ID_REQ_SIZE (sizeof (sns_rft_id_req_t)) typedef struct { ct_hdr_t snscb_cthdr; uint8_t snscb_port_type; uint8_t snscb_port_id[3]; uint8_t snscb_portname[8]; uint16_t snscb_data[1]; /* variable data */ } sns_scrsp_t; /* Subcommand Response Structure */ typedef struct { ct_hdr_t snscb_cthdr; uint8_t snscb_port_type; uint8_t snscb_port_id[3]; uint8_t snscb_portname[8]; uint8_t snscb_pnlen; /* symbolic port name length */ uint8_t snscb_pname[255]; /* symbolic port name */ uint8_t snscb_nodename[8]; uint8_t snscb_nnlen; /* symbolic node name length */ uint8_t snscb_nname[255]; /* symbolic node name */ uint8_t snscb_ipassoc[8]; uint8_t snscb_ipaddr[16]; uint8_t snscb_svc_class[4]; uint8_t snscb_fc4_types[32]; uint8_t snscb_fpname[8]; uint8_t snscb_reserved; uint8_t snscb_hardaddr[3]; } sns_ga_nxt_rsp_t; /* Subcommand Response Structure */ #define SNS_GA_NXT_RESP_SIZE (sizeof (sns_ga_nxt_rsp_t)) typedef struct { ct_hdr_t snscb_cthdr; uint8_t snscb_wwn[8]; } sns_gxn_id_rsp_t; #define SNS_GXN_ID_RESP_SIZE (sizeof (sns_gxn_id_rsp_t)) typedef struct { ct_hdr_t snscb_cthdr; uint32_t snscb_fc4_features[32]; } sns_gff_id_rsp_t; #define SNS_GFF_ID_RESP_SIZE (sizeof (sns_gff_id_rsp_t)) typedef struct { ct_hdr_t snscb_cthdr; struct { uint8_t control; uint8_t portid[3]; } snscb_ports[1]; } sns_gid_ft_rsp_t; #define SNS_GID_FT_RESP_SIZE(x) ((sizeof (sns_gid_ft_rsp_t)) + ((x - 1) << 2)) #define SNS_RFT_ID_RESP_SIZE (sizeof (ct_hdr_t)) /* * Other Misc Structures */ /* ELS Pass Through */ typedef struct { isphdr_t els_hdr; uint32_t els_handle; uint16_t els_status; uint16_t els_nphdl; uint16_t 
els_xmit_dsd_count; /* outgoing only */ uint8_t els_vphdl; uint8_t els_sof; uint32_t els_rxid; uint16_t els_recv_dsd_count; /* outgoing only */ uint8_t els_opcode; uint8_t els_reserved1; uint8_t els_did_lo; uint8_t els_did_mid; uint8_t els_did_hi; uint8_t els_reserved2; uint16_t els_reserved3; uint16_t els_ctl_flags; union { struct { uint32_t _els_bytecnt; uint32_t _els_subcode1; uint32_t _els_subcode2; uint8_t _els_reserved4[20]; } in; struct { uint32_t _els_recv_bytecnt; uint32_t _els_xmit_bytecnt; uint32_t _els_xmit_dsd_length; uint16_t _els_xmit_dsd_a1500; uint16_t _els_xmit_dsd_a3116; uint16_t _els_xmit_dsd_a4732; uint16_t _els_xmit_dsd_a6348; uint32_t _els_recv_dsd_length; uint16_t _els_recv_dsd_a1500; uint16_t _els_recv_dsd_a3116; uint16_t _els_recv_dsd_a4732; uint16_t _els_recv_dsd_a6348; } out; } inout; #define els_bytecnt inout.in._els_bytecnt #define els_subcode1 inout.in._els_subcode1 #define els_subcode2 inout.in._els_subcode2 #define els_reserved4 inout.in._els_reserved4 #define els_recv_bytecnt inout.out._els_recv_bytecnt #define els_xmit_bytecnt inout.out._els_xmit_bytecnt #define els_xmit_dsd_length inout.out._els_xmit_dsd_length #define els_xmit_dsd_a1500 inout.out._els_xmit_dsd_a1500 #define els_xmit_dsd_a3116 inout.out._els_xmit_dsd_a3116 #define els_xmit_dsd_a4732 inout.out._els_xmit_dsd_a4732 #define els_xmit_dsd_a6348 inout.out._els_xmit_dsd_a6348 #define els_recv_dsd_length inout.out._els_recv_dsd_length #define els_recv_dsd_a1500 inout.out._els_recv_dsd_a1500 #define els_recv_dsd_a3116 inout.out._els_recv_dsd_a3116 #define els_recv_dsd_a4732 inout.out._els_recv_dsd_a4732 #define els_recv_dsd_a6348 inout.out._els_recv_dsd_a6348 } els_t; /* - * A handy package structure for running FC-SCSI commands via RUN IOCB A64. 
+ * A handy package structure for running FC-SCSI commands internally */ typedef struct { uint16_t handle; uint16_t lun; - uint32_t portid; + uint32_t + channel : 8, + portid : 24; uint32_t timeout; union { struct { uint32_t data_length; - uint8_t do_read; - uint8_t pad[3]; + uint32_t + no_wait : 1, + do_read : 1; uint8_t cdb[16]; void *data_ptr; } beg; struct { uint32_t data_residual; uint8_t status; uint8_t pad; uint16_t sense_length; uint8_t sense_data[32]; } end; } fcd; } isp_xcmd_t; + +/* + * Target Mode related definitions + */ +#define QLTM_SENSELEN 18 /* non-FC cards only */ +#define QLTM_SVALID 0x80 + +/* + * Structure for Enable Lun and Modify Lun queue entries + */ +typedef struct { + isphdr_t le_header; + uint32_t le_reserved; + uint8_t le_lun; + uint8_t le_rsvd; + uint8_t le_ops; /* Modify LUN only */ + uint8_t le_tgt; /* Not for FC */ + uint32_t le_flags; /* Not for FC */ + uint8_t le_status; + uint8_t le_reserved2; + uint8_t le_cmd_count; + uint8_t le_in_count; + uint8_t le_cdb6len; /* Not for FC */ + uint8_t le_cdb7len; /* Not for FC */ + uint16_t le_timeout; + uint16_t le_reserved3[20]; +} lun_entry_t; + +/* + * le_flags values + */ +#define LUN_TQAE 0x00000002 /* bit1 Tagged Queue Action Enable */ +#define LUN_DSSM 0x01000000 /* bit24 Disable Sending SDP Message */ +#define LUN_DISAD 0x02000000 /* bit25 Disable autodisconnect */ +#define LUN_DM 0x40000000 /* bit30 Disconnects Mandatory */ + +/* + * le_ops values + */ +#define LUN_CCINCR 0x01 /* increment command count */ +#define LUN_CCDECR 0x02 /* decrement command count */ +#define LUN_ININCR 0x40 /* increment immed. notify count */ +#define LUN_INDECR 0x80 /* decrement immed. 
notify count */ + +/* + * le_status values + */ +#define LUN_OK 0x01 /* we be rockin' */ +#define LUN_ERR 0x04 /* request completed with error */ +#define LUN_INVAL 0x06 /* invalid request */ +#define LUN_NOCAP 0x16 /* can't provide requested capability */ +#define LUN_ENABLED 0x3E /* LUN already enabled */ + +/* + * Immediate Notify Entry structure + */ +#define IN_MSGLEN 8 /* 8 bytes */ +#define IN_RSVDLEN 8 /* 8 words */ +typedef struct { + isphdr_t in_header; + uint32_t in_reserved; + uint8_t in_lun; /* lun */ + uint8_t in_iid; /* initiator */ + uint8_t in_reserved2; + uint8_t in_tgt; /* target */ + uint32_t in_flags; + uint8_t in_status; + uint8_t in_rsvd2; + uint8_t in_tag_val; /* tag value */ + uint8_t in_tag_type; /* tag type */ + uint16_t in_seqid; /* sequence id */ + uint8_t in_msg[IN_MSGLEN]; /* SCSI message bytes */ + uint16_t in_reserved3[IN_RSVDLEN]; + uint8_t in_sense[QLTM_SENSELEN];/* suggested sense data */ +} in_entry_t; + +typedef struct { + isphdr_t in_header; + uint32_t in_reserved; + uint8_t in_lun; /* lun */ + uint8_t in_iid; /* initiator */ + uint16_t in_scclun; + uint32_t in_reserved2; + uint16_t in_status; + uint16_t in_task_flags; + uint16_t in_seqid; /* sequence id */ +} in_fcentry_t; + +typedef struct { + isphdr_t in_header; + uint32_t in_reserved; + uint16_t in_iid; /* initiator */ + uint16_t in_scclun; + uint32_t in_reserved2; + uint16_t in_status; + uint16_t in_task_flags; + uint16_t in_seqid; /* sequence id */ +} in_fcentry_e_t; + +/* + * Values for the in_status field + */ +#define IN_REJECT 0x0D /* Message Reject message received */ +#define IN_RESET 0x0E /* Bus Reset occurred */ +#define IN_NO_RCAP 0x16 /* requested capability not available */ +#define IN_IDE_RECEIVED 0x33 /* Initiator Detected Error msg received */ +#define IN_RSRC_UNAVAIL 0x34 /* resource unavailable */ +#define IN_MSG_RECEIVED 0x36 /* SCSI message received */ +#define IN_ABORT_TASK 0x20 /* task named in RX_ID is being aborted (FC) */ +#define IN_PORT_LOGOUT 
0x29 /* port has logged out (FC) */ +#define IN_PORT_CHANGED 0x2A /* port changed */ +#define IN_GLOBAL_LOGO 0x2E /* all ports logged out */ +#define IN_NO_NEXUS 0x3B /* Nexus not established */ + +/* + * Values for the in_task_flags field- should only get one at a time! + */ +#define TASK_FLAGS_RESERVED_MASK (0xe700) +#define TASK_FLAGS_CLEAR_ACA (1<<14) +#define TASK_FLAGS_TARGET_RESET (1<<13) +#define TASK_FLAGS_LUN_RESET (1<<12) +#define TASK_FLAGS_CLEAR_TASK_SET (1<<10) +#define TASK_FLAGS_ABORT_TASK_SET (1<<9) + +/* + * ISP24XX Immediate Notify + */ +typedef struct { + isphdr_t in_header; + uint32_t in_reserved; + uint16_t in_nphdl; + uint16_t in_reserved1; + uint16_t in_flags; + uint16_t in_srr_rxid; + uint16_t in_status; + uint8_t in_status_subcode; + uint8_t in_reserved2; + uint32_t in_rxid; + uint16_t in_srr_reloff_lo; + uint16_t in_srr_reloff_hi; + uint16_t in_srr_iu; + uint16_t in_srr_oxid; + /* + * If bit 2 is set in in_flags, the following + * two tags are valid. If the received ELS is + * a LOGO, then these tags contain the N Port ID + * from the LOGO payload. If the received ELS + * request is TPRLO, these tags contain the + * Third Party Originator N Port ID. + */ + uint16_t in_nport_id_hi; + uint8_t in_nport_id_lo; + uint8_t in_reserved3; + /* + * If bit 2 is set in in_flags, the following + * tag is valid. If the received ELS is a LOGO, + * then this tag contains the n-port handle + * from the LOGO payload. If the received ELS + * request is TPRLO, this tag contain the + * n-port handle for the Third Party Originator. 
+ */ + uint16_t in_np_handle; + uint8_t in_reserved4[12]; + uint8_t in_reserved5; + uint8_t in_vpidx; + uint32_t in_reserved6; + uint16_t in_portid_lo; + uint8_t in_portid_hi; + uint8_t in_reserved7; + uint16_t in_reserved8; + uint16_t in_oxid; +} in_fcentry_24xx_t; + +#define IN24XX_FLAG_PUREX_IOCB 0x1 +#define IN24XX_FLAG_GLOBAL_LOGOUT 0x2 +#define IN24XX_FLAG_NPHDL_VALID 0x4 + +#define IN24XX_LIP_RESET 0x0E +#define IN24XX_LINK_RESET 0x0F +#define IN24XX_PORT_LOGOUT 0x29 +#define IN24XX_PORT_CHANGED 0x2A +#define IN24XX_LINK_FAILED 0x2E +#define IN24XX_SRR_RCVD 0x45 +#define IN24XX_ELS_RCVD 0x46 /* + * login-affecting ELS received- check + * subcode for specific opcode + */ + +/* + * For f/w > 4.0.25, these offsets in the Immediate Notify contain + * the WWNN/WWPN if the ELS is PLOGI, PDISC or ADISC. The WWN is in + * Big Endian format. + */ +#define IN24XX_PLOGI_WWNN_OFF 0x20 +#define IN24XX_PLOGI_WWPN_OFF 0x28 + +/* + * For f/w > 4.0.25, this offset in the Immediate Notify contains + * the WWPN if the ELS is LOGO. The WWN is in Big Endian format. 
+ */ +#define IN24XX_LOGO_WWPN_OFF 0x28 + +/* + * Immediate Notify Status Subcodes for IN24XX_PORT_LOGOUT + */ +#define IN24XX_PORT_LOGOUT_PDISC_TMO 0x00 +#define IN24XX_PORT_LOGOUT_UXPR_DISC 0x01 +#define IN24XX_PORT_LOGOUT_OWN_OPN 0x02 +#define IN24XX_PORT_LOGOUT_OWN_OPN_SFT 0x03 +#define IN24XX_PORT_LOGOUT_ABTS_TMO 0x04 +#define IN24XX_PORT_LOGOUT_DISC_RJT 0x05 +#define IN24XX_PORT_LOGOUT_LOGIN_NEEDED 0x06 +#define IN24XX_PORT_LOGOUT_BAD_DISC 0x07 +#define IN24XX_PORT_LOGOUT_LOST_ALPA 0x08 +#define IN24XX_PORT_LOGOUT_XMIT_FAILURE 0x09 + +/* + * Immediate Notify Status Subcodes for IN24XX_PORT_CHANGED + */ +#define IN24XX_PORT_CHANGED_BADFAN 0x00 +#define IN24XX_PORT_CHANGED_TOPO_CHANGE 0x01 +#define IN24XX_PORT_CHANGED_FLOGI_ACC 0x02 +#define IN24XX_PORT_CHANGED_FLOGI_RJT 0x03 +#define IN24XX_PORT_CHANGED_TIMEOUT 0x04 +#define IN24XX_PORT_CHANGED_PORT_CHANGE 0x05 + +/* + * Notify Acknowledge Entry structure + */ +#define NA_RSVDLEN 22 +typedef struct { + isphdr_t na_header; + uint32_t na_reserved; + uint8_t na_lun; /* lun */ + uint8_t na_iid; /* initiator */ + uint8_t na_reserved2; + uint8_t na_tgt; /* target */ + uint32_t na_flags; + uint8_t na_status; + uint8_t na_event; + uint16_t na_seqid; /* sequence id */ + uint16_t na_reserved3[NA_RSVDLEN]; +} na_entry_t; + +/* + * Value for the na_event field + */ +#define NA_RST_CLRD 0x80 /* Clear an async event notification */ +#define NA_OK 0x01 /* Notify Acknowledge Succeeded */ +#define NA_INVALID 0x06 /* Invalid Notify Acknowledge */ + +#define NA2_RSVDLEN 21 +typedef struct { + isphdr_t na_header; + uint32_t na_reserved; + uint8_t na_reserved1; + uint8_t na_iid; /* initiator loop id */ + uint16_t na_response; + uint16_t na_flags; + uint16_t na_reserved2; + uint16_t na_status; + uint16_t na_task_flags; + uint16_t na_seqid; /* sequence id */ + uint16_t na_reserved3[NA2_RSVDLEN]; +} na_fcentry_t; + +typedef struct { + isphdr_t na_header; + uint32_t na_reserved; + uint16_t na_iid; /* initiator loop id */ + uint16_t 
na_response; /* response code */ + uint16_t na_flags; + uint16_t na_reserved2; + uint16_t na_status; + uint16_t na_task_flags; + uint16_t na_seqid; /* sequence id */ + uint16_t na_reserved3[NA2_RSVDLEN]; +} na_fcentry_e_t; + +#define NAFC_RCOUNT 0x80 /* increment resource count */ +#define NAFC_RST_CLRD 0x20 /* Clear LIP Reset */ +#define NAFC_TVALID 0x10 /* task mangement response code is valid */ + +/* + * ISP24XX Notify Acknowledge + */ + +typedef struct { + isphdr_t na_header; + uint32_t na_handle; + uint16_t na_nphdl; + uint16_t na_reserved1; + uint16_t na_flags; + uint16_t na_srr_rxid; + uint16_t na_status; + uint8_t na_status_subcode; + uint8_t na_reserved2; + uint32_t na_rxid; + uint16_t na_srr_reloff_lo; + uint16_t na_srr_reloff_hi; + uint16_t na_srr_iu; + uint16_t na_srr_flags; + uint8_t na_reserved3[18]; + uint8_t na_reserved4; + uint8_t na_vpidx; + uint8_t na_srr_reject_vunique; + uint8_t na_srr_reject_explanation; + uint8_t na_srr_reject_code; + uint8_t na_reserved5; + uint8_t na_reserved6[6]; + uint16_t na_oxid; +} na_fcentry_24xx_t; + +/* + * Accept Target I/O Entry structure + */ +#define ATIO_CDBLEN 26 + +typedef struct { + isphdr_t at_header; + uint16_t at_reserved; + uint16_t at_handle; + uint8_t at_lun; /* lun */ + uint8_t at_iid; /* initiator */ + uint8_t at_cdblen; /* cdb length */ + uint8_t at_tgt; /* target */ + uint32_t at_flags; + uint8_t at_status; /* firmware status */ + uint8_t at_scsi_status; /* scsi status */ + uint8_t at_tag_val; /* tag value */ + uint8_t at_tag_type; /* tag type */ + uint8_t at_cdb[ATIO_CDBLEN]; /* received CDB */ + uint8_t at_sense[QLTM_SENSELEN];/* suggested sense data */ +} at_entry_t; + +/* + * at_flags values + */ +#define AT_NODISC 0x00008000 /* disconnect disabled */ +#define AT_TQAE 0x00000002 /* Tagged Queue Action enabled */ + +/* + * at_status values + */ +#define AT_PATH_INVALID 0x07 /* ATIO sent to firmware for disabled lun */ +#define AT_RESET 0x0E /* SCSI Bus Reset Occurred */ +#define AT_PHASE_ERROR 
0x14 /* Bus phase sequence error */ +#define AT_NOCAP 0x16 /* Requested capability not available */ +#define AT_BDR_MSG 0x17 /* Bus Device Reset msg received */ +#define AT_CDB 0x3D /* CDB received */ +/* + * Macros to create and fetch and test concatenated handle and tag value macros + * (SPI only) + */ +#define AT_MAKE_TAGID(tid, aep) \ + tid = aep->at_handle; \ + if (aep->at_flags & AT_TQAE) { \ + tid |= (aep->at_tag_val << 16); \ + tid |= (1 << 24); \ + } + +#define CT_MAKE_TAGID(tid, ct) \ + tid = ct->ct_fwhandle; \ + if (ct->ct_flags & CT_TQAE) { \ + tid |= (ct->ct_tag_val << 16); \ + tid |= (1 << 24); \ + } + +#define AT_HAS_TAG(val) ((val) & (1 << 24)) +#define AT_GET_TAG(val) (((val) >> 16) & 0xff) +#define AT_GET_HANDLE(val) ((val) & 0xffff) + +#define IN_MAKE_TAGID(tid, inp) \ + tid = inp->in_seqid; \ + tid |= (inp->in_tag_val << 16); \ + tid |= (1 << 24) + +/* + * Accept Target I/O Entry structure, Type 2 + */ +#define ATIO2_CDBLEN 16 + +typedef struct { + isphdr_t at_header; + uint32_t at_reserved; + uint8_t at_lun; /* lun or reserved */ + uint8_t at_iid; /* initiator */ + uint16_t at_rxid; /* response ID */ + uint16_t at_flags; + uint16_t at_status; /* firmware status */ + uint8_t at_crn; /* command reference number */ + uint8_t at_taskcodes; + uint8_t at_taskflags; + uint8_t at_execodes; + uint8_t at_cdb[ATIO2_CDBLEN]; /* received CDB */ + uint32_t at_datalen; /* allocated data len */ + uint16_t at_scclun; /* SCC Lun or reserved */ + uint16_t at_wwpn[4]; /* WWPN of initiator */ + uint16_t at_reserved2[6]; + uint16_t at_oxid; +} at2_entry_t; + +typedef struct { + isphdr_t at_header; + uint32_t at_reserved; + uint16_t at_iid; /* initiator */ + uint16_t at_rxid; /* response ID */ + uint16_t at_flags; + uint16_t at_status; /* firmware status */ + uint8_t at_crn; /* command reference number */ + uint8_t at_taskcodes; + uint8_t at_taskflags; + uint8_t at_execodes; + uint8_t at_cdb[ATIO2_CDBLEN]; /* received CDB */ + uint32_t at_datalen; /* allocated data 
len */ + uint16_t at_scclun; /* SCC Lun or reserved */ + uint16_t at_wwpn[4]; /* WWPN of initiator */ + uint16_t at_reserved2[6]; + uint16_t at_oxid; +} at2e_entry_t; + +#define ATIO2_WWPN_OFFSET 0x2A +#define ATIO2_OXID_OFFSET 0x3E + +#define ATIO2_TC_ATTR_MASK 0x7 +#define ATIO2_TC_ATTR_SIMPLEQ 0 +#define ATIO2_TC_ATTR_HEADOFQ 1 +#define ATIO2_TC_ATTR_ORDERED 2 +#define ATIO2_TC_ATTR_ACAQ 4 +#define ATIO2_TC_ATTR_UNTAGGED 5 + +#define ATIO2_EX_WRITE 0x1 +#define ATIO2_EX_READ 0x2 +/* + * Macros to create and fetch and test concatenated handle and tag value macros + */ +#define AT2_MAKE_TAGID(tid, bus, inst, aep) \ + tid = aep->at_rxid; \ + tid |= (((uint64_t)inst) << 32); \ + tid |= (((uint64_t)bus) << 48) + +#define CT2_MAKE_TAGID(tid, bus, inst, ct) \ + tid = ct->ct_rxid; \ + tid |= (((uint64_t)inst) << 32); \ + tid |= (((uint64_t)(bus & 0xff)) << 48) + +#define AT2_HAS_TAG(val) 1 +#define AT2_GET_TAG(val) ((val) & 0xffffffff) +#define AT2_GET_INST(val) (((val) >> 32) & 0xffff) +#define AT2_GET_HANDLE AT2_GET_TAG +#define AT2_GET_BUS(val) (((val) >> 48) & 0xff) + +#define FC_HAS_TAG AT2_HAS_TAG +#define FC_GET_TAG AT2_GET_TAG +#define FC_GET_INST AT2_GET_INST +#define FC_GET_HANDLE AT2_GET_HANDLE + +#define IN_FC_MAKE_TAGID(tid, bus, inst, seqid) \ + tid = seqid; \ + tid |= (((uint64_t)inst) << 32); \ + tid |= (((uint64_t)(bus & 0xff)) << 48) + +#define FC_TAG_INSERT_INST(tid, inst) \ + tid &= ~0x0000ffff00000000ull; \ + tid |= (((uint64_t)inst) << 32) + +/* + * 24XX ATIO Definition + * + * This is *quite* different from other entry types. + * First of all, it has its own queue it comes in on. + * + * Secondly, it doesn't have a normal header. + * + * Thirdly, it's just a passthru of the FCP CMND IU + * which is recorded in big endian mode. + */ +typedef struct { + uint8_t at_type; + uint8_t at_count; + /* + * Task attribute in high four bits, + * the rest is the FCP CMND IU Length. + * NB: the command can extend past the + * length for a single queue entry. 
+ */ + uint16_t at_ta_len; + uint32_t at_rxid; + fc_hdr_t at_hdr; + fcp_cmnd_iu_t at_cmnd; +} at7_entry_t; +#define AT7_NORESRC_RXID 0xffffffff + + +/* + * Continue Target I/O Entry structure + * Request from driver. The response from the + * ISP firmware is the same except that the last 18 + * bytes are overwritten by suggested sense data if + * the 'autosense valid' bit is set in the status byte. + */ +typedef struct { + isphdr_t ct_header; + uint16_t ct_syshandle; + uint16_t ct_fwhandle; /* required by f/w */ + uint8_t ct_lun; /* lun */ + uint8_t ct_iid; /* initiator id */ + uint8_t ct_reserved2; + uint8_t ct_tgt; /* our target id */ + uint32_t ct_flags; + uint8_t ct_status; /* isp status */ + uint8_t ct_scsi_status; /* scsi status */ + uint8_t ct_tag_val; /* tag value */ + uint8_t ct_tag_type; /* tag type */ + uint32_t ct_xfrlen; /* transfer length */ + int32_t ct_resid; /* residual length */ + uint16_t ct_timeout; + uint16_t ct_seg_count; + ispds_t ct_dataseg[ISP_RQDSEG]; +} ct_entry_t; + +/* + * For some of the dual port SCSI adapters, port (bus #) is reported + * in the MSbit of ct_iid. Bit fields are a bit too awkward here. + * + * Note that this does not apply to FC adapters at all which can and + * do report IIDs between 0x81 && 0xfe (or 0x7ff) which represent devices + * that have logged in across a SCSI fabric. 
+ */ +#define GET_IID_VAL(x) (x & 0x3f) +#define GET_BUS_VAL(x) ((x >> 7) & 0x1) +#define SET_IID_VAL(y, x) y = ((y & ~0x3f) | (x & 0x3f)) +#define SET_BUS_VAL(y, x) y = ((y & 0x3f) | ((x & 0x1) << 7)) + +/* + * ct_flags values + */ +#define CT_TQAE 0x00000002 /* bit 1, Tagged Queue Action enable */ +#define CT_DATA_IN 0x00000040 /* bits 6&7, Data direction */ +#define CT_DATA_OUT 0x00000080 /* bits 6&7, Data direction */ +#define CT_NO_DATA 0x000000C0 /* bits 6&7, Data direction */ +#define CT_CCINCR 0x00000100 /* bit 8, autoincrement atio count */ +#define CT_DATAMASK 0x000000C0 /* bits 6&7, Data direction */ +#define CT_INISYNCWIDE 0x00004000 /* bit 14, Do Sync/Wide Negotiation */ +#define CT_NODISC 0x00008000 /* bit 15, Disconnects disabled */ +#define CT_DSDP 0x01000000 /* bit 24, Disable Save Data Pointers */ +#define CT_SENDRDP 0x04000000 /* bit 26, Send Restore Pointers msg */ +#define CT_SENDSTATUS 0x80000000 /* bit 31, Send SCSI status byte */ + +/* + * ct_status values + * - set by the firmware when it returns the CTIO + */ +#define CT_OK 0x01 /* completed without error */ +#define CT_ABORTED 0x02 /* aborted by host */ +#define CT_ERR 0x04 /* see sense data for error */ +#define CT_INVAL 0x06 /* request for disabled lun */ +#define CT_NOPATH 0x07 /* invalid ITL nexus */ +#define CT_INVRXID 0x08 /* (FC only) Invalid RX_ID */ +#define CT_DATA_OVER 0x09 /* (FC only) Data Overrun */ +#define CT_RSELTMO 0x0A /* reselection timeout after 2 tries */ +#define CT_TIMEOUT 0x0B /* timed out */ +#define CT_RESET 0x0E /* SCSI Bus Reset occurred */ +#define CT_PARITY 0x0F /* Uncorrectable Parity Error */ +#define CT_BUS_ERROR 0x10 /* (FC Only) DMA PCI Error */ +#define CT_PANIC 0x13 /* Unrecoverable Error */ +#define CT_PHASE_ERROR 0x14 /* Bus phase sequence error */ +#define CT_DATA_UNDER 0x15 /* (FC only) Data Underrun */ +#define CT_BDR_MSG 0x17 /* Bus Device Reset msg received */ +#define CT_TERMINATED 0x19 /* due to Terminate Transfer mbox cmd */ +#define 
CT_PORTUNAVAIL 0x28 /* port not available */ +#define CT_LOGOUT 0x29 /* port logout */ +#define CT_PORTCHANGED 0x2A /* port changed */ +#define CT_IDE 0x33 /* Initiator Detected Error */ +#define CT_NOACK 0x35 /* Outstanding Immed. Notify. entry */ +#define CT_SRR 0x45 /* SRR Received */ +#define CT_LUN_RESET 0x48 /* Lun Reset Received */ + +#define CT_HBA_RESET 0xffff /* pseudo error - command destroyed by HBA reset*/ + +/* + * When the firmware returns a CTIO entry, it may overwrite the last + * part of the structure with sense data. This starts at offset 0x2E + * into the entry, which is in the middle of ct_dataseg[1]. Rather + * than define a new struct for this, I'm just using the sense data + * offset. + */ +#define CTIO_SENSE_OFFSET 0x2E + +/* + * Entry length in u_longs. All entries are the same size so + * any one will do as the numerator. + */ +#define UINT32_ENTRY_SIZE (sizeof(at_entry_t)/sizeof(uint32_t)) + +/* + * QLA2100 CTIO (type 2) entry + */ +#define MAXRESPLEN 26 +typedef struct { + isphdr_t ct_header; + uint32_t ct_syshandle; + uint8_t ct_lun; /* lun */ + uint8_t ct_iid; /* initiator id */ + uint16_t ct_rxid; /* response ID */ + uint16_t ct_flags; + uint16_t ct_status; /* isp status */ + uint16_t ct_timeout; + uint16_t ct_seg_count; + uint32_t ct_reloff; /* relative offset */ + int32_t ct_resid; /* residual length */ + union { + /* + * The three different modes that the target driver + * can set the CTIO{2,3,4} up as. + * + * The first is for sending FCP_DATA_IUs as well as + * (optionally) sending a terminal SCSI status FCP_RSP_IU. + * + * The second is for sending SCSI sense data in an FCP_RSP_IU. + * Note that no FCP_DATA_IUs will be sent. + * + * The third is for sending FCP_RSP_IUs as built specifically + * in system memory as located by the isp_dataseg. 
+ */ + struct { + uint32_t _reserved; + uint16_t _reserved2; + uint16_t ct_scsi_status; + uint32_t ct_xfrlen; + union { + ispds_t ct_dataseg[ISP_RQDSEG_T2]; + ispds64_t ct_dataseg64[ISP_RQDSEG_T3]; + ispdslist_t ct_dslist; + } u; + } m0; + struct { + uint16_t _reserved; + uint16_t _reserved2; + uint16_t ct_senselen; + uint16_t ct_scsi_status; + uint16_t ct_resplen; + uint8_t ct_resp[MAXRESPLEN]; + } m1; + struct { + uint32_t _reserved; + uint16_t _reserved2; + uint16_t _reserved3; + uint32_t ct_datalen; + ispds_t ct_fcp_rsp_iudata; + } m2; + } rsp; +} ct2_entry_t; + +typedef struct { + isphdr_t ct_header; + uint32_t ct_syshandle; + uint16_t ct_iid; /* initiator id */ + uint16_t ct_rxid; /* response ID */ + uint16_t ct_flags; + uint16_t ct_status; /* isp status */ + uint16_t ct_timeout; + uint16_t ct_seg_count; + uint32_t ct_reloff; /* relative offset */ + int32_t ct_resid; /* residual length */ + union { + struct { + uint32_t _reserved; + uint16_t _reserved2; + uint16_t ct_scsi_status; + uint32_t ct_xfrlen; + union { + ispds_t ct_dataseg[ISP_RQDSEG_T2]; + ispds64_t ct_dataseg64[ISP_RQDSEG_T3]; + ispdslist_t ct_dslist; + } u; + } m0; + struct { + uint16_t _reserved; + uint16_t _reserved2; + uint16_t ct_senselen; + uint16_t ct_scsi_status; + uint16_t ct_resplen; + uint8_t ct_resp[MAXRESPLEN]; + } m1; + struct { + uint32_t _reserved; + uint16_t _reserved2; + uint16_t _reserved3; + uint32_t ct_datalen; + ispds_t ct_fcp_rsp_iudata; + } m2; + } rsp; +} ct2e_entry_t; + +/* + * ct_flags values for CTIO2 + */ +#define CT2_FLAG_MODE0 0x0000 +#define CT2_FLAG_MODE1 0x0001 +#define CT2_FLAG_MODE2 0x0002 +#define CT2_FLAG_MMASK 0x0003 +#define CT2_DATA_IN 0x0040 +#define CT2_DATA_OUT 0x0080 +#define CT2_NO_DATA 0x00C0 +#define CT2_DATAMASK 0x00C0 +#define CT2_CCINCR 0x0100 +#define CT2_FASTPOST 0x0200 +#define CT2_CONFIRM 0x2000 +#define CT2_TERMINATE 0x4000 +#define CT2_SENDSTATUS 0x8000 + +/* + * ct_status values are (mostly) the same as that for ct_entry. 
+ */ + +/* + * ct_scsi_status values- the low 8 bits are the normal SCSI status + * we know and love. The upper 8 bits are validity markers for FCP_RSP_IU + * fields. + */ +#define CT2_RSPLEN_VALID 0x0100 +#define CT2_SNSLEN_VALID 0x0200 +#define CT2_DATA_OVER 0x0400 +#define CT2_DATA_UNDER 0x0800 + +/* + * ISP24XX CTIO + */ +#define MAXRESPLEN_24XX 24 +typedef struct { + isphdr_t ct_header; + uint32_t ct_syshandle; + uint16_t ct_nphdl; /* status on returned CTIOs */ + uint16_t ct_timeout; + uint16_t ct_seg_count; + uint8_t ct_vpidx; + uint8_t ct_xflags; + uint16_t ct_iid_lo; /* low 16 bits of portid */ + uint8_t ct_iid_hi; /* hi 8 bits of portid */ + uint8_t ct_reserved; + uint32_t ct_rxid; + uint16_t ct_senselen; /* mode 1 only */ + uint16_t ct_flags; + int32_t ct_resid; /* residual length */ + uint16_t ct_oxid; + uint16_t ct_scsi_status; /* modes 0 && 1 only */ + union { + struct { + uint32_t reloff; + uint32_t reserved0; + uint32_t ct_xfrlen; + uint32_t reserved1; + ispds64_t ds; + } m0; + struct { + uint16_t ct_resplen; + uint16_t reserved; + uint8_t ct_resp[MAXRESPLEN_24XX]; + } m1; + struct { + uint32_t reserved0; + uint32_t ct_datalen; + uint32_t reserved1; + ispds64_t ct_fcp_rsp_iudata; + } m2; + } rsp; +} ct7_entry_t; + +/* + * ct_flags values for CTIO7 + */ +#define CT7_DATA_IN 0x0002 +#define CT7_DATA_OUT 0x0001 +#define CT7_NO_DATA 0x0000 +#define CT7_DATAMASK 0x003 +#define CT7_DSD_ENABLE 0x0004 +#define CT7_CONF_STSFD 0x0010 +#define CT7_EXPLCT_CONF 0x0020 +#define CT7_FLAG_MODE0 0x0000 +#define CT7_FLAG_MODE1 0x0040 +#define CT7_FLAG_MODE2 0x0080 +#define CT7_FLAG_MMASK 0x00C0 +#define CT7_NOACK 0x0100 +#define CT7_TASK_ATTR_SHIFT 9 +#define CT7_CONFIRM 0x2000 +#define CT7_TERMINATE 0x4000 +#define CT7_SENDSTATUS 0x8000 + +/* + * Type 7 CTIO status codes + */ +#define CT7_OK 0x01 /* completed without error */ +#define CT7_ABORTED 0x02 /* aborted by host */ +#define CT7_ERR 0x04 /* see sense data for error */ +#define CT7_INVAL 0x06 /* request for 
disabled lun */ +#define CT7_INVRXID 0x08 /* Invalid RX_ID */ +#define CT7_DATA_OVER 0x09 /* Data Overrun */ +#define CT7_TIMEOUT 0x0B /* timed out */ +#define CT7_RESET 0x0E /* LIP Rset Received */ +#define CT7_BUS_ERROR 0x10 /* DMA PCI Error */ +#define CT7_REASSY_ERR 0x11 /* DMA reassembly error */ +#define CT7_DATA_UNDER 0x15 /* Data Underrun */ +#define CT7_PORTUNAVAIL 0x28 /* port not available */ +#define CT7_LOGOUT 0x29 /* port logout */ +#define CT7_PORTCHANGED 0x2A /* port changed */ +#define CT7_SRR 0x45 /* SRR Received */ + +/* + * Other 24XX related target IOCBs + */ + +/* + * ABTS Received + */ +typedef struct { + isphdr_t abts_header; + uint8_t abts_reserved0[6]; + uint16_t abts_nphdl; + uint16_t abts_reserved1; + uint16_t abts_sof; + uint32_t abts_rxid_abts; + uint16_t abts_did_lo; + uint8_t abts_did_hi; + uint8_t abts_r_ctl; + uint16_t abts_sid_lo; + uint8_t abts_sid_hi; + uint8_t abts_cs_ctl; + uint16_t abts_fs_ctl; + uint8_t abts_f_ctl; + uint8_t abts_type; + uint16_t abts_seq_cnt; + uint8_t abts_df_ctl; + uint8_t abts_seq_id; + uint16_t abts_rx_id; + uint16_t abts_ox_id; + uint32_t abts_param; + uint8_t abts_reserved2[16]; + uint32_t abts_rxid_task; +} abts_t; + +typedef struct { + isphdr_t abts_rsp_header; + uint32_t abts_rsp_handle; + uint16_t abts_rsp_status; + uint16_t abts_rsp_nphdl; + uint16_t abts_rsp_ctl_flags; + uint16_t abts_rsp_sof; + uint32_t abts_rsp_rxid_abts; + uint16_t abts_rsp_did_lo; + uint8_t abts_rsp_did_hi; + uint8_t abts_rsp_r_ctl; + uint16_t abts_rsp_sid_lo; + uint8_t abts_rsp_sid_hi; + uint8_t abts_rsp_cs_ctl; + uint16_t abts_rsp_f_ctl_lo; + uint8_t abts_rsp_f_ctl_hi; + uint8_t abts_rsp_type; + uint16_t abts_rsp_seq_cnt; + uint8_t abts_rsp_df_ctl; + uint8_t abts_rsp_seq_id; + uint16_t abts_rsp_rx_id; + uint16_t abts_rsp_ox_id; + uint32_t abts_rsp_param; + union { + struct { + uint16_t reserved; + uint8_t last_seq_id; + uint8_t seq_id_valid; + uint16_t aborted_rx_id; + uint16_t aborted_ox_id; + uint16_t high_seq_cnt; + 
uint16_t low_seq_cnt; + uint8_t reserved2[4]; + } ba_acc; + struct { + uint8_t vendor_unique; + uint8_t explanation; + uint8_t reason; + uint8_t reserved; + uint8_t reserved2[12]; + } ba_rjt; + struct { + uint8_t reserved[8]; + uint32_t subcode1; + uint32_t subcode2; + } rsp; + uint8_t reserved[16]; + } abts_rsp_payload; + uint32_t abts_rsp_rxid_task; +} abts_rsp_t; + +/* terminate this ABTS exchange */ +#define ISP24XX_ABTS_RSP_TERMINATE 0x01 + +#define ISP24XX_ABTS_RSP_COMPLETE 0x00 +#define ISP24XX_ABTS_RSP_RESET 0x04 +#define ISP24XX_ABTS_RSP_ABORTED 0x05 +#define ISP24XX_ABTS_RSP_TIMEOUT 0x06 +#define ISP24XX_ABTS_RSP_INVXID 0x08 +#define ISP24XX_ABTS_RSP_LOGOUT 0x29 +#define ISP24XX_ABTS_RSP_SUBCODE 0x31 + +#define ISP24XX_NO_TASK 0xffffffff + +/* + * Miscellaneous + * + * These are the limits of the number of dma segments we + * can deal with based not on the size of the segment counter + * (which is 16 bits), but on the size of the number of + * queue entries field (which is 8 bits). We assume no + * segments in the first queue entry, so we can either + * have 7 dma segments per continuation entry or 5 + * (for 64 bit dma).. multiplying out by 254.... + */ +#define ISP_NSEG_MAX 1778 +#define ISP_NSEG64_MAX 1270 + #endif /* _ISPMBOX_H */ Index: head/sys/dev/isp/ispreg.h =================================================================== --- head/sys/dev/isp/ispreg.h (revision 196007) +++ head/sys/dev/isp/ispreg.h (revision 196008) @@ -1,1180 +1,1193 @@ /* $FreeBSD$ */ /*- - * Copyright (c) 1997-2007 by Matthew Jacob + * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * */ /* * Machine Independent (well, as best as possible) register * definitions for Qlogic ISP SCSI adapters. */ #ifndef _ISPREG_H #define _ISPREG_H /* * Hardware definitions for the Qlogic ISP registers. */ /* * This defines types of access to various registers. * * R: Read Only * W: Write Only * RW: Read/Write * * R*, W*, RW*: Read Only, Write Only, Read/Write, but only * if RISC processor in ISP is paused. */ /* * Offsets for various register blocks. * * Sad but true, different architectures have different offsets. * * Don't be alarmed if none of this makes sense. The original register * layout set some defines in a certain pattern. Everything else has been * grafted on since. For example, the ISP1080 manual will state that DMA * registers start at 0x80 from the base of the register address space. * That's true, but for our purposes, we define DMA_REGS_OFF for the 1080 * to start at offset 0x60 because the DMA registers are all defined to * be DMA_BLOCK+0x20 and so on. Clear? 
*/ #define BIU_REGS_OFF 0x00 #define PCI_MBOX_REGS_OFF 0x70 #define PCI_MBOX_REGS2100_OFF 0x10 #define PCI_MBOX_REGS2300_OFF 0x40 #define PCI_MBOX_REGS2400_OFF 0x80 #define SBUS_MBOX_REGS_OFF 0x80 #define PCI_SXP_REGS_OFF 0x80 #define SBUS_SXP_REGS_OFF 0x200 #define PCI_RISC_REGS_OFF 0x80 #define SBUS_RISC_REGS_OFF 0x400 /* Bless me! Chip designers have putzed it again! */ #define ISP1080_DMA_REGS_OFF 0x60 #define DMA_REGS_OFF 0x00 /* same as BIU block */ #define SBUS_REGSIZE 0x450 #define PCI_REGSIZE 0x100 /* * NB: The *_BLOCK definitions have no specific hardware meaning. * They serve simply to note to the MD layer which block of * registers offsets are being accessed. */ #define _NREG_BLKS 5 #define _BLK_REG_SHFT 13 #define _BLK_REG_MASK (7 << _BLK_REG_SHFT) #define BIU_BLOCK (0 << _BLK_REG_SHFT) #define MBOX_BLOCK (1 << _BLK_REG_SHFT) #define SXP_BLOCK (2 << _BLK_REG_SHFT) #define RISC_BLOCK (3 << _BLK_REG_SHFT) #define DMA_BLOCK (4 << _BLK_REG_SHFT) /* * Bus Interface Block Register Offsets */ #define BIU_ID_LO (BIU_BLOCK+0x0) /* R : Bus ID, Low */ #define BIU2100_FLASH_ADDR (BIU_BLOCK+0x0) #define BIU_ID_HI (BIU_BLOCK+0x2) /* R : Bus ID, High */ #define BIU2100_FLASH_DATA (BIU_BLOCK+0x2) #define BIU_CONF0 (BIU_BLOCK+0x4) /* R : Bus Configuration #0 */ #define BIU_CONF1 (BIU_BLOCK+0x6) /* R : Bus Configuration #1 */ #define BIU2100_CSR (BIU_BLOCK+0x6) #define BIU_ICR (BIU_BLOCK+0x8) /* RW : Bus Interface Ctrl */ #define BIU_ISR (BIU_BLOCK+0xA) /* R : Bus Interface Status */ #define BIU_SEMA (BIU_BLOCK+0xC) /* RW : Bus Semaphore */ #define BIU_NVRAM (BIU_BLOCK+0xE) /* RW : Bus NVRAM */ /* * These are specific to the 2300. 
*/ #define BIU_REQINP (BIU_BLOCK+0x10) /* Request Queue In */ #define BIU_REQOUTP (BIU_BLOCK+0x12) /* Request Queue Out */ #define BIU_RSPINP (BIU_BLOCK+0x14) /* Response Queue In */ #define BIU_RSPOUTP (BIU_BLOCK+0x16) /* Response Queue Out */ #define BIU_R2HSTSLO (BIU_BLOCK+0x18) #define BIU_R2HSTSHI (BIU_BLOCK+0x1A) #define BIU_R2HST_INTR (1 << 15) /* RISC to Host Interrupt */ #define BIU_R2HST_PAUSED (1 << 8) /* RISC paused */ #define BIU_R2HST_ISTAT_MASK 0x3f /* intr information && status */ #define ISPR2HST_ROM_MBX_OK 0x1 /* ROM mailbox cmd done ok */ #define ISPR2HST_ROM_MBX_FAIL 0x2 /* ROM mailbox cmd done fail */ #define ISPR2HST_MBX_OK 0x10 /* mailbox cmd done ok */ #define ISPR2HST_MBX_FAIL 0x11 /* mailbox cmd done fail */ #define ISPR2HST_ASYNC_EVENT 0x12 /* Async Event */ #define ISPR2HST_RSPQ_UPDATE 0x13 /* Response Queue Update */ #define ISPR2HST_RQST_UPDATE 0x14 /* Resquest Queue Update */ #define ISPR2HST_RIO_16 0x15 /* RIO 1-16 */ #define ISPR2HST_FPOST 0x16 /* Low 16 bits fast post */ #define ISPR2HST_FPOST_CTIO 0x17 /* Low 16 bits fast post ctio */ /* fifo command stuff- mostly for SPI */ #define DFIFO_COMMAND (BIU_BLOCK+0x60) /* RW : Command FIFO Port */ #define RDMA2100_CONTROL DFIFO_COMMAND #define DFIFO_DATA (BIU_BLOCK+0x62) /* RW : Data FIFO Port */ /* * Putzed DMA register layouts. 
*/ #define CDMA_CONF (DMA_BLOCK+0x20) /* RW*: DMA Configuration */ #define CDMA2100_CONTROL CDMA_CONF #define CDMA_CONTROL (DMA_BLOCK+0x22) /* RW*: DMA Control */ #define CDMA_STATUS (DMA_BLOCK+0x24) /* R : DMA Status */ #define CDMA_FIFO_STS (DMA_BLOCK+0x26) /* R : DMA FIFO Status */ #define CDMA_COUNT (DMA_BLOCK+0x28) /* RW*: DMA Transfer Count */ #define CDMA_ADDR0 (DMA_BLOCK+0x2C) /* RW*: DMA Address, Word 0 */ #define CDMA_ADDR1 (DMA_BLOCK+0x2E) /* RW*: DMA Address, Word 1 */ #define CDMA_ADDR2 (DMA_BLOCK+0x30) /* RW*: DMA Address, Word 2 */ #define CDMA_ADDR3 (DMA_BLOCK+0x32) /* RW*: DMA Address, Word 3 */ #define DDMA_CONF (DMA_BLOCK+0x40) /* RW*: DMA Configuration */ #define TDMA2100_CONTROL DDMA_CONF #define DDMA_CONTROL (DMA_BLOCK+0x42) /* RW*: DMA Control */ #define DDMA_STATUS (DMA_BLOCK+0x44) /* R : DMA Status */ #define DDMA_FIFO_STS (DMA_BLOCK+0x46) /* R : DMA FIFO Status */ #define DDMA_COUNT_LO (DMA_BLOCK+0x48) /* RW*: DMA Xfer Count, Low */ #define DDMA_COUNT_HI (DMA_BLOCK+0x4A) /* RW*: DMA Xfer Count, High */ #define DDMA_ADDR0 (DMA_BLOCK+0x4C) /* RW*: DMA Address, Word 0 */ #define DDMA_ADDR1 (DMA_BLOCK+0x4E) /* RW*: DMA Address, Word 1 */ /* these are for the 1040A cards */ #define DDMA_ADDR2 (DMA_BLOCK+0x50) /* RW*: DMA Address, Word 2 */ #define DDMA_ADDR3 (DMA_BLOCK+0x52) /* RW*: DMA Address, Word 3 */ /* * Bus Interface Block Register Definitions */ /* BUS CONFIGURATION REGISTER #0 */ #define BIU_CONF0_HW_MASK 0x000F /* Hardware revision mask */ /* BUS CONFIGURATION REGISTER #1 */ #define BIU_SBUS_CONF1_PARITY 0x0100 /* Enable parity checking */ #define BIU_SBUS_CONF1_FCODE_MASK 0x00F0 /* Fcode cycle mask */ #define BIU_PCI_CONF1_FIFO_128 0x0040 /* 128 bytes FIFO threshold */ #define BIU_PCI_CONF1_FIFO_64 0x0030 /* 64 bytes FIFO threshold */ #define BIU_PCI_CONF1_FIFO_32 0x0020 /* 32 bytes FIFO threshold */ #define BIU_PCI_CONF1_FIFO_16 0x0010 /* 16 bytes FIFO threshold */ #define BIU_BURST_ENABLE 0x0004 /* Global enable Bus bursts */ 
#define BIU_SBUS_CONF1_FIFO_64 0x0003 /* 64 bytes FIFO threshold */ #define BIU_SBUS_CONF1_FIFO_32 0x0002 /* 32 bytes FIFO threshold */ #define BIU_SBUS_CONF1_FIFO_16 0x0001 /* 16 bytes FIFO threshold */ #define BIU_SBUS_CONF1_FIFO_8 0x0000 /* 8 bytes FIFO threshold */ #define BIU_SBUS_CONF1_BURST8 0x0008 /* Enable 8-byte bursts */ #define BIU_PCI_CONF1_SXP 0x0008 /* SXP register select */ #define BIU_PCI1080_CONF1_SXP0 0x0100 /* SXP bank #1 select */ #define BIU_PCI1080_CONF1_SXP1 0x0200 /* SXP bank #2 select */ #define BIU_PCI1080_CONF1_DMA 0x0300 /* DMA bank select */ /* ISP2100 Bus Control/Status Register */ #define BIU2100_ICSR_REGBSEL 0x30 /* RW: register bank select */ #define BIU2100_RISC_REGS (0 << 4) /* RISC Regs */ #define BIU2100_FB_REGS (1 << 4) /* FrameBuffer Regs */ #define BIU2100_FPM0_REGS (2 << 4) /* FPM 0 Regs */ #define BIU2100_FPM1_REGS (3 << 4) /* FPM 1 Regs */ #define BIU2100_NVRAM_OFFSET (1 << 14) #define BIU2100_FLASH_UPPER_64K 0x04 /* RW: Upper 64K Bank Select */ #define BIU2100_FLASH_ENABLE 0x02 /* RW: Enable Flash RAM */ #define BIU2100_SOFT_RESET 0x01 /* SOFT RESET FOR ISP2100 is same bit, but in this register, not ICR */ /* BUS CONTROL REGISTER */ #define BIU_ICR_ENABLE_DMA_INT 0x0020 /* Enable DMA interrupts */ #define BIU_ICR_ENABLE_CDMA_INT 0x0010 /* Enable CDMA interrupts */ #define BIU_ICR_ENABLE_SXP_INT 0x0008 /* Enable SXP interrupts */ #define BIU_ICR_ENABLE_RISC_INT 0x0004 /* Enable Risc interrupts */ #define BIU_ICR_ENABLE_ALL_INTS 0x0002 /* Global enable all inter */ #define BIU_ICR_SOFT_RESET 0x0001 /* Soft Reset of ISP */ #define BIU_IMASK (BIU_ICR_ENABLE_RISC_INT|BIU_ICR_ENABLE_ALL_INTS) #define BIU2100_ICR_ENABLE_ALL_INTS 0x8000 #define BIU2100_ICR_ENA_FPM_INT 0x0020 #define BIU2100_ICR_ENA_FB_INT 0x0010 #define BIU2100_ICR_ENA_RISC_INT 0x0008 #define BIU2100_ICR_ENA_CDMA_INT 0x0004 #define BIU2100_ICR_ENABLE_RXDMA_INT 0x0002 #define BIU2100_ICR_ENABLE_TXDMA_INT 0x0001 #define BIU2100_ICR_DISABLE_ALL_INTS 0x0000 #define 
BIU2100_IMASK (BIU2100_ICR_ENA_RISC_INT|BIU2100_ICR_ENABLE_ALL_INTS) /* BUS STATUS REGISTER */ #define BIU_ISR_DMA_INT 0x0020 /* DMA interrupt pending */ #define BIU_ISR_CDMA_INT 0x0010 /* CDMA interrupt pending */ #define BIU_ISR_SXP_INT 0x0008 /* SXP interrupt pending */ #define BIU_ISR_RISC_INT 0x0004 /* Risc interrupt pending */ #define BIU_ISR_IPEND 0x0002 /* Global interrupt pending */ #define BIU2100_ISR_INT_PENDING 0x8000 /* Global interrupt pending */ #define BIU2100_ISR_FPM_INT 0x0020 /* FPM interrupt pending */ #define BIU2100_ISR_FB_INT 0x0010 /* FB interrupt pending */ #define BIU2100_ISR_RISC_INT 0x0008 /* Risc interrupt pending */ #define BIU2100_ISR_CDMA_INT 0x0004 /* CDMA interrupt pending */ #define BIU2100_ISR_RXDMA_INT_PENDING 0x0002 /* Global interrupt pending */ #define BIU2100_ISR_TXDMA_INT_PENDING 0x0001 /* Global interrupt pending */ #define INT_PENDING(isp, isr) \ IS_FC(isp)? \ (IS_24XX(isp)? (isr & BIU2400_ISR_RISC_INT) : (isr & BIU2100_ISR_RISC_INT)) :\ (isr & BIU_ISR_RISC_INT) #define INT_PENDING_MASK(isp) \ (IS_FC(isp)? (IS_24XX(isp)? 
BIU2400_ISR_RISC_INT : BIU2100_ISR_RISC_INT) : \
	(BIU_ISR_RISC_INT))
/* BUS SEMAPHORE REGISTER */
#define BIU_SEMA_STATUS 0x0002 /* Semaphore Status Bit */
#define BIU_SEMA_LOCK 0x0001 /* Semaphore Lock Bit */
/* NVRAM SEMAPHORE REGISTER */
#define BIU_NVRAM_CLOCK 0x0001
#define BIU_NVRAM_SELECT 0x0002
#define BIU_NVRAM_DATAOUT 0x0004
#define BIU_NVRAM_DATAIN 0x0008
#define BIU_NVRAM_BUSY 0x0080
/* 2322/24xx only */
#define ISP_NVRAM_READ 6
/* COMMAND && DATA DMA CONFIGURATION REGISTER */
#define DMA_ENABLE_SXP_DMA 0x0008 /* Enable SXP to DMA Data */
#define DMA_ENABLE_INTS 0x0004 /* Enable interrupts to RISC */
#define DMA_ENABLE_BURST 0x0002 /* Enable Bus burst trans */
#define DMA_DMA_DIRECTION 0x0001
	/*
	 * Set DMA direction:
	 * 0 - DMA FIFO to host
	 * 1 - Host to DMA FIFO
	 */
/* COMMAND && DATA DMA CONTROL REGISTER */
#define DMA_CNTRL_SUSPEND_CHAN 0x0010 /* Suspend DMA transfer */
#define DMA_CNTRL_CLEAR_CHAN 0x0008
	/*
	 * Clear FIFO and DMA Channel,
	 * reset DMA registers
	 */
#define DMA_CNTRL_CLEAR_FIFO 0x0004 /* Clear DMA FIFO */
#define DMA_CNTRL_RESET_INT 0x0002 /* Clear DMA interrupt */
#define DMA_CNTRL_STROBE 0x0001 /* Start DMA transfer */
/*
 * Variants of same for 2100
 */
#define DMA_CNTRL2100_CLEAR_CHAN 0x0004
#define DMA_CNTRL2100_RESET_INT 0x0002
/* DMA STATUS REGISTER */
#define DMA_SBUS_STATUS_PIPE_MASK 0x00C0 /* DMA Pipeline status mask */
#define DMA_SBUS_STATUS_CHAN_MASK 0x0030 /* Channel status mask */
#define DMA_SBUS_STATUS_BUS_PARITY 0x0008 /* Parity Error on bus */
#define DMA_SBUS_STATUS_BUS_ERR 0x0004 /* Error Detected on bus */
#define DMA_SBUS_STATUS_TERM_COUNT 0x0002 /* DMA Transfer Completed */
#define DMA_SBUS_STATUS_INTERRUPT 0x0001 /* Enable DMA channel inter */
#define DMA_PCI_STATUS_INTERRUPT 0x8000 /* Enable DMA channel inter */
#define DMA_PCI_STATUS_RETRY_STAT 0x4000 /* Retry status */
#define DMA_PCI_STATUS_CHAN_MASK 0x3000 /* Channel status mask */
#define DMA_PCI_STATUS_FIFO_OVR 0x0100 /* DMA FIFO overrun cond */
#define
DMA_PCI_STATUS_FIFO_UDR 0x0080 /* DMA FIFO underrun cond */ #define DMA_PCI_STATUS_BUS_ERR 0x0040 /* Error Detected on bus */ #define DMA_PCI_STATUS_BUS_PARITY 0x0020 /* Parity Error on bus */ #define DMA_PCI_STATUS_CLR_PEND 0x0010 /* DMA clear pending */ #define DMA_PCI_STATUS_TERM_COUNT 0x0008 /* DMA Transfer Completed */ #define DMA_PCI_STATUS_DMA_SUSP 0x0004 /* DMA suspended */ #define DMA_PCI_STATUS_PIPE_MASK 0x0003 /* DMA Pipeline status mask */ /* DMA Status Register, pipeline status bits */ #define DMA_SBUS_PIPE_FULL 0x00C0 /* Both pipeline stages full */ #define DMA_SBUS_PIPE_OVERRUN 0x0080 /* Pipeline overrun */ #define DMA_SBUS_PIPE_STAGE1 0x0040 /* * Pipeline stage 1 Loaded, * stage 2 empty */ #define DMA_PCI_PIPE_FULL 0x0003 /* Both pipeline stages full */ #define DMA_PCI_PIPE_OVERRUN 0x0002 /* Pipeline overrun */ #define DMA_PCI_PIPE_STAGE1 0x0001 /* * Pipeline stage 1 Loaded, * stage 2 empty */ #define DMA_PIPE_EMPTY 0x0000 /* All pipeline stages empty */ /* DMA Status Register, channel status bits */ #define DMA_SBUS_CHAN_SUSPEND 0x0030 /* Channel error or suspended */ #define DMA_SBUS_CHAN_TRANSFER 0x0020 /* Chan transfer in progress */ #define DMA_SBUS_CHAN_ACTIVE 0x0010 /* Chan trans to host active */ #define DMA_PCI_CHAN_TRANSFER 0x3000 /* Chan transfer in progress */ #define DMA_PCI_CHAN_SUSPEND 0x2000 /* Channel error or suspended */ #define DMA_PCI_CHAN_ACTIVE 0x1000 /* Chan trans to host active */ #define ISP_DMA_CHAN_IDLE 0x0000 /* Chan idle (normal comp) */ /* DMA FIFO STATUS REGISTER */ #define DMA_FIFO_STATUS_OVERRUN 0x0200 /* FIFO Overrun Condition */ #define DMA_FIFO_STATUS_UNDERRUN 0x0100 /* FIFO Underrun Condition */ #define DMA_FIFO_SBUS_COUNT_MASK 0x007F /* FIFO Byte count mask */ #define DMA_FIFO_PCI_COUNT_MASK 0x00FF /* FIFO Byte count mask */ /* * 2400 Interface Offsets and Register Definitions * * The 2400 looks quite different in terms of registers from other QLogic cards. 
* It is getting to be a genuine pain and challenge to keep the same model * for all. */ #define BIU2400_FLASH_ADDR (BIU_BLOCK+0x00) #define BIU2400_FLASH_DATA (BIU_BLOCK+0x04) #define BIU2400_CSR (BIU_BLOCK+0x08) #define BIU2400_ICR (BIU_BLOCK+0x0C) #define BIU2400_ISR (BIU_BLOCK+0x10) #define BIU2400_REQINP (BIU_BLOCK+0x1C) /* Request Queue In */ #define BIU2400_REQOUTP (BIU_BLOCK+0x20) /* Request Queue Out */ #define BIU2400_RSPINP (BIU_BLOCK+0x24) /* Response Queue In */ #define BIU2400_RSPOUTP (BIU_BLOCK+0x28) /* Response Queue Out */ -#define BIU2400_PRI_RQINP (BIU_BLOCK+0x2C) /* Priority Request Q In */ -#define BIU2400_PRI_RSPINP (BIU_BLOCK+0x30) /* Priority Request Q Out */ -#define BIU2400_ATIO_RSPINP (BIU_BLOCK+0x3C) /* ATIO Queue In */ -#define BIU2400_ATIO_REQINP (BIU_BLOCK+0x40) /* ATIO Queue Out */ +#define BIU2400_PRI_REQINP (BIU_BLOCK+0x2C) /* Priority Request Q In */ +#define BIU2400_PRI_REQOUTP (BIU_BLOCK+0x30) /* Priority Request Q Out */ +#define BIU2400_ATIO_RSPINP (BIU_BLOCK+0x3C) /* ATIO Queue In */ +#define BIU2400_ATIO_RSPOUTP (BIU_BLOCK+0x40) /* ATIO Queue Out */ + #define BIU2400_R2HSTSLO (BIU_BLOCK+0x44) #define BIU2400_R2HSTSHI (BIU_BLOCK+0x46) #define BIU2400_HCCR (BIU_BLOCK+0x48) #define BIU2400_GPIOD (BIU_BLOCK+0x4C) #define BIU2400_GPIOE (BIU_BLOCK+0x50) #define BIU2400_HSEMA (BIU_BLOCK+0x58) /* BIU2400_FLASH_ADDR definitions */ #define BIU2400_FLASH_DFLAG (1 << 30) /* BIU2400_CSR definitions */ #define BIU2400_NVERR (1 << 18) #define BIU2400_DMA_ACTIVE (1 << 17) /* RO */ #define BIU2400_DMA_STOP (1 << 16) #define BIU2400_FUNCTION (1 << 15) /* RO */ #define BIU2400_PCIX_MODE(x) (((x) >> 8) & 0xf) /* RO */ #define BIU2400_CSR_64BIT (1 << 2) /* RO */ #define BIU2400_FLASH_ENABLE (1 << 1) #define BIU2400_SOFT_RESET (1 << 0) /* BIU2400_ICR definitions */ #define BIU2400_ICR_ENA_RISC_INT 0x8 #define BIU2400_IMASK (BIU2400_ICR_ENA_RISC_INT) /* BIU2400_ISR definitions */ #define BIU2400_ISR_RISC_INT 0x8 #define BIU2400_R2HST_INTR 
BIU_R2HST_INTR #define BIU2400_R2HST_PAUSED BIU_R2HST_PAUSED #define BIU2400_R2HST_ISTAT_MASK 0x1f /* interrupt status meanings */ #define ISP2400R2HST_ROM_MBX_OK 0x1 /* ROM mailbox cmd done ok */ #define ISP2400R2HST_ROM_MBX_FAIL 0x2 /* ROM mailbox cmd done fail */ #define ISP2400R2HST_MBX_OK 0x10 /* mailbox cmd done ok */ #define ISP2400R2HST_MBX_FAIL 0x11 /* mailbox cmd done fail */ #define ISP2400R2HST_ASYNC_EVENT 0x12 /* Async Event */ #define ISP2400R2HST_RSPQ_UPDATE 0x13 /* Response Queue Update */ #define ISP2400R2HST_ATIO_RSPQ_UPDATE 0x1C /* ATIO Response Queue Update */ #define ISP2400R2HST_ATIO_RQST_UPDATE 0x1D /* ATIO Request Queue Update */ /* BIU2400_HCCR definitions */ #define HCCR_2400_CMD_NOP 0x00000000 #define HCCR_2400_CMD_RESET 0x10000000 #define HCCR_2400_CMD_CLEAR_RESET 0x20000000 #define HCCR_2400_CMD_PAUSE 0x30000000 #define HCCR_2400_CMD_RELEASE 0x40000000 #define HCCR_2400_CMD_SET_HOST_INT 0x50000000 #define HCCR_2400_CMD_CLEAR_HOST_INT 0x60000000 #define HCCR_2400_CMD_CLEAR_RISC_INT 0xA0000000 #define HCCR_2400_RISC_ERR(x) (((x) >> 12) & 0x7) /* RO */ #define HCCR_2400_RISC2HOST_INT (1 << 6) /* RO */ #define HCCR_2400_RISC_RESET (1 << 5) /* RO */ /* * Mailbox Block Register Offsets */ #define INMAILBOX0 (MBOX_BLOCK+0x0) #define INMAILBOX1 (MBOX_BLOCK+0x2) #define INMAILBOX2 (MBOX_BLOCK+0x4) #define INMAILBOX3 (MBOX_BLOCK+0x6) #define INMAILBOX4 (MBOX_BLOCK+0x8) #define INMAILBOX5 (MBOX_BLOCK+0xA) #define INMAILBOX6 (MBOX_BLOCK+0xC) #define INMAILBOX7 (MBOX_BLOCK+0xE) #define OUTMAILBOX0 (MBOX_BLOCK+0x0) #define OUTMAILBOX1 (MBOX_BLOCK+0x2) #define OUTMAILBOX2 (MBOX_BLOCK+0x4) #define OUTMAILBOX3 (MBOX_BLOCK+0x6) #define OUTMAILBOX4 (MBOX_BLOCK+0x8) #define OUTMAILBOX5 (MBOX_BLOCK+0xA) #define OUTMAILBOX6 (MBOX_BLOCK+0xC) #define OUTMAILBOX7 (MBOX_BLOCK+0xE) /* * Strictly speaking, it's * SCSI && 2100 : 8 MBOX registers * 2200: 24 MBOX registers * 2300/2400: 32 MBOX registers */ #define MBOX_OFF(n) (MBOX_BLOCK + ((n) << 1)) #define 
NMBOX(isp) \ (((((isp)->isp_type & ISP_HA_SCSI) >= ISP_HA_SCSI_1040A) || \ ((isp)->isp_type & ISP_HA_FC))? 12 : 6) #define NMBOX_BMASK(isp) \ (((((isp)->isp_type & ISP_HA_SCSI) >= ISP_HA_SCSI_1040A) || \ ((isp)->isp_type & ISP_HA_FC))? 0xfff : 0x3f) #define MAX_MAILBOX(isp) ((IS_FC(isp))? 12 : 8) #define MAILBOX_STORAGE 12 /* if timeout == 0, then default timeout is picked */ #define MBCMD_DEFAULT_TIMEOUT 100000 /* 100 ms */ typedef struct { uint16_t param[MAILBOX_STORAGE]; uint16_t ibits; uint16_t obits; - uint32_t : 28, + uint32_t + lineno : 16, + : 12, logval : 4; uint32_t timeout; + const char *func; } mbreg_t; +#define MBSINIT(mbxp, code, loglev, timo) \ + ISP_MEMZERO((mbxp), sizeof (mbreg_t)); \ + (mbxp)->param[0] = code; \ + (mbxp)->lineno = __LINE__; \ + (mbxp)->func = __func__; \ + (mbxp)->logval = loglev; \ + (mbxp)->timeout = timo + /* * Fibre Protocol Module and Frame Buffer Register Offsets/Definitions (2X00). * NB: The RISC processor must be paused and the appropriate register * bank selected via BIU2100_CSR bits. 
*/
#define FPM_DIAG_CONFIG (BIU_BLOCK + 0x96)
#define FPM_SOFT_RESET 0x0100
#define FBM_CMD (BIU_BLOCK + 0xB8)
#define FBMCMD_FIFO_RESET_ALL 0xA000
/*
 * SXP Block Register Offsets
 */
#define SXP_PART_ID (SXP_BLOCK+0x0) /* R : Part ID Code */
#define SXP_CONFIG1 (SXP_BLOCK+0x2) /* RW*: Configuration Reg #1 */
#define SXP_CONFIG2 (SXP_BLOCK+0x4) /* RW*: Configuration Reg #2 */
#define SXP_CONFIG3 (SXP_BLOCK+0x6) /* RW*: Configuration Reg #3 */
#define SXP_INSTRUCTION (SXP_BLOCK+0xC) /* RW*: Instruction Pointer */
#define SXP_RETURN_ADDR (SXP_BLOCK+0x10) /* RW*: Return Address */
#define SXP_COMMAND (SXP_BLOCK+0x14) /* RW*: Command */
#define SXP_INTERRUPT (SXP_BLOCK+0x18) /* R : Interrupt */
#define SXP_SEQUENCE (SXP_BLOCK+0x1C) /* RW*: Sequence */
#define SXP_GROSS_ERR (SXP_BLOCK+0x1E) /* R : Gross Error */
#define SXP_EXCEPTION (SXP_BLOCK+0x20) /* RW*: Exception Enable */
#define SXP_OVERRIDE (SXP_BLOCK+0x24) /* RW*: Override */
#define SXP_LIT_BASE (SXP_BLOCK+0x28) /* RW*: Literal Base */
#define SXP_USER_FLAGS (SXP_BLOCK+0x2C) /* RW*: User Flags */
#define SXP_USER_EXCEPT (SXP_BLOCK+0x30) /* RW*: User Exception */
#define SXP_BREAKPOINT (SXP_BLOCK+0x34) /* RW*: Breakpoint */
#define SXP_SCSI_ID (SXP_BLOCK+0x40) /* RW*: SCSI ID */
#define SXP_DEV_CONFIG1 (SXP_BLOCK+0x42) /* RW*: Device Config Reg #1 */
#define SXP_DEV_CONFIG2 (SXP_BLOCK+0x44) /* RW*: Device Config Reg #2 */
#define SXP_PHASE_PTR (SXP_BLOCK+0x48) /* RW*: SCSI Phase Pointer */
#define SXP_BUF_PTR (SXP_BLOCK+0x4C) /* RW*: SCSI Buffer Pointer */
#define SXP_BUF_CTR (SXP_BLOCK+0x50) /* RW*: SCSI Buffer Counter */
#define SXP_BUFFER (SXP_BLOCK+0x52) /* RW*: SCSI Buffer */
#define SXP_BUF_BYTE (SXP_BLOCK+0x54) /* RW*: SCSI Buffer Byte */
#define SXP_BUF_WD (SXP_BLOCK+0x56) /* RW*: SCSI Buffer Word */
#define SXP_BUF_WD_TRAN (SXP_BLOCK+0x58) /* RW*: SCSI Buffer Wd xlate */
#define SXP_FIFO (SXP_BLOCK+0x5A) /* RW*: SCSI FIFO */
#define SXP_FIFO_STATUS (SXP_BLOCK+0x5C) /* RW*: SCSI FIFO Status */
#define
SXP_FIFO_TOP (SXP_BLOCK+0x5E) /* RW*: SCSI FIFO Top Resid */
#define SXP_FIFO_BOTTOM (SXP_BLOCK+0x60) /* RW*: SCSI FIFO Bot Resid */
#define SXP_TRAN_REG (SXP_BLOCK+0x64) /* RW*: SCSI Transfer Reg */
#define SXP_TRAN_CNT_LO (SXP_BLOCK+0x68) /* RW*: SCSI Trans Count */
#define SXP_TRAN_CNT_HI (SXP_BLOCK+0x6A) /* RW*: SCSI Trans Count */
#define SXP_TRAN_CTR_LO (SXP_BLOCK+0x6C) /* RW*: SCSI Trans Counter */
#define SXP_TRAN_CTR_HI (SXP_BLOCK+0x6E) /* RW*: SCSI Trans Counter */
#define SXP_ARB_DATA (SXP_BLOCK+0x70) /* R : SCSI Arb Data */
#define SXP_PINS_CTRL (SXP_BLOCK+0x72) /* RW*: SCSI Control Pins */
#define SXP_PINS_DATA (SXP_BLOCK+0x74) /* RW*: SCSI Data Pins */
#define SXP_PINS_DIFF (SXP_BLOCK+0x76) /* RW*: SCSI Diff Pins */
/* for 1080/1280/1240 only */
#define SXP_BANK1_SELECT 0x100
/* SXP CONF1 REGISTER */
#define SXP_CONF1_ASYNCH_SETUP 0xF000 /* Asynchronous setup time */
#define SXP_CONF1_SELECTION_UNIT 0x0000 /* Selection time unit */
#define SXP_CONF1_SELECTION_TIMEOUT 0x0600 /* Selection timeout */
#define SXP_CONF1_CLOCK_FACTOR 0x00E0 /* Clock factor */
#define SXP_CONF1_SCSI_ID 0x000F /* SCSI id */
/* SXP CONF2 REGISTER */
#define SXP_CONF2_DISABLE_FILTER 0x0040 /* Disable SCSI rec filters */
#define SXP_CONF2_REQ_ACK_PULLUPS 0x0020 /* Enable req/ack pullups */
#define SXP_CONF2_DATA_PULLUPS 0x0010 /* Enable data pullups */
#define SXP_CONF2_CONFIG_AUTOLOAD 0x0008 /* Enable dev conf auto-load */
#define SXP_CONF2_RESELECT 0x0002 /* Enable reselection */
#define SXP_CONF2_SELECT 0x0001 /* Enable selection */
/* SXP INTERRUPT REGISTER */
#define SXP_INT_PARITY_ERR 0x8000 /* Parity error detected */
#define SXP_INT_GROSS_ERR 0x4000 /* Gross error detected */
#define SXP_INT_FUNCTION_ABORT 0x2000 /* Last cmd aborted */
#define SXP_INT_CONDITION_FAILED 0x1000 /* Last cond failed test */
#define SXP_INT_FIFO_EMPTY 0x0800 /* SCSI FIFO is empty */
#define SXP_INT_BUF_COUNTER_ZERO 0x0400 /* SCSI buf count == zero */
#define SXP_INT_XFER_ZERO 0x0200 /* SCSI
trans count == zero */ #define SXP_INT_INT_PENDING 0x0080 /* SXP interrupt pending */ #define SXP_INT_CMD_RUNNING 0x0040 /* SXP is running a command */ #define SXP_INT_INT_RETURN_CODE 0x000F /* Interrupt return code */ /* SXP GROSS ERROR REGISTER */ #define SXP_GROSS_OFFSET_RESID 0x0040 /* Req/Ack offset not zero */ #define SXP_GROSS_OFFSET_UNDERFLOW 0x0020 /* Req/Ack offset underflow */ #define SXP_GROSS_OFFSET_OVERFLOW 0x0010 /* Req/Ack offset overflow */ #define SXP_GROSS_FIFO_UNDERFLOW 0x0008 /* SCSI FIFO underflow */ #define SXP_GROSS_FIFO_OVERFLOW 0x0004 /* SCSI FIFO overflow */ #define SXP_GROSS_WRITE_ERR 0x0002 /* SXP and RISC wrote to reg */ #define SXP_GROSS_ILLEGAL_INST 0x0001 /* Bad inst loaded into SXP */ /* SXP EXCEPTION REGISTER */ #define SXP_EXCEPT_USER_0 0x8000 /* Enable user exception #0 */ #define SXP_EXCEPT_USER_1 0x4000 /* Enable user exception #1 */ #define PCI_SXP_EXCEPT_SCAM 0x0400 /* SCAM Selection enable */ #define SXP_EXCEPT_BUS_FREE 0x0200 /* Enable Bus Free det */ #define SXP_EXCEPT_TARGET_ATN 0x0100 /* Enable TGT mode atten det */ #define SXP_EXCEPT_RESELECTED 0x0080 /* Enable ReSEL exc handling */ #define SXP_EXCEPT_SELECTED 0x0040 /* Enable SEL exc handling */ #define SXP_EXCEPT_ARBITRATION 0x0020 /* Enable ARB exc handling */ #define SXP_EXCEPT_GROSS_ERR 0x0010 /* Enable gross error except */ #define SXP_EXCEPT_BUS_RESET 0x0008 /* Enable Bus Reset except */ /* SXP OVERRIDE REGISTER */ #define SXP_ORIDE_EXT_TRIGGER 0x8000 /* Enable external trigger */ #define SXP_ORIDE_STEP 0x4000 /* Enable single step mode */ #define SXP_ORIDE_BREAKPOINT 0x2000 /* Enable breakpoint reg */ #define SXP_ORIDE_PIN_WRITE 0x1000 /* Enable write to SCSI pins */ #define SXP_ORIDE_FORCE_OUTPUTS 0x0800 /* Force SCSI outputs on */ #define SXP_ORIDE_LOOPBACK 0x0400 /* Enable SCSI loopback mode */ #define SXP_ORIDE_PARITY_TEST 0x0200 /* Enable parity test mode */ #define SXP_ORIDE_TRISTATE_ENA_PINS 0x0100 /* Tristate SCSI enable pins */ #define 
SXP_ORIDE_TRISTATE_PINS 0x0080 /* Tristate SCSI pins */
#define SXP_ORIDE_FIFO_RESET 0x0008 /* Reset SCSI FIFO */
#define SXP_ORIDE_CMD_TERMINATE 0x0004 /* Terminate cur SXP com */
#define SXP_ORIDE_RESET_REG 0x0002 /* Reset SXP registers */
#define SXP_ORIDE_RESET_MODULE 0x0001 /* Reset SXP module */
/* SXP COMMANDS */
#define SXP_RESET_BUS_CMD 0x300b
/* SXP SCSI ID REGISTER */
#define SXP_SELECTING_ID 0x0F00 /* (Re)Selecting id */
#define SXP_SELECT_ID 0x000F /* Select id */
/* SXP DEV CONFIG1 REGISTER */
#define SXP_DCONF1_SYNC_HOLD 0x7000 /* Synchronous data hold */
#define SXP_DCONF1_SYNC_SETUP 0x0F00 /* Synchronous data setup */
#define SXP_DCONF1_SYNC_OFFSET 0x000F /* Synchronous data offset */
/* SXP DEV CONFIG2 REGISTER */
#define SXP_DCONF2_FLAGS_MASK 0xF000 /* Device flags */
#define SXP_DCONF2_WIDE 0x0400 /* Enable wide SCSI */
#define SXP_DCONF2_PARITY 0x0200 /* Enable parity checking */
#define SXP_DCONF2_BLOCK_MODE 0x0100 /* Enable blk mode xfr count */
#define SXP_DCONF2_ASSERTION_MASK 0x0007 /* Assertion period mask */
/* SXP PHASE POINTER REGISTER */
#define SXP_PHASE_STATUS_PTR 0x1000 /* Status buffer offset */
#define SXP_PHASE_MSG_IN_PTR 0x0700 /* Msg in buffer offset */
#define SXP_PHASE_COM_PTR 0x00F0 /* Command buffer offset */
#define SXP_PHASE_MSG_OUT_PTR 0x0007 /* Msg out buffer offset */
/* SXP FIFO STATUS REGISTER */
#define SXP_FIFO_TOP_RESID 0x8000 /* Top residue reg full */
#define SXP_FIFO_ACK_RESID 0x4000 /* Wide transfers odd resid */
#define SXP_FIFO_COUNT_MASK 0x001C /* Words in SXP FIFO */
#define SXP_FIFO_BOTTOM_RESID 0x0001 /* Bottom residue reg full */
/* SXP CONTROL PINS REGISTER */
#define SXP_PINS_CON_PHASE 0x8000 /* Scsi phase valid */
#define SXP_PINS_CON_PARITY_HI 0x0400 /* Parity pin */
#define SXP_PINS_CON_PARITY_LO 0x0200 /* Parity pin */
#define SXP_PINS_CON_REQ 0x0100 /* SCSI bus REQUEST */
#define SXP_PINS_CON_ACK 0x0080 /* SCSI bus ACKNOWLEDGE */
#define SXP_PINS_CON_RST 0x0040 /* SCSI bus RESET */
#define
SXP_PINS_CON_BSY 0x0020 /* SCSI bus BUSY */ #define SXP_PINS_CON_SEL 0x0010 /* SCSI bus SELECT */ #define SXP_PINS_CON_ATN 0x0008 /* SCSI bus ATTENTION */ #define SXP_PINS_CON_MSG 0x0004 /* SCSI bus MESSAGE */ #define SXP_PINS_CON_CD 0x0002 /* SCSI bus COMMAND */ #define SXP_PINS_CON_IO 0x0001 /* SCSI bus INPUT */ /* * Set the hold time for the SCSI Bus Reset to be 250 ms */ #define SXP_SCSI_BUS_RESET_HOLD_TIME 250 /* SXP DIFF PINS REGISTER */ #define SXP_PINS_DIFF_SENSE 0x0200 /* DIFFSENS sig on SCSI bus */ #define SXP_PINS_DIFF_MODE 0x0100 /* DIFFM signal */ #define SXP_PINS_DIFF_ENABLE_OUTPUT 0x0080 /* Enable SXP SCSI data drv */ #define SXP_PINS_DIFF_PINS_MASK 0x007C /* Differential control pins */ #define SXP_PINS_DIFF_TARGET 0x0002 /* Enable SXP target mode */ #define SXP_PINS_DIFF_INITIATOR 0x0001 /* Enable SXP initiator mode */ /* Ultra2 only */ #define SXP_PINS_LVD_MODE 0x1000 #define SXP_PINS_HVD_MODE 0x0800 #define SXP_PINS_SE_MODE 0x0400 /* The above have to be put together with the DIFFM pin to make sense */ #define ISP1080_LVD_MODE (SXP_PINS_LVD_MODE) #define ISP1080_HVD_MODE (SXP_PINS_HVD_MODE|SXP_PINS_DIFF_MODE) #define ISP1080_SE_MODE (SXP_PINS_SE_MODE) #define ISP1080_MODE_MASK \ (SXP_PINS_LVD_MODE|SXP_PINS_HVD_MODE|SXP_PINS_SE_MODE|SXP_PINS_DIFF_MODE) /* * RISC and Host Command and Control Block Register Offsets */ #define RISC_ACC RISC_BLOCK+0x0 /* RW*: Accumulator */ #define RISC_R1 RISC_BLOCK+0x2 /* RW*: GP Reg R1 */ #define RISC_R2 RISC_BLOCK+0x4 /* RW*: GP Reg R2 */ #define RISC_R3 RISC_BLOCK+0x6 /* RW*: GP Reg R3 */ #define RISC_R4 RISC_BLOCK+0x8 /* RW*: GP Reg R4 */ #define RISC_R5 RISC_BLOCK+0xA /* RW*: GP Reg R5 */ #define RISC_R6 RISC_BLOCK+0xC /* RW*: GP Reg R6 */ #define RISC_R7 RISC_BLOCK+0xE /* RW*: GP Reg R7 */ #define RISC_R8 RISC_BLOCK+0x10 /* RW*: GP Reg R8 */ #define RISC_R9 RISC_BLOCK+0x12 /* RW*: GP Reg R9 */ #define RISC_R10 RISC_BLOCK+0x14 /* RW*: GP Reg R10 */ #define RISC_R11 RISC_BLOCK+0x16 /* RW*: GP Reg R11 */ #define 
RISC_R12 RISC_BLOCK+0x18 /* RW*: GP Reg R12 */ #define RISC_R13 RISC_BLOCK+0x1a /* RW*: GP Reg R13 */ #define RISC_R14 RISC_BLOCK+0x1c /* RW*: GP Reg R14 */ #define RISC_R15 RISC_BLOCK+0x1e /* RW*: GP Reg R15 */ #define RISC_PSR RISC_BLOCK+0x20 /* RW*: Processor Status */ #define RISC_IVR RISC_BLOCK+0x22 /* RW*: Interrupt Vector */ #define RISC_PCR RISC_BLOCK+0x24 /* RW*: Processor Ctrl */ #define RISC_RAR0 RISC_BLOCK+0x26 /* RW*: Ram Address #0 */ #define RISC_RAR1 RISC_BLOCK+0x28 /* RW*: Ram Address #1 */ #define RISC_LCR RISC_BLOCK+0x2a /* RW*: Loop Counter */ #define RISC_PC RISC_BLOCK+0x2c /* R : Program Counter */ #define RISC_MTR RISC_BLOCK+0x2e /* RW*: Memory Timing */ #define RISC_MTR2100 RISC_BLOCK+0x30 #define RISC_EMB RISC_BLOCK+0x30 /* RW*: Ext Mem Boundary */ #define DUAL_BANK 8 #define RISC_SP RISC_BLOCK+0x32 /* RW*: Stack Pointer */ #define RISC_HRL RISC_BLOCK+0x3e /* R *: Hardware Rev Level */ #define HCCR RISC_BLOCK+0x40 /* RW : Host Command & Ctrl */ #define BP0 RISC_BLOCK+0x42 /* RW : Processor Brkpt #0 */ #define BP1 RISC_BLOCK+0x44 /* RW : Processor Brkpt #1 */ #define TCR RISC_BLOCK+0x46 /* W : Test Control */ #define TMR RISC_BLOCK+0x48 /* W : Test Mode */ /* PROCESSOR STATUS REGISTER */ #define RISC_PSR_FORCE_TRUE 0x8000 #define RISC_PSR_LOOP_COUNT_DONE 0x4000 #define RISC_PSR_RISC_INT 0x2000 #define RISC_PSR_TIMER_ROLLOVER 0x1000 #define RISC_PSR_ALU_OVERFLOW 0x0800 #define RISC_PSR_ALU_MSB 0x0400 #define RISC_PSR_ALU_CARRY 0x0200 #define RISC_PSR_ALU_ZERO 0x0100 #define RISC_PSR_PCI_ULTRA 0x0080 #define RISC_PSR_SBUS_ULTRA 0x0020 #define RISC_PSR_DMA_INT 0x0010 #define RISC_PSR_SXP_INT 0x0008 #define RISC_PSR_HOST_INT 0x0004 #define RISC_PSR_INT_PENDING 0x0002 #define RISC_PSR_FORCE_FALSE 0x0001 /* Host Command and Control */ #define HCCR_CMD_NOP 0x0000 /* NOP */ #define HCCR_CMD_RESET 0x1000 /* Reset RISC */ #define HCCR_CMD_PAUSE 0x2000 /* Pause RISC */ #define HCCR_CMD_RELEASE 0x3000 /* Release Paused RISC */ #define HCCR_CMD_STEP 
0x4000 /* Single Step RISC */ #define HCCR_2X00_DISABLE_PARITY_PAUSE 0x4001 /* * Disable RISC pause on FPM * parity error. */ #define HCCR_CMD_SET_HOST_INT 0x5000 /* Set Host Interrupt */ #define HCCR_CMD_CLEAR_HOST_INT 0x6000 /* Clear Host Interrupt */ #define HCCR_CMD_CLEAR_RISC_INT 0x7000 /* Clear RISC interrupt */ #define HCCR_CMD_BREAKPOINT 0x8000 /* Change breakpoint enables */ #define PCI_HCCR_CMD_BIOS 0x9000 /* Write BIOS (disable) */ #define PCI_HCCR_CMD_PARITY 0xA000 /* Write parity enable */ #define PCI_HCCR_CMD_PARITY_ERR 0xE000 /* Generate parity error */ #define HCCR_CMD_TEST_MODE 0xF000 /* Set Test Mode */ #define ISP2100_HCCR_PARITY_ENABLE_2 0x0400 #define ISP2100_HCCR_PARITY_ENABLE_1 0x0200 #define ISP2100_HCCR_PARITY_ENABLE_0 0x0100 #define ISP2100_HCCR_PARITY 0x0001 #define PCI_HCCR_PARITY 0x0400 /* Parity error flag */ #define PCI_HCCR_PARITY_ENABLE_1 0x0200 /* Parity enable bank 1 */ #define PCI_HCCR_PARITY_ENABLE_0 0x0100 /* Parity enable bank 0 */ #define HCCR_HOST_INT 0x0080 /* R : Host interrupt set */ #define HCCR_RESET 0x0040 /* R : reset in progress */ #define HCCR_PAUSE 0x0020 /* R : RISC paused */ #define PCI_HCCR_BIOS 0x0001 /* W : BIOS enable */ /* * Defines for Interrupts */ #define ISP_INTS_ENABLED(isp) \ ((IS_SCSI(isp))? \ (ISP_READ(isp, BIU_ICR) & BIU_IMASK) : \ (IS_24XX(isp)? (ISP_READ(isp, BIU2400_ICR) & BIU2400_IMASK) : \ (ISP_READ(isp, BIU_ICR) & BIU2100_IMASK))) #define ISP_ENABLE_INTS(isp) \ (IS_SCSI(isp) ? \ ISP_WRITE(isp, BIU_ICR, BIU_IMASK) : \ (IS_24XX(isp) ? \ (ISP_WRITE(isp, BIU2400_ICR, BIU2400_IMASK)) : \ (ISP_WRITE(isp, BIU_ICR, BIU2100_IMASK)))) #define ISP_DISABLE_INTS(isp) \ IS_24XX(isp)? ISP_WRITE(isp, BIU2400_ICR, 0) : ISP_WRITE(isp, BIU_ICR, 0) /* * NVRAM Definitions (PCI cards only) */ #define ISPBSMX(c, byte, shift, mask) \ (((c)[(byte)] >> (shift)) & (mask)) /* * Qlogic 1020/1040 NVRAM is an array of 128 bytes. 
* * Some portion of the front of this is for general host adapter properties * This is followed by an array of per-target parameters, and is tailed off * with a checksum xor byte at offset 127. For non-byte entities data is * stored in Little Endian order. */ #define ISP_NVRAM_SIZE 128 #define ISP_NVRAM_VERSION(c) (c)[4] #define ISP_NVRAM_FIFO_THRESHOLD(c) ISPBSMX(c, 5, 0, 0x03) #define ISP_NVRAM_BIOS_DISABLE(c) ISPBSMX(c, 5, 2, 0x01) #define ISP_NVRAM_HBA_ENABLE(c) ISPBSMX(c, 5, 3, 0x01) #define ISP_NVRAM_INITIATOR_ID(c) ISPBSMX(c, 5, 4, 0x0f) #define ISP_NVRAM_BUS_RESET_DELAY(c) (c)[6] #define ISP_NVRAM_BUS_RETRY_COUNT(c) (c)[7] #define ISP_NVRAM_BUS_RETRY_DELAY(c) (c)[8] #define ISP_NVRAM_ASYNC_DATA_SETUP_TIME(c) ISPBSMX(c, 9, 0, 0x0f) #define ISP_NVRAM_REQ_ACK_ACTIVE_NEGATION(c) ISPBSMX(c, 9, 4, 0x01) #define ISP_NVRAM_DATA_LINE_ACTIVE_NEGATION(c) ISPBSMX(c, 9, 5, 0x01) #define ISP_NVRAM_DATA_DMA_BURST_ENABLE(c) ISPBSMX(c, 9, 6, 0x01) #define ISP_NVRAM_CMD_DMA_BURST_ENABLE(c) ISPBSMX(c, 9, 7, 0x01) #define ISP_NVRAM_TAG_AGE_LIMIT(c) (c)[10] #define ISP_NVRAM_LOWTRM_ENABLE(c) ISPBSMX(c, 11, 0, 0x01) #define ISP_NVRAM_HITRM_ENABLE(c) ISPBSMX(c, 11, 1, 0x01) #define ISP_NVRAM_PCMC_BURST_ENABLE(c) ISPBSMX(c, 11, 2, 0x01) #define ISP_NVRAM_ENABLE_60_MHZ(c) ISPBSMX(c, 11, 3, 0x01) #define ISP_NVRAM_SCSI_RESET_DISABLE(c) ISPBSMX(c, 11, 4, 0x01) #define ISP_NVRAM_ENABLE_AUTO_TERM(c) ISPBSMX(c, 11, 5, 0x01) #define ISP_NVRAM_FIFO_THRESHOLD_128(c) ISPBSMX(c, 11, 6, 0x01) #define ISP_NVRAM_AUTO_TERM_SUPPORT(c) ISPBSMX(c, 11, 7, 0x01) #define ISP_NVRAM_SELECTION_TIMEOUT(c) (((c)[12]) | ((c)[13] << 8)) #define ISP_NVRAM_MAX_QUEUE_DEPTH(c) (((c)[14]) | ((c)[15] << 8)) #define ISP_NVRAM_SCSI_BUS_SIZE(c) ISPBSMX(c, 16, 0, 0x01) #define ISP_NVRAM_SCSI_BUS_TYPE(c) ISPBSMX(c, 16, 1, 0x01) #define ISP_NVRAM_ADAPTER_CLK_SPEED(c) ISPBSMX(c, 16, 2, 0x01) #define ISP_NVRAM_SOFT_TERM_SUPPORT(c) ISPBSMX(c, 16, 3, 0x01) #define ISP_NVRAM_FLASH_ONBOARD(c) ISPBSMX(c, 16, 4, 0x01) #define 
ISP_NVRAM_FAST_MTTR_ENABLE(c) ISPBSMX(c, 22, 0, 0x01) #define ISP_NVRAM_TARGOFF 28 #define ISP_NVRAM_TARGSIZE 6 #define _IxT(tgt, tidx) \ (ISP_NVRAM_TARGOFF + (ISP_NVRAM_TARGSIZE * (tgt)) + (tidx)) #define ISP_NVRAM_TGT_RENEG(c, t) ISPBSMX(c, _IxT(t, 0), 0, 0x01) #define ISP_NVRAM_TGT_QFRZ(c, t) ISPBSMX(c, _IxT(t, 0), 1, 0x01) #define ISP_NVRAM_TGT_ARQ(c, t) ISPBSMX(c, _IxT(t, 0), 2, 0x01) #define ISP_NVRAM_TGT_TQING(c, t) ISPBSMX(c, _IxT(t, 0), 3, 0x01) #define ISP_NVRAM_TGT_SYNC(c, t) ISPBSMX(c, _IxT(t, 0), 4, 0x01) #define ISP_NVRAM_TGT_WIDE(c, t) ISPBSMX(c, _IxT(t, 0), 5, 0x01) #define ISP_NVRAM_TGT_PARITY(c, t) ISPBSMX(c, _IxT(t, 0), 6, 0x01) #define ISP_NVRAM_TGT_DISC(c, t) ISPBSMX(c, _IxT(t, 0), 7, 0x01) #define ISP_NVRAM_TGT_EXEC_THROTTLE(c, t) ISPBSMX(c, _IxT(t, 1), 0, 0xff) #define ISP_NVRAM_TGT_SYNC_PERIOD(c, t) ISPBSMX(c, _IxT(t, 2), 0, 0xff) #define ISP_NVRAM_TGT_SYNC_OFFSET(c, t) ISPBSMX(c, _IxT(t, 3), 0, 0x0f) #define ISP_NVRAM_TGT_DEVICE_ENABLE(c, t) ISPBSMX(c, _IxT(t, 3), 4, 0x01) #define ISP_NVRAM_TGT_LUN_DISABLE(c, t) ISPBSMX(c, _IxT(t, 3), 5, 0x01) /* * Qlogic 1080/1240 NVRAM is an array of 256 bytes. * * Some portion of the front of this is for general host adapter properties * This is followed by an array of per-target parameters, and is tailed off * with a checksum xor byte at offset 256. For non-byte entities data is * stored in Little Endian order. 
*/ #define ISP1080_NVRAM_SIZE 256 #define ISP1080_NVRAM_VERSION(c) ISP_NVRAM_VERSION(c) /* Offset 5 */ /* uint8_t bios_configuration_mode :2; uint8_t bios_disable :1; uint8_t selectable_scsi_boot_enable :1; uint8_t cd_rom_boot_enable :1; uint8_t disable_loading_risc_code :1; uint8_t enable_64bit_addressing :1; uint8_t unused_7 :1; */ /* Offsets 6, 7 */ /* uint8_t boot_lun_number :5; uint8_t scsi_bus_number :1; uint8_t unused_6 :1; uint8_t unused_7 :1; uint8_t boot_target_number :4; uint8_t unused_12 :1; uint8_t unused_13 :1; uint8_t unused_14 :1; uint8_t unused_15 :1; */ #define ISP1080_NVRAM_HBA_ENABLE(c) ISPBSMX(c, 16, 3, 0x01) #define ISP1080_NVRAM_BURST_ENABLE(c) ISPBSMX(c, 16, 1, 0x01) #define ISP1080_NVRAM_FIFO_THRESHOLD(c) ISPBSMX(c, 16, 4, 0x0f) #define ISP1080_NVRAM_AUTO_TERM_SUPPORT(c) ISPBSMX(c, 17, 7, 0x01) #define ISP1080_NVRAM_BUS0_TERM_MODE(c) ISPBSMX(c, 17, 0, 0x03) #define ISP1080_NVRAM_BUS1_TERM_MODE(c) ISPBSMX(c, 17, 2, 0x03) #define ISP1080_ISP_PARAMETER(c) \ (((c)[18]) | ((c)[19] << 8)) #define ISP1080_FAST_POST(c) ISPBSMX(c, 20, 0, 0x01) #define ISP1080_REPORT_LVD_TRANSITION(c) ISPBSMX(c, 20, 1, 0x01) #define ISP1080_BUS1_OFF 112 #define ISP1080_NVRAM_INITIATOR_ID(c, b) \ ISPBSMX(c, ((b == 0)? 0 : ISP1080_BUS1_OFF) + 24, 0, 0x0f) #define ISP1080_NVRAM_BUS_RESET_DELAY(c, b) \ (c)[((b == 0)? 0 : ISP1080_BUS1_OFF) + 25] #define ISP1080_NVRAM_BUS_RETRY_COUNT(c, b) \ (c)[((b == 0)? 0 : ISP1080_BUS1_OFF) + 26] #define ISP1080_NVRAM_BUS_RETRY_DELAY(c, b) \ (c)[((b == 0)? 0 : ISP1080_BUS1_OFF) + 27] #define ISP1080_NVRAM_ASYNC_DATA_SETUP_TIME(c, b) \ ISPBSMX(c, ((b == 0)? 0 : ISP1080_BUS1_OFF) + 28, 0, 0x0f) #define ISP1080_NVRAM_REQ_ACK_ACTIVE_NEGATION(c, b) \ ISPBSMX(c, ((b == 0)? 0 : ISP1080_BUS1_OFF) + 28, 4, 0x01) #define ISP1080_NVRAM_DATA_LINE_ACTIVE_NEGATION(c, b) \ ISPBSMX(c, ((b == 0)? 0 : ISP1080_BUS1_OFF) + 28, 5, 0x01) #define ISP1080_NVRAM_SELECTION_TIMEOUT(c, b) \ (((c)[((b == 0)? 0 : ISP1080_BUS1_OFF) + 30]) | \ ((c)[((b == 0)? 
0 : ISP1080_BUS1_OFF) + 31] << 8))
#define ISP1080_NVRAM_MAX_QUEUE_DEPTH(c, b) \
	(((c)[((b == 0)? 0 : ISP1080_BUS1_OFF) + 32]) | \
	((c)[((b == 0)? 0 : ISP1080_BUS1_OFF) + 33] << 8))
#define ISP1080_NVRAM_TARGOFF(b) \
	((b == 0)? 40: (40 + ISP1080_BUS1_OFF))
#define ISP1080_NVRAM_TARGSIZE 6
#define _IxT8(tgt, tidx, b) \
	(ISP1080_NVRAM_TARGOFF((b)) + (ISP1080_NVRAM_TARGSIZE * (tgt)) + (tidx))
#define ISP1080_NVRAM_TGT_RENEG(c, t, b) \
	ISPBSMX(c, _IxT8(t, 0, (b)), 0, 0x01)
#define ISP1080_NVRAM_TGT_QFRZ(c, t, b) \
	ISPBSMX(c, _IxT8(t, 0, (b)), 1, 0x01)
#define ISP1080_NVRAM_TGT_ARQ(c, t, b) \
	ISPBSMX(c, _IxT8(t, 0, (b)), 2, 0x01)
#define ISP1080_NVRAM_TGT_TQING(c, t, b) \
	ISPBSMX(c, _IxT8(t, 0, (b)), 3, 0x01)
#define ISP1080_NVRAM_TGT_SYNC(c, t, b) \
	ISPBSMX(c, _IxT8(t, 0, (b)), 4, 0x01)
#define ISP1080_NVRAM_TGT_WIDE(c, t, b) \
	ISPBSMX(c, _IxT8(t, 0, (b)), 5, 0x01)
#define ISP1080_NVRAM_TGT_PARITY(c, t, b) \
	ISPBSMX(c, _IxT8(t, 0, (b)), 6, 0x01)
#define ISP1080_NVRAM_TGT_DISC(c, t, b) \
	ISPBSMX(c, _IxT8(t, 0, (b)), 7, 0x01)
#define ISP1080_NVRAM_TGT_EXEC_THROTTLE(c, t, b) \
	ISPBSMX(c, _IxT8(t, 1, (b)), 0, 0xff)
#define ISP1080_NVRAM_TGT_SYNC_PERIOD(c, t, b) \
	ISPBSMX(c, _IxT8(t, 2, (b)), 0, 0xff)
#define ISP1080_NVRAM_TGT_SYNC_OFFSET(c, t, b) \
	ISPBSMX(c, _IxT8(t, 3, (b)), 0, 0x0f)
#define ISP1080_NVRAM_TGT_DEVICE_ENABLE(c, t, b) \
	ISPBSMX(c, _IxT8(t, 3, (b)), 4, 0x01)
#define ISP1080_NVRAM_TGT_LUN_DISABLE(c, t, b) \
	ISPBSMX(c, _IxT8(t, 3, (b)), 5, 0x01)
#define ISP12160_NVRAM_HBA_ENABLE ISP1080_NVRAM_HBA_ENABLE
#define ISP12160_NVRAM_BURST_ENABLE ISP1080_NVRAM_BURST_ENABLE
#define ISP12160_NVRAM_FIFO_THRESHOLD ISP1080_NVRAM_FIFO_THRESHOLD
#define ISP12160_NVRAM_AUTO_TERM_SUPPORT ISP1080_NVRAM_AUTO_TERM_SUPPORT
#define ISP12160_NVRAM_BUS0_TERM_MODE ISP1080_NVRAM_BUS0_TERM_MODE
#define ISP12160_NVRAM_BUS1_TERM_MODE ISP1080_NVRAM_BUS1_TERM_MODE
/* was self-referential (ISP12160_ISP_PARAMETER); alias the 1080 macro like its siblings */
#define ISP12160_ISP_PARAMETER ISP1080_ISP_PARAMETER
#define ISP12160_FAST_POST ISP1080_FAST_POST
#define
ISP12160_REPORT_LVD_TRANSITION ISP1080_REPORT_LVD_TRANSTION #define ISP12160_NVRAM_INITIATOR_ID \ ISP1080_NVRAM_INITIATOR_ID #define ISP12160_NVRAM_BUS_RESET_DELAY \ ISP1080_NVRAM_BUS_RESET_DELAY #define ISP12160_NVRAM_BUS_RETRY_COUNT \ ISP1080_NVRAM_BUS_RETRY_COUNT #define ISP12160_NVRAM_BUS_RETRY_DELAY \ ISP1080_NVRAM_BUS_RETRY_DELAY #define ISP12160_NVRAM_ASYNC_DATA_SETUP_TIME \ ISP1080_NVRAM_ASYNC_DATA_SETUP_TIME #define ISP12160_NVRAM_REQ_ACK_ACTIVE_NEGATION \ ISP1080_NVRAM_REQ_ACK_ACTIVE_NEGATION #define ISP12160_NVRAM_DATA_LINE_ACTIVE_NEGATION \ ISP1080_NVRAM_DATA_LINE_ACTIVE_NEGATION #define ISP12160_NVRAM_SELECTION_TIMEOUT \ ISP1080_NVRAM_SELECTION_TIMEOUT #define ISP12160_NVRAM_MAX_QUEUE_DEPTH \ ISP1080_NVRAM_MAX_QUEUE_DEPTH #define ISP12160_BUS0_OFF 24 #define ISP12160_BUS1_OFF 136 #define ISP12160_NVRAM_TARGOFF(b) \ (((b == 0)? ISP12160_BUS0_OFF : ISP12160_BUS1_OFF) + 16) #define ISP12160_NVRAM_TARGSIZE 6 #define _IxT16(tgt, tidx, b) \ (ISP12160_NVRAM_TARGOFF((b))+(ISP12160_NVRAM_TARGSIZE * (tgt))+(tidx)) #define ISP12160_NVRAM_TGT_RENEG(c, t, b) \ ISPBSMX(c, _IxT16(t, 0, (b)), 0, 0x01) #define ISP12160_NVRAM_TGT_QFRZ(c, t, b) \ ISPBSMX(c, _IxT16(t, 0, (b)), 1, 0x01) #define ISP12160_NVRAM_TGT_ARQ(c, t, b) \ ISPBSMX(c, _IxT16(t, 0, (b)), 2, 0x01) #define ISP12160_NVRAM_TGT_TQING(c, t, b) \ ISPBSMX(c, _IxT16(t, 0, (b)), 3, 0x01) #define ISP12160_NVRAM_TGT_SYNC(c, t, b) \ ISPBSMX(c, _IxT16(t, 0, (b)), 4, 0x01) #define ISP12160_NVRAM_TGT_WIDE(c, t, b) \ ISPBSMX(c, _IxT16(t, 0, (b)), 5, 0x01) #define ISP12160_NVRAM_TGT_PARITY(c, t, b) \ ISPBSMX(c, _IxT16(t, 0, (b)), 6, 0x01) #define ISP12160_NVRAM_TGT_DISC(c, t, b) \ ISPBSMX(c, _IxT16(t, 0, (b)), 7, 0x01) #define ISP12160_NVRAM_TGT_EXEC_THROTTLE(c, t, b) \ ISPBSMX(c, _IxT16(t, 1, (b)), 0, 0xff) #define ISP12160_NVRAM_TGT_SYNC_PERIOD(c, t, b) \ ISPBSMX(c, _IxT16(t, 2, (b)), 0, 0xff) #define ISP12160_NVRAM_TGT_SYNC_OFFSET(c, t, b) \ ISPBSMX(c, _IxT16(t, 3, (b)), 0, 0x1f) #define 
ISP12160_NVRAM_TGT_DEVICE_ENABLE(c, t, b) \ ISPBSMX(c, _IxT16(t, 3, (b)), 5, 0x01) #define ISP12160_NVRAM_PPR_OPTIONS(c, t, b) \ ISPBSMX(c, _IxT16(t, 4, (b)), 0, 0x0f) #define ISP12160_NVRAM_PPR_WIDTH(c, t, b) \ ISPBSMX(c, _IxT16(t, 4, (b)), 4, 0x03) #define ISP12160_NVRAM_PPR_ENABLE(c, t, b) \ ISPBSMX(c, _IxT16(t, 4, (b)), 7, 0x01) /* * Qlogic 2100 thru 2300 NVRAM is an array of 256 bytes. * * Some portion of the front of this is for general RISC engine parameters, * mostly reflecting the state of the last INITIALIZE FIRMWARE mailbox command. * * This is followed by some general host adapter parameters, and ends with * a checksum xor byte at offset 255. For non-byte entities data is stored * in Little Endian order. */ #define ISP2100_NVRAM_SIZE 256 /* ISP_NVRAM_VERSION is in same overall place */ #define ISP2100_NVRAM_RISCVER(c) (c)[6] #define ISP2100_NVRAM_OPTIONS(c) ((c)[8] | ((c)[9] << 8)) #define ISP2100_NVRAM_MAXFRAMELENGTH(c) (((c)[10]) | ((c)[11] << 8)) #define ISP2100_NVRAM_MAXIOCBALLOCATION(c) (((c)[12]) | ((c)[13] << 8)) #define ISP2100_NVRAM_EXECUTION_THROTTLE(c) (((c)[14]) | ((c)[15] << 8)) #define ISP2100_NVRAM_RETRY_COUNT(c) (c)[16] #define ISP2100_NVRAM_RETRY_DELAY(c) (c)[17] #define ISP2100_NVRAM_PORT_NAME(c) (\ (((uint64_t)(c)[18]) << 56) | \ (((uint64_t)(c)[19]) << 48) | \ (((uint64_t)(c)[20]) << 40) | \ (((uint64_t)(c)[21]) << 32) | \ (((uint64_t)(c)[22]) << 24) | \ (((uint64_t)(c)[23]) << 16) | \ (((uint64_t)(c)[24]) << 8) | \ (((uint64_t)(c)[25]) << 0)) #define ISP2100_NVRAM_HARDLOOPID(c) ((c)[26] | ((c)[27] << 8)) #define ISP2100_NVRAM_TOV(c) ((c)[29]) #define ISP2100_NVRAM_NODE_NAME(c) (\ (((uint64_t)(c)[30]) << 56) | \ (((uint64_t)(c)[31]) << 48) | \ (((uint64_t)(c)[32]) << 40) | \ (((uint64_t)(c)[33]) << 32) | \ (((uint64_t)(c)[34]) << 24) | \ (((uint64_t)(c)[35]) << 16) | \ (((uint64_t)(c)[36]) << 8) | \ (((uint64_t)(c)[37]) << 0)) #define ISP2100_XFW_OPTIONS(c) ((c)[38] | ((c)[39] << 8)) #define ISP2100_RACC_TIMER(c) (c)[40] #define 
ISP2100_IDELAY_TIMER(c) (c)[41] #define ISP2100_ZFW_OPTIONS(c) ((c)[42] | ((c)[43] << 8)) #define ISP2100_SERIAL_LINK(c) ((c)[68] | ((c)[69] << 8)) #define ISP2100_NVRAM_HBA_OPTIONS(c) ((c)[70] | ((c)[71] << 8)) #define ISP2100_NVRAM_HBA_DISABLE(c) ISPBSMX(c, 70, 0, 0x01) #define ISP2100_NVRAM_BIOS_DISABLE(c) ISPBSMX(c, 70, 1, 0x01) #define ISP2100_NVRAM_LUN_DISABLE(c) ISPBSMX(c, 70, 2, 0x01) #define ISP2100_NVRAM_ENABLE_SELECT_BOOT(c) ISPBSMX(c, 70, 3, 0x01) #define ISP2100_NVRAM_DISABLE_CODELOAD(c) ISPBSMX(c, 70, 4, 0x01) #define ISP2100_NVRAM_SET_CACHELINESZ(c) ISPBSMX(c, 70, 5, 0x01) #define ISP2100_NVRAM_BOOT_NODE_NAME(c) (\ (((uint64_t)(c)[72]) << 56) | \ (((uint64_t)(c)[73]) << 48) | \ (((uint64_t)(c)[74]) << 40) | \ (((uint64_t)(c)[75]) << 32) | \ (((uint64_t)(c)[76]) << 24) | \ (((uint64_t)(c)[77]) << 16) | \ (((uint64_t)(c)[78]) << 8) | \ (((uint64_t)(c)[79]) << 0)) #define ISP2100_NVRAM_BOOT_LUN(c) (c)[80] #define ISP2100_RESET_DELAY(c) (c)[81] #define ISP2100_HBA_FEATURES(c) ((c)[232] | ((c)[233] << 8)) /* * Qlogic 2400 NVRAM is an array of 512 bytes with a 32 bit checksum. 
*/ #define ISP2400_NVRAM_PORT0_ADDR 0x80 #define ISP2400_NVRAM_PORT1_ADDR 0x180 #define ISP2400_NVRAM_SIZE 512 #define ISP2400_NVRAM_VERSION(c) ((c)[4] | ((c)[5] << 8)) #define ISP2400_NVRAM_MAXFRAMELENGTH(c) (((c)[12]) | ((c)[13] << 8)) #define ISP2400_NVRAM_EXECUTION_THROTTLE(c) (((c)[14]) | ((c)[15] << 8)) #define ISP2400_NVRAM_EXCHANGE_COUNT(c) (((c)[16]) | ((c)[17] << 8)) #define ISP2400_NVRAM_HARDLOOPID(c) ((c)[18] | ((c)[19] << 8)) #define ISP2400_NVRAM_PORT_NAME(c) (\ (((uint64_t)(c)[20]) << 56) | \ (((uint64_t)(c)[21]) << 48) | \ (((uint64_t)(c)[22]) << 40) | \ (((uint64_t)(c)[23]) << 32) | \ (((uint64_t)(c)[24]) << 24) | \ (((uint64_t)(c)[25]) << 16) | \ (((uint64_t)(c)[26]) << 8) | \ (((uint64_t)(c)[27]) << 0)) #define ISP2400_NVRAM_NODE_NAME(c) (\ (((uint64_t)(c)[28]) << 56) | \ (((uint64_t)(c)[29]) << 48) | \ (((uint64_t)(c)[30]) << 40) | \ (((uint64_t)(c)[31]) << 32) | \ (((uint64_t)(c)[32]) << 24) | \ (((uint64_t)(c)[33]) << 16) | \ (((uint64_t)(c)[34]) << 8) | \ (((uint64_t)(c)[35]) << 0)) #define ISP2400_NVRAM_LOGIN_RETRY_CNT(c) ((c)[36] | ((c)[37] << 8)) #define ISP2400_NVRAM_LINK_DOWN_ON_NOS(c) ((c)[38] | ((c)[39] << 8)) #define ISP2400_NVRAM_INTERRUPT_DELAY(c) ((c)[40] | ((c)[41] << 8)) #define ISP2400_NVRAM_LOGIN_TIMEOUT(c) ((c)[42] | ((c)[43] << 8)) #define ISP2400_NVRAM_FIRMWARE_OPTIONS1(c) \ ((c)[44] | ((c)[45] << 8) | ((c)[46] << 16) | ((c)[47] << 24)) #define ISP2400_NVRAM_FIRMWARE_OPTIONS2(c) \ ((c)[48] | ((c)[49] << 8) | ((c)[50] << 16) | ((c)[51] << 24)) #define ISP2400_NVRAM_FIRMWARE_OPTIONS3(c) \ ((c)[52] | ((c)[53] << 8) | ((c)[54] << 16) | ((c)[55] << 24)) /* * Firmware Crash Dump * * QLogic needs specific information format when they look at firmware crashes. * * This is incredibly kernel memory consumptive (to say the least), so this * code is only compiled in when needed. 
*/ /* Dump-size expansions are fully parenthesized so they are safe inside larger expressions. */ #define QLA2200_RISC_IMAGE_DUMP_SIZE \ ((1 * sizeof (uint16_t)) + /* 'used' flag (also HBA type) */ \ (352 * sizeof (uint16_t)) + /* RISC registers */ \ (61440 * sizeof (uint16_t))) /* RISC SRAM (offset 0x1000..0xffff) */ #define QLA2300_RISC_IMAGE_DUMP_SIZE \ ((1 * sizeof (uint16_t)) + /* 'used' flag (also HBA type) */ \ (464 * sizeof (uint16_t)) + /* RISC registers */ \ (63488 * sizeof (uint16_t)) + /* RISC SRAM (0x0800..0xffff) */ \ (4096 * sizeof (uint16_t)) + /* RISC SRAM (0x10000..0x10FFF) */ \ (61440 * sizeof (uint16_t))) /* RISC SRAM (0x11000..0x1FFFF) */ /* the larger of the two */ #define ISP_CRASH_IMAGE_SIZE QLA2300_RISC_IMAGE_DUMP_SIZE #endif /* _ISPREG_H */ Index: head/sys/dev/isp/ispvar.h =================================================================== --- head/sys/dev/isp/ispvar.h (revision 196007) +++ head/sys/dev/isp/ispvar.h (revision 196008) @@ -1,1023 +1,1115 @@ /* $FreeBSD$ */ /*- - * Copyright (c) 1997-2007 by Matthew Jacob + * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * + * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * + * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. + * */ /* * Soft Definitions for for Qlogic ISP SCSI adapters. */ #ifndef _ISPVAR_H #define _ISPVAR_H #if defined(__NetBSD__) || defined(__OpenBSD__) #include #include #endif #ifdef __FreeBSD__ #include #include #endif #ifdef __linux__ #include "isp_stds.h" #include "ispmbox.h" #endif #ifdef __svr4__ #include "isp_stds.h" #include "ispmbox.h" #endif -#define ISP_CORE_VERSION_MAJOR 3 +#define ISP_CORE_VERSION_MAJOR 6 #define ISP_CORE_VERSION_MINOR 0 /* * Vector for bus specific code to provide specific services. */ typedef struct ispsoftc ispsoftc_t; struct ispmdvec { - int (*dv_rd_isr) - (ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); + int (*dv_rd_isr) (ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); uint32_t (*dv_rd_reg) (ispsoftc_t *, int); void (*dv_wr_reg) (ispsoftc_t *, int, uint32_t); int (*dv_mbxdma) (ispsoftc_t *); - int (*dv_dmaset) - (ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *, uint32_t); + int (*dv_dmaset) (ispsoftc_t *, XS_T *, void *); void (*dv_dmaclr) (ispsoftc_t *, XS_T *, uint32_t); void (*dv_reset0) (ispsoftc_t *); void (*dv_reset1) (ispsoftc_t *); void (*dv_dregs) (ispsoftc_t *, const char *); const void * dv_ispfw; /* ptr to f/w */ uint16_t dv_conf1; uint16_t dv_clock; /* clock frequency */ }; /* * Overall parameters */ #define MAX_TARGETS 16 +#ifndef MAX_FC_TARG #define MAX_FC_TARG 512 +#endif #define ISP_MAX_TARGETS(isp) (IS_FC(isp)? 
MAX_FC_TARG : MAX_TARGETS) #define ISP_MAX_LUNS(isp) (isp)->isp_maxluns /* * Macros to access ISP registers through bus specific layers- * mostly wrappers to vector through the mdvec structure. */ #define ISP_READ_ISR(isp, isrp, semap, mbox0p) \ (*(isp)->isp_mdvec->dv_rd_isr)(isp, isrp, semap, mbox0p) #define ISP_READ(isp, reg) \ (*(isp)->isp_mdvec->dv_rd_reg)((isp), (reg)) #define ISP_WRITE(isp, reg, val) \ (*(isp)->isp_mdvec->dv_wr_reg)((isp), (reg), (val)) #define ISP_MBOXDMASETUP(isp) \ (*(isp)->isp_mdvec->dv_mbxdma)((isp)) -#define ISP_DMASETUP(isp, xs, req, iptrp, optr) \ - (*(isp)->isp_mdvec->dv_dmaset)((isp), (xs), (req), (iptrp), (optr)) +#define ISP_DMASETUP(isp, xs, req) \ + (*(isp)->isp_mdvec->dv_dmaset)((isp), (xs), (req)) #define ISP_DMAFREE(isp, xs, hndl) \ if ((isp)->isp_mdvec->dv_dmaclr) \ (*(isp)->isp_mdvec->dv_dmaclr)((isp), (xs), (hndl)) #define ISP_RESET0(isp) \ if ((isp)->isp_mdvec->dv_reset0) (*(isp)->isp_mdvec->dv_reset0)((isp)) #define ISP_RESET1(isp) \ if ((isp)->isp_mdvec->dv_reset1) (*(isp)->isp_mdvec->dv_reset1)((isp)) #define ISP_DUMPREGS(isp, m) \ if ((isp)->isp_mdvec->dv_dregs) (*(isp)->isp_mdvec->dv_dregs)((isp),(m)) #define ISP_SETBITS(isp, reg, val) \ (*(isp)->isp_mdvec->dv_wr_reg)((isp), (reg), ISP_READ((isp), (reg)) | (val)) #define ISP_CLRBITS(isp, reg, val) \ (*(isp)->isp_mdvec->dv_wr_reg)((isp), (reg), ISP_READ((isp), (reg)) & ~(val)) /* * The MEMORYBARRIER macro is defined per platform (to provide synchronization * on Request and Response Queues, Scratch DMA areas, and Registers) * * Defined Memory Barrier Synchronization Types */ #define SYNC_REQUEST 0 /* request queue synchronization */ #define SYNC_RESULT 1 /* result queue synchronization */ #define SYNC_SFORDEV 2 /* scratch, sync for ISP */ #define SYNC_SFORCPU 3 /* scratch, sync for CPU */ #define SYNC_REG 4 /* for registers */ #define SYNC_ATIOQ 5 /* atio result queue (24xx) */ /* * Request/Response Queue defines and macros. 
* The maximum is defined per platform (and can be based on board type). */ /* This is the size of a queue entry (request and response) */ #define QENTRY_LEN 64 /* Both request and result queue length must be a power of two */ #define RQUEST_QUEUE_LEN(x) MAXISPREQUEST(x) #ifdef ISP_TARGET_MODE #define RESULT_QUEUE_LEN(x) MAXISPREQUEST(x) #else #define RESULT_QUEUE_LEN(x) \ (((MAXISPREQUEST(x) >> 2) < 64)? 64 : MAXISPREQUEST(x) >> 2) #endif #define ISP_QUEUE_ENTRY(q, idx) (((uint8_t *)q) + ((idx) * QENTRY_LEN)) #define ISP_QUEUE_SIZE(n) ((n) * QENTRY_LEN) #define ISP_NXT_QENTRY(idx, qlen) (((idx) + 1) & ((qlen)-1)) #define ISP_QFREE(in, out, qlen) \ ((in == out)? (qlen - 1) : ((in > out)? \ ((qlen - 1) - (in - out)) : (out - in - 1))) #define ISP_QAVAIL(isp) \ ISP_QFREE(isp->isp_reqidx, isp->isp_reqodx, RQUEST_QUEUE_LEN(isp)) #define ISP_ADD_REQUEST(isp, nxti) \ MEMORYBARRIER(isp, SYNC_REQUEST, isp->isp_reqidx, QENTRY_LEN); \ ISP_WRITE(isp, isp->isp_rqstinrp, nxti); \ isp->isp_reqidx = nxti +#define ISP_SYNC_REQUEST(isp) \ + MEMORYBARRIER(isp, SYNC_REQUEST, isp->isp_reqidx, QENTRY_LEN); \ + isp->isp_reqidx = ISP_NXT_QENTRY(isp->isp_reqidx, RQUEST_QUEUE_LEN(isp)); \ + ISP_WRITE(isp, isp->isp_rqstinrp, isp->isp_reqidx) + /* * SCSI Specific Host Adapter Parameters- per bus, per target */ typedef struct { - uint32_t : 10, - isp_bad_nvram : 1, - isp_gotdparms : 1, + uint32_t : 8, + update : 1, + sendmarker : 1, + role : 2, isp_req_ack_active_neg : 1, isp_data_line_active_neg: 1, isp_cmd_dma_burst_enable: 1, isp_data_dma_burst_enabl: 1, isp_fifo_threshold : 3, isp_ptisp : 1, isp_ultramode : 1, isp_diffmode : 1, isp_lvdmode : 1, isp_fast_mttr : 1, /* fast sram */ isp_initiator_id : 4, isp_async_data_setup : 4; uint16_t isp_selection_timeout; uint16_t isp_max_queue_depth; uint8_t isp_tag_aging; uint8_t isp_bus_reset_delay; uint8_t isp_retry_count; uint8_t isp_retry_delay; struct { - uint32_t + uint32_t exc_throttle : 8, : 1, dev_enable : 1, /* ignored */ dev_update : 1, 
dev_refresh : 1, actv_offset : 4, goal_offset : 4, nvrm_offset : 4; uint8_t actv_period; /* current sync period */ uint8_t goal_period; /* goal sync period */ uint8_t nvrm_period; /* nvram sync period */ uint16_t actv_flags; /* current device flags */ uint16_t goal_flags; /* goal device flags */ uint16_t nvrm_flags; /* nvram device flags */ } isp_devparam[MAX_TARGETS]; } sdparam; /* * Device Flags */ #define DPARM_DISC 0x8000 #define DPARM_PARITY 0x4000 #define DPARM_WIDE 0x2000 #define DPARM_SYNC 0x1000 #define DPARM_TQING 0x0800 #define DPARM_ARQ 0x0400 #define DPARM_QFRZ 0x0200 #define DPARM_RENEG 0x0100 #define DPARM_NARROW 0x0080 #define DPARM_ASYNC 0x0040 #define DPARM_PPR 0x0020 #define DPARM_DEFAULT (0xFF00 & ~DPARM_QFRZ) #define DPARM_SAFE_DFLT (DPARM_DEFAULT & ~(DPARM_WIDE|DPARM_SYNC|DPARM_TQING)) /* technically, not really correct, as they need to be rated based upon clock */ #define ISP_80M_SYNCPARMS 0x0c09 #define ISP_40M_SYNCPARMS 0x0c0a #define ISP_20M_SYNCPARMS 0x0c0c #define ISP_20M_SYNCPARMS_1040 0x080c #define ISP_10M_SYNCPARMS 0x0c19 #define ISP_08M_SYNCPARMS 0x0c25 #define ISP_05M_SYNCPARMS 0x0c32 #define ISP_04M_SYNCPARMS 0x0c41 /* * Fibre Channel Specifics */ /* These are for non-2K Login Firmware cards */ #define FL_ID 0x7e /* FL_Port Special ID */ #define SNS_ID 0x80 /* SNS Server Special ID */ #define NPH_MAX 0xfe +/* Use this handle for the base for multi-id firmware SNS logins */ +#define NPH_SNS_HDLBASE 0x400 + /* These are for 2K Login Firmware cards */ #define NPH_RESERVED 0x7F0 /* begin of reserved N-port handles */ #define NPH_MGT_ID 0x7FA /* Management Server Special ID */ #define NPH_SNS_ID 0x7FC /* SNS Server Special ID */ -#define NPH_FL_ID 0x7FE /* FL Port Special ID */ +#define NPH_FABRIC_CTLR 0x7FD /* Fabric Controller (0xFFFFFD) */ +#define NPH_FL_ID 0x7FE /* F Port Special ID (0xFFFFFE) */ +#define NPH_IP_BCST 0x7ff /* IP Broadcast Special ID (0xFFFFFF) */ #define NPH_MAX_2K 0x800 /* * "Unassigned" handle to be used 
internally */ #define NIL_HANDLE 0xffff /* * Limit for devices on an arbitrated loop. */ #define LOCAL_LOOP_LIM 126 /* + * Limit for (2K login) N-port handle amounts + */ +#define MAX_NPORT_HANDLE 2048 + +/* + * Special Constants + */ +#define INI_NONE ((uint64_t) 0) +#define ISP_NOCHAN 0xff + +/* * Special Port IDs */ #define MANAGEMENT_PORT_ID 0xFFFFFA #define SNS_PORT_ID 0xFFFFFC #define FABRIC_PORT_ID 0xFFFFFE +#define PORT_ANY 0xFFFFFF +#define PORT_NONE 0 +#define DOMAIN_CONTROLLER_BASE 0xFFFC00 +#define DOMAIN_CONTROLLER_END 0xFFFCFF /* * FC Port Database entry. * * It has a handle that the f/w uses to address commands to a device. * This handle's value may be assigned by the firmware (e.g., for local loop * devices) or by the driver (e.g., for fabric devices). * * It has a state. If the state if VALID, that means that we've logged into * the device. We also *may* have a initiator map index entry. This is a value - * from 0..MAX_FC_TARG that is used to index into the isp_ini_map array. If + * from 0..MAX_FC_TARG that is used to index into the isp_dev_map array. If * the value therein is non-zero, then that value minus one is used to index * into the Port Database to find the handle for forming commands. There is - * back-index minus one value within to Port Database entry that tells us - * which entry in isp_ini_map points to us (to avoid searching). + * back-index minus one value within to Port Database entry that tells us + * which entry in isp_dev_map points to us (to avoid searching). * * Local loop devices the firmware automatically performs PLOGI on for us * (which is why that handle is imposed upon us). Fabric devices we assign * a handle to and perform the PLOGI on. * * When a PORT DATABASE CHANGED asynchronous event occurs, we mark all VALID * entries as PROBATIONAL. This allows us, if policy says to, just keep track * of devices whose handles change but are otherwise the same device (and * thus keep 'target' constant). 
* * In any case, we search all possible local loop handles. For each one that * has a port database entity returned, we search for any PROBATIONAL entry * that matches it and update as appropriate. Otherwise, as a new entry, we * find room for it in the Port Database. We *try* and use the handle as the * index to put it into the Database, but that's just an optimization. We mark * the entry VALID and make sure that the target index is updated and correct. * * When we get done searching the local loop, we then search similarily for * a list of devices we've gotten from the fabric name controller (if we're * on a fabric). VALID marking is also done similarily. * * When all of this is done, we can march through the database and clean up * any entry that is still PROBATIONAL (these represent devices which have * departed). Then we're done and can resume normal operations. * * Negative invariants that we try and test for are: * * + There can never be two non-NIL entries with the same { Port, Node } WWN * duples. * * + There can never be two non-NIL entries with the same handle. * - * + There can never be two non-NIL entries which have the same ini_map_idx + * + There can never be two non-NIL entries which have the same dev_map_idx * value. */ typedef struct { /* * This is the handle that the firmware needs in order for us to * send commands to the device. For pre-24XX cards, this would be * the 'loopid'. */ uint16_t handle; + /* - * The ini_map_idx, if nonzero, is the system virtual target ID (+1) - * as a cross-reference with the isp_ini_map. + * The dev_map_idx, if nonzero, is the system virtual target ID (+1) + * as a cross-reference with the isp_dev_map. * * A device is 'autologin' if the firmware automatically logs into * it (re-logins as needed). Basically, local private loop devices. * - * The state is the current state of thsi entry. + * The state is the current state of this entry. 
* * Role is Initiator, Target, Both * - * Portid is obvious, as or node && port WWNs. The new_role and + * Portid is obvious, as are node && port WWNs. The new_role and * new_portid is for when we are pending a change. + * + * The 'target_mode' tag means that this entry arrived via a + * target mode command and is immune from normal flushing rules. + * You should also never see anything with an initiator role + * with this set. */ - uint16_t ini_map_idx : 12, + uint16_t dev_map_idx : 12, autologin : 1, /* F/W does PLOGI/PLOGO */ state : 3; - uint32_t reserved : 6, + uint32_t reserved : 5, + target_mode : 1, roles : 2, portid : 24; - uint32_t new_reserved : 6, + uint32_t + dirty : 1, /* commands have been run */ + new_reserved : 5, new_roles : 2, new_portid : 24; uint64_t node_wwn; uint64_t port_wwn; } fcportdb_t; #define FC_PORTDB_STATE_NIL 0 #define FC_PORTDB_STATE_PROBATIONAL 1 #define FC_PORTDB_STATE_DEAD 2 #define FC_PORTDB_STATE_CHANGED 3 #define FC_PORTDB_STATE_NEW 4 #define FC_PORTDB_STATE_PENDING_VALID 5 #define FC_PORTDB_STATE_ZOMBIE 6 #define FC_PORTDB_STATE_VALID 7 /* * FC card specific information + * + * This structure is replicated across multiple channels for multi-id + * capable chipsets, with some entities different on a per-channel basis.
*/ + typedef struct { - uint32_t : 10, - isp_tmode : 1, - isp_2klogin : 1, - isp_sccfw : 1, - isp_gbspeed : 3, - : 1, - : 1, - isp_gotdparms : 1, - isp_bad_nvram : 1, + uint32_t + link_active : 1, + npiv_fabric : 1, + inorder : 1, + sendmarker : 1, + role : 2, + isp_gbspeed : 4, isp_loopstate : 4, /* Current Loop State */ isp_fwstate : 4, /* ISP F/W state */ - isp_topo : 3, + isp_topo : 3, /* Connection Type */ loop_seen_once : 1; + uint32_t : 8, isp_portid : 24; /* S_ID */ + + uint16_t isp_fwoptions; uint16_t isp_xfwoptions; uint16_t isp_zfwoptions; - uint16_t isp_loopid; /* hard loop id */ - uint16_t isp_fwattr; /* firmware attributes */ - uint16_t isp_execthrottle; + uint16_t isp_loopid; /* hard loop id */ + uint16_t isp_sns_hdl; /* N-port handle for SNS */ + uint16_t isp_lasthdl; /* only valid for channel 0 */ + uint16_t isp_maxalloc; uint8_t isp_retry_delay; uint8_t isp_retry_count; - uint8_t isp_reserved; - uint16_t isp_maxalloc; - uint16_t isp_maxfrmlen; + + /* + * Current active WWNN/WWPN + */ + uint64_t isp_wwnn; + uint64_t isp_wwpn; + + /* + * NVRAM WWNN/WWPN + */ uint64_t isp_wwnn_nvram; uint64_t isp_wwpn_nvram; /* * Our Port Data Base */ fcportdb_t portdb[MAX_FC_TARG]; /* * This maps system virtual 'target' id to a portdb entry. * * The mapping function is to take any non-zero entry and * subtract one to get the portdb index. This means that * entries which are zero are unmapped (i.e., don't exist). */ - uint16_t isp_ini_map[MAX_FC_TARG]; + uint16_t isp_dev_map[MAX_FC_TARG]; +#ifdef ISP_TARGET_MODE /* + * This maps N-Port Handle to portdb entry so we + * don't have to search for every incoming command. + * + * The mapping function is to take any non-zero entry and + * subtract one to get the portdb index. This means that + * entries which are zero are unmapped (i.e., don't exist). + */ + uint16_t isp_tgt_map[MAX_NPORT_HANDLE]; +#endif + + /* * Scratch DMA mapped in area to fetch Port Database stuff, etc. 
*/ void * isp_scratch; XS_DMA_ADDR_T isp_scdma; -#ifdef ISP_FW_CRASH_DUMP - uint16_t * isp_dump_data; -#endif } fcparam; #define FW_CONFIG_WAIT 0 #define FW_WAIT_AL_PA 1 #define FW_WAIT_LOGIN 2 #define FW_READY 3 #define FW_LOSS_OF_SYNC 4 #define FW_ERROR 5 #define FW_REINIT 6 #define FW_NON_PART 7 #define LOOP_NIL 0 #define LOOP_LIP_RCVD 1 #define LOOP_PDB_RCVD 2 #define LOOP_SCANNING_LOOP 3 #define LOOP_LSCAN_DONE 4 #define LOOP_SCANNING_FABRIC 5 #define LOOP_FSCAN_DONE 6 #define LOOP_SYNCING_PDB 7 #define LOOP_READY 8 #define TOPO_NL_PORT 0 #define TOPO_FL_PORT 1 #define TOPO_N_PORT 2 #define TOPO_F_PORT 3 #define TOPO_PTP_STUB 4 /* * Soft Structure per host adapter */ struct ispsoftc { /* * Platform (OS) specific data */ struct isposinfo isp_osinfo; /* * Pointer to bus specific functions and data */ struct ispmdvec * isp_mdvec; /* * (Mostly) nonvolatile state. Board specific parameters * may contain some volatile state (e.g., current loop state). */ void * isp_param; /* type specific */ uint16_t isp_fwrev[3]; /* Loaded F/W revision */ - uint16_t isp_romfw_rev[3]; /* PROM F/W revision */ uint16_t isp_maxcmds; /* max possible I/O cmds */ uint8_t isp_type; /* HBA Chip Type */ uint8_t isp_revision; /* HBA Chip H/W Revision */ uint32_t isp_maxluns; /* maximum luns supported */ uint32_t isp_clock : 8, /* input clock */ : 4, isp_port : 1, /* 23XX/24XX only */ - isp_failed : 1, /* board failed */ isp_open : 1, /* opened (ioctl) */ - isp_touched : 1, /* board ever seen? 
*/ isp_bustype : 1, /* SBus or PCI */ isp_loaded_fw : 1, /* loaded firmware */ - isp_role : 2, /* roles supported */ - isp_dblev : 12; /* debug log mask */ + isp_dblev : 16; /* debug log mask */ - uint32_t isp_confopts; /* config options */ + uint16_t isp_fwattr; /* firmware attributes */ + uint16_t isp_nchan; /* number of channels */ + uint32_t isp_confopts; /* config options */ + uint32_t isp_rqstinrp; /* register for REQINP */ uint32_t isp_rqstoutrp; /* register for REQOUTP */ uint32_t isp_respinrp; /* register for RESINP */ uint32_t isp_respoutrp; /* register for RESOUTP */ - uint32_t isp_atioinrp; /* register for ATIOINP */ - uint32_t isp_atiooutrp; /* register for ATIOOUTP */ /* * Instrumentation */ uint64_t isp_intcnt; /* total int count */ uint64_t isp_intbogus; /* spurious int count */ uint64_t isp_intmboxc; /* mbox completions */ uint64_t isp_intoasync; /* other async */ uint64_t isp_rsltccmplt; /* CMDs on result q */ uint64_t isp_fphccmplt; /* CMDs via fastpost */ uint16_t isp_rscchiwater; uint16_t isp_fpcchiwater; + NANOTIME_T isp_init_time; /* time were last initialized */ /* * Volatile state */ volatile uint32_t : 8, + : 2, + isp_dead : 1, + : 1, isp_mboxbsy : 1, /* mailbox command active */ isp_state : 3, - isp_sendmarker : 2, /* send a marker entry */ - isp_update : 2, /* update parameters */ isp_nactive : 16; /* how many commands active */ volatile uint32_t isp_reqodx; /* index of last ISP pickup */ volatile uint32_t isp_reqidx; /* index of next request */ volatile uint32_t isp_residx; /* index of next result */ volatile uint32_t isp_resodx; /* index of next result */ - volatile uint32_t isp_rspbsy; volatile uint32_t isp_lasthdls; /* last handle seed */ volatile uint32_t isp_obits; /* mailbox command output */ + volatile uint32_t isp_serno; /* rolling serial number */ volatile uint16_t isp_mboxtmp[MAILBOX_STORAGE]; volatile uint16_t isp_lastmbxcmd; /* last mbox command sent */ volatile uint16_t isp_mbxwrk0; volatile uint16_t isp_mbxwrk1; volatile 
uint16_t isp_mbxwrk2; volatile uint16_t isp_mbxwrk8; void * isp_mbxworkp; /* * Active commands are stored here, indexed by handle functions. */ XS_T **isp_xflist; #ifdef ISP_TARGET_MODE /* - * Active target commands are stored here, indexed by handle function. + * Active target commands are stored here, indexed by handle functions. */ void **isp_tgtlist; #endif /* * request/result queue pointers and DMA handles for them. */ void * isp_rquest; void * isp_result; XS_DMA_ADDR_T isp_rquest_dma; XS_DMA_ADDR_T isp_result_dma; #ifdef ISP_TARGET_MODE /* for 24XX only */ void * isp_atioq; XS_DMA_ADDR_T isp_atioq_dma; #endif }; -#define SDPARAM(isp) ((sdparam *) (isp)->isp_param) -#define FCPARAM(isp) ((fcparam *) (isp)->isp_param) +#define SDPARAM(isp, chan) (&((sdparam *)(isp)->isp_param)[(chan)]) +#define FCPARAM(isp, chan) (&((fcparam *)(isp)->isp_param)[(chan)]) +#define ISP_SET_SENDMARKER(isp, chan, val) \ + if (IS_FC(isp)) { \ + FCPARAM(isp, chan)->sendmarker = val; \ + } else { \ + SDPARAM(isp, chan)->sendmarker = val; \ + } + +#define ISP_TST_SENDMARKER(isp, chan) \ + (IS_FC(isp)? 
\ + FCPARAM(isp, chan)->sendmarker != 0 : \ + SDPARAM(isp, chan)->sendmarker != 0) + /* * ISP Driver Run States */ #define ISP_NILSTATE 0 #define ISP_CRASHED 1 #define ISP_RESETSTATE 2 #define ISP_INITSTATE 3 #define ISP_RUNSTATE 4 /* * ISP Configuration Options */ #define ISP_CFG_NORELOAD 0x80 /* don't download f/w */ #define ISP_CFG_NONVRAM 0x40 /* ignore NVRAM */ #define ISP_CFG_TWOGB 0x20 /* force 2GB connection (23XX only) */ #define ISP_CFG_ONEGB 0x10 /* force 1GB connection (23XX only) */ #define ISP_CFG_FULL_DUPLEX 0x01 /* Full Duplex (Fibre Channel only) */ #define ISP_CFG_PORT_PREF 0x0C /* Mask for Port Prefs (2200 only) */ #define ISP_CFG_LPORT 0x00 /* prefer {N/F}L-Port connection */ #define ISP_CFG_NPORT 0x04 /* prefer {N/F}-Port connection */ #define ISP_CFG_NPORT_ONLY 0x08 /* insist on {N/F}-Port connection */ #define ISP_CFG_LPORT_ONLY 0x0C /* insist on {N/F}L-Port connection */ -#define ISP_CFG_OWNWWPN 0x100 /* override NVRAM wwpn */ -#define ISP_CFG_OWNWWNN 0x200 /* override NVRAM wwnn */ #define ISP_CFG_OWNFSZ 0x400 /* override NVRAM frame size */ #define ISP_CFG_OWNLOOPID 0x800 /* override NVRAM loopid */ #define ISP_CFG_OWNEXCTHROTTLE 0x1000 /* override NVRAM execution throttle */ #define ISP_CFG_FOURGB 0x2000 /* force 4GB connection (24XX only) */ /* - * Prior to calling isp_reset for the first time, the outer layer - * should set isp_role to one of NONE, INITIATOR, TARGET, BOTH. + * For each channel, the outer layers should know what role that channel + * will take: ISP_ROLE_NONE, ISP_ROLE_INITIATOR, ISP_ROLE_TARGET, + * ISP_ROLE_BOTH. * * If you set ISP_ROLE_NONE, the cards will be reset, new firmware loaded, * NVRAM read, and defaults set, but any further initialization (e.g. * INITIALIZE CONTROL BLOCK commands for 2X00 cards) won't be done. * * If INITIATOR MODE isn't set, attempts to run commands will be stopped - * at isp_start and completed with the moral equivalent of SELECTION TIMEOUT. 
+ * at isp_start and completed with the equivalent of SELECTION TIMEOUT. * * If TARGET MODE is set, it doesn't mean that the rest of target mode support * needs to be enabled, or will even work. What happens with the 2X00 cards * here is that if you have enabled it with TARGET MODE as part of the ICB * options, but you haven't given the f/w any ram resources for ATIOs or * Immediate Notifies, the f/w just handles what it can and you never see * anything. Basically, it sends a single byte of data (the first byte, * which you can set as part of the INITIALIZE CONTROL BLOCK command) for * INQUIRY, and sends back QUEUE FULL status for any other command. * */ #define ISP_ROLE_NONE 0x0 #define ISP_ROLE_TARGET 0x1 #define ISP_ROLE_INITIATOR 0x2 #define ISP_ROLE_BOTH (ISP_ROLE_TARGET|ISP_ROLE_INITIATOR) #define ISP_ROLE_EITHER ISP_ROLE_BOTH #ifndef ISP_DEFAULT_ROLES #define ISP_DEFAULT_ROLES ISP_ROLE_INITIATOR #endif /* * Firmware related defines */ #define ISP_CODE_ORG 0x1000 /* default f/w code start */ #define ISP_CODE_ORG_2300 0x0800 /* ..except for 2300s */ #define ISP_CODE_ORG_2400 0x100000 /* ..and 2400s */ #define ISP_FW_REV(maj, min, mic) ((maj << 24) | (min << 16) | mic) #define ISP_FW_MAJOR(code) ((code >> 24) & 0xff) #define ISP_FW_MINOR(code) ((code >> 16) & 0xff) #define ISP_FW_MICRO(code) ((code >> 8) & 0xff) #define ISP_FW_REVX(xp) ((xp[0]<<24) | (xp[1] << 16) | xp[2]) #define ISP_FW_MAJORX(xp) (xp[0]) #define ISP_FW_MINORX(xp) (xp[1]) #define ISP_FW_MICROX(xp) (xp[2]) #define ISP_FW_NEWER_THAN(i, major, minor, micro) \ (ISP_FW_REVX((i)->isp_fwrev) > ISP_FW_REV(major, minor, micro)) #define ISP_FW_OLDER_THAN(i, major, minor, micro) \ (ISP_FW_REVX((i)->isp_fwrev) < ISP_FW_REV(major, minor, micro)) /* * Bus (implementation) types */ #define ISP_BT_PCI 0 /* PCI Implementations */ #define ISP_BT_SBUS 1 /* SBus Implementations */ /* * If we have not otherwise defined SBus support away make sure * it is defined here such that the code is included as default */ 
#ifndef ISP_SBUS_SUPPORTED #define ISP_SBUS_SUPPORTED 1 #endif /* * Chip Types */ #define ISP_HA_SCSI 0xf #define ISP_HA_SCSI_UNKNOWN 0x1 #define ISP_HA_SCSI_1020 0x2 #define ISP_HA_SCSI_1020A 0x3 #define ISP_HA_SCSI_1040 0x4 #define ISP_HA_SCSI_1040A 0x5 #define ISP_HA_SCSI_1040B 0x6 #define ISP_HA_SCSI_1040C 0x7 #define ISP_HA_SCSI_1240 0x8 #define ISP_HA_SCSI_1080 0x9 #define ISP_HA_SCSI_1280 0xa #define ISP_HA_SCSI_10160 0xb #define ISP_HA_SCSI_12160 0xc #define ISP_HA_FC 0xf0 #define ISP_HA_FC_2100 0x10 #define ISP_HA_FC_2200 0x20 #define ISP_HA_FC_2300 0x30 #define ISP_HA_FC_2312 0x40 #define ISP_HA_FC_2322 0x50 #define ISP_HA_FC_2400 0x60 +#define ISP_HA_FC_2500 0x70 #define IS_SCSI(isp) (isp->isp_type & ISP_HA_SCSI) +#define IS_1020(isp) (isp->isp_type < ISP_HA_SCSI_1240) #define IS_1240(isp) (isp->isp_type == ISP_HA_SCSI_1240) #define IS_1080(isp) (isp->isp_type == ISP_HA_SCSI_1080) #define IS_1280(isp) (isp->isp_type == ISP_HA_SCSI_1280) #define IS_10160(isp) (isp->isp_type == ISP_HA_SCSI_10160) #define IS_12160(isp) (isp->isp_type == ISP_HA_SCSI_12160) #define IS_12X0(isp) (IS_1240(isp) || IS_1280(isp)) #define IS_1X160(isp) (IS_10160(isp) || IS_12160(isp)) #define IS_DUALBUS(isp) (IS_12X0(isp) || IS_12160(isp)) #define IS_ULTRA2(isp) (IS_1080(isp) || IS_1280(isp) || IS_1X160(isp)) #define IS_ULTRA3(isp) (IS_1X160(isp)) #define IS_FC(isp) ((isp)->isp_type & ISP_HA_FC) #define IS_2100(isp) ((isp)->isp_type == ISP_HA_FC_2100) #define IS_2200(isp) ((isp)->isp_type == ISP_HA_FC_2200) #define IS_23XX(isp) ((isp)->isp_type >= ISP_HA_FC_2300 && \ (isp)->isp_type < ISP_HA_FC_2400) #define IS_2300(isp) ((isp)->isp_type == ISP_HA_FC_2300) #define IS_2312(isp) ((isp)->isp_type == ISP_HA_FC_2312) #define IS_2322(isp) ((isp)->isp_type == ISP_HA_FC_2322) #define IS_24XX(isp) ((isp)->isp_type >= ISP_HA_FC_2400) +#define IS_25XX(isp) ((isp)->isp_type >= ISP_HA_FC_2500) /* * DMA related macros */ #define DMA_WD3(x) (((uint16_t)(((uint64_t)x) >> 48)) & 0xffff) #define 
DMA_WD2(x) (((uint16_t)(((uint64_t)x) >> 32)) & 0xffff) #define DMA_WD1(x) ((uint16_t)((x) >> 16) & 0xffff) #define DMA_WD0(x) ((uint16_t)((x) & 0xffff)) #define DMA_LO32(x) ((uint32_t) (x)) #define DMA_HI32(x) ((uint32_t)(((uint64_t)x) >> 32)) /* * Core System Function Prototypes */ /* - * Reset Hardware. Totally. Assumes that you'll follow this with - * a call to isp_init. + * Reset Hardware. Totally. Assumes that you'll follow this with a call to isp_init. */ -void isp_reset(ispsoftc_t *); +void isp_reset(ispsoftc_t *, int); /* * Initialize Hardware to known state */ void isp_init(ispsoftc_t *); /* * Reset the ISP and call completion for any orphaned commands. */ -void isp_reinit(ispsoftc_t *); +void isp_reinit(ispsoftc_t *, int); -#ifdef ISP_FW_CRASH_DUMP /* - * Dump firmware entry point. - */ -void isp_fw_dump(ispsoftc_t *isp); -#endif - -/* * Internal Interrupt Service Routine * * The outer layers do the spade work to get the appropriate status register, * semaphore register and first mailbox register (if appropriate). This also * means that most spurious/bogus interrupts not for us can be filtered first. */ void isp_intr(ispsoftc_t *, uint32_t, uint16_t, uint16_t); /* * Command Entry Point- Platform Dependent layers call into this */ int isp_start(XS_T *); /* these values are what isp_start returns */ #define CMD_COMPLETE 101 /* command completed */ #define CMD_EAGAIN 102 /* busy- maybe retry later */ #define CMD_QUEUED 103 /* command has been queued for execution */ #define CMD_RQLATER 104 /* requeue this command later */ /* * Command Completion Point- Core layers call out from this with completed cmds */ void isp_done(XS_T *); /* * Platform Dependent to External to Internal Control Function * * Assumes locks are held on entry. You should note that with many of - * these commands and locks may be released while this is occurring. + * these commands locks may be released while this function is called. * - * A few notes about some of these functions: + * ... 
ISPCTL_RESET_BUS, int channel); + * Reset BUS on this channel + * ... ISPCTL_RESET_DEV, int channel, int target); + * Reset Device on this channel at this target. + * ... ISPCTL_ABORT_CMD, XS_T *xs); + * Abort active transaction described by xs. + * ... IPCTL_UPDATE_PARAMS); + * Update any operating parameters (speed, etc.) + * ... ISPCTL_FCLINK_TEST, int channel); + * Test FC link status on this channel + * ... ISPCTL_SCAN_FABRIC, int channel); + * Scan fabric on this channel + * ... ISPCTL_SCAN_LOOP, int channel); + * Scan local loop on this channel + * ... ISPCTL_PDB_SYNC, int channel); + * Synchronize port database on this channel + * ... ISPCTL_SEND_LIP, int channel); + * Send a LIP on this channel + * ... ISPCTL_GET_NAMES, int channel, int np, uint64_t *wwnn, uint64_t *wwpn) + * Get a WWNN/WWPN for this N-port handle on this channel + * ... ISPCTL_RUN_MBOXCMD, mbreg_t *mbp) + * Run this mailbox command + * ... ISPCTL_GET_PDB, int channel, int nphandle, isp_pdb_t *pdb) + * Get PDB on this channel for this N-port handle + * ... ISPCTL_PLOGX, isp_plcmd_t *) + * Performa a port login/logout * - * ISPCTL_FCLINK_TEST tests to make sure we have good fibre channel link. - * The argument is a pointer to an integer which is the time, in microseconds, - * we should wait to see whether we have good link. This test, if successful, - * lets us know our connection topology and our Loop ID/AL_PA and so on. - * You can't get anywhere without this. - * - * ISPCTL_SCAN_FABRIC queries the name server (if we're on a fabric) for - * all entities using the FC Generic Services subcommand GET ALL NEXT. - * For each found entity, an ISPASYNC_FABRICDEV event is generated (see - * below). - * - * ISPCTL_SCAN_LOOP does a local loop scan. This is only done if the connection - * topology is NL or FL port (private or public loop). 
Since the Qlogic f/w - * 'automatically' manages local loop connections, this function essentially - * notes the arrival, departure, and possible shuffling around of local loop - * entities. Thus for each arrival and departure this generates an isp_async - * event of ISPASYNC_PROMENADE (see below). - * * ISPCTL_PDB_SYNC is somewhat misnamed. It actually is the final step, in * order, of ISPCTL_FCLINK_TEST, ISPCTL_SCAN_FABRIC, and ISPCTL_SCAN_LOOP. * The main purpose of ISPCTL_PDB_SYNC is to complete management of logging * and logging out of fabric devices (if one is on a fabric) and then marking * the 'loop state' as being ready to now be used for sending commands to * devices. Originally fabric name server and local loop scanning were * part of this function. It's now been separated to allow for finer control. */ typedef enum { - ISPCTL_RESET_BUS, /* Reset Bus */ - ISPCTL_RESET_DEV, /* Reset Device */ - ISPCTL_ABORT_CMD, /* Abort Command */ - ISPCTL_UPDATE_PARAMS, /* Update Operating Parameters (SCSI) */ - ISPCTL_FCLINK_TEST, /* Test FC Link Status */ - ISPCTL_SCAN_FABRIC, /* (Re)scan Fabric Name Server */ - ISPCTL_SCAN_LOOP, /* (Re)scan Local Loop */ - ISPCTL_PDB_SYNC, /* Synchronize Port Database */ - ISPCTL_SEND_LIP, /* Send a LIP */ - ISPCTL_GET_PORTNAME, /* get portname from an N-port handle */ - ISPCTL_RUN_MBOXCMD, /* run a mailbox command */ - ISPCTL_TOGGLE_TMODE, /* toggle target mode */ - ISPCTL_GET_PDB, /* get a single port database entry */ - ISPCTL_PLOGX /* do a port login/logout */ + ISPCTL_RESET_BUS, + ISPCTL_RESET_DEV, + ISPCTL_ABORT_CMD, + ISPCTL_UPDATE_PARAMS, + ISPCTL_FCLINK_TEST, + ISPCTL_SCAN_FABRIC, + ISPCTL_SCAN_LOOP, + ISPCTL_PDB_SYNC, + ISPCTL_SEND_LIP, + ISPCTL_GET_NAMES, + ISPCTL_RUN_MBOXCMD, + ISPCTL_GET_PDB, + ISPCTL_PLOGX } ispctl_t; -int isp_control(ispsoftc_t *, ispctl_t, void *); +int isp_control(ispsoftc_t *, ispctl_t, ...); - /* * Platform Dependent to Internal to External Control Function - * (each platform must provide such a 
function) - * - * Assumes locks are held. - * - * A few notes about some of these functions: - * - * ISPASYNC_CHANGE_NOTIFY notifies the outer layer that a change has - * occurred that invalidates the list of fabric devices known and/or - * the list of known loop devices. The argument passed is a pointer - * whose values are defined below (local loop change, name server - * change, other). 'Other' may simply be a LIP, or a change in - * connection topology. - * - * ISPASYNC_FABRIC_DEV announces the next element in a list of - * fabric device names we're getting out of the name server. The - * argument points to a GET ALL NEXT response structure. The list - * is known to terminate with an entry that refers to ourselves. - * One of the main purposes of this function is to allow outer - * layers, which are OS dependent, to set policy as to which fabric - * devices might actually be logged into (and made visible) later - * at ISPCTL_PDB_SYNC time. Since there's a finite number of fabric - * devices that we can log into (256 less 3 'reserved' for F-port - * topologies), and fabrics can grow up to 8 million or so entries - * (24 bits of Port Address, less a wad of reserved spaces), clearly - * we had better let the OS determine login policy. - * - * ISPASYNC_PROMENADE has an argument that is a pointer to an integer which - * is an index into the portdb in the softc ('target'). Whether that entry's - * valid tag is set or not says whether something has arrived or departed. - * The name refers to a favorite pastime of many city dwellers- watching - * people come and go, talking of Michaelangelo, and so on.. - * - * ISPASYNC_UNHANDLED_RESPONSE gives outer layers a chance to parse a - * response queue entry not otherwise handled. The outer layer should - * return non-zero if it handled it. The 'arg' points to an unmassaged - * response queue entry. 
*/ typedef enum { - ISPASYNC_NEW_TGT_PARAMS, /* New Target Parameters Negotiated */ - ISPASYNC_BUS_RESET, /* Bus Was Reset */ + ISPASYNC_NEW_TGT_PARAMS, /* SPI New Target Parameters */ + ISPASYNC_BUS_RESET, /* All Bus Was Reset */ ISPASYNC_LOOP_DOWN, /* FC Loop Down */ ISPASYNC_LOOP_UP, /* FC Loop Up */ - ISPASYNC_LIP, /* LIP Received */ - ISPASYNC_LOOP_RESET, /* Loop Reset Received */ + ISPASYNC_LIP, /* FC LIP Received */ + ISPASYNC_LOOP_RESET, /* FC Loop Reset Received */ ISPASYNC_CHANGE_NOTIFY, /* FC Change Notification */ - ISPASYNC_DEV_ARRIVED, /* FC Device Arrival */ - ISPASYNC_DEV_CHANGED, /* FC Device Change */ - ISPASYNC_DEV_STAYED, /* FC Device Stayed the Same */ - ISPASYNC_DEV_GONE, /* FC Device Depart */ - ISPASYNC_TARGET_NOTIFY, /* target asynchronous notification event */ - ISPASYNC_TARGET_ACTION, /* target action requested */ - ISPASYNC_CONF_CHANGE, /* Platform Configuration Change */ - ISPASYNC_UNHANDLED_RESPONSE, /* Unhandled Response Entry */ - ISPASYNC_FW_CRASH, /* Firmware has crashed */ - ISPASYNC_FW_DUMPED, /* Firmware crashdump taken */ - ISPASYNC_FW_RESTARTED /* Firmware has been restarted */ + ISPASYNC_DEV_ARRIVED, /* FC Device Arrived */ + ISPASYNC_DEV_CHANGED, /* FC Device Changed */ + ISPASYNC_DEV_STAYED, /* FC Device Stayed */ + ISPASYNC_DEV_GONE, /* FC Device Departure */ + ISPASYNC_TARGET_NOTIFY, /* All target async notification */ + ISPASYNC_TARGET_ACTION, /* All target action requested */ + ISPASYNC_FW_CRASH, /* All Firmware has crashed */ + ISPASYNC_FW_RESTARTED /* All Firmware has been restarted */ } ispasync_t; -int isp_async(ispsoftc_t *, ispasync_t, void *); +void isp_async(ispsoftc_t *, ispasync_t, ...); -#define ISPASYNC_CHANGE_PDB ((void *) 0) -#define ISPASYNC_CHANGE_SNS ((void *) 1) -#define ISPASYNC_CHANGE_OTHER ((void *) 2) +#define ISPASYNC_CHANGE_PDB 0 +#define ISPASYNC_CHANGE_SNS 1 +#define ISPASYNC_CHANGE_OTHER 2 /* * Platform Dependent Error and Debug Printout * * Generally this is: * * void isp_prt(ispsoftc_t *, 
int level, const char *, ...) * * but due to compiler differences on different platforms this won't be * formally done here. Instead, it goes in each platform definition file. */ #define ISP_LOGALL 0x0 /* log always */ #define ISP_LOGCONFIG 0x1 /* log configuration messages */ #define ISP_LOGINFO 0x2 /* log informational messages */ #define ISP_LOGWARN 0x4 /* log warning messages */ #define ISP_LOGERR 0x8 /* log error messages */ #define ISP_LOGDEBUG0 0x10 /* log simple debug messages */ #define ISP_LOGDEBUG1 0x20 /* log intermediate debug messages */ #define ISP_LOGDEBUG2 0x40 /* log most debug messages */ #define ISP_LOGDEBUG3 0x80 /* log high frequency debug messages */ #define ISP_LOGSANCFG 0x100 /* log SAN configuration */ -#define ISP_LOGTDEBUG0 0x200 /* log simple debug messages (target mode) */ -#define ISP_LOGTDEBUG1 0x400 /* log intermediate debug messages (target) */ -#define ISP_LOGTDEBUG2 0x800 /* log all debug messages (target) */ +#define ISP_LOGTINFO 0x1000 /* log informational messages (target mode) */ +#define ISP_LOGTDEBUG0 0x2000 /* log simple debug messages (target mode) */ +#define ISP_LOGTDEBUG1 0x4000 /* log intermediate debug messages (target) */ +#define ISP_LOGTDEBUG2 0x8000 /* log all debug messages (target) */ /* * Each Platform provides it's own isposinfo substructure of the ispsoftc * defined above. * * Each platform must also provide the following macros/defines: * * - * ISP2100_SCRLEN - length for the Fibre Channel scratch DMA area + * ISP_FC_SCRLEN FC scratch area DMA length * - * MEMZERO(dst, src) platform zeroing function - * MEMCPY(dst, src, count) platform copying function - * SNPRINTF(buf, bufsize, fmt, ...) snprintf - * USEC_DELAY(usecs) microsecond spindelay function - * USEC_SLEEP(isp, usecs) microsecond sleep function + * ISP_MEMZERO(dst, src) platform zeroing function + * ISP_MEMCPY(dst, src, count) platform copying function + * ISP_SNPRINTF(buf, bufsize, fmt, ...) 
snprintf + * ISP_DELAY(usecs) microsecond spindelay function + * ISP_SLEEP(isp, usecs) microsecond sleep function * + * ISP_INLINE ___inline or not- depending on how + * good your debugger is + * * NANOTIME_T nanosecond time type * * GET_NANOTIME(NANOTIME_T *) get current nanotime. * * GET_NANOSEC(NANOTIME_T *) get uint64_t from NANOTIME_T * * NANOTIME_SUB(NANOTIME_T *, NANOTIME_T *) * subtract two NANOTIME_T values * - * - * MAXISPREQUEST(ispsoftc_t *) maximum request queue size + * MAXISPREQUEST(ispsoftc_t *) maximum request queue size * for this particular board type * * MEMORYBARRIER(ispsoftc_t *, barrier_type, offset, size) * * Function/Macro the provides memory synchronization on * various objects so that the ISP's and the system's view * of the same object is consistent. * * MBOX_ACQUIRE(ispsoftc_t *) acquire lock on mailbox regs * MBOX_WAIT_COMPLETE(ispsoftc_t *, mbreg_t *) wait for cmd to be done * MBOX_NOTIFY_COMPLETE(ispsoftc_t *) notification of mbox cmd donee * MBOX_RELEASE(ispsoftc_t *) release lock on mailbox regs * - * FC_SCRATCH_ACQUIRE(ispsoftc_t *) acquire lock on FC scratch area - * FC_SCRATCH_RELEASE(ispsoftc_t *) acquire lock on FC scratch area + * FC_SCRATCH_ACQUIRE(ispsoftc_t *, chan) acquire lock on FC scratch area + * return -1 if you cannot + * FC_SCRATCH_RELEASE(ispsoftc_t *, chan) acquire lock on FC scratch area * * SCSI_GOOD SCSI 'Good' Status * SCSI_CHECK SCSI 'Check Condition' Status * SCSI_BUSY SCSI 'Busy' Status * SCSI_QFULL SCSI 'Queue Full' Status * - * XS_T Platform SCSI transaction type (i.e., command for HBA) - * XS_DMA_ADDR_T Platform PCI DMA Address Type - * XS_ISP(xs) gets an instance out of an XS_T - * XS_CHANNEL(xs) gets the channel (bus # for DUALBUS cards) "" - * XS_TGT(xs) gets the target "" - * XS_LUN(xs) gets the lun "" - * XS_CDBP(xs) gets a pointer to the scsi CDB "" - * XS_CDBLEN(xs) gets the CDB's length "" - * XS_XFRLEN(xs) gets the associated data transfer length "" - * XS_TIME(xs) gets the time (in 
milliseconds) for this command - * XS_RESID(xs) gets the current residual count - * XS_STSP(xs) gets a pointer to the SCSI status byte "" - * XS_SNSP(xs) gets a pointer to the associate sense data - * XS_SNSLEN(xs) gets the length of sense data storage - * XS_SNSKEY(xs) dereferences XS_SNSP to get the current stored Sense Key - * XS_TAG_P(xs) predicate of whether this command should be tagged - * XS_TAG_TYPE(xs) which type of tag to use - * XS_SETERR(xs) set error state + * XS_T Platform SCSI transaction type (i.e., command for HBA) + * XS_DMA_ADDR_T Platform PCI DMA Address Type + * XS_GET_DMA_SEG(..) Get 32 bit dma segment list value + * XS_GET_DMA64_SEG(..) Get 64 bit dma segment list value + * XS_ISP(xs) gets an instance out of an XS_T + * XS_CHANNEL(xs) gets the channel (bus # for DUALBUS cards) "" + * XS_TGT(xs) gets the target "" + * XS_LUN(xs) gets the lun "" + * XS_CDBP(xs) gets a pointer to the scsi CDB "" + * XS_CDBLEN(xs) gets the CDB's length "" + * XS_XFRLEN(xs) gets the associated data transfer length "" + * XS_TIME(xs) gets the time (in milliseconds) for this command + * XS_GET_RESID(xs) gets the current residual count + * XS_GET_RESID(xs, resid) sets the current residual count + * XS_STSP(xs) gets a pointer to the SCSI status byte "" + * XS_SNSP(xs) gets a pointer to the associate sense data + * XS_SNSLEN(xs) gets the length of sense data storage + * XS_SNSKEY(xs) dereferences XS_SNSP to get the current stored Sense Key + * XS_TAG_P(xs) predicate of whether this command should be tagged + * XS_TAG_TYPE(xs) which type of tag to use + * XS_SETERR(xs) set error state * * HBA_NOERROR command has no erros * HBA_BOTCH hba botched something * HBA_CMDTIMEOUT command timed out * HBA_SELTIMEOUT selection timed out (also port logouts for FC) * HBA_TGTBSY target returned a BUSY status * HBA_BUSRESET bus reset destroyed command * HBA_ABORTED command was aborted (by request) * HBA_DATAOVR a data overrun was detected * HBA_ARQFAIL Automatic Request Sense failed * 
* XS_ERR(xs) return current error state * XS_NOERR(xs) there is no error currently set * XS_INITERR(xs) initialize error state * * XS_SAVE_SENSE(xs, sp, len) save sense data * - * XS_SET_STATE_STAT(isp, sp, xs) platform dependent interpreter of - * response queue entry status bits + * DEFAULT_FRAMESIZE(ispsoftc_t *) Default Frame Size + * DEFAULT_EXEC_THROTTLE(ispsoftc_t *) Default Execution Throttle * + * GET_DEFAULT_ROLE(ispsoftc_t *, int) Get Default Role for a channel + * SET_DEFAULT_ROLE(ispsoftc_t *, int, int) Set Default Role for a channel + * DEFAULT_IID(ispsoftc_t *, int) Default SCSI initiator ID + * DEFAULT_LOOPID(ispsoftc_t *, int) Default FC Loop ID * - * DEFAULT_IID(ispsoftc_t *) Default SCSI initiator ID - * DEFAULT_LOOPID(ispsoftc_t *) Default FC Loop ID - * DEFAULT_NODEWWN(ispsoftc_t *) Default Node WWN - * DEFAULT_PORTWWN(ispsoftc_t *) Default Port WWN - * DEFAULT_FRAMESIZE(ispsoftc_t *) Default Frame Size - * DEFAULT_EXEC_THROTTLE(ispsoftc_t *) Default Execution Throttle * These establish reasonable defaults for each platform. * These must be available independent of card NVRAM and are * to be used should NVRAM not be readable. * - * ISP_NODEWWN(ispsoftc_t *) FC Node WWN to use - * ISP_PORTWWN(ispsoftc_t *) FC Port WWN to use + * DEFAULT_NODEWWN(ispsoftc_t *, chan) Default FC Node WWN to use + * DEFAULT_PORTWWN(ispsoftc_t *, chan) Default FC Port WWN to use * - * These are to be used after NVRAM is read. The tags - * in fcparam.isp_ww{n,p}n_nvram reflect the values - * read from NVRAM (possibly corrected for card botches). - * Each platform can take that information and override - * it or ignore and return the Node and Port WWNs to be - * used when sending the Qlogic f/w the Initialization - * Control Block. + * These defines are hooks to allow the setting of node and + * port WWNs when NVRAM cannot be read or is to be overriden. 
* - * (XXX these do endian specific transformations- in transition XXX) + * ACTIVE_NODEWWN(ispsoftc_t *, chan) FC Node WWN to use + * ACTIVE_PORTWWN(ispsoftc_t *, chan) FC Port WWN to use * + * After NVRAM is read, these will be invoked to get the + * node and port WWNs that will actually be used for this + * channel. + * + * * ISP_IOXPUT_8(ispsoftc_t *, uint8_t srcval, uint8_t *dstptr) * ISP_IOXPUT_16(ispsoftc_t *, uint16_t srcval, uint16_t *dstptr) * ISP_IOXPUT_32(ispsoftc_t *, uint32_t srcval, uint32_t *dstptr) * * ISP_IOXGET_8(ispsoftc_t *, uint8_t *srcptr, uint8_t dstrval) * ISP_IOXGET_16(ispsoftc_t *, uint16_t *srcptr, uint16_t dstrval) * ISP_IOXGET_32(ispsoftc_t *, uint32_t *srcptr, uint32_t dstrval) * * ISP_SWIZZLE_NVRAM_WORD(ispsoftc_t *, uint16_t *) * ISP_SWIZZLE_NVRAM_LONG(ispsoftc_t *, uint32_t *) * ISP_SWAP16(ispsoftc_t *, uint16_t srcval) * ISP_SWAP32(ispsoftc_t *, uint32_t srcval) */ +#ifdef ISP_TARGET_MODE +/* + * The functions below are for the publicly available + * target mode functions that are internal to the Qlogic driver. + */ + +/* + * This function handles new response queue entry appropriate for target mode. + */ +int isp_target_notify(ispsoftc_t *, void *, uint32_t *); + +/* + * This function externalizes the ability to acknowledge an Immediate Notify request. + */ +int isp_notify_ack(ispsoftc_t *, void *); + +/* + * This function externalized acknowledging (success/fail) an ABTS frame + */ +int isp_acknak_abts(ispsoftc_t *, void *, int); + +/* + * Enable/Disable/Modify a logical unit. + * (softc, cmd, bus, tgt, lun, cmd_cnt, inotify_cnt) + */ +#define DFLT_CMND_CNT 0xfe /* unmonitored */ +#define DFLT_INOT_CNT 0xfe /* unmonitored */ +int isp_lun_cmd(ispsoftc_t *, int, int, int, int, int); + +/* + * General request queue 'put' routine for target mode entries. + */ +int isp_target_put_entry(ispsoftc_t *isp, void *); + +/* + * General routine to put back an ATIO entry- + * used for replenishing f/w resource counts. 
+ * The argument is a pointer to a source ATIO + * or ATIO2. + */ +int isp_target_put_atio(ispsoftc_t *, void *); + +/* + * General routine to send a final CTIO for a command- used mostly for + * local responses. + */ +int isp_endcmd(ispsoftc_t *, ...); +#define ECMD_SVALID 0x100 +#define ECMD_TERMINATE 0x200 + +/* + * Handle an asynchronous event + * + * Return nonzero if the interrupt that generated this event has been dismissed. + */ +int isp_target_async(ispsoftc_t *, int, int); +#endif #endif /* _ISPVAR_H */