Index: head/sys/cam/cam_ccb.h =================================================================== --- head/sys/cam/cam_ccb.h (revision 296890) +++ head/sys/cam/cam_ccb.h (revision 296891) @@ -1,1350 +1,1357 @@ /*- * Data structures and definitions for CAM Control Blocks (CCBs). * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_CCB_H #define _CAM_CAM_CCB_H 1 #include #include #include #include #ifndef _KERNEL #include #endif #include #include #include /* General allocation length definitions for CCB structures */ #define IOCDBLEN CAM_MAX_CDBLEN /* Space for CDB bytes/pointer */ #define VUHBALEN 14 /* Vendor Unique HBA length */ #define SIM_IDLEN 16 /* ASCII string len for SIM ID */ #define HBA_IDLEN 16 /* ASCII string len for HBA ID */ #define DEV_IDLEN 16 /* ASCII string len for device names */ #define CCB_PERIPH_PRIV_SIZE 2 /* size of peripheral private area */ #define CCB_SIM_PRIV_SIZE 2 /* size of sim private area */ /* Struct definitions for CAM control blocks */ /* Common CCB header */ /* CAM CCB flags */ typedef enum { CAM_CDB_POINTER = 0x00000001,/* The CDB field is a pointer */ CAM_QUEUE_ENABLE = 0x00000002,/* SIM queue actions are enabled */ CAM_CDB_LINKED = 0x00000004,/* CCB contains a linked CDB */ CAM_NEGOTIATE = 0x00000008,/* * Perform transport negotiation * with this command. 
 */
	CAM_DATA_ISPHYS		= 0x00000010,/* Data type with physical addrs */
	CAM_DIS_AUTOSENSE	= 0x00000020,/* Disable autosense feature */
	CAM_DIR_BOTH		= 0x00000000,/* Data direction (00:IN/OUT) */
	CAM_DIR_IN		= 0x00000040,/* Data direction (01:DATA IN) */
	CAM_DIR_OUT		= 0x00000080,/* Data direction (10:DATA OUT) */
	CAM_DIR_NONE		= 0x000000C0,/* Data direction (11:no data) */
	CAM_DIR_MASK		= 0x000000C0,/* Data direction Mask */
	CAM_DATA_VADDR		= 0x00000000,/* Data type (000:Virtual) */
	CAM_DATA_PADDR		= 0x00000010,/* Data type (001:Physical) */
	CAM_DATA_SG		= 0x00040000,/* Data type (010:sglist) */
	CAM_DATA_SG_PADDR	= 0x00040010,/* Data type (011:sglist phys) */
	CAM_DATA_BIO		= 0x00200000,/* Data type (100:bio) */
	CAM_DATA_MASK		= 0x00240010,/* Data type mask */
	CAM_SOFT_RST_OP		= 0x00000100,/* Use Soft reset alternative */
	CAM_ENG_SYNC		= 0x00000200,/* Flush resid bytes on complete */
	CAM_DEV_QFRZDIS		= 0x00000400,/* Disable DEV Q freezing */
	CAM_DEV_QFREEZE		= 0x00000800,/* Freeze DEV Q on execution */
	CAM_HIGH_POWER		= 0x00001000,/* Command takes a lot of power */
	CAM_SENSE_PTR		= 0x00002000,/* Sense data is a pointer */
	CAM_SENSE_PHYS		= 0x00004000,/* Sense pointer is physical addr*/
	CAM_TAG_ACTION_VALID	= 0x00008000,/* Use the tag action in this ccb*/
	CAM_PASS_ERR_RECOVER	= 0x00010000,/* Pass driver does err. recovery*/
	CAM_DIS_DISCONNECT	= 0x00020000,/* Disable disconnect */
	CAM_MSG_BUF_PHYS	= 0x00080000,/* Message buffer ptr is physical*/
	CAM_SNS_BUF_PHYS	= 0x00100000,/* Autosense data ptr is physical*/
	CAM_CDB_PHYS		= 0x00400000,/* CDB pointer is physical */
	CAM_ENG_SGLIST		= 0x00800000,/* SG list is for the HBA engine */

/* Phase cognizant mode flags */
	CAM_DIS_AUTOSRP		= 0x01000000,/* Disable autosave/restore ptrs */
	CAM_DIS_AUTODISC	= 0x02000000,/* Disable auto disconnect */
	CAM_TGT_CCB_AVAIL	= 0x04000000,/* Target CCB available */
	CAM_TGT_PHASE_MODE	= 0x08000000,/* The SIM runs in phase mode */
	CAM_MSGB_VALID		= 0x10000000,/* Message buffer valid */
	CAM_STATUS_VALID	= 0x20000000,/* Status buffer valid */
	CAM_DATAB_VALID		= 0x40000000,/* Data buffer valid */

/* Host target Mode flags */
	CAM_SEND_SENSE		= 0x08000000,/* Send sense data with status */
	CAM_TERM_IO		= 0x10000000,/* Terminate I/O Message sup. */
	CAM_DISCONNECT		= 0x20000000,/* Disconnects are mandatory */
	CAM_SEND_STATUS		= 0x40000000,/* Send status after data phase */
	CAM_UNLOCKED		= 0x80000000 /* Call callback without lock.
 */
} ccb_flags;

typedef enum {
	CAM_USER_DATA_ADDR	= 0x00000002,/* Userspace data pointers */
	CAM_SG_FORMAT_IOVEC	= 0x00000004,/* iovec instead of busdma S/G*/
	CAM_UNMAPPED_BUF	= 0x00000008 /* use unmapped I/O */
} ccb_xflags;

/* XPT Opcodes for xpt_action */
typedef enum {
/* Function code flags are bits greater than 0xff */
	XPT_FC_QUEUED		= 0x100,
				/* Non-immediate function code */
	XPT_FC_USER_CCB		= 0x200,
	XPT_FC_XPT_ONLY		= 0x400,
				/* Only for the transport layer device */
	XPT_FC_DEV_QUEUED	= 0x800 | XPT_FC_QUEUED,
				/* Passes through the device queues */
/* Common function commands: 0x00->0x0F */
	XPT_NOOP		= 0x00,
				/* Execute Nothing */
	XPT_SCSI_IO		= 0x01 | XPT_FC_DEV_QUEUED,
				/* Execute the requested I/O operation */
	XPT_GDEV_TYPE		= 0x02,
				/* Get type information for specified device */
	XPT_GDEVLIST		= 0x03,
				/* Get a list of peripheral devices */
	XPT_PATH_INQ		= 0x04,
				/* Path routing inquiry */
	XPT_REL_SIMQ		= 0x05,
				/* Release a frozen device queue */
	XPT_SASYNC_CB		= 0x06,
				/* Set Asynchronous Callback Parameters */
	XPT_SDEV_TYPE		= 0x07,
				/* Set device type information */
	XPT_SCAN_BUS		= 0x08 | XPT_FC_QUEUED | XPT_FC_USER_CCB
				       | XPT_FC_XPT_ONLY,
				/* (Re)Scan the SCSI Bus */
	XPT_DEV_MATCH		= 0x09 | XPT_FC_XPT_ONLY,
				/* Get EDT entries matching the given pattern */
	XPT_DEBUG		= 0x0a,
				/* Turn on debugging for a bus, target or lun */
	XPT_PATH_STATS		= 0x0b,
				/* Path statistics (error counts, etc.) */
	XPT_GDEV_STATS		= 0x0c,
				/* Device statistics (error counts, etc.) */
	XPT_DEV_ADVINFO		= 0x0e,
				/* Get/Set Device advanced information */
	XPT_ASYNC		= 0x0f | XPT_FC_QUEUED | XPT_FC_USER_CCB
				       | XPT_FC_XPT_ONLY,
				/* Asynchronous event */
/* SCSI Control Functions: 0x10->0x1F */
	XPT_ABORT		= 0x10,
				/* Abort the specified CCB */
	XPT_RESET_BUS		= 0x11 | XPT_FC_XPT_ONLY,
				/* Reset the specified SCSI bus */
	XPT_RESET_DEV		= 0x12 | XPT_FC_DEV_QUEUED,
				/* Bus Device Reset the specified SCSI device */
	XPT_TERM_IO		= 0x13,
				/* Terminate the I/O process */
	XPT_SCAN_LUN		= 0x14 | XPT_FC_QUEUED | XPT_FC_USER_CCB
				       | XPT_FC_XPT_ONLY,
				/* Scan Logical Unit */
	XPT_GET_TRAN_SETTINGS	= 0x15,
				/*
				 * Get default/user transfer settings
				 * for the target
				 */
	XPT_SET_TRAN_SETTINGS	= 0x16,
				/*
				 * Set transfer rate/width
				 * negotiation settings
				 */
	XPT_CALC_GEOMETRY	= 0x17,
				/*
				 * Calculate the geometry parameters for
				 * a device given the sector size and
				 * volume size.
				 */
	XPT_ATA_IO		= 0x18 | XPT_FC_DEV_QUEUED,
				/* Execute the requested ATA I/O operation */
	XPT_GET_SIM_KNOB_OLD	= 0x18,
				/* Compat only */
	XPT_SET_SIM_KNOB	= 0x19,
				/* Set SIM specific knob values. */
	XPT_GET_SIM_KNOB	= 0x1a,
				/*
				 * Get SIM specific knob values.
*/ XPT_SMP_IO = 0x1b | XPT_FC_DEV_QUEUED, /* Serial Management Protocol */ XPT_SCAN_TGT = 0x1E | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Target */ /* HBA engine commands 0x20->0x2F */ XPT_ENG_INQ = 0x20 | XPT_FC_XPT_ONLY, /* HBA engine feature inquiry */ XPT_ENG_EXEC = 0x21 | XPT_FC_DEV_QUEUED, /* HBA execute engine request */ /* Target mode commands: 0x30->0x3F */ XPT_EN_LUN = 0x30, /* Enable LUN as a target */ XPT_TARGET_IO = 0x31 | XPT_FC_DEV_QUEUED, /* Execute target I/O request */ XPT_ACCEPT_TARGET_IO = 0x32 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Accept Host Target Mode CDB */ XPT_CONT_TARGET_IO = 0x33 | XPT_FC_DEV_QUEUED, /* Continue Host Target I/O Connection */ XPT_IMMED_NOTIFY = 0x34 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event (obsolete) */ XPT_NOTIFY_ACK = 0x35, /* Acknowledgement of event (obsolete) */ XPT_IMMEDIATE_NOTIFY = 0x36 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event */ XPT_NOTIFY_ACKNOWLEDGE = 0x37 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Acknowledgement of event */ /* Vendor Unique codes: 0x80->0x8F */ XPT_VUNIQUE = 0x80 } xpt_opcode; #define XPT_FC_GROUP_MASK 0xF0 #define XPT_FC_GROUP(op) ((op) & XPT_FC_GROUP_MASK) #define XPT_FC_GROUP_COMMON 0x00 #define XPT_FC_GROUP_SCSI_CONTROL 0x10 #define XPT_FC_GROUP_HBA_ENGINE 0x20 #define XPT_FC_GROUP_TMODE 0x30 #define XPT_FC_GROUP_VENDOR_UNIQUE 0x80 #define XPT_FC_IS_DEV_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_DEV_QUEUED) == XPT_FC_DEV_QUEUED) #define XPT_FC_IS_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_QUEUED) != 0) typedef enum { PROTO_UNKNOWN, PROTO_UNSPECIFIED, PROTO_SCSI, /* Small Computer System Interface */ PROTO_ATA, /* AT Attachment */ PROTO_ATAPI, /* AT Attachment Packetized Interface */ PROTO_SATAPM, /* SATA Port Multiplier */ PROTO_SEMB, /* SATA Enclosure Management Bridge */ } cam_proto; typedef enum { XPORT_UNKNOWN, XPORT_UNSPECIFIED, XPORT_SPI, /* SCSI Parallel Interface */ XPORT_FC, /* Fiber Channel */ XPORT_SSA, /* Serial Storage Architecture */ XPORT_USB, /* Universal Serial Bus */ XPORT_PPB, /* Parallel Port Bus */ XPORT_ATA, /* AT Attachment */ XPORT_SAS, /* Serial Attached SCSI */ XPORT_SATA, /* Serial AT Attachment */ XPORT_ISCSI, /* iSCSI */ XPORT_SRP, /* SCSI RDMA Protocol */ } cam_xport; #define XPORT_IS_ATA(t) ((t) == XPORT_ATA || (t) == XPORT_SATA) #define XPORT_IS_SCSI(t) ((t) != XPORT_UNKNOWN && \ (t) != XPORT_UNSPECIFIED && \ !XPORT_IS_ATA(t)) #define XPORT_DEVSTAT_TYPE(t) (XPORT_IS_ATA(t) ? DEVSTAT_TYPE_IF_IDE : \ XPORT_IS_SCSI(t) ? 
DEVSTAT_TYPE_IF_SCSI : \ DEVSTAT_TYPE_IF_OTHER) #define PROTO_VERSION_UNKNOWN (UINT_MAX - 1) #define PROTO_VERSION_UNSPECIFIED UINT_MAX #define XPORT_VERSION_UNKNOWN (UINT_MAX - 1) #define XPORT_VERSION_UNSPECIFIED UINT_MAX typedef union { LIST_ENTRY(ccb_hdr) le; SLIST_ENTRY(ccb_hdr) sle; TAILQ_ENTRY(ccb_hdr) tqe; STAILQ_ENTRY(ccb_hdr) stqe; } camq_entry; typedef union { void *ptr; u_long field; u_int8_t bytes[sizeof(uintptr_t)]; } ccb_priv_entry; typedef union { ccb_priv_entry entries[CCB_PERIPH_PRIV_SIZE]; u_int8_t bytes[CCB_PERIPH_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_ppriv_area; typedef union { ccb_priv_entry entries[CCB_SIM_PRIV_SIZE]; u_int8_t bytes[CCB_SIM_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_spriv_area; typedef struct { struct timeval *etime; uintptr_t sim_data; uintptr_t periph_data; } ccb_qos_area; struct ccb_hdr { cam_pinfo pinfo; /* Info for priority scheduling */ camq_entry xpt_links; /* For chaining in the XPT layer */ camq_entry sim_links; /* For chaining in the SIM layer */ camq_entry periph_links; /* For chaining in the type driver */ u_int32_t retry_count; void (*cbfcnp)(struct cam_periph *, union ccb *); /* Callback on completion function */ xpt_opcode func_code; /* XPT function code */ u_int32_t status; /* Status returned by CAM subsystem */ struct cam_path *path; /* Compiled path for this ccb */ path_id_t path_id; /* Path ID for the request */ target_id_t target_id; /* Target device ID */ lun_id_t target_lun; /* Target LUN number */ u_int32_t flags; /* ccb_flags */ u_int32_t xflags; /* Extended flags */ ccb_ppriv_area periph_priv; ccb_spriv_area sim_priv; ccb_qos_area qos; u_int32_t timeout; /* Hard timeout value in mseconds */ struct timeval softtimeout; /* Soft timeout value in sec + usec */ }; /* Get Device Information CCB */ struct ccb_getdev { struct ccb_hdr ccb_h; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; u_int8_t serial_num[252]; u_int8_t inq_flags; u_int8_t serial_num_len; }; /* Device Statistics CCB */ struct ccb_getdevstats { struct ccb_hdr ccb_h; int dev_openings; /* Space left for more work on device*/ int dev_active; /* Transactions running on the device */ int allocated; /* CCBs allocated for the device */ int queued; /* CCBs queued to be sent to the device */ int held; /* * CCBs held by peripheral drivers * for this device */ int maxtags; /* * Boundary conditions for number of * tagged operations */ int mintags; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { CAM_GDEVLIST_LAST_DEVICE, CAM_GDEVLIST_LIST_CHANGED, CAM_GDEVLIST_MORE_DEVS, CAM_GDEVLIST_ERROR } ccb_getdevlist_status_e; struct ccb_getdevlist { struct ccb_hdr ccb_h; char periph_name[DEV_IDLEN]; u_int32_t unit_number; unsigned int generation; u_int32_t index; ccb_getdevlist_status_e status; }; typedef enum { PERIPH_MATCH_NONE = 0x000, PERIPH_MATCH_PATH = 0x001, PERIPH_MATCH_TARGET = 0x002, PERIPH_MATCH_LUN = 0x004, PERIPH_MATCH_NAME = 0x008, PERIPH_MATCH_UNIT = 0x010, PERIPH_MATCH_ANY = 0x01f } periph_pattern_flags; struct periph_match_pattern { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; periph_pattern_flags flags; }; typedef enum { DEV_MATCH_NONE = 0x000, DEV_MATCH_PATH = 0x001, DEV_MATCH_TARGET = 0x002, DEV_MATCH_LUN = 0x004, DEV_MATCH_INQUIRY = 0x008, DEV_MATCH_DEVID = 0x010, DEV_MATCH_ANY = 0x00f } dev_pattern_flags; struct device_id_match_pattern { uint8_t id_len; uint8_t id[256]; }; struct device_match_pattern { path_id_t path_id; 
target_id_t target_id; lun_id_t target_lun; dev_pattern_flags flags; union { struct scsi_static_inquiry_pattern inq_pat; struct device_id_match_pattern devid_pat; } data; }; typedef enum { BUS_MATCH_NONE = 0x000, BUS_MATCH_PATH = 0x001, BUS_MATCH_NAME = 0x002, BUS_MATCH_UNIT = 0x004, BUS_MATCH_BUS_ID = 0x008, BUS_MATCH_ANY = 0x00f } bus_pattern_flags; struct bus_match_pattern { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; bus_pattern_flags flags; }; union match_pattern { struct periph_match_pattern periph_pattern; struct device_match_pattern device_pattern; struct bus_match_pattern bus_pattern; }; typedef enum { DEV_MATCH_PERIPH, DEV_MATCH_DEVICE, DEV_MATCH_BUS } dev_match_type; struct dev_match_pattern { dev_match_type type; union match_pattern pattern; }; struct periph_match_result { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; }; typedef enum { DEV_RESULT_NOFLAG = 0x00, DEV_RESULT_UNCONFIGURED = 0x01 } dev_result_flags; struct device_match_result { path_id_t path_id; target_id_t target_id; lun_id_t target_lun; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; dev_result_flags flags; }; struct bus_match_result { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; }; union match_result { struct periph_match_result periph_result; struct device_match_result device_result; struct bus_match_result bus_result; }; struct dev_match_result { dev_match_type type; union match_result result; }; typedef enum { CAM_DEV_MATCH_LAST, CAM_DEV_MATCH_MORE, CAM_DEV_MATCH_LIST_CHANGED, CAM_DEV_MATCH_SIZE_ERROR, CAM_DEV_MATCH_ERROR } ccb_dev_match_status; typedef enum { CAM_DEV_POS_NONE = 0x000, CAM_DEV_POS_BUS = 0x001, CAM_DEV_POS_TARGET = 0x002, CAM_DEV_POS_DEVICE = 0x004, CAM_DEV_POS_PERIPH = 0x008, CAM_DEV_POS_PDPTR = 0x010, CAM_DEV_POS_TYPEMASK = 0xf00, CAM_DEV_POS_EDT = 0x100, CAM_DEV_POS_PDRV = 0x200 } dev_pos_type; struct ccb_dm_cookie { void *bus; void *target; void *device; void *periph; void *pdrv; }; struct ccb_dev_position { u_int generations[4]; #define CAM_BUS_GENERATION 0x00 #define CAM_TARGET_GENERATION 0x01 #define CAM_DEV_GENERATION 0x02 #define CAM_PERIPH_GENERATION 0x03 dev_pos_type position_type; struct ccb_dm_cookie cookie; }; struct ccb_dev_match { struct ccb_hdr ccb_h; ccb_dev_match_status status; u_int32_t num_patterns; u_int32_t pattern_buf_len; struct dev_match_pattern *patterns; u_int32_t num_matches; u_int32_t match_buf_len; struct dev_match_result *matches; struct ccb_dev_position pos; }; /* * Definitions for the path inquiry CCB fields. */ #define CAM_VERSION 0x19 /* Hex value for current version */ typedef enum { PI_MDP_ABLE = 0x80, /* Supports MDP message */ PI_WIDE_32 = 0x40, /* Supports 32 bit wide SCSI */ PI_WIDE_16 = 0x20, /* Supports 16 bit wide SCSI */ PI_SDTR_ABLE = 0x10, /* Supports SDTR message */ PI_LINKED_CDB = 0x08, /* Supports linked CDBs */ PI_SATAPM = 0x04, /* Supports SATA PM */ PI_TAG_ABLE = 0x02, /* Supports tag queue messages */ PI_SOFT_RST = 0x01 /* Supports soft reset alternative */ } pi_inqflag; typedef enum { PIT_PROCESSOR = 0x80, /* Target mode processor mode */ PIT_PHASE = 0x40, /* Target mode phase cog. 
		     mode */
	PIT_DISCONNECT	= 0x20,	/* Disconnects supported in target mode */
	PIT_TERM_IO	= 0x10,	/* Terminate I/O message supported in TM */
	PIT_GRP_6	= 0x08,	/* Group 6 commands supported */
	PIT_GRP_7	= 0x04	/* Group 7 commands supported */
} pi_tmflag;

typedef enum {
	PIM_EXTLUNS	= 0x100,/* 64bit extended LUNs supported */
	PIM_SCANHILO	= 0x80,	/* Bus scans from high ID to low ID */
	PIM_NOREMOVE	= 0x40,	/* Removable devices not included in scan */
	PIM_NOINITIATOR	= 0x20,	/* Initiator role not supported. */
	PIM_NOBUSRESET	= 0x10,	/* User has disabled initial BUS RESET */
	PIM_NO_6_BYTE	= 0x08,	/* Do not send 6-byte commands */
	PIM_SEQSCAN	= 0x04,	/* Do bus scans sequentially, not in parallel */
	PIM_UNMAPPED	= 0x02,
	PIM_NOSCAN	= 0x01	/* SIM does its own scanning */
} pi_miscflag;

/* Path Inquiry CCB */
struct ccb_pathinq_settings_spi {
	u_int8_t ppr_options;
};

struct ccb_pathinq_settings_fc {
	u_int64_t wwnn;		/* world wide node name */
	u_int64_t wwpn;		/* world wide port name */
	u_int32_t port;		/* 24 bit port id, if known */
	u_int32_t bitrate;	/* Mbps */
};

struct ccb_pathinq_settings_sas {
	u_int32_t bitrate;	/* Mbps */
};
#define	PATHINQ_SETTINGS_SIZE	128

struct ccb_pathinq {
	struct ccb_hdr ccb_h;
	u_int8_t    version_num;	/* Version number for the SIM/HBA */
	u_int8_t    hba_inquiry;	/* Mimic of INQ byte 7 for the HBA */
	u_int16_t   target_sprt;	/* Flags for target mode support */
	u_int32_t   hba_misc;		/* Misc HBA features */
	u_int16_t   hba_eng_cnt;	/* HBA engine count */
					/* Vendor Unique capabilities */
	u_int8_t    vuhba_flags[VUHBALEN];
	u_int32_t   max_target;		/* Maximum supported Target */
	u_int32_t   max_lun;		/* Maximum supported Lun */
	u_int32_t   async_flags;	/* Installed Async handlers */
	path_id_t   hpath_id;		/* Highest Path ID in the subsystem */
	target_id_t initiator_id;	/* ID of the HBA on the SCSI bus */
	char	    sim_vid[SIM_IDLEN];	/* Vendor ID of the SIM */
	char	    hba_vid[HBA_IDLEN];	/* Vendor ID of the HBA */
	char	    dev_name[DEV_IDLEN];/* Device name for SIM */
	u_int32_t   unit_number;	/* Unit number for SIM */
	u_int32_t   bus_id;		/* Bus ID for SIM */
	u_int32_t   base_transfer_speed;/* Base bus speed in KB/sec */
	cam_proto   protocol;
	u_int	    protocol_version;
	cam_xport   transport;
	u_int	    transport_version;
	union {
		struct ccb_pathinq_settings_spi spi;
		struct ccb_pathinq_settings_fc fc;
		struct ccb_pathinq_settings_sas sas;
		char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE];
	} xport_specific;
	u_int	    maxio;		/* Max supported I/O size, in bytes. */
	u_int16_t   hba_vendor;		/* HBA vendor ID */
	u_int16_t   hba_device;		/* HBA device ID */
	u_int16_t   hba_subvendor;	/* HBA subvendor ID */
	u_int16_t   hba_subdevice;	/* HBA subdevice ID */
};

/* Path Statistics CCB */
struct ccb_pathstats {
	struct ccb_hdr	ccb_h;
	struct timeval	last_reset;	/* Time of last bus reset/loop init */
};

typedef enum {
	SMP_FLAG_NONE		= 0x00,
	SMP_FLAG_REQ_SG		= 0x01,
	SMP_FLAG_RSP_SG		= 0x02
} ccb_smp_pass_flags;

/*
 * Serial Management Protocol CCB
 * XXX Currently the semantics for this CCB are that it is executed either
 * by the addressed device, or that device's parent (i.e. an expander for
 * any device on an expander) if the addressed device doesn't support SMP.
 * Later, once we have the ability to probe SMP-only devices and put them
 * in CAM's topology, the CCB will only be executed by the addressed device
 * if possible.
 */
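/*
 * Illustrative sketch (not part of the original header): a consumer would
 * typically fill the SMP CCB below with the cam_fill_smpio() helper
 * declared near the end of this file.  The buffers, lengths, retry count,
 * timeout, and the NULL completion callback here are all placeholders.
 */
#if 0	/* example only */
static void
example_fill_smp(struct ccb_smpio *smpio, uint8_t *req, int req_len,
    uint8_t *rsp, int rsp_len)
{
	/* SMP CCBs carry both a request and a response, so CAM_DIR_BOTH. */
	cam_fill_smpio(smpio,
		       /*retries*/ 4,
		       /*cbfcnp*/ NULL,	/* placeholder completion callback */
		       /*flags*/ CAM_DIR_BOTH,
		       req, req_len,
		       rsp, rsp_len,
		       /*timeout*/ 5000);
}
#endif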
struct ccb_smpio {
	struct ccb_hdr		ccb_h;
	uint8_t			*smp_request;
	int			smp_request_len;
	uint16_t		smp_request_sglist_cnt;
	uint8_t			*smp_response;
	int			smp_response_len;
	uint16_t		smp_response_sglist_cnt;
	ccb_smp_pass_flags	flags;
};

typedef union {
	u_int8_t *sense_ptr;		/*
					 * Pointer to storage
					 * for sense information
					 */
					/* Storage Area for sense information */
	struct	 scsi_sense_data sense_buf;
} sense_t;

typedef union {
	u_int8_t  *cdb_ptr;		/* Pointer to the CDB bytes to send */
					/* Area for the CDB send */
	u_int8_t  cdb_bytes[IOCDBLEN];
} cdb_t;

/*
 * SCSI I/O Request CCB used for the XPT_SCSI_IO and XPT_CONT_TARGET_IO
 * function codes.
 */
struct ccb_scsiio {
	struct	   ccb_hdr ccb_h;
	union	   ccb *next_ccb;	/* Ptr for next CCB for action */
	u_int8_t   *req_map;		/* Ptr to mapping info */
	u_int8_t   *data_ptr;		/* Ptr to the data buf/SG list */
	u_int32_t  dxfer_len;		/* Data transfer length */
					/* Autosense storage */
	struct	   scsi_sense_data sense_data;
	u_int8_t   sense_len;		/* Number of bytes to autosense */
	u_int8_t   cdb_len;		/* Number of bytes for the CDB */
	u_int16_t  sglist_cnt;		/* Number of SG list entries */
	u_int8_t   scsi_status;		/* Returned SCSI status */
	u_int8_t   sense_resid;		/* Autosense resid length: 2's comp */
	u_int32_t  resid;		/* Transfer residual length: 2's comp */
	cdb_t	   cdb_io;		/* Union for CDB bytes/pointer */
	u_int8_t   *msg_ptr;		/* Pointer to the message buffer */
	u_int16_t  msg_len;		/* Number of bytes for the Message */
	u_int8_t   tag_action;		/* What to do for tag queueing */
	/*
	 * The tag action should be either the define below (to send a
	 * non-tagged transaction) or one of the defined scsi tag messages
	 * from scsi_message.h.
	 */
#define	CAM_TAG_ACTION_NONE	0x00
	u_int	   tag_id;		/* tag id from initiator (target mode) */
	u_int	   init_id;		/* initiator id of who selected */
};

+static __inline uint8_t *
+scsiio_cdb_ptr(struct ccb_scsiio *ccb)
+{
+	return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
+	    ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes);
+}
+
/*
 * ATA I/O Request CCB used for the XPT_ATA_IO function code.
 */
struct ccb_ataio {
	struct	   ccb_hdr ccb_h;
	union	   ccb *next_ccb;	/* Ptr for next CCB for action */
	struct	   ata_cmd cmd;		/* ATA command register set */
	struct	   ata_res res;		/* ATA result register set */
	u_int8_t   *data_ptr;		/* Ptr to the data buf/SG list */
	u_int32_t  dxfer_len;		/* Data transfer length */
	u_int32_t  resid;		/* Transfer residual length: 2's comp */
	u_int8_t   tag_action;		/* What to do for tag queueing */
	/*
	 * The tag action should be either the define below (to send a
	 * non-tagged transaction) or one of the defined scsi tag messages
	 * from scsi_message.h.
	 */
#define	CAM_TAG_ACTION_NONE	0x00
	u_int	   tag_id;		/* tag id from initiator (target mode) */
	u_int	   init_id;		/* initiator id of who selected */
};

struct ccb_accept_tio {
	struct	   ccb_hdr ccb_h;
	cdb_t	   cdb_io;		/* Union for CDB bytes/pointer */
	u_int8_t   cdb_len;		/* Number of bytes for the CDB */
	u_int8_t   tag_action;		/* What to do for tag queueing */
	u_int8_t   sense_len;		/* Number of bytes of Sense Data */
	u_int	   tag_id;		/* tag id from initiator (target mode) */
	u_int	   init_id;		/* initiator id of who selected */
	struct	   scsi_sense_data sense_data;
};

/* Release SIM Queue */
struct ccb_relsim {
	struct ccb_hdr ccb_h;
	u_int32_t      release_flags;
#define	RELSIM_ADJUST_OPENINGS		0x01
#define	RELSIM_RELEASE_AFTER_TIMEOUT	0x02
#define	RELSIM_RELEASE_AFTER_CMDCMPLT	0x04
#define	RELSIM_RELEASE_AFTER_QEMPTY	0x08
	u_int32_t      openings;
	u_int32_t      release_timeout;	/* Abstract argument. */
	u_int32_t      qfrozen_cnt;
};
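/*
 * Illustrative sketch (not part of this change): the scsiio_cdb_ptr()
 * accessor added above (the "+" lines) lets a SIM fetch the CDB bytes
 * without open-coding the CAM_CDB_POINTER test, e.g. when copying the
 * CDB into a controller-specific request frame.  The hypothetical
 * example_copy_cdb() below is for illustration only.
 */
#if 0	/* example only */
static void
example_copy_cdb(struct ccb_scsiio *csio, uint8_t *frame_cdb)
{
	/* Works for both inline CDB bytes and a pointed-to CDB. */
	bcopy(scsiio_cdb_ptr(csio), frame_cdb, csio->cdb_len);
}
#endif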
/*
 * Definitions for the asynchronous callback CCB fields.
 */
typedef enum {
	AC_UNIT_ATTENTION	= 0x4000,/* Device reported UNIT ATTENTION */
	AC_ADVINFO_CHANGED	= 0x2000,/* Advanced info might have changed */
	AC_CONTRACT		= 0x1000,/* A contractual callback */
	AC_GETDEV_CHANGED	= 0x800,/* Getdev info might have changed */
	AC_INQ_CHANGED		= 0x400,/* Inquiry info might have changed */
	AC_TRANSFER_NEG		= 0x200,/* New transfer settings in effect */
	AC_LOST_DEVICE		= 0x100,/* A device went away */
	AC_FOUND_DEVICE		= 0x080,/* A new device was found */
	AC_PATH_DEREGISTERED	= 0x040,/* A path has de-registered */
	AC_PATH_REGISTERED	= 0x020,/* A new path has been registered */
	AC_SENT_BDR		= 0x010,/* A BDR message was sent to target */
	AC_SCSI_AEN		= 0x008,/* A SCSI AEN has been received */
	AC_UNSOL_RESEL		= 0x002,/* Unsolicited reselection occurred */
	AC_BUS_RESET		= 0x001 /* A SCSI bus reset occurred */
} ac_code;

typedef void ac_callback_t (void *softc, u_int32_t code,
			    struct cam_path *path, void *args);

/*
 * Generic Asynchronous callbacks.
 *
 * Generic arguments passed back which are then interpreted according to
 * a per-system contract number.
 */
#define	AC_CONTRACT_DATA_MAX (128 - sizeof (u_int64_t))
struct ac_contract {
	u_int64_t	contract_number;
	u_int8_t	contract_data[AC_CONTRACT_DATA_MAX];
};

#define	AC_CONTRACT_DEV_CHG	1
struct ac_device_changed {
	u_int64_t	wwpn;
	u_int32_t	port;
	target_id_t	target;
	u_int8_t	arrived;
};

/* Set Asynchronous Callback CCB */
struct ccb_setasync {
	struct ccb_hdr	 ccb_h;
	u_int32_t	 event_enable;	/* Async Event enables */
	ac_callback_t	*callback;
	void		*callback_arg;
};

/* Set Device Type CCB */
struct ccb_setdev {
	struct	   ccb_hdr ccb_h;
	u_int8_t   dev_type;	/* Value for dev type field in EDT */
};

/* SCSI Control Functions */

/* Abort XPT request CCB */
struct ccb_abort {
	struct	ccb_hdr ccb_h;
	union	ccb *abort_ccb;	/* Pointer to CCB to abort */
};

/* Reset SCSI Bus CCB */
struct ccb_resetbus {
	struct	ccb_hdr ccb_h;
};

/* Reset SCSI Device CCB */
struct ccb_resetdev {
	struct	ccb_hdr ccb_h;
};

/* Terminate I/O Process Request CCB */
struct ccb_termio {
	struct	ccb_hdr ccb_h;
	union	ccb *termio_ccb;	/* Pointer to CCB to terminate */
};

typedef enum {
	CTS_TYPE_CURRENT_SETTINGS,
	CTS_TYPE_USER_SETTINGS
} cts_type;

struct ccb_trans_settings_scsi {
	u_int	valid;	/* Which fields to honor */
#define	CTS_SCSI_VALID_TQ		0x01
	u_int	flags;
#define	CTS_SCSI_FLAGS_TAG_ENB		0x01
};

struct ccb_trans_settings_ata {
	u_int	valid;	/* Which fields to honor */
#define	CTS_ATA_VALID_TQ		0x01
	u_int	flags;
#define	CTS_ATA_FLAGS_TAG_ENB		0x01
};

struct ccb_trans_settings_spi {
	u_int	valid;	/* Which fields to honor */
#define	CTS_SPI_VALID_SYNC_RATE		0x01
#define	CTS_SPI_VALID_SYNC_OFFSET	0x02
#define	CTS_SPI_VALID_BUS_WIDTH		0x04
#define	CTS_SPI_VALID_DISC		0x08
#define	CTS_SPI_VALID_PPR_OPTIONS	0x10
	u_int	flags;
#define	CTS_SPI_FLAGS_DISC_ENB		0x01
	u_int	sync_period;
	u_int	sync_offset;
	u_int	bus_width;
	u_int	ppr_options;
};

struct ccb_trans_settings_fc {
	u_int	  valid;	/* Which fields to honor */
#define	CTS_FC_VALID_WWNN		0x8000
#define	CTS_FC_VALID_WWPN		0x4000
#define	CTS_FC_VALID_PORT		0x2000
#define	CTS_FC_VALID_SPEED		0x1000
	u_int64_t wwnn;		/* world wide node name */
	u_int64_t wwpn;		/* world wide port name */
	u_int32_t port;		/* 24 bit port id, if known */
	u_int32_t bitrate;	/* Mbps */
};

struct ccb_trans_settings_sas {
	u_int	  valid;	/* Which fields to honor */
#define	CTS_SAS_VALID_SPEED		0x1000
	u_int32_t bitrate;	/* Mbps */
};

struct ccb_trans_settings_pata {
	u_int	valid;		/* Which fields to honor */
#define	CTS_ATA_VALID_MODE		0x01
#define	CTS_ATA_VALID_BYTECOUNT		0x02
#define	CTS_ATA_VALID_ATAPI		0x20
#define	CTS_ATA_VALID_CAPS		0x40
	int	mode;		/* Mode */
	u_int	bytecount;	/* Length of PIO transaction */
	u_int	atapi;		/* Length of ATAPI CDB */
	u_int	caps;		/* Device and host SATA caps. */
#define	CTS_ATA_CAPS_H			0x0000ffff
#define	CTS_ATA_CAPS_H_DMA48		0x00000001 /* 48-bit DMA */
#define	CTS_ATA_CAPS_D			0xffff0000
};

struct ccb_trans_settings_sata {
	u_int	valid;		/* Which fields to honor */
#define	CTS_SATA_VALID_MODE		0x01
#define	CTS_SATA_VALID_BYTECOUNT	0x02
#define	CTS_SATA_VALID_REVISION		0x04
#define	CTS_SATA_VALID_PM		0x08
#define	CTS_SATA_VALID_TAGS		0x10
#define	CTS_SATA_VALID_ATAPI		0x20
#define	CTS_SATA_VALID_CAPS		0x40
	int	mode;		/* Legacy PATA mode */
	u_int	bytecount;	/* Length of PIO transaction */
	int	revision;	/* SATA revision */
	u_int	pm_present;	/* PM is present (XPT->SIM) */
	u_int	tags;		/* Number of allowed tags */
	u_int	atapi;		/* Length of ATAPI CDB */
	u_int	caps;		/* Device and host SATA caps. */
#define	CTS_SATA_CAPS_H			0x0000ffff
#define	CTS_SATA_CAPS_H_PMREQ		0x00000001
#define	CTS_SATA_CAPS_H_APST		0x00000002
#define	CTS_SATA_CAPS_H_DMAAA		0x00000010 /* Auto-activation */
#define	CTS_SATA_CAPS_H_AN		0x00000020 /* Async. notification */
#define	CTS_SATA_CAPS_D			0xffff0000
#define	CTS_SATA_CAPS_D_PMREQ		0x00010000
#define	CTS_SATA_CAPS_D_APST		0x00020000
};

/* Get/Set transfer rate/width/disconnection/tag queueing settings */
struct ccb_trans_settings {
	struct	  ccb_hdr ccb_h;
	cts_type  type;		/* Current or User settings */
	cam_proto protocol;
	u_int	  protocol_version;
	cam_xport transport;
	u_int	  transport_version;
	union {
		u_int  valid;	/* Which fields to honor */
		struct ccb_trans_settings_ata ata;
		struct ccb_trans_settings_scsi scsi;
	} proto_specific;
	union {
		u_int  valid;	/* Which fields to honor */
		struct ccb_trans_settings_spi spi;
		struct ccb_trans_settings_fc fc;
		struct ccb_trans_settings_sas sas;
		struct ccb_trans_settings_pata ata;
		struct ccb_trans_settings_sata sata;
	} xport_specific;
};

/*
 * Calculate the geometry parameters for a device
 * given the block size and volume size in blocks.
 */
struct ccb_calc_geometry {
	struct	  ccb_hdr ccb_h;
	u_int32_t block_size;
	u_int64_t volume_size;
	u_int32_t cylinders;
	u_int8_t  heads;
	u_int8_t  secs_per_track;
};

/*
 * Set or get SIM (and transport) specific knobs
 */

#define	KNOB_VALID_ADDRESS	0x1
#define	KNOB_VALID_ROLE		0x2

#define	KNOB_ROLE_NONE		0x0
#define	KNOB_ROLE_INITIATOR	0x1
#define	KNOB_ROLE_TARGET	0x2
#define	KNOB_ROLE_BOTH		0x3

struct ccb_sim_knob_settings_spi {
	u_int		valid;
	u_int		initiator_id;
	u_int		role;
};

struct ccb_sim_knob_settings_fc {
	u_int		valid;
	u_int64_t	wwnn;		/* world wide node name */
	u_int64_t	wwpn;		/* world wide port name */
	u_int		role;
};

struct ccb_sim_knob_settings_sas {
	u_int		valid;
	u_int64_t	wwnn;		/* world wide node name */
	u_int		role;
};
#define	KNOB_SETTINGS_SIZE	128

struct ccb_sim_knob {
	struct	  ccb_hdr ccb_h;
	union {
		u_int  valid;	/* Which fields to honor */
		struct ccb_sim_knob_settings_spi spi;
		struct ccb_sim_knob_settings_fc fc;
		struct ccb_sim_knob_settings_sas sas;
		char pad[KNOB_SETTINGS_SIZE];
	} xport_specific;
};

/*
 * Rescan the given bus, or bus/target/lun
 */
struct ccb_rescan {
	struct	ccb_hdr ccb_h;
	cam_flags	flags;
};

/*
 * Turn on debugging for the given bus, bus/target, or bus/target/lun.
 */
struct ccb_debug {
	struct	ccb_hdr ccb_h;
	cam_debug_flags flags;
};
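/*
 * Illustrative sketch (not part of the original header): a typical
 * consumer of the ccb_trans_settings CCB above issues it through
 * xpt_setup_ccb()/xpt_action() from cam_xpt.h.  Locking and error
 * handling are elided; this is an example under those assumptions,
 * not library code.
 */
#if 0	/* example only */
static void
example_get_tran_settings(struct cam_path *path)
{
	struct ccb_trans_settings cts;

	memset(&cts, 0, sizeof(cts));
	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb *)&cts);
	/* On success, cts.protocol/cts.transport describe the device. */
}
#endif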
/* Target mode structures. */

struct ccb_en_lun {
	struct	  ccb_hdr ccb_h;
	u_int16_t grp6_len;		/* Group 6 VU CDB length */
	u_int16_t grp7_len;		/* Group 7 VU CDB length */
	u_int8_t  enable;
};

/* old, barely used immediate notify, binary compatibility */
struct ccb_immed_notify {
	struct	  ccb_hdr ccb_h;
	struct	  scsi_sense_data sense_data;
	u_int8_t  sense_len;		/* Number of bytes in sense buffer */
	u_int8_t  initiator_id;		/* Id of initiator that selected */
	u_int8_t  message_args[7];	/* Message Arguments */
};

struct ccb_notify_ack {
	struct	  ccb_hdr ccb_h;
	u_int16_t seq_id;		/* Sequence identifier */
	u_int8_t  event;		/* Event flags */
};

struct ccb_immediate_notify {
	struct	  ccb_hdr ccb_h;
	u_int	  tag_id;		/* Tag for immediate notify */
	u_int	  seq_id;		/* Tag for target of notify */
	u_int	  initiator_id;		/* Initiator Identifier */
	u_int	  arg;			/* Function specific */
};

struct ccb_notify_acknowledge {
	struct	  ccb_hdr ccb_h;
	u_int	  tag_id;		/* Tag for immediate notify */
	u_int	  seq_id;		/* Tag for target of notify */
	u_int	  initiator_id;		/* Initiator Identifier */
	u_int	  arg;			/* Function specific */
};

/* HBA engine structures. */

typedef enum {
	EIT_BUFFER,	/* Engine type: buffer memory */
	EIT_LOSSLESS,	/* Engine type: lossless compression */
	EIT_LOSSY,	/* Engine type: lossy compression */
	EIT_ENCRYPT	/* Engine type: encryption */
} ei_type;

typedef enum {
	EAD_VUNIQUE,	/* Engine algorithm ID: vendor unique */
	EAD_LZ1V1,	/* Engine algorithm ID: LZ1 var.1 */
	EAD_LZ2V1,	/* Engine algorithm ID: LZ2 var.1 */
	EAD_LZ2V2	/* Engine algorithm ID: LZ2 var.2 */
} ei_algo;

struct ccb_eng_inq {
	struct	  ccb_hdr ccb_h;
	u_int16_t eng_num;	/* The engine number for this inquiry */
	ei_type   eng_type;	/* Returned engine type */
	ei_algo   eng_algo;	/* Returned engine algorithm type */
	u_int32_t eng_memeory;	/* Returned engine memory size */
};

struct ccb_eng_exec {	/* This structure must match SCSIIO size */
	struct	  ccb_hdr ccb_h;
	u_int8_t  *pdrv_ptr;	/* Ptr used by the peripheral driver */
	u_int8_t  *req_map;	/* Ptr for mapping info on the req. */
	u_int8_t  *data_ptr;	/* Pointer to the data buf/SG list */
	u_int32_t dxfer_len;	/* Data transfer length */
	u_int8_t  *engdata_ptr;	/* Pointer to the engine buffer data */
	u_int16_t sglist_cnt;	/* Num of scatter gather list entries */
	u_int32_t dmax_len;	/* Destination data maximum length */
	u_int32_t dest_len;	/* Destination data length */
	int32_t	  src_resid;	/* Source residual length: 2's comp */
	u_int32_t timeout;	/* Timeout value */
	u_int16_t eng_num;	/* Engine number for this request */
	u_int16_t vu_flags;	/* Vendor Unique flags */
};

/*
 * Definitions for the timeout field in the SCSI I/O CCB.
 */
#define	CAM_TIME_DEFAULT	0x00000000	/* Use SIM default value */
#define	CAM_TIME_INFINITY	0xFFFFFFFF	/* Infinite timeout */

#define	CAM_SUCCESS	0	/* For signaling general success */
#define	CAM_FAILURE	1	/* For signaling general failure */

#define	CAM_FALSE	0
#define	CAM_TRUE	1

#define	XPT_CCB_INVALID	-1	/* for signaling a bad CCB to free */
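/*
 * Illustrative sketch (not part of the original header): completion
 * callbacks commonly test the CCB status against CAM_REQ_CMP (from
 * cam.h) with the cam_ccb_status() helper defined near the end of
 * this file.  Example only; a real callback would also handle
 * retries and SIM queue freezes.
 */
#if 0	/* example only */
static void
example_done(struct cam_periph *periph, union ccb *done_ccb)
{
	if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) {
		/* I/O failed; inspect done_ccb->ccb_h.status further. */
	}
}
#endif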
/*
 * CCB for working with advanced device information.  This operates in a
 * fashion similar to XPT_GDEV_TYPE.  Specify the target in ccb_h, the
 * buffer type requested, and provide a buffer size/buffer to write to.
 * If the buffer is too small, provsiz will be larger than bufsiz.
 */
struct ccb_dev_advinfo {
	struct ccb_hdr ccb_h;
	uint32_t flags;
#define	CDAI_FLAG_NONE		0x0	/* No flags set */
#define	CDAI_FLAG_STORE		0x1	/* If set, action becomes store */
	uint32_t buftype;		/* IN: Type of data being requested */
	/* NB: buftype is interpreted on a per-transport basis */
#define	CDAI_TYPE_SCSI_DEVID	1
#define	CDAI_TYPE_SERIAL_NUM	2
#define	CDAI_TYPE_PHYS_PATH	3
#define	CDAI_TYPE_RCAPLONG	4
#define	CDAI_TYPE_EXT_INQ	5
	off_t bufsiz;			/* IN: Size of external buffer */
#define	CAM_SCSI_DEVID_MAXLEN	65536	/* length in buffer is an uint16_t */
	off_t provsiz;			/* OUT: Size required/used */
	uint8_t *buf;			/* IN/OUT: Buffer for requested data */
};

/*
 * CCB for sending async events
 */
struct ccb_async {
	struct ccb_hdr ccb_h;
	uint32_t async_code;
	off_t async_arg_size;
	void *async_arg_ptr;
};

/*
 * Union of all CCB types for kernel space allocation.  This union should
 * never be used for manipulating CCBs - its only use is for the allocation
 * and deallocation of raw CCB space and is the return type of xpt_ccb_alloc
 * and the argument to xpt_ccb_free.
 */
union ccb {
	struct	ccb_hdr			ccb_h;	/* For convenience */
	struct	ccb_scsiio		csio;
	struct	ccb_getdev		cgd;
	struct	ccb_getdevlist		cgdl;
	struct	ccb_pathinq		cpi;
	struct	ccb_relsim		crs;
	struct	ccb_setasync		csa;
	struct	ccb_setdev		csd;
	struct	ccb_pathstats		cpis;
	struct	ccb_getdevstats		cgds;
	struct	ccb_dev_match		cdm;
	struct	ccb_trans_settings	cts;
	struct	ccb_calc_geometry	ccg;
	struct	ccb_sim_knob		knob;
	struct	ccb_abort		cab;
	struct	ccb_resetbus		crb;
	struct	ccb_resetdev		crd;
	struct	ccb_termio		tio;
	struct	ccb_accept_tio		atio;
	struct	ccb_scsiio		ctio;
	struct	ccb_en_lun		cel;
	struct	ccb_immed_notify	cin;
	struct	ccb_notify_ack		cna;
	struct	ccb_immediate_notify	cin1;
	struct	ccb_notify_acknowledge	cna2;
	struct	ccb_eng_inq		cei;
	struct	ccb_eng_exec		cee;
	struct	ccb_smpio		smpio;
	struct	ccb_rescan		crcn;
	struct	ccb_debug		cdbg;
	struct	ccb_ataio		ataio;
	struct	ccb_dev_advinfo		cdai;
	struct	ccb_async		casync;
};

__BEGIN_DECLS
static __inline void
cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int32_t flags, u_int8_t tag_action,
	      u_int8_t *data_ptr, u_int32_t dxfer_len,
	      u_int8_t sense_len, u_int8_t cdb_len,
	      u_int32_t timeout);

static __inline void
cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int32_t flags, u_int tag_action, u_int tag_id,
	      u_int init_id, u_int scsi_status, u_int8_t *data_ptr,
	      u_int32_t dxfer_len, u_int32_t timeout);

static __inline void
cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int32_t flags, u_int tag_action,
	      u_int8_t *data_ptr, u_int32_t dxfer_len,
	      u_int32_t timeout);

static __inline void
cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries,
	       void (*cbfcnp)(struct cam_periph *, union ccb *),
	       uint32_t flags, uint8_t *smp_request, int smp_request_len,
	       uint8_t *smp_response, int smp_response_len,
	       uint32_t timeout);

static __inline void
cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int32_t flags, u_int8_t tag_action,
	      u_int8_t *data_ptr, u_int32_t dxfer_len,
	      u_int8_t sense_len, u_int8_t cdb_len,
	      u_int32_t timeout)
{
	csio->ccb_h.func_code = XPT_SCSI_IO;
	csio->ccb_h.flags = flags;
	csio->ccb_h.xflags = 0;
	csio->ccb_h.retry_count = retries;
	csio->ccb_h.cbfcnp = cbfcnp;
	csio->ccb_h.timeout = timeout;
	csio->data_ptr = data_ptr;
	csio->dxfer_len = dxfer_len;
	csio->sense_len = sense_len;
	csio->cdb_len = cdb_len;
	csio->tag_action = tag_action;
}
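/*
 * Illustrative sketch (not part of the original header): peripheral
 * drivers usually reach cam_fill_csio() through the scsi_*() CDB
 * builders in scsi_all.h, but a direct call looks like this.  The
 * example_start() routine and its parameters are hypothetical;
 * MSG_SIMPLE_Q_TAG comes from scsi_message.h and SSD_FULL_SIZE from
 * scsi_all.h.
 */
#if 0	/* example only */
static void
example_start(struct ccb_scsiio *csio,
    void (*donefn)(struct cam_periph *, union ccb *),
    u_int8_t *buf, u_int32_t len)
{
	cam_fill_csio(csio,
		      /*retries*/ 4,
		      /*cbfcnp*/ donefn,
		      /*flags*/ CAM_DIR_IN,
		      /*tag_action*/ MSG_SIMPLE_Q_TAG,
		      /*data_ptr*/ buf,
		      /*dxfer_len*/ len,
		      /*sense_len*/ SSD_FULL_SIZE,
		      /*cdb_len*/ 6,
		      /*timeout*/ 60 * 1000);
	/* The caller would then fill csio->cdb_io with the actual CDB. */
}
#endif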
static __inline void
cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int32_t flags, u_int tag_action, u_int tag_id,
	      u_int init_id, u_int scsi_status, u_int8_t *data_ptr,
	      u_int32_t dxfer_len, u_int32_t timeout)
{
	csio->ccb_h.func_code = XPT_CONT_TARGET_IO;
	csio->ccb_h.flags = flags;
	csio->ccb_h.xflags = 0;
	csio->ccb_h.retry_count = retries;
	csio->ccb_h.cbfcnp = cbfcnp;
	csio->ccb_h.timeout = timeout;
	csio->data_ptr = data_ptr;
	csio->dxfer_len = dxfer_len;
	csio->scsi_status = scsi_status;
	csio->tag_action = tag_action;
	csio->tag_id = tag_id;
	csio->init_id = init_id;
}

static __inline void
cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int32_t flags, u_int tag_action,
	      u_int8_t *data_ptr, u_int32_t dxfer_len,
	      u_int32_t timeout)
{
	ataio->ccb_h.func_code = XPT_ATA_IO;
	ataio->ccb_h.flags = flags;
	ataio->ccb_h.retry_count = retries;
	ataio->ccb_h.cbfcnp = cbfcnp;
	ataio->ccb_h.timeout = timeout;
	ataio->data_ptr = data_ptr;
	ataio->dxfer_len = dxfer_len;
	ataio->tag_action = tag_action;
}

static __inline void
cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries,
	       void (*cbfcnp)(struct cam_periph *, union ccb *),
	       uint32_t flags, uint8_t *smp_request, int smp_request_len,
	       uint8_t *smp_response, int smp_response_len,
	       uint32_t timeout)
{
#ifdef _KERNEL
	KASSERT((flags & CAM_DIR_MASK) == CAM_DIR_BOTH,
		("direction != CAM_DIR_BOTH"));
	KASSERT((smp_request != NULL) && (smp_response != NULL),
		("need valid request and response buffers"));
	KASSERT((smp_request_len != 0) && (smp_response_len != 0),
		("need non-zero request and response lengths"));
#endif /*_KERNEL*/
	smpio->ccb_h.func_code = XPT_SMP_IO;
	smpio->ccb_h.flags = flags;
	smpio->ccb_h.retry_count = retries;
	smpio->ccb_h.cbfcnp = cbfcnp;
	smpio->ccb_h.timeout = timeout;
	smpio->smp_request = smp_request;
	smpio->smp_request_len = smp_request_len;
	smpio->smp_response = smp_response;
	smpio->smp_response_len = smp_response_len;
}

static __inline void
cam_set_ccbstatus(union ccb *ccb, cam_status status)
{
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= status;
}

static __inline cam_status
cam_ccb_status(union ccb *ccb)
{
	return ((cam_status)(ccb->ccb_h.status & CAM_STATUS_MASK));
}

void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended);

__END_DECLS

#endif /* _CAM_CAM_CCB_H */
Index: head/sys/dev/arcmsr/arcmsr.c
===================================================================
--- head/sys/dev/arcmsr/arcmsr.c	(revision 296890)
+++ head/sys/dev/arcmsr/arcmsr.c	(revision 296891)
@@ -1,4562 +1,4569 @@
/*
********************************************************************************
**        OS    : FreeBSD
**   FILE NAME  : arcmsr.c
**        BY    : Erich Chen, Ching Huang
**   Description: SCSI RAID Device Driver for
**                ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x)
**                SATA/SAS RAID HOST Adapter
********************************************************************************
********************************************************************************
**
** Copyright (C) 2002 - 2012, Areca Technology Corporation All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
**    notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
**    notice, this list of conditions and the following disclaimer in the
**    documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
**    derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
** History
**
**    REV#          DATE        NAME         DESCRIPTION
** 1.00.00.00    03/31/2004  Erich Chen   First release
** 1.20.00.02    11/29/2004  Erich Chen   bug fix with arcmsr_bus_reset when PHY error
** 1.20.00.03    04/19/2005  Erich Chen   add SATA 24 Ports adapter type support
**                                        clean unused function
** 1.20.00.12    09/12/2005  Erich Chen   bug fix with abort command handling,
**                                        firmware version check
**                                        and firmware update notify for hardware bug fix
**                                        handling if non-zero high part physical address
**                                        of srb resource
** 1.20.00.13    08/18/2006  Erich Chen   remove pending srb and report busy
**                                        add iop message xfer
**                                        with scsi pass-through command
**                                        add new device id of sas raid adapters
**                                        code fit for SPARC64 & PPC
** 1.20.00.14    02/05/2007  Erich Chen   bug fix for incorrect ccb_h.status report
**                                        and cause g_vfs_done() read write error
** 1.20.00.15    10/10/2007  Erich Chen   support new RAID adapter type ARC120x
** 1.20.00.16    10/10/2009  Erich Chen   Bug fix for RAID adapter type ARC120x
**                                        bus_dmamem_alloc() with BUS_DMA_ZERO
** 1.20.00.17    07/15/2010  Ching Huang  Added support ARC1880
**                                        report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed,
**                                        prevent cam_periph_error removing all LUN devices of one Target id
**                                        for any one LUN device failed
** 1.20.00.18    10/14/2010  Ching Huang  Fixed "inquiry data fails comparison at DV1 step"
**               10/25/2010  Ching Huang  Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B
** 1.20.00.19    11/11/2010  Ching Huang  Fixed arcmsr driver prevent arcsas support for Areca SAS HBA ARC13x0
** 1.20.00.20    12/08/2010  Ching Huang  Avoid calling atomic_set_int function
** 1.20.00.21    02/08/2011  Ching Huang  Implement I/O request timeout
**               02/14/2011  Ching Huang  Modified pktRequestCount
** 1.20.00.21    03/03/2011  Ching Huang  if a command timeout, then wait its ccb back before free it
** 1.20.00.22    07/04/2011  Ching Huang  Fixed multiple MTX panic
** 1.20.00.23    10/28/2011  Ching Huang  Added TIMEOUT_DELAY in case of too many HDDs need to start
** 1.20.00.23    11/08/2011  Ching Huang  Added report device transfer speed
** 1.20.00.23    01/30/2012  Ching Huang  Fixed Request requeued and Retrying command
** 1.20.00.24    06/11/2012  Ching Huang  Fixed return sense data condition
** 1.20.00.25    08/17/2012  Ching Huang  Fixed hotplug device no function on type A adapter
** 1.20.00.26    12/14/2012  Ching Huang  Added support ARC1214,1224,1264,1284
** 1.20.00.27    05/06/2013
Ching Huang Fixed out standing cmd full on ARC-12x4 ** 1.20.00.28 09/13/2013 Ching Huang Removed recursive mutex in arcmsr_abort_dr_ccbs ** 1.20.00.29 12/18/2013 Ching Huang Change simq allocation number, support ARC1883 ** 1.30.00.00 11/30/2015 Ching Huang Added support ARC1203 ****************************************************************************************** */ #include __FBSDID("$FreeBSD$"); #if 0 #define ARCMSR_DEBUG1 1 #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version >= 500005 #include #include #include #include #include #else #include #include #include #endif #if !defined(CAM_NEW_TRAN_CODE) && __FreeBSD_version >= 700025 #define CAM_NEW_TRAN_CODE 1 #endif #if __FreeBSD_version > 500000 #define arcmsr_callout_init(a) callout_init(a, /*mpsafe*/1); #else #define arcmsr_callout_init(a) callout_init(a); #endif #define ARCMSR_DRIVER_VERSION "arcmsr version 1.30.00.00 2015-11-30" #include /* ************************************************************************** ************************************************************************** */ static void arcmsr_free_srb(struct CommandControlBlock *srb); static struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb); static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb); static int arcmsr_probe(device_t dev); static int arcmsr_attach(device_t dev); static int arcmsr_detach(device_t dev); static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg); static void arcmsr_iop_parking(struct AdapterControlBlock *acb); static int arcmsr_shutdown(device_t dev); static void arcmsr_interrupt(struct AdapterControlBlock *acb); static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb); static void arcmsr_free_resource(struct AdapterControlBlock *acb); static void arcmsr_bus_reset(struct AdapterControlBlock *acb); static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); static void arcmsr_iop_init(struct AdapterControlBlock *acb); static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb); static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer); static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb); static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb); static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag); static void arcmsr_iop_reset(struct AdapterControlBlock *acb); static void arcmsr_report_sense_info(struct CommandControlBlock *srb); static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg); static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb); static int arcmsr_resume(device_t dev); static int arcmsr_suspend(device_t dev); static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb); static void arcmsr_polling_devmap(void *arg); static void arcmsr_srb_timeout(void *arg); static void 
arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb); #ifdef ARCMSR_DEBUG1 static void arcmsr_dump_data(struct AdapterControlBlock *acb); #endif /* ************************************************************************** ************************************************************************** */ static void UDELAY(u_int32_t us) { DELAY(us); } /* ************************************************************************** ************************************************************************** */ static bus_dmamap_callback_t arcmsr_map_free_srb; static bus_dmamap_callback_t arcmsr_execute_srb; /* ************************************************************************** ************************************************************************** */ static d_open_t arcmsr_open; static d_close_t arcmsr_close; static d_ioctl_t arcmsr_ioctl; static device_method_t arcmsr_methods[]={ DEVMETHOD(device_probe, arcmsr_probe), DEVMETHOD(device_attach, arcmsr_attach), DEVMETHOD(device_detach, arcmsr_detach), DEVMETHOD(device_shutdown, arcmsr_shutdown), DEVMETHOD(device_suspend, arcmsr_suspend), DEVMETHOD(device_resume, arcmsr_resume), #if __FreeBSD_version >= 803000 DEVMETHOD_END #else { 0, 0 } #endif }; static driver_t arcmsr_driver={ "arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock) }; static devclass_t arcmsr_devclass; DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, 0, 0); MODULE_DEPEND(arcmsr, pci, 1, 1, 1); MODULE_DEPEND(arcmsr, cam, 1, 1, 1); #ifndef BUS_DMA_COHERENT #define BUS_DMA_COHERENT 0x04 /* hint: map memory in a coherent way */ #endif #if __FreeBSD_version >= 501000 static struct cdevsw arcmsr_cdevsw={ #if __FreeBSD_version >= 503000 .d_version = D_VERSION, #endif #if (__FreeBSD_version>=503000 && __FreeBSD_version<600034) .d_flags = D_NEEDGIANT, #endif .d_open = arcmsr_open, /* open */ .d_close = arcmsr_close, /* close */ .d_ioctl = arcmsr_ioctl, /* ioctl */ .d_name = "arcmsr", /* name */ }; #else #define ARCMSR_CDEV_MAJOR 180 static struct cdevsw arcmsr_cdevsw = { arcmsr_open, /* open */ arcmsr_close, /* close */ noread, /* read */ nowrite, /* write */ arcmsr_ioctl, /* ioctl */ nopoll, /* poll */ nommap, /* mmap */ nostrategy, /* strategy */ "arcmsr", /* name */ ARCMSR_CDEV_MAJOR, /* major */ nodump, /* dump */ nopsize, /* psize */ 0 /* flags */ }; #endif /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version < 500005 static int arcmsr_open(dev_t dev, int flags, int fmt, struct proc *proc) #else #if __FreeBSD_version < 503000 static int arcmsr_open(dev_t dev, int flags, int fmt, struct thread *proc) #else static int arcmsr_open(struct cdev *dev, int flags, int fmt, struct thread *proc) #endif #endif { #if __FreeBSD_version < 503000 struct AdapterControlBlock *acb = dev->si_drv1; #else int unit = dev2unit(dev); struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); #endif if(acb == NULL) { return ENXIO; } return (0); } /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version < 500005 static int arcmsr_close(dev_t dev, int flags, int fmt, struct proc *proc) #else #if __FreeBSD_version < 503000 static int arcmsr_close(dev_t dev, int flags, int fmt, struct thread *proc) #else static int arcmsr_close(struct cdev *dev, int flags, int fmt, struct thread *proc) #endif #endif { #if __FreeBSD_version < 
503000 struct AdapterControlBlock *acb = dev->si_drv1; #else int unit = dev2unit(dev); struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); #endif if(acb == NULL) { return ENXIO; } return 0; } /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version < 500005 static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct proc *proc) #else #if __FreeBSD_version < 503000 static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc) #else static int arcmsr_ioctl(struct cdev *dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc) #endif #endif { #if __FreeBSD_version < 503000 struct AdapterControlBlock *acb = dev->si_drv1; #else int unit = dev2unit(dev); struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); #endif if(acb == NULL) { return ENXIO; } return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg)); } /* ********************************************************************** ********************************************************************** */ static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb) { u_int32_t intmask_org = 0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* disable all outbound interrupt */ intmask_org = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* disable all outbound interrupt */ intmask_org = READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask, 0); /* disable all interrupt */ } break; case ACB_ADAPTER_TYPE_C: { /* disable all outbound interrupt */ intmask_org = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask) ; /* disable outbound message0 int */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE); } break; case ACB_ADAPTER_TYPE_D: { /* disable all outbound interrupt */ intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable) ; /* disable outbound message0 int */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE); } break; } return (intmask_org); } /* ********************************************************************** ********************************************************************** */ static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org) { u_int32_t mask; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* enable outbound Post Queue, outbound doorbell Interrupt */ mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask); acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */ mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt 
enable, 0=interrupt disable*/ acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; } break; case ACB_ADAPTER_TYPE_C: { /* enable outbound Post Queue, outbound doorbell Interrupt */ mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK); CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask); acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f; } break; case ACB_ADAPTER_TYPE_D: { /* enable outbound Post Queue, outbound doorbell Interrupt */ mask = ARCMSR_HBDMU_ALL_INT_ENABLE; CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | mask); CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable); acb->outbound_int_enable = mask; } break; } } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries = 0x00; do { for(Index=0; Index < 100; Index++) { if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/ return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return (FALSE); } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries = 0x00; struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; do { for(Index=0; Index < 100; Index++) { if(READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return (FALSE); } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries = 0x00; do { for(Index=0; Index < 100; Index++) { if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/ return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return (FALSE); } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hbd_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries = 0x00; do { for(Index=0; Index < 100; Index++) { if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) { CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);/*clear interrupt*/ return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return (FALSE); } /* ************************************************************************ 
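************************************************************************
** Note (editorial, not in the original source): each of the
** arcmsr_hbX_wait_msgint_ready() routines above polls 100 times with a
** 10 ms delay (about one second per pass) and retries up to 20 times,
** so a single call waits roughly 20 seconds.  The cache flush routines
** below retry that wait 30 times, which is where the "10 minute"
** figure in their comments comes from.
************************************************************************
*/
/*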
************************************************************************ */ static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) { int retry_count = 30;/* wait up to 10 minutes for the adapter cache flush */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); do { if(arcmsr_hba_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count != 0); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb) { int retry_count = 30;/* wait up to 10 minutes for the adapter cache flush */ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE); do { if(arcmsr_hbb_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count != 0); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb) { int retry_count = 30;/* wait up to 10 minutes for the adapter cache flush */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); do { if(arcmsr_hbc_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count != 0); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_hbd_cache(struct AdapterControlBlock *acb) { int retry_count = 30; /* wait up to 10 minutes for the adapter cache flush */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); do { if(arcmsr_hbd_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count != 0); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_flush_hba_cache(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_flush_hbb_cache(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_flush_hbc_cache(acb); } break; case ACB_ADAPTER_TYPE_D: { arcmsr_flush_hbd_cache(acb); } break; } } /* ******************************************************************************* ******************************************************************************* */ static int arcmsr_suspend(device_t dev) { struct AdapterControlBlock *acb = device_get_softc(dev); /* flush controller */ arcmsr_iop_parking(acb); /* disable all outbound interrupt */ arcmsr_disable_allintr(acb); return(0); } /* ******************************************************************************* ******************************************************************************* */ static int arcmsr_resume(device_t dev) { struct AdapterControlBlock *acb = device_get_softc(dev); arcmsr_iop_init(acb); return(0); } /* ********************************************************************************* ********************************************************************************* */ static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg) { struct AdapterControlBlock *acb;
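/*
** CAM async event callback.  For AC_LOST_DEVICE the path argument
** identifies the departing device; IDs outside the adapter's
** addressable range are simply ignored by the bounds check below.
** (Registration, presumably via xpt_register_async() at attach time,
** happens elsewhere in this driver.)
*/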
u_int8_t target_id, target_lun; struct cam_sim *sim; sim = (struct cam_sim *) cb_arg; acb =(struct AdapterControlBlock *) cam_sim_softc(sim); switch (code) { case AC_LOST_DEVICE: target_id = xpt_path_target_id(path); target_lun = xpt_path_lun_id(path); if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) { break; } // printf("%s:scsi id=%d lun=%d device lost \n", device_get_name(acb->pci_dev), target_id, target_lun); break; default: break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_report_sense_info(struct CommandControlBlock *srb) { union ccb *pccb = srb->pccb; pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; if(pccb->csio.sense_len) { memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data)); memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData, get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data))); ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ pccb->ccb_h.status |= CAM_AUTOSNS_VALID; } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hbd_allcmd(struct AdapterControlBlock *acb) { CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_abort_hba_allcmd(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_abort_hbb_allcmd(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_abort_hbc_allcmd(acb); } break; case ACB_ADAPTER_TYPE_D: { arcmsr_abort_hbd_allcmd(acb); } break; } 
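/*
** All four cases above post an "abort all outstanding commands"
** message (ARCMSR_INBOUND_MESG0_ABORT_CMD, or ARCMSR_MESSAGE_ABORT_CMD
** on type B) and then poll for the message-done acknowledgement; only
** the mailbox differs: types A/C/D write a message register (C
** additionally rings a doorbell) while type B signals through its
** drv2iop doorbell.  A minimal sketch of the shared shape, using a
** hypothetical helper name:
**
**	static void
**	arcmsr_post_message_and_wait(struct AdapterControlBlock *acb,
**	    void (*post)(struct AdapterControlBlock *),
**	    u_int8_t (*wait)(struct AdapterControlBlock *))
**	{
**		post(acb);
**		if (!wait(acb))
**			printf("arcmsr%d: message timeout\n", acb->pci_unit);
**	}
*/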
} /* ********************************************************************** ********************************************************************** */ static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag) { struct AdapterControlBlock *acb = srb->acb; union ccb *pccb = srb->pccb; if(srb->srb_flags & SRB_FLAG_TIMER_START) callout_stop(&srb->ccb_callout); if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_POSTREAD; } else { op = BUS_DMASYNC_POSTWRITE; } bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); } if(stand_flag == 1) { atomic_subtract_int(&acb->srboutstandingcount, 1); if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && ( acb->srboutstandingcount < (acb->maxOutstanding -10))) { acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN; pccb->ccb_h.status |= CAM_RELEASE_SIMQ; } } if(srb->srb_state != ARCMSR_SRB_TIMEOUT) arcmsr_free_srb(srb); acb->pktReturnCount++; xpt_done(pccb); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error) { int target, lun; target = srb->pccb->ccb_h.target_id; lun = srb->pccb->ccb_h.target_lun; if(error == FALSE) { if(acb->devstate[target][lun] == ARECA_RAID_GONE) { acb->devstate[target][lun] = ARECA_RAID_GOOD; } srb->pccb->ccb_h.status |= CAM_REQ_CMP; arcmsr_srb_complete(srb, 1); } else { switch(srb->arcmsr_cdb.DeviceStatus) { case ARCMSR_DEV_SELECT_TIMEOUT: { if(acb->devstate[target][lun] == ARECA_RAID_GOOD) { printf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun); } acb->devstate[target][lun] = ARECA_RAID_GONE; srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; arcmsr_srb_complete(srb, 1); } break; case ARCMSR_DEV_ABORTED: case ARCMSR_DEV_INIT_FAIL: { acb->devstate[target][lun] = ARECA_RAID_GONE; srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; arcmsr_srb_complete(srb, 1); } break; case SCSISTAT_CHECK_CONDITION: { acb->devstate[target][lun] = ARECA_RAID_GOOD; arcmsr_report_sense_info(srb); arcmsr_srb_complete(srb, 1); } break; default: printf("arcmsr%d: scsi id=%d lun=%d isr got command error done,but got unknown DeviceStatus=0x%x \n" , acb->pci_unit, target, lun ,srb->arcmsr_cdb.DeviceStatus); acb->devstate[target][lun] = ARECA_RAID_GONE; srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY; /*unknown error or crc error just for retry*/ arcmsr_srb_complete(srb, 1); break; } } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error) { struct CommandControlBlock *srb; /* check if command done with no error*/ switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_C: case ACB_ADAPTER_TYPE_D: srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0)); /*frame must be 32 bytes aligned*/ break; case ACB_ADAPTER_TYPE_A: case ACB_ADAPTER_TYPE_B: default: srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ break; } if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_TIMEOUT) { arcmsr_free_srb(srb); printf("arcmsr%d: 
srb='%p' returned srb has timed out\n", acb->pci_unit, srb); return; } printf("arcmsr%d: returned srb has already been completed\n" "srb='%p' srb_state=0x%x outstanding srb count=%d \n", acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount); return; } arcmsr_report_srb_state(acb, srb, error); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_srb_timeout(void *arg) { struct CommandControlBlock *srb = (struct CommandControlBlock *)arg; struct AdapterControlBlock *acb; int target, lun; u_int8_t cmd; target = srb->pccb->ccb_h.target_id; lun = srb->pccb->ccb_h.target_lun; acb = srb->acb; ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); if(srb->srb_state == ARCMSR_SRB_START) { - cmd = srb->pccb->csio.cdb_io.cdb_bytes[0]; + cmd = scsiio_cdb_ptr(&srb->pccb->csio)[0]; srb->srb_state = ARCMSR_SRB_TIMEOUT; srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT; arcmsr_srb_complete(srb, 1); printf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command timed out!\n", acb->pci_unit, target, lun, cmd, srb); } ARCMSR_LOCK_RELEASE(&acb->isr_lock); #ifdef ARCMSR_DEBUG1 arcmsr_dump_data(acb); #endif } /* ********************************************************************** ********************************************************************** */ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) { int i=0; u_int32_t flag_srb; u_int16_t error; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { u_int32_t outbound_intstatus; /*clear and abort all outbound posted Q*/ outbound_intstatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/ while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; /*clear all outbound posted Q*/ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */ for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { if((flag_srb = phbbmu->done_qbuffer[i]) != 0) { phbbmu->done_qbuffer[i] = 0; error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } phbbmu->post_qbuffer[i] = 0; }/*drain reply FIFO*/ phbbmu->doneq_index = 0; phbbmu->postq_index = 0; } break; case ACB_ADAPTER_TYPE_C: { while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ?
TRUE : FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } } break; case ACB_ADAPTER_TYPE_D: { arcmsr_hbd_postqueue_isr(acb); } break; } } /* **************************************************************************** **************************************************************************** */ static void arcmsr_iop_reset(struct AdapterControlBlock *acb) { struct CommandControlBlock *srb; u_int32_t intmask_org; u_int32_t i=0; if(acb->srboutstandingcount>0) { /* disable all outbound interrupt */ intmask_org = arcmsr_disable_allintr(acb); /*clear and abort all outbound posted Q*/ arcmsr_done4abort_postqueue(acb); /* talk to iop 331 outstanding command aborted*/ arcmsr_abort_allcmd(acb); for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) { srb = acb->psrb_pool[i]; if(srb->srb_state == ARCMSR_SRB_START) { srb->srb_state = ARCMSR_SRB_ABORTED; srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); printf("arcmsr%d: scsi id=%d lun=%jx srb='%p' aborted\n" , acb->pci_unit, srb->pccb->ccb_h.target_id , (uintmax_t)srb->pccb->ccb_h.target_lun, srb); } } /* enable all outbound interrupt */ arcmsr_enable_allintr(acb, intmask_org); } acb->srboutstandingcount = 0; acb->workingsrb_doneindex = 0; acb->workingsrb_startindex = 0; acb->pktRequestCount = 0; acb->pktReturnCount = 0; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg) { struct ARCMSR_CDB *arcmsr_cdb = &srb->arcmsr_cdb; u_int8_t *psge = (u_int8_t *)&arcmsr_cdb->u; u_int32_t address_lo, address_hi; union ccb *pccb = srb->pccb; struct ccb_scsiio *pcsio = &pccb->csio; u_int32_t arccdbsize = 0x30; memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB)); arcmsr_cdb->Bus = 0; arcmsr_cdb->TargetID = pccb->ccb_h.target_id; arcmsr_cdb->LUN = pccb->ccb_h.target_lun; arcmsr_cdb->Function = 1; arcmsr_cdb->CdbLength = (u_int8_t)pcsio->cdb_len; - bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len); + bcopy(scsiio_cdb_ptr(pcsio), arcmsr_cdb->Cdb, pcsio->cdb_len); if(nseg != 0) { struct AdapterControlBlock *acb = srb->acb; bus_dmasync_op_t op; u_int32_t length, i, cdb_sgcount = 0; if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE; srb->srb_flags |= SRB_FLAG_WRITE; } bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); for(i=0; i < nseg; i++) { /* Get the physical address of the current data pointer */ length = arcmsr_htole32(dm_segs[i].ds_len); address_lo = arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr)); address_hi = arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr)); if(address_hi == 0) { struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge; pdma_sg->address = address_lo; pdma_sg->length = length; psge += sizeof(struct SG32ENTRY); arccdbsize += sizeof(struct SG32ENTRY); } else { u_int32_t sg64s_size = 0, tmplength = length; while(1) { u_int64_t span4G, length0; struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge; span4G = (u_int64_t)address_lo + tmplength; pdma_sg->addresshigh = address_hi; pdma_sg->address = address_lo; if(span4G > 0x100000000) { /*see if cross 4G boundary*/ length0 = 0x100000000-address_lo; pdma_sg->length = (u_int32_t)length0 | IS_SG64_ADDR; address_hi = address_hi+1; address_lo = 0; tmplength = tmplength - (u_int32_t)length0; sg64s_size += sizeof(struct SG64ENTRY); psge += sizeof(struct SG64ENTRY); 
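/*
** Worked example of the split above: with address_lo = 0xFFFFF000 and
** tmplength = 0x3000, span4G = 0x100002000 exceeds 0x100000000, so the
** first SG64ENTRY carries length0 = 0x100000000 - 0xFFFFF000 = 0x1000
** bytes; address_hi is then bumped by one, address_lo wraps to zero,
** and the remaining 0x2000 bytes are emitted by the else path on the
** next pass through the loop.
*/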
cdb_sgcount++; } else { pdma_sg->length = tmplength | IS_SG64_ADDR; sg64s_size += sizeof(struct SG64ENTRY); psge += sizeof(struct SG64ENTRY); break; } } arccdbsize += sg64s_size; } cdb_sgcount++; } arcmsr_cdb->sgcount = (u_int8_t)cdb_sgcount; arcmsr_cdb->DataLength = pcsio->dxfer_len; if( arccdbsize > 256) { arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE; } } else { arcmsr_cdb->DataLength = 0; } srb->arc_cdb_size = arccdbsize; arcmsr_cdb->msgPages = (arccdbsize/256) + ((arccdbsize % 256) ? 1 : 0); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb) { u_int32_t cdb_phyaddr_low = (u_int32_t) srb->cdb_phyaddr_low; struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&srb->arcmsr_cdb; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD); atomic_add_int(&acb->srboutstandingcount, 1); srb->srb_state = ARCMSR_SRB_START; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low|ARCMSR_SRBPOST_FLAG_SGL_BSIZE); } else { CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low); } } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; int ending_index, index; index = phbbmu->postq_index; ending_index = ((index+1) % ARCMSR_MAX_HBB_POSTQUEUE); phbbmu->post_qbuffer[ending_index] = 0; if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { phbbmu->post_qbuffer[index] = cdb_phyaddr_low | ARCMSR_SRBPOST_FLAG_SGL_BSIZE; } else { phbbmu->post_qbuffer[index] = cdb_phyaddr_low; } index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ phbbmu->postq_index = index; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED); } break; case ACB_ADAPTER_TYPE_C: { u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32; arc_cdb_size = (srb->arc_cdb_size > 0x300) ? 0x300 : srb->arc_cdb_size; ccb_post_stamp = (cdb_phyaddr_low | ((arc_cdb_size-1) >> 6) | 1); cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high; if(cdb_phyaddr_hi32) { CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32); CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp); } else { CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp); } } break; case ACB_ADAPTER_TYPE_D: { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; u_int16_t index_stripped; u_int16_t postq_index; struct InBound_SRB *pinbound_srb; ARCMSR_LOCK_ACQUIRE(&acb->postDone_lock); postq_index = phbdmu->postq_index; pinbound_srb = (struct InBound_SRB *)&phbdmu->post_qbuffer[postq_index & 0xFF]; pinbound_srb->addressHigh = srb->cdb_phyaddr_high; pinbound_srb->addressLow = srb->cdb_phyaddr_low; pinbound_srb->length = srb->arc_cdb_size >> 2; arcmsr_cdb->Context = srb->cdb_phyaddr_low; if (postq_index & 0x4000) { index_stripped = postq_index & 0xFF; index_stripped += 1; index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE; phbdmu->postq_index = index_stripped ? (index_stripped | 0x4000) : index_stripped; } else { index_stripped = postq_index; index_stripped += 1; index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE; phbdmu->postq_index = index_stripped ? 
index_stripped : (index_stripped | 0x4000); } CHIP_REG_WRITE32(HBD_MessageUnit, 0, inboundlist_write_pointer, postq_index); ARCMSR_LOCK_RELEASE(&acb->postDone_lock); } break; } } /* ************************************************************************ ************************************************************************ */ static struct QBUFFER *arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb) { struct QBUFFER *qbuffer=NULL; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbamu->message_rbuffer; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer; } break; case ACB_ADAPTER_TYPE_C: { struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbcmu->message_rbuffer; } break; case ACB_ADAPTER_TYPE_D: { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_rbuffer; } break; } return(qbuffer); } /* ************************************************************************ ************************************************************************ */ static struct QBUFFER *arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb) { struct QBUFFER *qbuffer = NULL; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer; } break; case ACB_ADAPTER_TYPE_C: { struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer; } break; case ACB_ADAPTER_TYPE_D: { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_wbuffer; } break; } return(qbuffer); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* let IOP know data has been read */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* let IOP know data has been read */ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_C: { /* let IOP know data has been read */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_D: { /* let IOP know data has been read */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ); } break; } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, 
inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK); } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK); } break; case ACB_ADAPTER_TYPE_C: { /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK); } break; case ACB_ADAPTER_TYPE_D: { /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_IN_READY); } break; } } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags &= ~ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n" , acb->pci_unit); } } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; acb->acb_flags &= ~ACB_F_MSG_START_BGRB; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: wait 'stop adapter background rebuild' timeout \n" , acb->pci_unit); } } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags &= ~ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit); } } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_hbd_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags &= ~ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit); } } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_stop_hba_bgrb(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_stop_hbb_bgrb(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_stop_hbc_bgrb(acb); } break; case ACB_ADAPTER_TYPE_D: { arcmsr_stop_hbd_bgrb(acb); } break; } } /* ************************************************************************
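** arcmsr_poll() is the SIM's polled-mode completion hook (presumably
** installed via cam_sim_alloc()); CAM may invoke it in contexts such
** as crash dumps where interrupts are not serviced.  The mtx_owned()
** test below keeps the non-recursive isr_lock from being acquired
** twice when the caller already holds it.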
************************************************************************ */ static void arcmsr_poll(struct cam_sim *psim) { struct AdapterControlBlock *acb; int mutex; acb = (struct AdapterControlBlock *)cam_sim_softc(psim); mutex = mtx_owned(&acb->isr_lock); if( mutex == 0 ) ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); arcmsr_interrupt(acb); if( mutex == 0 ) ARCMSR_LOCK_RELEASE(&acb->isr_lock); } /* ************************************************************************** ************************************************************************** */ static u_int32_t arcmsr_Read_iop_rqbuffer_data_D(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer) { u_int8_t *pQbuffer; u_int8_t *buf1 = 0; u_int32_t *iop_data, *buf2 = 0; u_int32_t iop_len, data_len; iop_data = (u_int32_t *)prbuffer->data; iop_len = (u_int32_t)prbuffer->data_len; if ( iop_len > 0 ) { buf1 = malloc(128, M_DEVBUF, M_NOWAIT | M_ZERO); buf2 = (u_int32_t *)buf1; if( buf1 == NULL) return (0); data_len = iop_len; while(data_len >= 4) { *buf2++ = *iop_data++; data_len -= 4; } if(data_len) *buf2 = *iop_data; buf2 = (u_int32_t *)buf1; } while (iop_len > 0) { pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex]; *pQbuffer = *buf1; acb->rqbuf_lastindex++; /* if last, index number set it to 0 */ acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; buf1++; iop_len--; } if(buf2) free( (u_int8_t *)buf2, M_DEVBUF); /* let IOP know data has been read */ arcmsr_iop_message_read(acb); return (1); } /* ************************************************************************** ************************************************************************** */ static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer) { u_int8_t *pQbuffer; u_int8_t *iop_data; u_int32_t iop_len; if(acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) { return(arcmsr_Read_iop_rqbuffer_data_D(acb, prbuffer)); } iop_data = (u_int8_t *)prbuffer->data; iop_len = (u_int32_t)prbuffer->data_len; while (iop_len > 0) { pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex]; *pQbuffer = *iop_data; acb->rqbuf_lastindex++; /* if last, index number set it to 0 */ acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; iop_data++; iop_len--; } /* let IOP know data has been read */ arcmsr_iop_message_read(acb); return (1); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) { struct QBUFFER *prbuffer; int my_empty_len; /*check this iop data if overflow my rqbuffer*/ ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); prbuffer = arcmsr_get_iop_rqbuffer(acb); my_empty_len = (acb->rqbuf_lastindex - acb->rqbuf_firstindex - 1) & (ARCMSR_MAX_QBUFFER-1); if(my_empty_len >= prbuffer->data_len) { if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } else { acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } /* ********************************************************************** ********************************************************************** */ static void arcmsr_Write_data_2iop_wqbuffer_D(struct AdapterControlBlock *acb) { u_int8_t *pQbuffer; struct QBUFFER *pwbuffer; u_int8_t *buf1 = 0; u_int32_t *iop_data, *buf2 = 0; u_int32_t allxfer_len = 0, data_len; if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) { buf1 = malloc(128, M_DEVBUF, M_NOWAIT | M_ZERO); buf2 = (u_int32_t *)buf1; if( buf1 == NULL) return; 
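/*
** The 128-byte bounce buffer keeps accesses to the type C/D message
** window as whole 32-bit words: bytes are gathered here first and the
** loop below then stores them 4 bytes at a time, presumably because
** these message units do not tolerate sub-dword MMIO writes.  128
** bytes comfortably covers the 124-byte payload cap enforced below
** (allxfer_len < 124).
*/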
acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ); pwbuffer = arcmsr_get_iop_wqbuffer(acb); iop_data = (u_int32_t *)pwbuffer->data; while((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && (allxfer_len < 124)) { pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex]; *buf1 = *pQbuffer; acb->wqbuf_firstindex++; acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; buf1++; allxfer_len++; } pwbuffer->data_len = allxfer_len; data_len = allxfer_len; buf1 = (u_int8_t *)buf2; while(data_len >= 4) { *iop_data++ = *buf2++; data_len -= 4; } if(data_len) *iop_data = *buf2; free( buf1, M_DEVBUF); arcmsr_iop_message_wrote(acb); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb) { u_int8_t *pQbuffer; struct QBUFFER *pwbuffer; u_int8_t *iop_data; int32_t allxfer_len=0; if(acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) { arcmsr_Write_data_2iop_wqbuffer_D(acb); return; } if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) { acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ); pwbuffer = arcmsr_get_iop_wqbuffer(acb); iop_data = (u_int8_t *)pwbuffer->data; while((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && (allxfer_len < 124)) { pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex]; *iop_data = *pQbuffer; acb->wqbuf_firstindex++; acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; iop_data++; allxfer_len++; } pwbuffer->data_len = allxfer_len; arcmsr_iop_message_wrote(acb); } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) { ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ; /* ***************************************************************** ** check if there are any mail packages from user space program ** in my post bag, now is the time to send them into Areca's firmware ***************************************************************** */ if(acb->wqbuf_firstindex != acb->wqbuf_lastindex) { arcmsr_Write_data_2iop_wqbuffer(acb); } if(acb->wqbuf_firstindex == acb->wqbuf_lastindex) { acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; } ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb) { /* if (ccb->ccb_h.status != CAM_REQ_CMP) printf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x," "failure status=%x\n", ccb->ccb_h.target_id, ccb->ccb_h.target_lun, ccb->ccb_h.status); else printf("arcmsr_rescanLun_cb: Rescan lun successfully!\n"); */ xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); } static void arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun) { struct cam_path *path; union ccb *ccb; if ((ccb = (union ccb *)xpt_alloc_ccb_nowait()) == NULL) return; if (xpt_create_path(&path, NULL, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } /* printf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */ bzero(ccb, sizeof(union ccb)); xpt_setup_ccb(&ccb->ccb_h, path, 5); ccb->ccb_h.func_code = XPT_SCAN_LUN; ccb->ccb_h.cbfcnp = arcmsr_rescanLun_cb; ccb->crcn.flags = CAM_FLAG_NONE; xpt_action(ccb); } static void arcmsr_abort_dr_ccbs(struct 
AdapterControlBlock *acb, int target, int lun) { struct CommandControlBlock *srb; u_int32_t intmask_org; int i; /* disable all outbound interrupts */ intmask_org = arcmsr_disable_allintr(acb); for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++) { srb = acb->psrb_pool[i]; if (srb->srb_state == ARCMSR_SRB_START) { if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun)) { srb->srb_state = ARCMSR_SRB_ABORTED; srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); printf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb); } } } /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_dr_handle(struct AdapterControlBlock *acb) { u_int32_t devicemap; u_int32_t target, lun; u_int32_t deviceMapCurrent[4]={0}; u_int8_t *pDevMap; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target = 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap); devicemap += 4; } break; case ACB_ADAPTER_TYPE_B: devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target = 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap); devicemap += 4; } break; case ACB_ADAPTER_TYPE_C: devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target = 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap); devicemap += 4; } break; case ACB_ADAPTER_TYPE_D: devicemap = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target = 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap); devicemap += 4; } break; } if(acb->acb_flags & ACB_F_BUS_HANG_ON) { acb->acb_flags &= ~ACB_F_BUS_HANG_ON; } /* ** adapter posted CONFIG message ** copy the new map, note if there are differences with the current map */ pDevMap = (u_int8_t *)&deviceMapCurrent[0]; for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) { if (*pDevMap != acb->device_map[target]) { u_int8_t difference, bit_check; difference = *pDevMap ^ acb->device_map[target]; for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++) { bit_check = (1 << lun); /*check bit from 0....31*/ if(difference & bit_check) { if(acb->device_map[target] & bit_check) {/* unit departed */ printf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun); arcmsr_abort_dr_ccbs(acb, target, lun); arcmsr_rescan_lun(acb, target, lun); acb->devstate[target][lun] = ARECA_RAID_GONE; } else {/* unit arrived */ printf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun); arcmsr_rescan_lun(acb, target, lun); acb->devstate[target][lun] = ARECA_RAID_GOOD; } } } /* printf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */ acb->device_map[target] = *pDevMap; } pDevMap++; } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; 
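/*
** Message interrupt from a type A IOP: clear the latch, then see
** whether the firmware left ARCMSR_SIGNATURE_GET_CONFIG in
** msgcode_rwbuffer[0].  That signature answers the periodic GET_CONFIG
** poll and means a fresh 16-byte device map is available;
** arcmsr_dr_handle() above diffs it bit-by-bit against
** acb->device_map[] and issues per-LUN rescans and aborts for units
** that arrived or departed.
*/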
CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT); outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* clear interrupts */ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN); outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR); outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbd_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR); outbound_message = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) { u_int32_t doorbell_status; /* ******************************************************************* ** Maybe here we need to check wrqbuffer_lock is lock or not ** DOORBELL: din! don! ** check if there are any mail need to pack from firmware ******************************************************************* */ doorbell_status = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */ if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb) { u_int32_t doorbell_status; /* ******************************************************************* ** Maybe here we need to check wrqbuffer_lock is lock or not ** DOORBELL: din! don! 
** check if there are any mail need to pack from firmware ******************************************************************* */ doorbell_status = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, doorbell_status); /* clear doorbell interrupt */ if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbc_message_isr(acb); /* messenger of "driver to iop commands" */ } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbd_doorbell_isr(struct AdapterControlBlock *acb) { u_int32_t doorbell_status; /* ******************************************************************* ** Maybe here we need to check wrqbuffer_lock is lock or not ** DOORBELL: din! don! ** check if there are any mail need to pack from firmware ******************************************************************* */ doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE; if(doorbell_status) CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */ while( doorbell_status & ARCMSR_HBDMU_F0_DOORBELL_CAUSE ) { if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbd_message_isr(acb); /* messenger of "driver to iop commands" */ } doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE; if(doorbell_status) CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */ } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) { u_int32_t flag_srb; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while((flag_srb = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) { /* check if command done with no error*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? 
TRUE : FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } /*drain reply FIFO*/ } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; u_int32_t flag_srb; int index; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); index = phbbmu->doneq_index; while((flag_srb = phbbmu->done_qbuffer[index]) != 0) { phbbmu->done_qbuffer[index] = 0; index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ phbbmu->doneq_index = index; /* check if command done with no error*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } /*drain reply FIFO*/ } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb) { u_int32_t flag_srb,throttling = 0; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); do { flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); if (flag_srb == 0xFFFFFFFF) break; /* check if command done with no error*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); throttling++; if(throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING); throttling = 0; } } while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR); } /* ********************************************************************** ** ********************************************************************** */ static uint16_t arcmsr_get_doneq_index(struct HBD_MessageUnit0 *phbdmu) { uint16_t doneq_index, index_stripped; doneq_index = phbdmu->doneq_index; if (doneq_index & 0x4000) { index_stripped = doneq_index & 0xFF; index_stripped += 1; index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE; phbdmu->doneq_index = index_stripped ? (index_stripped | 0x4000) : index_stripped; } else { index_stripped = doneq_index; index_stripped += 1; index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE; phbdmu->doneq_index = index_stripped ? 
index_stripped : (index_stripped | 0x4000); } return (phbdmu->doneq_index); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb) { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; u_int32_t outbound_write_pointer; u_int32_t addressLow; uint16_t doneq_index; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ if((CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause) & ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT) == 0) return; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow; doneq_index = phbdmu->doneq_index; while ((doneq_index & 0xFF) != (outbound_write_pointer & 0xFF)) { doneq_index = arcmsr_get_doneq_index(phbdmu); addressLow = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow; error = (addressLow & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE; arcmsr_drain_donequeue(acb, addressLow, error); /*Check if command done with no error */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index); outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow; } CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_interrupt_cause, ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT_CLEAR); CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause); /*Dummy ioread32 to force pci flush */ } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb) { u_int32_t outbound_intStatus; /* ********************************************* ** check outbound intstatus ********************************************* */ outbound_intStatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; if(!outbound_intStatus) { /*it must be share irq*/ return; } CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intStatus); /*clear interrupt*/ /* MU doorbell interrupts*/ if(outbound_intStatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) { arcmsr_hba_doorbell_isr(acb); } /* MU post queue interrupts*/ if(outbound_intStatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { arcmsr_hba_postqueue_isr(acb); } if(outbound_intStatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { arcmsr_hba_message_isr(acb); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb) { u_int32_t outbound_doorbell; struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* ********************************************* ** check outbound intstatus ********************************************* */ outbound_doorbell = READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & acb->outbound_int_enable; if(!outbound_doorbell) { /*it must be share irq*/ return; } WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */ READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell); WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); /* MU ioctl transfer doorbell interrupts*/ if(outbound_doorbell & 
ARCMSR_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } /* MU post queue interrupts*/ if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { arcmsr_hbb_postqueue_isr(acb); } if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbb_message_isr(acb); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb) { u_int32_t host_interrupt_status; /* ********************************************* ** check outbound intstatus ********************************************* */ host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR); if(!host_interrupt_status) { /*it must be share irq*/ return; } do { /* MU doorbell interrupts*/ if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) { arcmsr_hbc_doorbell_isr(acb); } /* MU post queue interrupts*/ if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) { arcmsr_hbc_postqueue_isr(acb); } host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status); } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)); } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hbd_isr( struct AdapterControlBlock *acb) { u_int32_t host_interrupt_status; u_int32_t intmask_org; /* ********************************************* ** check outbound intstatus ********************************************* */ host_interrupt_status = CHIP_REG_READ32(HBD_MessageUnit, 0, host_int_status) & acb->outbound_int_enable; if(!(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_INT)) { /*it must be share irq*/ return; } /* disable outbound interrupt */ intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable) ; /* disable outbound message0 int */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE); /* MU doorbell interrupts*/ if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_DOORBELL_INT) { arcmsr_hbd_doorbell_isr(acb); } /* MU post queue interrupts*/ if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_POSTQUEUE_INT) { arcmsr_hbd_postqueue_isr(acb); } /* enable all outbound interrupt */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | ARCMSR_HBDMU_ALL_INT_ENABLE); // CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable); } /* ****************************************************************************** ****************************************************************************** */ static void arcmsr_interrupt(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: arcmsr_handle_hba_isr(acb); break; case ACB_ADAPTER_TYPE_B: arcmsr_handle_hbb_isr(acb); break; case ACB_ADAPTER_TYPE_C: arcmsr_handle_hbc_isr(acb); break; case ACB_ADAPTER_TYPE_D: arcmsr_handle_hbd_isr(acb); break; default: printf("arcmsr%d: interrupt service," " unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type); break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_intr_handler(void *arg) { struct AdapterControlBlock *acb = (struct 
AdapterControlBlock *)arg; ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); arcmsr_interrupt(acb); ARCMSR_LOCK_RELEASE(&acb->isr_lock); } /* ****************************************************************************** ****************************************************************************** */ static void arcmsr_polling_devmap(void *arg) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG); } break; case ACB_ADAPTER_TYPE_C: CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); break; case ACB_ADAPTER_TYPE_D: CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); break; } if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0) { callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb); /* polling per 5 seconds */ } } /* ******************************************************************************* ** ******************************************************************************* */ static void arcmsr_iop_parking(struct AdapterControlBlock *acb) { u_int32_t intmask_org; if(acb != NULL) { /* stop adapter background rebuild */ if(acb->acb_flags & ACB_F_MSG_START_BGRB) { intmask_org = arcmsr_disable_allintr(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); arcmsr_enable_allintr(acb, intmask_org); } } } /* *********************************************************************** ** ************************************************************************ */ u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg) { struct CMD_MESSAGE_FIELD *pcmdmessagefld; u_int32_t retvalue = EINVAL; pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) arg; if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) { return retvalue; } ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); switch(ioctl_cmd) { case ARCMSR_MESSAGE_READ_RQBUFFER: { u_int8_t *pQbuffer; u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer; u_int32_t allxfer_len=0; while((acb->rqbuf_firstindex != acb->rqbuf_lastindex) && (allxfer_len < 1031)) { /*copy READ QBUFFER to srb*/ pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; *ptmpQbuffer = *pQbuffer; acb->rqbuf_firstindex++; acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ ptmpQbuffer++; allxfer_len++; } if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { struct QBUFFER *prbuffer; acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; prbuffer = arcmsr_get_iop_rqbuffer(acb); if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } pcmdmessagefld->cmdmessage.Length = allxfer_len; pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_WRITE_WQBUFFER: { u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; u_int8_t *pQbuffer; u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer; user_len = pcmdmessagefld->cmdmessage.Length; /*check if data xfer length of this request will overflow my array qbuffer */ wqbuf_lastindex = acb->wqbuf_lastindex; wqbuf_firstindex = acb->wqbuf_firstindex; 
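/*
** Free space in the circular wqbuffer is (first - last - 1) masked
** with ARCMSR_MAX_QBUFFER - 1, which relies on ARCMSR_MAX_QBUFFER
** being a power of two and keeps one slot unused so that first == last
** can only mean "empty".  For example, assuming a 4096-byte ring with
** first = 10 and last = 4000: (10 - 4000 - 1) & 4095 = 105 bytes free.
*/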
if(wqbuf_lastindex != wqbuf_firstindex) { arcmsr_Write_data_2iop_wqbuffer(acb); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR; } else { my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1); if(my_empty_len >= user_len) { while(user_len > 0) { /*copy srb data to wqbuffer*/ pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; *pQbuffer = *ptmpuserbuffer; acb->wqbuf_lastindex++; acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ ptmpuserbuffer++; user_len--; } /*post first Qbuffer*/ if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED; arcmsr_Write_data_2iop_wqbuffer(acb); } pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; } else { pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR; } } retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { u_int8_t *pQbuffer = acb->rqbuffer; if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been read */ } acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { u_int8_t *pQbuffer = acb->wqbuffer; if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been read */ } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ); acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { u_int8_t *pQbuffer; if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been read */ } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |ACB_F_MESSAGE_RQBUFFER_CLEARED |ACB_F_MESSAGE_WQBUFFER_READ); acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; pQbuffer = acb->rqbuffer; memset(pQbuffer, 0, sizeof(struct QBUFFER)); pQbuffer = acb->wqbuffer; memset(pQbuffer, 0, sizeof(struct QBUFFER)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_SAY_HELLO: { u_int8_t *hello_string = "Hello!
I am ARCMSR"; u_int8_t *puserbuffer = (u_int8_t *)pcmdmessagefld->messagedatabuffer; if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) { pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); return ENOIOCTL; } pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_SAY_GOODBYE: { arcmsr_iop_parking(acb); retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: { arcmsr_flush_adapter_cache(acb); retvalue = ARCMSR_MESSAGE_SUCCESS; } break; } ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); return (retvalue); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_free_srb(struct CommandControlBlock *srb) { struct AdapterControlBlock *acb; acb = srb->acb; ARCMSR_LOCK_ACQUIRE(&acb->srb_lock); srb->srb_state = ARCMSR_SRB_DONE; srb->srb_flags = 0; acb->srbworkingQ[acb->workingsrb_doneindex] = srb; acb->workingsrb_doneindex++; acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM; ARCMSR_LOCK_RELEASE(&acb->srb_lock); } /* ************************************************************************** ************************************************************************** */ struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb) { struct CommandControlBlock *srb = NULL; u_int32_t workingsrb_startindex, workingsrb_doneindex; ARCMSR_LOCK_ACQUIRE(&acb->srb_lock); workingsrb_doneindex = acb->workingsrb_doneindex; workingsrb_startindex = acb->workingsrb_startindex; srb = acb->srbworkingQ[workingsrb_startindex]; workingsrb_startindex++; workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM; if(workingsrb_doneindex != workingsrb_startindex) { acb->workingsrb_startindex = workingsrb_startindex; } else { srb = NULL; } ARCMSR_LOCK_RELEASE(&acb->srb_lock); return(srb); } /* ************************************************************************** ************************************************************************** */ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb) { struct CMD_MESSAGE_FIELD *pcmdmessagefld; int retvalue = 0, transfer_len = 0; char *buffer; - u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 | - (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 | - (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 | - (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8]; + uint8_t *ptr = scsiio_cdb_ptr(&pccb->csio); + u_int32_t controlcode = (u_int32_t ) ptr[5] << 24 | + (u_int32_t ) ptr[6] << 16 | + (u_int32_t ) ptr[7] << 8 | + (u_int32_t ) ptr[8]; /* 4 bytes: Areca io control code */ if ((pccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) { buffer = pccb->csio.data_ptr; transfer_len = pccb->csio.dxfer_len; } else { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer; switch(controlcode) { case ARCMSR_MESSAGE_READ_RQBUFFER: { u_int8_t *pQbuffer; u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer; int32_t allxfer_len = 0; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) && (allxfer_len < 1031)) { pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; *ptmpQbuffer = *pQbuffer; acb->rqbuf_firstindex++; acb->rqbuf_firstindex %= 
ARCMSR_MAX_QBUFFER; ptmpQbuffer++; allxfer_len++; } if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { struct QBUFFER *prbuffer; acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; prbuffer = arcmsr_get_iop_rqbuffer(acb); if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } pcmdmessagefld->cmdmessage.Length = allxfer_len; pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_WRITE_WQBUFFER: { int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; u_int8_t *pQbuffer; u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer; user_len = pcmdmessagefld->cmdmessage.Length; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); wqbuf_lastindex = acb->wqbuf_lastindex; wqbuf_firstindex = acb->wqbuf_firstindex; if (wqbuf_lastindex != wqbuf_firstindex) { arcmsr_Write_data_2iop_wqbuffer(acb); /* has error report sensedata */ if(pccb->csio.sense_len) { ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; /* AdditionalSenseLength */ ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; /* AdditionalSenseCode */ } retvalue = ARCMSR_MESSAGE_FAIL; } else { my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) &(ARCMSR_MAX_QBUFFER - 1); if (my_empty_len >= user_len) { while (user_len > 0) { pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; *pQbuffer = *ptmpuserbuffer; acb->wqbuf_lastindex++; acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; ptmpuserbuffer++; user_len--; } if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED; arcmsr_Write_data_2iop_wqbuffer(acb); } } else { /* has error report sensedata */ if(pccb->csio.sense_len) { ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; /* AdditionalSenseLength */ ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; /* AdditionalSenseCode */ } retvalue = ARCMSR_MESSAGE_FAIL; } } ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { u_int8_t *pQbuffer = acb->rqbuffer; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { u_int8_t *pQbuffer = acb->wqbuffer; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ); acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { u_int8_t *pQbuffer; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= 
~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ); acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; pQbuffer = acb->rqbuffer; memset(pQbuffer, 0, sizeof (struct QBUFFER)); pQbuffer = acb->wqbuffer; memset(pQbuffer, 0, sizeof (struct QBUFFER)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; } break; case ARCMSR_MESSAGE_SAY_HELLO: { int8_t *hello_string = "Hello! I am ARCMSR"; memcpy(pcmdmessagefld->messagedatabuffer, hello_string , (int16_t)strlen(hello_string)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; } break; case ARCMSR_MESSAGE_SAY_GOODBYE: arcmsr_iop_parking(acb); break; case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: arcmsr_flush_adapter_cache(acb); break; default: retvalue = ARCMSR_MESSAGE_FAIL; } message_out: return (retvalue); } /* ********************************************************************* ********************************************************************* */ static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct CommandControlBlock *srb = (struct CommandControlBlock *)arg; struct AdapterControlBlock *acb = (struct AdapterControlBlock *)srb->acb; union ccb *pccb; int target, lun; pccb = srb->pccb; target = pccb->ccb_h.target_id; lun = pccb->ccb_h.target_lun; acb->pktRequestCount++; if(error != 0) { if(error != EFBIG) { printf("arcmsr%d: unexpected error %x" " returned from 'bus_dmamap_load' \n" , acb->pci_unit, error); } if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { pccb->ccb_h.status |= CAM_REQ_TOO_BIG; } arcmsr_srb_complete(srb, 0); return; } if(nseg > ARCMSR_MAX_SG_ENTRIES) { pccb->ccb_h.status |= CAM_REQ_TOO_BIG; arcmsr_srb_complete(srb, 0); return; } if(acb->acb_flags & ACB_F_BUS_RESET) { printf("arcmsr%d: bus reset and return busy \n", acb->pci_unit); pccb->ccb_h.status |= CAM_SCSI_BUS_RESET; arcmsr_srb_complete(srb, 0); return; } if(acb->devstate[target][lun] == ARECA_RAID_GONE) { u_int8_t block_cmd, cmd; - cmd = pccb->csio.cdb_io.cdb_bytes[0]; + cmd = scsiio_cdb_ptr(&pccb->csio)[0]; block_cmd = cmd & 0x0f; if(block_cmd == 0x08 || block_cmd == 0x0a) { printf("arcmsr%d:block 'read/write' command " "with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n" , acb->pci_unit, cmd, target, lun); pccb->ccb_h.status |= CAM_DEV_NOT_THERE; arcmsr_srb_complete(srb, 0); return; } } if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if(nseg != 0) { bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); } arcmsr_srb_complete(srb, 0); return; } if(acb->srboutstandingcount >= acb->maxOutstanding) { if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) == 0) { xpt_freeze_simq(acb->psim, 1); acb->acb_flags |= ACB_F_CAM_DEV_QFRZN; } pccb->ccb_h.status &= ~CAM_SIM_QUEUED; pccb->ccb_h.status |= CAM_REQUEUE_REQ; arcmsr_srb_complete(srb, 0); return; } pccb->ccb_h.status |= CAM_SIM_QUEUED; arcmsr_build_srb(srb, dm_segs, nseg); arcmsr_post_srb(acb, srb); if (pccb->ccb_h.timeout != CAM_TIME_INFINITY) { arcmsr_callout_init(&srb->ccb_callout); callout_reset_sbt(&srb->ccb_callout, SBT_1MS * (pccb->ccb_h.timeout + (ARCMSR_TIMEOUT_DELAY * 1000)), 0, arcmsr_srb_timeout, srb, 0); srb->srb_flags |= SRB_FLAG_TIMER_START; } } /* 
***************************************************************************************** ***************************************************************************************** */ static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb) { struct CommandControlBlock *srb; struct AdapterControlBlock *acb = (struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr; u_int32_t intmask_org; int i = 0; acb->num_aborts++; /* *************************************************************************** ** The upper layer acquires this lock just prior to calling us to abort ** a command. First determine if we currently own this command. ** Start by searching the device queue. If the command is not found ** there at all, and the system wanted us to just abort it, ** return success. *************************************************************************** */ if(acb->srboutstandingcount != 0) { /* disable all outbound interrupt */ intmask_org = arcmsr_disable_allintr(acb); for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) { srb = acb->psrb_pool[i]; if(srb->srb_state == ARCMSR_SRB_START) { if(srb->pccb == abortccb) { srb->srb_state = ARCMSR_SRB_ABORTED; printf("arcmsr%d: scsi id=%d lun=%jx abort srb '%p'" " outstanding command \n" , acb->pci_unit, abortccb->ccb_h.target_id , (uintmax_t)abortccb->ccb_h.target_lun, srb); arcmsr_polling_srbdone(acb, srb); /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); return (TRUE); } } } /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); } return(FALSE); } /* **************************************************************************** **************************************************************************** */ static void arcmsr_bus_reset(struct AdapterControlBlock *acb) { int retry = 0; acb->num_resets++; acb->acb_flags |= ACB_F_BUS_RESET; while(acb->srboutstandingcount != 0 && retry < 400) { arcmsr_interrupt(acb); UDELAY(25000); retry++; } arcmsr_iop_reset(acb); acb->acb_flags &= ~ACB_F_BUS_RESET; } /* ************************************************************************** ************************************************************************** */ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, union ccb *pccb) { if (pccb->ccb_h.target_lun) { pccb->ccb_h.status |= CAM_DEV_NOT_THERE; xpt_done(pccb); return; } pccb->ccb_h.status |= CAM_REQ_CMP; - switch (pccb->csio.cdb_io.cdb_bytes[0]) { + switch (scsiio_cdb_ptr(&pccb->csio)[0]) { case INQUIRY: { unsigned char inqdata[36]; char *buffer = pccb->csio.data_ptr; inqdata[0] = T_PROCESSOR; /* Periph Qualifier & Periph Dev Type */ inqdata[1] = 0; /* rem media bit & Dev Type Modifier */ inqdata[2] = 0; /* ISO, ECMA, & ANSI versions */ inqdata[3] = 0; inqdata[4] = 31; /* length of additional data */ inqdata[5] = 0; inqdata[6] = 0; inqdata[7] = 0; strncpy(&inqdata[8], "Areca ", 8); /* Vendor Identification */ strncpy(&inqdata[16], "RAID controller ", 16); /* Product Identification */ strncpy(&inqdata[32], "R001", 4); /* Product Revision */ memcpy(buffer, inqdata, sizeof(inqdata)); xpt_done(pccb); } break; case WRITE_BUFFER: case READ_BUFFER: { if (arcmsr_iop_message_xfer(acb, pccb)) { pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; } xpt_done(pccb); } break; default: xpt_done(pccb); } } /* ********************************************************************* ********************************************************************* */
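/*
 * How the virtual device above is used (a sketch; the flow is inferred
 * from the surrounding code): target id 16 is not a disk but a message
 * mailbox. A management tool issues WRITE_BUFFER/READ_BUFFER commands to
 * it whose data payload is a struct CMD_MESSAGE_FIELD;
 * arcmsr_iop_message_xfer() then decodes CDB bytes 5..8 as the Areca
 * control code, for example:
 *
 *	controlcode = ptr[5] << 24 | ptr[6] << 16 | ptr[7] << 8 | ptr[8];
 *
 * and services it against the rq/wq ring buffers, so no real SCSI
 * device is ever touched.
 */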
static void arcmsr_action(struct cam_sim *psim, union ccb *pccb) { struct AdapterControlBlock *acb; acb = (struct AdapterControlBlock *) cam_sim_softc(psim); if(acb == NULL) { pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); return; } switch (pccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct CommandControlBlock *srb; int target = pccb->ccb_h.target_id; int error; + + if (pccb->ccb_h.flags & CAM_CDB_PHYS) { + pccb->ccb_h.status = CAM_REQ_INVALID; + xpt_done(pccb); + return; + } if(target == 16) { /* virtual device for iop message transfer */ arcmsr_handle_virtual_command(acb, pccb); return; } if((srb = arcmsr_get_freesrb(acb)) == NULL) { pccb->ccb_h.status |= CAM_RESRC_UNAVAIL; xpt_done(pccb); return; } pccb->ccb_h.arcmsr_ccbsrb_ptr = srb; pccb->ccb_h.arcmsr_ccbacb_ptr = acb; srb->pccb = pccb; error = bus_dmamap_load_ccb(acb->dm_segs_dmat , srb->dm_segs_dmamap , pccb , arcmsr_execute_srb, srb, /*flags*/0); if(error == EINPROGRESS) { xpt_freeze_simq(acb->psim, 1); pccb->ccb_h.status |= CAM_RELEASE_SIMQ; } break; } case XPT_TARGET_IO: { /* target mode does not yet support vendor-specific commands. */ pccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &pccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = ARCMSR_MAX_TARGETID; /* 0-16 */ cpi->max_lun = ARCMSR_MAX_TARGETLUN; /* 0-7 */ cpi->initiator_id = ARCMSR_SCSI_INITIATOR_ID; /* 255 */ cpi->bus_id = cam_sim_bus(psim); strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(psim); #ifdef CAM_NEW_TRAN_CODE if(acb->adapter_bus_speed == ACB_BUS_SPEED_12G) cpi->base_transfer_speed = 1200000; else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G) cpi->base_transfer_speed = 600000; else cpi->base_transfer_speed = 300000; if((acb->vendor_device_id == PCIDevVenIDARC1880) || (acb->vendor_device_id == PCIDevVenIDARC1680) || (acb->vendor_device_id == PCIDevVenIDARC1214)) { cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol_version = SCSI_REV_SPC2; } else { cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol_version = SCSI_REV_2; } cpi->protocol = PROTO_SCSI; #endif cpi->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_ABORT: { union ccb *pabort_ccb; pabort_ccb = pccb->cab.abort_ccb; switch (pabort_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMED_NOTIFY: case XPT_CONT_TARGET_IO: if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) { pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED; xpt_done(pabort_ccb); pccb->ccb_h.status |= CAM_REQ_CMP; } else { xpt_print_path(pabort_ccb->ccb_h.path); printf("Not found\n"); pccb->ccb_h.status |= CAM_PATH_INVALID; } break; case XPT_SCSI_IO: pccb->ccb_h.status |= CAM_UA_ABORT; break; default: pccb->ccb_h.status |= CAM_REQ_INVALID; break; } xpt_done(pccb); break; } case XPT_RESET_BUS: case XPT_RESET_DEV: { u_int32_t i; arcmsr_bus_reset(acb); for (i=0; i < 500; i++) { DELAY(1000); } pccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_TERM_IO: { pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; if(pccb->ccb_h.target_id == 16) { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } cts = &pccb->cts; #ifdef CAM_NEW_TRAN_CODE { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct
ccb_trans_settings_sas *sas; scsi = &cts->proto_specific.scsi; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; scsi->valid = CTS_SCSI_VALID_TQ; cts->protocol = PROTO_SCSI; if((acb->vendor_device_id == PCIDevVenIDARC1880) || (acb->vendor_device_id == PCIDevVenIDARC1680) || (acb->vendor_device_id == PCIDevVenIDARC1214)) { cts->protocol_version = SCSI_REV_SPC2; cts->transport_version = 0; cts->transport = XPORT_SAS; sas = &cts->xport_specific.sas; sas->valid = CTS_SAS_VALID_SPEED; if (acb->adapter_bus_speed == ACB_BUS_SPEED_12G) sas->bitrate = 1200000; else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G) sas->bitrate = 600000; else if(acb->adapter_bus_speed == ACB_BUS_SPEED_3G) sas->bitrate = 300000; } else { cts->protocol_version = SCSI_REV_2; cts->transport_version = 2; cts->transport = XPORT_SPI; spi = &cts->xport_specific.spi; spi->flags = CTS_SPI_FLAGS_DISC_ENB; if (acb->adapter_bus_speed == ACB_BUS_SPEED_6G) spi->sync_period = 1; else spi->sync_period = 2; spi->sync_offset = 32; spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; spi->valid = CTS_SPI_VALID_DISC | CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH; } } #else { cts->flags = (CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB); if (acb->adapter_bus_speed == ACB_BUS_SPEED_6G) cts->sync_period = 1; else cts->sync_period = 2; cts->sync_offset = 32; cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; cts->valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID | CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; } #endif pccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_SET_TRAN_SETTINGS: { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } case XPT_CALC_GEOMETRY: if(pccb->ccb_h.target_id == 16) { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } #if __FreeBSD_version >= 500000 cam_calc_geometry(&pccb->ccg, 1); #else { struct ccb_calc_geometry *ccg; u_int32_t size_mb; u_int32_t secs_per_cylinder; ccg = &pccb->ccg; if (ccg->block_size == 0) { pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; } if(((1024L * 1024L)/ccg->block_size) < 0) { pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; } size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size); if(size_mb > 1024 ) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; pccb->ccb_h.status |= CAM_REQ_CMP; } #endif xpt_done(pccb); break; default: pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */
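/*
 * All four start_hbX_bgrb variants here follow the same handshake,
 * sketched below (names from the surrounding code): post a message code
 * to the IOP, then spin until it acknowledges:
 *
 *	CHIP_REG_WRITE32(..., inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
 *	if (!arcmsr_hbX_wait_msgint_ready(acb))
 *		printf(...timeout...);
 *
 * Type C controllers additionally ring the inbound doorbell with
 * ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE so the IOP notices the message.
 */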
static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; acb->acb_flags |= ACB_F_MSG_START_BGRB; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_START_BGRB); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hbd_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: arcmsr_start_hba_bgrb(acb); break; case ACB_ADAPTER_TYPE_B: arcmsr_start_hbb_bgrb(acb); break; case ACB_ADAPTER_TYPE_C: arcmsr_start_hbc_bgrb(acb); break; case ACB_ADAPTER_TYPE_D: arcmsr_start_hbd_bgrb(acb); break; } } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct CommandControlBlock *srb; u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0; u_int16_t error; polling_ccb_retry: poll_count++; outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus); /*clear interrupt*/ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { if((flag_srb = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) == 0xFFFFFFFF) { if(poll_srb_done) { break;/*no more CCBs in the chip FIFO to complete*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } goto polling_ccb_retry; } } /* check whether the command completed without error */ srb = (struct CommandControlBlock *) (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; poll_srb_done = (srb == poll_srb) ? 1:0;
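/*
 * Sketch of the completion-token decoding used in these polling loops
 * (taken from the type A case above): the 32-bit value read from the
 * reply FIFO packs the SRB frame address together with an error bit.
 * Because frames are 32-byte aligned, the address travels shifted right
 * by 5, so the driver reverses that and rebases it into its own virtual
 * mapping of the SRB pool:
 *
 *	srb   = (struct CommandControlBlock *)
 *	            (acb->vir2phy_offset + (flag_srb << 5));
 *	error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
 *
 * Types C and D instead carry a full low address masked with 0xFFFFFFE0
 * and flag errors with ARCMSR_SRBREPLY_FLAG_ERROR_MODE1.
 */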
if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%jx srb='%p'" " poll command abort successfully \n" , acb->pci_unit , srb->pccb->ccb_h.target_id , (uintmax_t)srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling got an illegal srb command done srb='%p'" " srboutstandingcount=%d \n" , acb->pci_unit , srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; struct CommandControlBlock *srb; u_int32_t flag_srb, poll_srb_done=0, poll_count=0; int index; u_int16_t error; polling_ccb_retry: poll_count++; WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { index = phbbmu->doneq_index; if((flag_srb = phbbmu->done_qbuffer[index]) == 0) { if(poll_srb_done) { break;/*no more CCBs in the chip FIFO to complete*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } goto polling_ccb_retry; } } phbbmu->done_qbuffer[index] = 0; index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ phbbmu->doneq_index = index; /* check whether the command completed without error */ srb = (struct CommandControlBlock *) (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; poll_srb_done = (srb == poll_srb) ? 1:0; if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%jx srb='%p'" " poll command abort successfully \n" , acb->pci_unit , srb->pccb->ccb_h.target_id , (uintmax_t)srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling got an illegal srb command done srb='%p'" " srboutstandingcount=%d \n" , acb->pci_unit , srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct CommandControlBlock *srb; u_int32_t flag_srb, poll_srb_done=0, poll_count=0; u_int16_t error; polling_ccb_retry: poll_count++; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) { if(poll_srb_done) { break;/*no more CCBs in the chip FIFO to complete*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } if (acb->srboutstandingcount == 0) { break; } goto polling_ccb_retry; } } flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); /* check whether the command completed without error */ srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; if (poll_srb != NULL) poll_srb_done = (srb == poll_srb) ? 1:0; if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%jx srb='%p' poll command abort successfully \n" , acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling got an illegal srb command done srb='%p' srboutstandingcount=%d \n" , acb->pci_unit, srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hbd_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; struct CommandControlBlock *srb; u_int32_t flag_srb, poll_srb_done=0, poll_count=0; u_int32_t outbound_write_pointer; u_int16_t error, doneq_index; polling_ccb_retry: poll_count++; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow; doneq_index = phbdmu->doneq_index; if ((outbound_write_pointer & 0xFF) == (doneq_index & 0xFF)) { if(poll_srb_done) { break;/*no more CCBs in the chip FIFO to complete*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } if (acb->srboutstandingcount == 0) { break; } goto polling_ccb_retry; } } doneq_index = arcmsr_get_doneq_index(phbdmu); flag_srb = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow; /* check whether the command completed without error */ srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE; CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index); if (poll_srb != NULL) poll_srb_done = (srb == poll_srb) ? 1:0; if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%jx srb='%p' poll command abort successfully \n" , acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling got an illegal srb command done srb='%p' srboutstandingcount=%d \n" , acb->pci_unit, srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ********************************************************************** */ static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_polling_hba_srbdone(acb, poll_srb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_polling_hbb_srbdone(acb, poll_srb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_polling_hbc_srbdone(acb, poll_srb); } break; case ACB_ADAPTER_TYPE_D: { arcmsr_polling_hbd_srbdone(acb, poll_srb); } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) { char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i = 0; while(i < 8) { *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i=0; while(i < 16) { *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i=0; while(i < 16) { *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); acb_device_map++; i++; } printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); acb->firm_request_len = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD) acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1; else acb->maxOutstanding = acb->firm_numbers_queue - 1; } /* **********************************************************************
********************************************************************** */ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: wait" "'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i = 0; while(i < 8) { *acb_firm_model = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i = 0; while(i < 16) { *acb_firm_version = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i = 0; while(i < 16) { *acb_device_map = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i); acb_device_map++; i++; } printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); acb->firm_request_len = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ if(acb->firm_numbers_queue > ARCMSR_MAX_HBB_POSTQUEUE) acb->maxOutstanding = ARCMSR_MAX_HBB_POSTQUEUE - 1; else acb->maxOutstanding = acb->firm_numbers_queue - 1; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb) { char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i = 0; while(i < 8) { *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i = 0; while(i < 16) { *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ 
acb_firm_version++; i++; } i = 0; while(i < 16) { *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); acb_device_map++; i++; } printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); acb->firm_request_len = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD) acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1; else acb->maxOutstanding = acb->firm_numbers_queue - 1; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_get_hbd_config(struct AdapterControlBlock *acb) { char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR); CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i = 0; while(i < 8) { *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i = 0; while(i < 16) { *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i = 0; while(i < 16) { *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); acb_device_map++; i++; } printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); acb->firm_request_len = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ if(acb->firm_numbers_queue > ARCMSR_MAX_HBD_POSTQUEUE) acb->maxOutstanding = ARCMSR_MAX_HBD_POSTQUEUE - 1; else acb->maxOutstanding = acb->firm_numbers_queue - 1; } /* 
********************************************************************** ********************************************************************** */ static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_get_hba_config(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_get_hbb_config(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_get_hbc_config(acb); } break; case ACB_ADAPTER_TYPE_D: { arcmsr_get_hbd_config(acb); } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb) { int timeout=0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d:timed out waiting for firmware \n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ } } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; while ((READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ } WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); } break; case ACB_ADAPTER_TYPE_C: { while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ } } break; case ACB_ADAPTER_TYPE_D: { while ((CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBDMU_MESSAGE_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ } } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb) { u_int32_t outbound_doorbell; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK); /* let IOP know data has been read */ } break; case ACB_ADAPTER_TYPE_C: { /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /*clear doorbell interrupt */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK); 
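/*
 * Why the two reads below exist (a general PCI note, not specific to
 * this driver): writes to the controller are posted and may linger in a
 * bridge buffer; reading any register back from the same device forces
 * the preceding doorbell writes to complete before the driver goes on:
 *
 *	CHIP_REG_WRITE32(dev, reg, val);	(posted, may be delayed)
 *	(void)CHIP_REG_READ32(dev, reg);	(flushes the posted writes)
 */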
CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell_clear); /* Dummy read to force pci flush */ CHIP_REG_READ32(HBC_MessageUnit, 0, inbound_doorbell); /* Dummy read to force pci flush */ } break; case ACB_ADAPTER_TYPE_D: { /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ); } break; } } /* ************************************************************************ ************************************************************************ */ static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb) { unsigned long srb_phyaddr; u_int32_t srb_phyaddr_hi32; u_int32_t srb_phyaddr_lo32; /* ******************************************************************** ** here we need to tell iop 331 our freesrb.HighPart ** if freesrb.HighPart is not zero ******************************************************************** */ srb_phyaddr = (unsigned long) acb->srb_phyaddr.phyaddr; srb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high; srb_phyaddr_lo32 = acb->srb_phyaddr.B.phyadd_low; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { if(srb_phyaddr_hi32 != 0) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); return FALSE; } } } break; /* *********************************************************************** ** if adapter type B, set window of "post command Q" *********************************************************************** */ case ACB_ADAPTER_TYPE_B: { u_int32_t post_queue_phyaddr; struct HBB_MessageUnit *phbbmu; phbbmu = (struct HBB_MessageUnit *)acb->pmu; phbbmu->postq_index = 0; phbbmu->doneq_index = 0; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit); return FALSE; } post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE + offsetof(struct HBB_MessageUnit, post_qbuffer); CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normally zero */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit); return FALSE; } WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'start driver mode' timeout \n", acb->pci_unit); return FALSE; } } break; case ACB_ADAPTER_TYPE_C: { if(srb_phyaddr_hi32 != 0) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
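/*
 * Sketch of the 64-bit address handshake happening here (field names
 * from the declarations above): the SRB pool may live above 4GB, so the
 * driver hands the IOP the high 32 bits of the pool address once, up
 * front:
 *
 *	srb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
 *	srb_phyaddr_lo32 = acb->srb_phyaddr.B.phyadd_low;
 *
 * Each queued command frame then only needs to carry the low half (or,
 * for type A/B adapters, the address shifted right by 5, since frames
 * are 32-byte aligned).
 */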
CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); return FALSE; } } } break; case ACB_ADAPTER_TYPE_D: { u_int32_t post_queue_phyaddr, done_queue_phyaddr; struct HBD_MessageUnit0 *phbdmu; phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; phbdmu->postq_index = 0; phbdmu->doneq_index = 0x40FF; post_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE + offsetof(struct HBD_MessageUnit0, post_qbuffer); done_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE + offsetof(struct HBD_MessageUnit0, done_qbuffer); CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ base */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[3], done_queue_phyaddr); /* doneQ base */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[4], 0x100); CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); return FALSE; } } break; } return (TRUE); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: case ACB_ADAPTER_TYPE_C: case ACB_ADAPTER_TYPE_D: break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_ACTIVE_EOI_MODE); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit); return; } } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_iop_init(struct AdapterControlBlock *acb) { u_int32_t intmask_org; /* disable all outbound interrupt */ intmask_org = arcmsr_disable_allintr(acb); arcmsr_wait_firmware_ready(acb); arcmsr_iop_confirm(acb); arcmsr_get_firmware_spec(acb); /*start background rebuild*/ arcmsr_start_adapter_bgrb(acb); /* empty doorbell Qbuffer if door bell ringed */ arcmsr_clear_doorbell_queue_buffer(acb); arcmsr_enable_eoi_mode(acb); /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); acb->acb_flags |= ACB_F_IOP_INITED; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct AdapterControlBlock *acb = arg; struct CommandControlBlock *srb_tmp; u_int32_t i; unsigned long srb_phyaddr = (unsigned long)segs->ds_addr; acb->srb_phyaddr.phyaddr = srb_phyaddr; srb_tmp = (struct CommandControlBlock *)acb->uncacheptr; for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) { if(bus_dmamap_create(acb->dm_segs_dmat, /*flags*/0, &srb_tmp->dm_segs_dmamap) != 0) { 
acb->acb_flags |= ACB_F_MAPFREESRB_FAILD; printf("arcmsr%d:" " srb dmamap bus_dmamap_create error\n", acb->pci_unit); return; } if((acb->adapter_type == ACB_ADAPTER_TYPE_C) || (acb->adapter_type == ACB_ADAPTER_TYPE_D)) { srb_tmp->cdb_phyaddr_low = srb_phyaddr; srb_tmp->cdb_phyaddr_high = (u_int32_t)((srb_phyaddr >> 16) >> 16); } else srb_tmp->cdb_phyaddr_low = srb_phyaddr >> 5; srb_tmp->acb = acb; acb->srbworkingQ[i] = acb->psrb_pool[i] = srb_tmp; srb_phyaddr = srb_phyaddr + SRB_SIZE; srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp + SRB_SIZE); } acb->vir2phy_offset = (unsigned long)srb_tmp - (unsigned long)srb_phyaddr; } /* ************************************************************************ ************************************************************************ */ static void arcmsr_free_resource(struct AdapterControlBlock *acb) { /* remove the control device */ if(acb->ioctl_dev != NULL) { destroy_dev(acb->ioctl_dev); } bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap); bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap); bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_mutex_init(struct AdapterControlBlock *acb) { ARCMSR_LOCK_INIT(&acb->isr_lock, "arcmsr isr lock"); ARCMSR_LOCK_INIT(&acb->srb_lock, "arcmsr srb lock"); ARCMSR_LOCK_INIT(&acb->postDone_lock, "arcmsr postQ lock"); ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr RW buffer lock"); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_mutex_destroy(struct AdapterControlBlock *acb) { ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); ARCMSR_LOCK_DESTROY(&acb->postDone_lock); ARCMSR_LOCK_DESTROY(&acb->srb_lock); ARCMSR_LOCK_DESTROY(&acb->isr_lock); } /* ************************************************************************ ************************************************************************ */ static u_int32_t arcmsr_initialize(device_t dev) { struct AdapterControlBlock *acb = device_get_softc(dev); u_int16_t pci_command; int i, j,max_coherent_size; u_int32_t vendor_dev_id; vendor_dev_id = pci_get_devid(dev); acb->vendor_device_id = vendor_dev_id; acb->sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); switch (vendor_dev_id) { case PCIDevVenIDARC1880: case PCIDevVenIDARC1882: case PCIDevVenIDARC1213: case PCIDevVenIDARC1223: { acb->adapter_type = ACB_ADAPTER_TYPE_C; if (acb->sub_device_id == ARECA_SUB_DEV_ID_1883) acb->adapter_bus_speed = ACB_BUS_SPEED_12G; else acb->adapter_bus_speed = ACB_BUS_SPEED_6G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE; } break; case PCIDevVenIDARC1214: { acb->adapter_type = ACB_ADAPTER_TYPE_D; acb->adapter_bus_speed = ACB_BUS_SPEED_6G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBD_MessageUnit0)); } break; case PCIDevVenIDARC1200: case PCIDevVenIDARC1201: { acb->adapter_type = ACB_ADAPTER_TYPE_B; acb->adapter_bus_speed = ACB_BUS_SPEED_3G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit)); } break; case PCIDevVenIDARC1203: { acb->adapter_type = ACB_ADAPTER_TYPE_B; acb->adapter_bus_speed = ACB_BUS_SPEED_6G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit)); } break; case PCIDevVenIDARC1110: case PCIDevVenIDARC1120: case PCIDevVenIDARC1130: case PCIDevVenIDARC1160: 
case PCIDevVenIDARC1170: case PCIDevVenIDARC1210: case PCIDevVenIDARC1220: case PCIDevVenIDARC1230: case PCIDevVenIDARC1231: case PCIDevVenIDARC1260: case PCIDevVenIDARC1261: case PCIDevVenIDARC1270: case PCIDevVenIDARC1280: case PCIDevVenIDARC1212: case PCIDevVenIDARC1222: case PCIDevVenIDARC1380: case PCIDevVenIDARC1381: case PCIDevVenIDARC1680: case PCIDevVenIDARC1681: { acb->adapter_type = ACB_ADAPTER_TYPE_A; acb->adapter_bus_speed = ACB_BUS_SPEED_3G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE; } break; default: { printf("arcmsr%d:" " unknown RAID adapter type \n", device_get_unit(dev)); return ENOMEM; } } #if __FreeBSD_version >= 700000 if(bus_dma_tag_create( /*PCI parent*/ bus_get_dma_tag(dev), #else if(bus_dma_tag_create( /*PCI parent*/ NULL, #endif /*alignment*/ 1, /*boundary*/ 0, /*lowaddr*/ BUS_SPACE_MAXADDR, /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT, /*nsegments*/ BUS_SPACE_UNRESTRICTED, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ NULL, /*lockarg*/ NULL, #endif &acb->parent_dmat) != 0) { printf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENOMEM; } /* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */ if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, /*alignment*/ 1, /*boundary*/ 0, #ifdef PAE /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, #else /*lowaddr*/ BUS_SPACE_MAXADDR, #endif /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM, /*nsegments*/ ARCMSR_MAX_SG_ENTRIES, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ busdma_lock_mutex, /*lockarg*/ &acb->isr_lock, #endif &acb->dm_segs_dmat) != 0) { bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENOMEM; } /* DMA tag for our srb structures....
Allocate the freesrb memory */ if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, /*alignment*/ 0x20, /*boundary*/ 0, /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ max_coherent_size, /*nsegments*/ 1, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ NULL, /*lockarg*/ NULL, #endif &acb->srb_dmat) != 0) { bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENXIO; } /* Allocation for our srbs */ if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) { bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev)); return ENXIO; } /* And permanently map them */ if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) { bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev)); return ENXIO; } pci_command = pci_read_config(dev, PCIR_COMMAND, 2); pci_command |= PCIM_CMD_BUSMASTEREN; pci_command |= PCIM_CMD_PERRESPEN; pci_command |= PCIM_CMD_MWRICEN; /* Enable Busmaster */ pci_write_config(dev, PCIR_COMMAND, pci_command, 2); switch(acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { u_int32_t rid0 = PCIR_BAR(0); vm_offset_t mem_base0; acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); return ENXIO; } mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); if(mem_base0 == 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); return ENXIO; } acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]); acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]); acb->pmu = (struct MessageUnit_UNION *)mem_base0; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu; struct CommandControlBlock *freesrb; u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) }; vm_offset_t mem_base[]={0,0}; u_long size; if (vendor_dev_id == PCIDevVenIDARC1203) size = sizeof(struct HBB_DOORBELL_1203); else size = sizeof(struct HBB_DOORBELL); for(i=0; i < 2; i++) { if(i == 0) { acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid[i], RF_ACTIVE); } else { acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid[i], RF_ACTIVE); } if(acb->sys_res_arcmsr[i] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i); return ENXIO; } mem_base[i] = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]); if(mem_base[i] == 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i); return ENXIO; } 
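/* Save the bus tag/handle for this BAR; for type B adapters BAR0 maps the doorbell registers and BAR2 the message RW buffer, which are attached to hbb_doorbell and hbb_rwbuffer below. */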
acb->btag[i] = rman_get_bustag(acb->sys_res_arcmsr[i]); acb->bhandle[i] = rman_get_bushandle(acb->sys_res_arcmsr[i]); } freesrb = (struct CommandControlBlock *)acb->uncacheptr; acb->pmu = (struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE); phbbmu = (struct HBB_MessageUnit *)acb->pmu; phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)mem_base[0]; phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)mem_base[1]; if (vendor_dev_id == PCIDevVenIDARC1203) { phbbmu->drv2iop_doorbell = offsetof(struct HBB_DOORBELL_1203, drv2iop_doorbell); phbbmu->drv2iop_doorbell_mask = offsetof(struct HBB_DOORBELL_1203, drv2iop_doorbell_mask); phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL_1203, iop2drv_doorbell); phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL_1203, iop2drv_doorbell_mask); } else { phbbmu->drv2iop_doorbell = offsetof(struct HBB_DOORBELL, drv2iop_doorbell); phbbmu->drv2iop_doorbell_mask = offsetof(struct HBB_DOORBELL, drv2iop_doorbell_mask); phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL, iop2drv_doorbell); phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL, iop2drv_doorbell_mask); } } break; case ACB_ADAPTER_TYPE_C: { u_int32_t rid0 = PCIR_BAR(1); vm_offset_t mem_base0; acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); return ENXIO; } mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); if(mem_base0 == 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); return ENXIO; } acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]); acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]); acb->pmu = (struct MessageUnit_UNION *)mem_base0; } break; case ACB_ADAPTER_TYPE_D: { struct HBD_MessageUnit0 *phbdmu; u_int32_t rid0 = PCIR_BAR(0); vm_offset_t mem_base0; acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); return ENXIO; } mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); if(mem_base0 == 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); return ENXIO; } acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]); acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]); acb->pmu = (struct MessageUnit_UNION *)((unsigned long)acb->uncacheptr+ARCMSR_SRBS_POOL_SIZE); phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; phbdmu->phbdmu = (struct HBD_MessageUnit *)mem_base0; } break; } if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) { arcmsr_free_resource(acb); printf("arcmsr%d: map free srb failure!\n", device_get_unit(dev)); return ENXIO; } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ); acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; /* ******************************************************************** ** init raid volume state ******************************************************************** */ for(i=0; i 
< ARCMSR_MAX_TARGETID; i++) { for(j=0; j < ARCMSR_MAX_TARGETLUN; j++) { acb->devstate[i][j] = ARECA_RAID_GONE; } } arcmsr_iop_init(acb); return(0); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_attach(device_t dev) { struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); u_int32_t unit=device_get_unit(dev); struct ccb_setasync csa; struct cam_devq *devq; /* Device Queue to use for this SIM */ struct resource *irqres; int rid; if(acb == NULL) { printf("arcmsr%d: cannot allocate softc\n", unit); return (ENOMEM); } arcmsr_mutex_init(acb); acb->pci_dev = dev; acb->pci_unit = unit; if(arcmsr_initialize(dev)) { printf("arcmsr%d: initialize failure!\n", unit); arcmsr_mutex_destroy(acb); return ENXIO; } /* After setting up the adapter, map our interrupt */ rid = 0; irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if(irqres == NULL || #if __FreeBSD_version >= 700025 bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, NULL, arcmsr_intr_handler, acb, &acb->ih)) { #else bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih)) { #endif arcmsr_free_resource(acb); arcmsr_mutex_destroy(acb); printf("arcmsr%d: unable to register interrupt handler!\n", unit); return ENXIO; } acb->irqres = irqres; /* * Now let the CAM generic SCSI layer find the SCSI devices on * the bus * start queue to reset to the idle loop. * * Create device queue of SIM(s) * (MAX_START_JOB - 1) : * max_sim_transactions */ devq = cam_simq_alloc(acb->maxOutstanding); if(devq == NULL) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); arcmsr_mutex_destroy(acb); printf("arcmsr%d: cam_simq_alloc failure!\n", unit); return ENXIO; } #if __FreeBSD_version >= 700025 acb->psim = cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->isr_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq); #else acb->psim = cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq); #endif if(acb->psim == NULL) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); cam_simq_free(devq); arcmsr_mutex_destroy(acb); printf("arcmsr%d: cam_sim_alloc failure!\n", unit); return ENXIO; } ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); #if __FreeBSD_version >= 700044 if(xpt_bus_register(acb->psim, dev, 0) != CAM_SUCCESS) { #else if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) { #endif arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); cam_sim_free(acb->psim, /*free_devq*/TRUE); arcmsr_mutex_destroy(acb); printf("arcmsr%d: xpt_bus_register failure!\n", unit); return ENXIO; } if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); xpt_bus_deregister(cam_sim_path(acb->psim)); cam_sim_free(acb->psim, /* free_simq */ TRUE); arcmsr_mutex_destroy(acb); printf("arcmsr%d: xpt_create_path failure!\n", unit); return ENXIO; } /* **************************************************** */ xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE; csa.callback = arcmsr_async; csa.callback_arg = acb->psim; xpt_action((union ccb *)&csa); ARCMSR_LOCK_RELEASE(&acb->isr_lock); 
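/* The wildcard async callback is now armed: CAM delivers AC_FOUND_DEVICE and AC_LOST_DEVICE notifications to arcmsr_async() with the SIM as callback_arg. */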
/* Create the control device. */ acb->ioctl_dev = make_dev(&arcmsr_cdevsw, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit); #if __FreeBSD_version < 503000 acb->ioctl_dev->si_drv1 = acb; #endif #if __FreeBSD_version > 500005 (void)make_dev_alias(acb->ioctl_dev, "arc%d", unit); #endif arcmsr_callout_init(&acb->devmap_callout); callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb); return (0); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_probe(device_t dev) { u_int32_t id; u_int16_t sub_device_id; static char buf[256]; char x_type[]={"unknown"}; char *type; int raid6 = 1; if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) { return (ENXIO); } sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); switch(id = pci_get_devid(dev)) { case PCIDevVenIDARC1110: case PCIDevVenIDARC1200: case PCIDevVenIDARC1201: case PCIDevVenIDARC1210: raid6 = 0; /*FALLTHRU*/ case PCIDevVenIDARC1120: case PCIDevVenIDARC1130: case PCIDevVenIDARC1160: case PCIDevVenIDARC1170: case PCIDevVenIDARC1220: case PCIDevVenIDARC1230: case PCIDevVenIDARC1231: case PCIDevVenIDARC1260: case PCIDevVenIDARC1261: case PCIDevVenIDARC1270: case PCIDevVenIDARC1280: type = "SATA 3G"; break; case PCIDevVenIDARC1212: case PCIDevVenIDARC1222: case PCIDevVenIDARC1380: case PCIDevVenIDARC1381: case PCIDevVenIDARC1680: case PCIDevVenIDARC1681: type = "SAS 3G"; break; case PCIDevVenIDARC1880: case PCIDevVenIDARC1882: case PCIDevVenIDARC1213: case PCIDevVenIDARC1223: if (sub_device_id == ARECA_SUB_DEV_ID_1883) type = "SAS 12G"; else type = "SAS 6G"; break; case PCIDevVenIDARC1214: case PCIDevVenIDARC1203: type = "SATA 6G"; break; default: type = x_type; raid6 = 0; break; } if(type == x_type) return(ENXIO); sprintf(buf, "Areca %s Host Adapter RAID Controller %s\n%s\n", type, raid6 ? 
"(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_shutdown(device_t dev) { u_int32_t i; u_int32_t intmask_org; struct CommandControlBlock *srb; struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); /* stop adapter background rebuild */ ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); /* disable all outbound interrupt */ intmask_org = arcmsr_disable_allintr(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); /* abort all outstanding command */ acb->acb_flags |= ACB_F_SCSISTOPADAPTER; acb->acb_flags &= ~ACB_F_IOP_INITED; if(acb->srboutstandingcount != 0) { /*clear and abort all outbound posted Q*/ arcmsr_done4abort_postqueue(acb); /* talk to iop 331 outstanding command aborted*/ arcmsr_abort_allcmd(acb); for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) { srb = acb->psrb_pool[i]; if(srb->srb_state == ARCMSR_SRB_START) { srb->srb_state = ARCMSR_SRB_ABORTED; srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); } } } acb->srboutstandingcount = 0; acb->workingsrb_doneindex = 0; acb->workingsrb_startindex = 0; acb->pktRequestCount = 0; acb->pktReturnCount = 0; ARCMSR_LOCK_RELEASE(&acb->isr_lock); return (0); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_detach(device_t dev) { struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); int i; callout_stop(&acb->devmap_callout); bus_teardown_intr(dev, acb->irqres, acb->ih); arcmsr_shutdown(dev); arcmsr_free_resource(acb); for(i=0; (acb->sys_res_arcmsr[i]!=NULL) && (i<2); i++) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]); } bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); xpt_async(AC_LOST_DEVICE, acb->ppath, NULL); xpt_free_path(acb->ppath); xpt_bus_deregister(cam_sim_path(acb->psim)); cam_sim_free(acb->psim, TRUE); ARCMSR_LOCK_RELEASE(&acb->isr_lock); arcmsr_mutex_destroy(acb); return (0); } #ifdef ARCMSR_DEBUG1 static void arcmsr_dump_data(struct AdapterControlBlock *acb) { if((acb->pktRequestCount - acb->pktReturnCount) == 0) return; printf("Command Request Count =0x%x\n",acb->pktRequestCount); printf("Command Return Count =0x%x\n",acb->pktReturnCount); printf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount)); printf("Queued Command Count =0x%x\n",acb->srboutstandingcount); } #endif Index: head/sys/dev/iir/iir.c =================================================================== --- head/sys/dev/iir/iir.c (revision 296890) +++ head/sys/dev/iir/iir.c (revision 296891) @@ -1,1912 +1,1912 @@ /*- * Copyright (c) 2000-04 ICP vortex GmbH * Copyright (c) 2002-04 Intel Corporation * Copyright (c) 2003-04 Adaptec Inc. * All Rights Reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * iir.c: SCSI dependent code for the Intel Integrated RAID Controller driver * * Written by: Achim Leubner * Fixes/Additions: Boji Tony Kannanthanam * * credits: Niklas Hallqvist; OpenBSD driver for the ICP Controllers. * Mike Smith; Some driver source code. * FreeBSD.ORG; Great O/S to work on and for. * * $Id: iir.c 1.5 2004/03/30 10:17:53 achim Exp $ */ #include __FBSDID("$FreeBSD$"); #define _IIR_C_ /* #include "opt_iir.h" */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_GDTBUF, "iirbuf", "iir driver buffer"); #ifdef GDT_DEBUG int gdt_debug = GDT_DEBUG; #ifdef __SERIAL__ #define MAX_SERBUF 160 static void ser_init(void); static void ser_puts(char *str); static void ser_putc(int c); static char strbuf[MAX_SERBUF+1]; #ifdef __COM2__ #define COM_BASE 0x2f8 #else #define COM_BASE 0x3f8 #endif static void ser_init() { unsigned port=COM_BASE; outb(port+3, 0x80); outb(port+1, 0); /* 19200 Baud, if 9600: outb(12,port) */ outb(port, 6); outb(port+3, 3); outb(port+1, 0); } static void ser_puts(char *str) { char *ptr; ser_init(); for (ptr=str;*ptr;++ptr) ser_putc((int)(*ptr)); } static void ser_putc(int c) { unsigned port=COM_BASE; while ((inb(port+5) & 0x20)==0); outb(port, c); if (c==0x0a) { while ((inb(port+5) & 0x20)==0); outb(port, 0x0d); } } int ser_printf(const char *fmt, ...) { va_list args; int i; va_start(args,fmt); i = vsprintf(strbuf,fmt,args); ser_puts(strbuf); va_end(args); return i; } #endif #endif /* controller cnt.
*/ int gdt_cnt = 0; /* event buffer */ static gdt_evt_str ebuffer[GDT_MAX_EVENTS]; static int elastidx, eoldidx; static struct mtx elock; MTX_SYSINIT(iir_elock, &elock, "iir events", MTX_DEF); /* statistics */ gdt_statist_t gdt_stat; /* Definitions for our use of the SIM private CCB area */ #define ccb_sim_ptr spriv_ptr0 #define ccb_priority spriv_field1 static void iir_action(struct cam_sim *sim, union ccb *ccb); static int iir_intr_locked(struct gdt_softc *gdt); static void iir_poll(struct cam_sim *sim); static void iir_shutdown(void *arg, int howto); static void iir_timeout(void *arg); static void gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs); static int gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb, u_int8_t service, u_int16_t opcode, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *ccb, int timeout); static struct gdt_ccb *gdt_get_ccb(struct gdt_softc *gdt); static int gdt_sync_event(struct gdt_softc *gdt, int service, u_int8_t index, struct gdt_ccb *gccb); static int gdt_async_event(struct gdt_softc *gdt, int service); static struct gdt_ccb *gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb); static struct gdt_ccb *gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb); static struct gdt_ccb *gdt_ioctl_cmd(struct gdt_softc *gdt, gdt_ucmd_t *ucmd); static void gdt_internal_cache_cmd(struct gdt_softc *gdt, union ccb *ccb); static void gdtmapmem(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error); static void gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error); int iir_init(struct gdt_softc *gdt) { u_int16_t cdev_cnt; int i, id, drv_cyls, drv_hds, drv_secs; struct gdt_ccb *gccb; GDT_DPRINTF(GDT_D_DEBUG, ("iir_init()\n")); gdt->sc_state = GDT_POLLING; gdt_clear_events(); bzero(&gdt_stat, sizeof(gdt_statist_t)); SLIST_INIT(&gdt->sc_free_gccb); SLIST_INIT(&gdt->sc_pending_gccb); TAILQ_INIT(&gdt->sc_ccb_queue); TAILQ_INIT(&gdt->sc_ucmd_queue); /* DMA tag for mapping buffers into device visible space. */ if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/DFLTPHYS, /*nsegments*/GDT_MAXSG, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/BUS_DMA_ALLOCNOW, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&gdt->sc_lock, &gdt->sc_buffer_dmat) != 0) { device_printf(gdt->sc_devnode, "bus_dma_tag_create(..., gdt->sc_buffer_dmat) failed\n"); return (1); } gdt->sc_init_level++; /* DMA tag for our ccb structures */ if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, GDT_MAXCMDS * GDT_SCRATCH_SZ, /* maxsize */ /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&gdt->sc_lock, &gdt->sc_gcscratch_dmat) != 0) { device_printf(gdt->sc_devnode, "bus_dma_tag_create(...,gdt->sc_gcscratch_dmat) failed\n"); return (1); } gdt->sc_init_level++; /* Allocation for our ccb scratch area */ if (bus_dmamem_alloc(gdt->sc_gcscratch_dmat, (void **)&gdt->sc_gcscratch, BUS_DMA_NOWAIT, &gdt->sc_gcscratch_dmamap) != 0) { device_printf(gdt->sc_devnode, "bus_dmamem_alloc(...,&gdt->sc_gccbs,...) 
failed\n"); return (1); } gdt->sc_init_level++; /* And permanently map them */ bus_dmamap_load(gdt->sc_gcscratch_dmat, gdt->sc_gcscratch_dmamap, gdt->sc_gcscratch, GDT_MAXCMDS * GDT_SCRATCH_SZ, gdtmapmem, &gdt->sc_gcscratch_busbase, /*flags*/0); gdt->sc_init_level++; /* Clear them out. */ bzero(gdt->sc_gcscratch, GDT_MAXCMDS * GDT_SCRATCH_SZ); /* Initialize the ccbs */ gdt->sc_gccbs = malloc(sizeof(struct gdt_ccb) * GDT_MAXCMDS, M_GDTBUF, M_NOWAIT | M_ZERO); if (gdt->sc_gccbs == NULL) { device_printf(gdt->sc_devnode, "no memory for gccbs.\n"); return (1); } for (i = GDT_MAXCMDS-1; i >= 0; i--) { gccb = &gdt->sc_gccbs[i]; gccb->gc_cmd_index = i + 2; gccb->gc_flags = GDT_GCF_UNUSED; gccb->gc_map_flag = FALSE; if (bus_dmamap_create(gdt->sc_buffer_dmat, /*flags*/0, &gccb->gc_dmamap) != 0) return(1); gccb->gc_map_flag = TRUE; gccb->gc_scratch = &gdt->sc_gcscratch[GDT_SCRATCH_SZ * i]; gccb->gc_scratch_busbase = gdt->sc_gcscratch_busbase + GDT_SCRATCH_SZ * i; callout_init_mtx(&gccb->gc_timeout, &gdt->sc_lock, 0); SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle); } gdt->sc_init_level++; /* create the control device */ gdt->sc_dev = gdt_make_dev(gdt); /* allocate ccb for gdt_internal_cmd() */ mtx_lock(&gdt->sc_lock); gccb = gdt_get_ccb(gdt); if (gccb == NULL) { mtx_unlock(&gdt->sc_lock); device_printf(gdt->sc_devnode, "No free command index found\n"); return (1); } bzero(gccb->gc_cmd, GDT_CMD_SZ); if (!gdt_internal_cmd(gdt, gccb, GDT_SCREENSERVICE, GDT_INIT, 0, 0, 0)) { device_printf(gdt->sc_devnode, "Screen service initialization error %d\n", gdt->sc_status); gdt_free_ccb(gdt, gccb); mtx_unlock(&gdt->sc_lock); return (1); } gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_UNFREEZE_IO, 0, 0, 0); if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT, GDT_LINUX_OS, 0, 0)) { device_printf(gdt->sc_devnode, "Cache service initialization error %d\n", gdt->sc_status); gdt_free_ccb(gdt, gccb); mtx_unlock(&gdt->sc_lock); return (1); } cdev_cnt = (u_int16_t)gdt->sc_info; gdt->sc_fw_vers = gdt->sc_service; /* Detect number of buses */ gdt_enc32(gccb->gc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST); gccb->gc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS; gccb->gc_scratch[GDT_IOC_FIRST_CHAN] = 0; gccb->gc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1; gdt_enc32(gccb->gc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ); if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL, GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL, GDT_IOC_HDR_SZ + GDT_MAXBUS * GDT_RAWIOC_SZ)) { gdt->sc_bus_cnt = gccb->gc_scratch[GDT_IOC_CHAN_COUNT]; for (i = 0; i < gdt->sc_bus_cnt; i++) { id = gccb->gc_scratch[GDT_IOC_HDR_SZ + i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID]; gdt->sc_bus_id[i] = id < GDT_MAXID_FC ? id : 0xff; } } else { /* New method failed, use fallback. */ for (i = 0; i < GDT_MAXBUS; i++) { gdt_enc32(gccb->gc_scratch + GDT_GETCH_CHANNEL_NO, i); if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL, GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN, GDT_IO_CHANNEL | GDT_INVALID_CHANNEL, GDT_GETCH_SZ)) { if (i == 0) { device_printf(gdt->sc_devnode, "Cannot get channel count, " "error %d\n", gdt->sc_status); gdt_free_ccb(gdt, gccb); mtx_unlock(&gdt->sc_lock); return (1); } break; } gdt->sc_bus_id[i] = (gccb->gc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID_FC) ? 
gccb->gc_scratch[GDT_GETCH_SIOP_ID] : 0xff; } gdt->sc_bus_cnt = i; } /* add one "virtual" channel for the host drives */ gdt->sc_virt_bus = gdt->sc_bus_cnt; gdt->sc_bus_cnt++; if (!gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_INIT, 0, 0, 0)) { device_printf(gdt->sc_devnode, "Raw service initialization error %d\n", gdt->sc_status); gdt_free_ccb(gdt, gccb); mtx_unlock(&gdt->sc_lock); return (1); } /* Set/get features raw service (scatter/gather) */ gdt->sc_raw_feat = 0; if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_SET_FEAT, GDT_SCATTER_GATHER, 0, 0)) { if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) { gdt->sc_raw_feat = gdt->sc_info; if (!(gdt->sc_info & GDT_SCATTER_GATHER)) { panic("%s: Scatter/Gather Raw Service " "required but not supported!\n", device_get_nameunit(gdt->sc_devnode)); gdt_free_ccb(gdt, gccb); mtx_unlock(&gdt->sc_lock); return (1); } } } /* Set/get features cache service (scatter/gather) */ gdt->sc_cache_feat = 0; if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_SET_FEAT, 0, GDT_SCATTER_GATHER, 0)) { if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) { gdt->sc_cache_feat = gdt->sc_info; if (!(gdt->sc_info & GDT_SCATTER_GATHER)) { panic("%s: Scatter/Gather Cache Service " "required but not supported!\n", device_get_nameunit(gdt->sc_devnode)); gdt_free_ccb(gdt, gccb); mtx_unlock(&gdt->sc_lock); return (1); } } } /* OEM */ gdt_enc32(gccb->gc_scratch + GDT_OEM_VERSION, 0x01); gdt_enc32(gccb->gc_scratch + GDT_OEM_BUFSIZE, sizeof(gdt_oem_record_t)); if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL, GDT_OEM_STR_RECORD, GDT_INVALID_CHANNEL, sizeof(gdt_oem_str_record_t))) { strncpy(gdt->oem_name, ((gdt_oem_str_record_t *) gccb->gc_scratch)->text.scsi_host_drive_inquiry_vendor_id, 7); gdt->oem_name[7]='\0'; } else { /* Old method, based on PCI ID */ if (gdt->sc_vendor == INTEL_VENDOR_ID_IIR) strcpy(gdt->oem_name,"Intel "); else strcpy(gdt->oem_name,"ICP "); } /* Scan for cache devices */ for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++) { if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INFO, i, 0, 0)) { gdt->sc_hdr[i].hd_present = 1; gdt->sc_hdr[i].hd_size = gdt->sc_info; /* * Evaluate mapping (sectors per head, heads per cyl) */ gdt->sc_hdr[i].hd_size &= ~GDT_SECS32; if (gdt->sc_info2 == 0) gdt_eval_mapping(gdt->sc_hdr[i].hd_size, &drv_cyls, &drv_hds, &drv_secs); else { drv_hds = gdt->sc_info2 & 0xff; drv_secs = (gdt->sc_info2 >> 8) & 0xff; drv_cyls = gdt->sc_hdr[i].hd_size / drv_hds / drv_secs; } gdt->sc_hdr[i].hd_heads = drv_hds; gdt->sc_hdr[i].hd_secs = drv_secs; /* Round the size */ gdt->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs; if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_DEVTYPE, i, 0, 0)) gdt->sc_hdr[i].hd_devtype = gdt->sc_info; } } GDT_DPRINTF(GDT_D_INIT, ("dpmem %x %d-bus %d cache device%s\n", gdt->sc_dpmembase, gdt->sc_bus_cnt, cdev_cnt, cdev_cnt == 1 ? 
"" : "s")); gdt_free_ccb(gdt, gccb); mtx_unlock(&gdt->sc_lock); atomic_add_int(&gdt_cnt, 1); return (0); } void iir_free(struct gdt_softc *gdt) { int i; GDT_DPRINTF(GDT_D_INIT, ("iir_free()\n")); switch (gdt->sc_init_level) { default: gdt_destroy_dev(gdt->sc_dev); case 5: for (i = GDT_MAXCMDS-1; i >= 0; i--) if (gdt->sc_gccbs[i].gc_map_flag) { callout_drain(&gdt->sc_gccbs[i].gc_timeout); bus_dmamap_destroy(gdt->sc_buffer_dmat, gdt->sc_gccbs[i].gc_dmamap); } bus_dmamap_unload(gdt->sc_gcscratch_dmat, gdt->sc_gcscratch_dmamap); free(gdt->sc_gccbs, M_GDTBUF); case 4: bus_dmamem_free(gdt->sc_gcscratch_dmat, gdt->sc_gcscratch, gdt->sc_gcscratch_dmamap); case 3: bus_dma_tag_destroy(gdt->sc_gcscratch_dmat); case 2: bus_dma_tag_destroy(gdt->sc_buffer_dmat); case 1: bus_dma_tag_destroy(gdt->sc_parent_dmat); case 0: break; } } void iir_attach(struct gdt_softc *gdt) { struct cam_devq *devq; int i; GDT_DPRINTF(GDT_D_INIT, ("iir_attach()\n")); /* * Create the device queue for our SIM. * XXX Throttle this down since the card has problems under load. */ devq = cam_simq_alloc(32); if (devq == NULL) return; for (i = 0; i < gdt->sc_bus_cnt; i++) { /* * Construct our SIM entry */ gdt->sims[i] = cam_sim_alloc(iir_action, iir_poll, "iir", gdt, device_get_unit(gdt->sc_devnode), &gdt->sc_lock, /*untagged*/1, /*tagged*/GDT_MAXCMDS, devq); mtx_lock(&gdt->sc_lock); if (xpt_bus_register(gdt->sims[i], gdt->sc_devnode, i) != CAM_SUCCESS) { cam_sim_free(gdt->sims[i], /*free_devq*/i == 0); mtx_unlock(&gdt->sc_lock); break; } if (xpt_create_path(&gdt->paths[i], /*periph*/NULL, cam_sim_path(gdt->sims[i]), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(gdt->sims[i])); cam_sim_free(gdt->sims[i], /*free_devq*/i == 0); mtx_unlock(&gdt->sc_lock); break; } mtx_unlock(&gdt->sc_lock); } if (i > 0) EVENTHANDLER_REGISTER(shutdown_final, iir_shutdown, gdt, SHUTDOWN_PRI_DEFAULT); /* iir_watchdog(gdt); */ gdt->sc_state = GDT_NORMAL; } static void gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs) { *cyls = size / GDT_HEADS / GDT_SECS; if (*cyls < GDT_MAXCYLS) { *heads = GDT_HEADS; *secs = GDT_SECS; } else { /* Too high for 64 * 32 */ *cyls = size / GDT_MEDHEADS / GDT_MEDSECS; if (*cyls < GDT_MAXCYLS) { *heads = GDT_MEDHEADS; *secs = GDT_MEDSECS; } else { /* Too high for 127 * 63 */ *cyls = size / GDT_BIGHEADS / GDT_BIGSECS; *heads = GDT_BIGHEADS; *secs = GDT_BIGSECS; } } } static int gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *gccb, int timeout) { int rv = 0; GDT_DPRINTF(GDT_D_INIT, ("gdt_wait(%p, %p, %d)\n", gdt, gccb, timeout)); gdt->sc_state |= GDT_POLL_WAIT; do { if (iir_intr_locked(gdt) == gccb->gc_cmd_index) { rv = 1; break; } DELAY(1); } while (--timeout); gdt->sc_state &= ~GDT_POLL_WAIT; while (gdt->sc_test_busy(gdt)) DELAY(1); /* XXX correct? 
*/ return (rv); } static int gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb, u_int8_t service, u_int16_t opcode, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { int retries; GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d)\n", gdt, service, opcode, arg1, arg2, arg3)); bzero(gccb->gc_cmd, GDT_CMD_SZ); for (retries = GDT_RETRIES; ; ) { gccb->gc_service = service; gccb->gc_flags = GDT_GCF_INTERNAL; gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX, gccb->gc_cmd_index); gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, opcode); switch (service) { case GDT_CACHESERVICE: if (opcode == GDT_IOCTL) { gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_SUBFUNC, arg1); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_CHANNEL, arg2); gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM, gccb->gc_scratch_busbase); } else { gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO, (u_int16_t)arg1); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO, arg2); } break; case GDT_SCSIRAWSERVICE: gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION, arg1); gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] = (u_int8_t)arg2; gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] = (u_int8_t)arg3; gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] = (u_int8_t)(arg3 >> 8); } gdt->sc_set_sema0(gdt); gccb->gc_cmd_len = GDT_CMD_SZ; gdt->sc_cmd_off = 0; gdt->sc_cmd_cnt = 0; gdt->sc_copy_cmd(gdt, gccb); gdt->sc_release_event(gdt); DELAY(20); if (!gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT)) return (0); if (gdt->sc_status != GDT_S_BSY || --retries == 0) break; DELAY(1); } return (gdt->sc_status == GDT_S_OK); } static struct gdt_ccb * gdt_get_ccb(struct gdt_softc *gdt) { struct gdt_ccb *gccb; GDT_DPRINTF(GDT_D_QUEUE, ("gdt_get_ccb(%p)\n", gdt)); mtx_assert(&gdt->sc_lock, MA_OWNED); gccb = SLIST_FIRST(&gdt->sc_free_gccb); if (gccb != NULL) { SLIST_REMOVE_HEAD(&gdt->sc_free_gccb, sle); SLIST_INSERT_HEAD(&gdt->sc_pending_gccb, gccb, sle); ++gdt_stat.cmd_index_act; if (gdt_stat.cmd_index_act > gdt_stat.cmd_index_max) gdt_stat.cmd_index_max = gdt_stat.cmd_index_act; } return (gccb); } void gdt_free_ccb(struct gdt_softc *gdt, struct gdt_ccb *gccb) { GDT_DPRINTF(GDT_D_QUEUE, ("gdt_free_ccb(%p, %p)\n", gdt, gccb)); mtx_assert(&gdt->sc_lock, MA_OWNED); gccb->gc_flags = GDT_GCF_UNUSED; SLIST_REMOVE(&gdt->sc_pending_gccb, gccb, gdt_ccb, sle); SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle); --gdt_stat.cmd_index_act; if (gdt->sc_state & GDT_SHUTDOWN) wakeup(gccb); } void gdt_next(struct gdt_softc *gdt) { union ccb *ccb; gdt_ucmd_t *ucmd; struct cam_sim *sim; int bus, target, lun; int next_cmd; struct ccb_scsiio *csio; struct ccb_hdr *ccbh; struct gdt_ccb *gccb = NULL; u_int8_t cmd; GDT_DPRINTF(GDT_D_QUEUE, ("gdt_next(%p)\n", gdt)); mtx_assert(&gdt->sc_lock, MA_OWNED); if (gdt->sc_test_busy(gdt)) { if (!(gdt->sc_state & GDT_POLLING)) { return; } while (gdt->sc_test_busy(gdt)) DELAY(1); } gdt->sc_cmd_cnt = gdt->sc_cmd_off = 0; next_cmd = TRUE; for (;;) { /* I/Os in queue? controller ready? */ if (!TAILQ_FIRST(&gdt->sc_ucmd_queue) && !TAILQ_FIRST(&gdt->sc_ccb_queue)) break; /* 1.: I/Os without ccb (IOCTLs) */ ucmd = TAILQ_FIRST(&gdt->sc_ucmd_queue); if (ucmd != NULL) { TAILQ_REMOVE(&gdt->sc_ucmd_queue, ucmd, links); if ((gccb = gdt_ioctl_cmd(gdt, ucmd)) == NULL) { TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links); break; } break;
/* to allow multiple commands: if (!gdt_polling) continue; */ } /* 2.: I/Os with ccb */ ccb = (union ccb *)TAILQ_FIRST(&gdt->sc_ccb_queue); /* always != NULL here, since tested above */ sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr; bus = cam_sim_bus(sim); target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; TAILQ_REMOVE(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe); --gdt_stat.req_queue_act; /* ccb->ccb_h.func_code is XPT_SCSI_IO */ GDT_DPRINTF(GDT_D_QUEUE, ("XPT_SCSI_IO flags 0x%x)\n", ccb->ccb_h.flags)); csio = &ccb->csio; ccbh = &ccb->ccb_h; - cmd = csio->cdb_io.cdb_bytes[0]; - /* Max CDB length is 12 bytes */ - if (csio->cdb_len > 12) { + cmd = scsiio_cdb_ptr(csio)[0]; + /* Max CDB length is 12 bytes, can't be phys addr */ + if (csio->cdb_len > 12 || (ccbh->flags & CAM_CDB_PHYS)) { ccbh->status = CAM_REQ_INVALID; --gdt_stat.io_count_act; xpt_done(ccb); } else if (bus != gdt->sc_virt_bus) { /* raw service command */ if ((gccb = gdt_raw_cmd(gdt, ccb)) == NULL) { TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe); ++gdt_stat.req_queue_act; if (gdt_stat.req_queue_act > gdt_stat.req_queue_max) gdt_stat.req_queue_max = gdt_stat.req_queue_act; next_cmd = FALSE; } } else if (target >= GDT_MAX_HDRIVES || !gdt->sc_hdr[target].hd_present || lun != 0) { ccbh->status = CAM_DEV_NOT_THERE; --gdt_stat.io_count_act; xpt_done(ccb); } else { /* cache service command */ if (cmd == READ_6 || cmd == WRITE_6 || cmd == READ_10 || cmd == WRITE_10) { if ((gccb = gdt_cache_cmd(gdt, ccb)) == NULL) { TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe); ++gdt_stat.req_queue_act; if (gdt_stat.req_queue_act > gdt_stat.req_queue_max) gdt_stat.req_queue_max = gdt_stat.req_queue_act; next_cmd = FALSE; } } else { gdt_internal_cache_cmd(gdt, ccb); } } if ((gdt->sc_state & GDT_POLLING) || !next_cmd) break; } if (gdt->sc_cmd_cnt > 0) gdt->sc_release_event(gdt); if ((gdt->sc_state & GDT_POLLING) && gdt->sc_cmd_cnt > 0) { gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT); } } static struct gdt_ccb * gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb) { struct gdt_ccb *gccb; struct cam_sim *sim; int error; GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_cmd(%p, %p)\n", gdt, ccb)); if (roundup(GDT_CMD_UNION + GDT_RAW_SZ, sizeof(u_int32_t)) + gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET > gdt->sc_ic_all_size) { GDT_DPRINTF(GDT_D_INVALID, ("%s: gdt_raw_cmd(): DPMEM overflow\n", device_get_nameunit(gdt->sc_devnode))); return (NULL); } gccb = gdt_get_ccb(gdt); if (gccb == NULL) { GDT_DPRINTF(GDT_D_INVALID, ("%s: No free command index found\n", device_get_nameunit(gdt->sc_devnode))); return (gccb); } bzero(gccb->gc_cmd, GDT_CMD_SZ); sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr; gccb->gc_ccb = ccb; gccb->gc_service = GDT_SCSIRAWSERVICE; gccb->gc_flags = GDT_GCF_SCSI; if (gdt->sc_cmd_cnt == 0) gdt->sc_set_sema0(gdt); gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX, gccb->gc_cmd_index); gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_WRITE); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION, (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
GDT_DATA_IN : GDT_DATA_OUT); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN, ccb->csio.dxfer_len); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN, ccb->csio.cdb_len); bcopy(ccb->csio.cdb_io.cdb_bytes, gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CMD, ccb->csio.cdb_len); gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] = ccb->ccb_h.target_id; gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] = ccb->ccb_h.target_lun; gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] = cam_sim_bus(sim); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN, sizeof(struct scsi_sense_data)); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA, gccb->gc_scratch_busbase); error = bus_dmamap_load_ccb(gdt->sc_buffer_dmat, gccb->gc_dmamap, ccb, gdtexecuteccb, gccb, /*flags*/0); if (error == EINPROGRESS) { xpt_freeze_simq(sim, 1); gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } return (gccb); } static struct gdt_ccb * gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb) { struct gdt_ccb *gccb; struct cam_sim *sim; u_int8_t *cmdp; u_int16_t opcode; u_int32_t blockno, blockcnt; int error; GDT_DPRINTF(GDT_D_CMD, ("gdt_cache_cmd(%p, %p)\n", gdt, ccb)); if (roundup(GDT_CMD_UNION + GDT_CACHE_SZ, sizeof(u_int32_t)) + gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET > gdt->sc_ic_all_size) { GDT_DPRINTF(GDT_D_INVALID, ("%s: gdt_cache_cmd(): DPMEM overflow\n", device_get_nameunit(gdt->sc_devnode))); return (NULL); } gccb = gdt_get_ccb(gdt); if (gccb == NULL) { GDT_DPRINTF(GDT_D_DEBUG, ("%s: No free command index found\n", device_get_nameunit(gdt->sc_devnode))); return (gccb); } bzero(gccb->gc_cmd, GDT_CMD_SZ); sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr; gccb->gc_ccb = ccb; gccb->gc_service = GDT_CACHESERVICE; gccb->gc_flags = GDT_GCF_SCSI; if (gdt->sc_cmd_cnt == 0) gdt->sc_set_sema0(gdt); gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX, gccb->gc_cmd_index); cmdp = ccb->csio.cdb_io.cdb_bytes; opcode = (*cmdp == WRITE_6 || *cmdp == WRITE_10) ? GDT_WRITE : GDT_READ; if ((gdt->sc_state & GDT_SHUTDOWN) && opcode == GDT_WRITE) opcode = GDT_WRITE_THR; gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, opcode); gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO, ccb->ccb_h.target_id); if (ccb->csio.cdb_len == 6) { struct scsi_rw_6 *rw = (struct scsi_rw_6 *)cmdp; blockno = scsi_3btoul(rw->addr) & ((SRW_TOPADDR<<16) | 0xffff); blockcnt = rw->length ? 
rw->length : 0x100; } else { struct scsi_rw_10 *rw = (struct scsi_rw_10 *)cmdp; blockno = scsi_4btoul(rw->addr); blockcnt = scsi_2btoul(rw->length); } gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO, blockno); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT, blockcnt); error = bus_dmamap_load_ccb(gdt->sc_buffer_dmat, gccb->gc_dmamap, ccb, gdtexecuteccb, gccb, /*flags*/0); if (error == EINPROGRESS) { xpt_freeze_simq(sim, 1); gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } return (gccb); } static struct gdt_ccb * gdt_ioctl_cmd(struct gdt_softc *gdt, gdt_ucmd_t *ucmd) { struct gdt_ccb *gccb; u_int32_t cnt; GDT_DPRINTF(GDT_D_DEBUG, ("gdt_ioctl_cmd(%p, %p)\n", gdt, ucmd)); gccb = gdt_get_ccb(gdt); if (gccb == NULL) { GDT_DPRINTF(GDT_D_DEBUG, ("%s: No free command index found\n", device_get_nameunit(gdt->sc_devnode))); return (gccb); } bzero(gccb->gc_cmd, GDT_CMD_SZ); gccb->gc_ucmd = ucmd; gccb->gc_service = ucmd->service; gccb->gc_flags = GDT_GCF_IOCTL; /* check DPMEM space, copy data buffer from user space */ if (ucmd->service == GDT_CACHESERVICE) { if (ucmd->OpCode == GDT_IOCTL) { gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_IOCTL_SZ, sizeof(u_int32_t)); cnt = ucmd->u.ioctl.param_size; if (cnt > GDT_SCRATCH_SZ) { device_printf(gdt->sc_devnode, "Scratch buffer too small (%d/%d)\n", GDT_SCRATCH_SZ, cnt); gdt_free_ccb(gdt, gccb); return (NULL); } } else { gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST + GDT_SG_SZ, sizeof(u_int32_t)); cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE; if (cnt > GDT_SCRATCH_SZ) { device_printf(gdt->sc_devnode, "Scratch buffer too small (%d/%d)\n", GDT_SCRATCH_SZ, cnt); gdt_free_ccb(gdt, gccb); return (NULL); } } } else { gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST + GDT_SG_SZ, sizeof(u_int32_t)); cnt = ucmd->u.raw.sdlen; if (cnt + ucmd->u.raw.sense_len > GDT_SCRATCH_SZ) { device_printf(gdt->sc_devnode, "Scratch buffer too small (%d/%d)\n", GDT_SCRATCH_SZ, cnt + ucmd->u.raw.sense_len); gdt_free_ccb(gdt, gccb); return (NULL); } } if (cnt != 0) bcopy(ucmd->data, gccb->gc_scratch, cnt); if (gdt->sc_cmd_off + gccb->gc_cmd_len + GDT_DPMEM_COMMAND_OFFSET > gdt->sc_ic_all_size) { GDT_DPRINTF(GDT_D_INVALID, ("%s: gdt_ioctl_cmd(): DPMEM overflow\n", device_get_nameunit(gdt->sc_devnode))); gdt_free_ccb(gdt, gccb); return (NULL); } if (gdt->sc_cmd_cnt == 0) gdt->sc_set_sema0(gdt); /* fill cmd structure */ gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX, gccb->gc_cmd_index); gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, ucmd->OpCode); if (ucmd->service == GDT_CACHESERVICE) { if (ucmd->OpCode == GDT_IOCTL) { /* IOCTL */ gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_PARAM_SIZE, ucmd->u.ioctl.param_size); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_SUBFUNC, ucmd->u.ioctl.subfunc); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_CHANNEL, ucmd->u.ioctl.channel); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM, gccb->gc_scratch_busbase); } else { /* cache service command */ gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO, ucmd->u.cache.DeviceNo); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO, ucmd->u.cache.BlockNo); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT, ucmd->u.cache.BlockCnt); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR, 0xffffffffUL); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ, 1); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST + GDT_SG_PTR, gccb->gc_scratch_busbase); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + 
GDT_CACHE_SG_LST + GDT_SG_LEN, ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE); } } else { /* raw service command */ gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION, ucmd->u.raw.direction); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA, 0xffffffffUL); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN, ucmd->u.raw.sdlen); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN, ucmd->u.raw.clen); bcopy(ucmd->u.raw.cmd, gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CMD, 12); gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] = ucmd->u.raw.target; gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] = ucmd->u.raw.lun; gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] = ucmd->u.raw.bus; gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN, ucmd->u.raw.sense_len); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA, gccb->gc_scratch_busbase + ucmd->u.raw.sdlen); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ, 1); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST + GDT_SG_PTR, gccb->gc_scratch_busbase); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST + GDT_SG_LEN, ucmd->u.raw.sdlen); } gdt_stat.sg_count_act = 1; gdt->sc_copy_cmd(gdt, gccb); return (gccb); } static void gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb) { int t; t = ccb->ccb_h.target_id; GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd(%p, %p, 0x%x, %d)\n", gdt, ccb, ccb->csio.cdb_io.cdb_bytes[0], t)); switch (ccb->csio.cdb_io.cdb_bytes[0]) { case TEST_UNIT_READY: case START_STOP: break; case REQUEST_SENSE: GDT_DPRINTF(GDT_D_MISC, ("REQUEST_SENSE\n")); break; case INQUIRY: { struct scsi_inquiry_data inq; size_t copylen = MIN(sizeof(inq), ccb->csio.dxfer_len); bzero(&inq, sizeof(inq)); inq.device = (gdt->sc_hdr[t].hd_devtype & 4) ? T_CDROM : T_DIRECT; inq.dev_qual2 = (gdt->sc_hdr[t].hd_devtype & 1) ? 0x80 : 0; inq.version = SCSI_REV_2; inq.response_format = 2; inq.additional_length = 32; inq.flags = SID_CmdQue | SID_Sync; strncpy(inq.vendor, gdt->oem_name, sizeof(inq.vendor)); snprintf(inq.product, sizeof(inq.product), "Host Drive #%02d", t); strncpy(inq.revision, " ", sizeof(inq.revision)); bcopy(&inq, ccb->csio.data_ptr, copylen ); if( ccb->csio.dxfer_len > copylen ) bzero( ccb->csio.data_ptr+copylen, ccb->csio.dxfer_len - copylen ); break; } case MODE_SENSE_6: { struct mpd_data { struct scsi_mode_hdr_6 hd; struct scsi_mode_block_descr bd; struct scsi_control_page cp; } mpd; size_t copylen = MIN(sizeof(mpd), ccb->csio.dxfer_len); u_int8_t page; /*mpd = (struct mpd_data *)ccb->csio.data_ptr;*/ bzero(&mpd, sizeof(mpd)); mpd.hd.datalen = sizeof(struct scsi_mode_hdr_6) + sizeof(struct scsi_mode_block_descr); mpd.hd.dev_specific = (gdt->sc_hdr[t].hd_devtype & 2) ? 
0x80 : 0; mpd.hd.block_descr_len = sizeof(struct scsi_mode_block_descr); mpd.bd.block_len[0] = (GDT_SECTOR_SIZE & 0x00ff0000) >> 16; mpd.bd.block_len[1] = (GDT_SECTOR_SIZE & 0x0000ff00) >> 8; mpd.bd.block_len[2] = (GDT_SECTOR_SIZE & 0x000000ff); bcopy(&mpd, ccb->csio.data_ptr, copylen ); if( ccb->csio.dxfer_len > copylen ) bzero( ccb->csio.data_ptr+copylen, ccb->csio.dxfer_len - copylen ); page=((struct scsi_mode_sense_6 *)ccb->csio.cdb_io.cdb_bytes)->page; switch (page) { default: GDT_DPRINTF(GDT_D_MISC, ("MODE_SENSE_6: page 0x%x\n", page)); break; } break; } case READ_CAPACITY: { struct scsi_read_capacity_data rcd; size_t copylen = MIN(sizeof(rcd), ccb->csio.dxfer_len); /*rcd = (struct scsi_read_capacity_data *)ccb->csio.data_ptr;*/ bzero(&rcd, sizeof(rcd)); scsi_ulto4b(gdt->sc_hdr[t].hd_size - 1, rcd.addr); scsi_ulto4b(GDT_SECTOR_SIZE, rcd.length); bcopy(&rcd, ccb->csio.data_ptr, copylen ); if( ccb->csio.dxfer_len > copylen ) bzero( ccb->csio.data_ptr+copylen, ccb->csio.dxfer_len - copylen ); break; } default: GDT_DPRINTF(GDT_D_MISC, ("gdt_internal_cache_cmd(%d) unknown\n", ccb->csio.cdb_io.cdb_bytes[0])); break; } ccb->ccb_h.status |= CAM_REQ_CMP; --gdt_stat.io_count_act; xpt_done(ccb); } static void gdtmapmem(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { bus_addr_t *busaddrp; busaddrp = (bus_addr_t *)arg; *busaddrp = dm_segs->ds_addr; } static void gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct gdt_ccb *gccb; union ccb *ccb; struct gdt_softc *gdt; int i; gccb = (struct gdt_ccb *)arg; ccb = gccb->gc_ccb; gdt = cam_sim_softc((struct cam_sim *)ccb->ccb_h.ccb_sim_ptr); mtx_assert(&gdt->sc_lock, MA_OWNED); GDT_DPRINTF(GDT_D_CMD, ("gdtexecuteccb(%p, %p, %p, %d, %d)\n", gdt, gccb, dm_segs, nseg, error)); gdt_stat.sg_count_act = nseg; if (nseg > gdt_stat.sg_count_max) gdt_stat.sg_count_max = nseg; /* Copy the segments into our SG list */ if (gccb->gc_service == GDT_CACHESERVICE) { for (i = 0; i < nseg; ++i) { gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST + i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST + i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len); dm_segs++; } gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ, nseg); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR, 0xffffffffUL); gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST + nseg * GDT_SG_SZ, sizeof(u_int32_t)); } else { for (i = 0; i < nseg; ++i) { gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST + i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST + i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len); dm_segs++; } gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ, nseg); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA, 0xffffffffUL); gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST + nseg * GDT_SG_SZ, sizeof(u_int32_t)); } if (nseg != 0) { bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap, (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); } /* We must NOT abort the command here if CAM_REQ_INPROG is not set, * because command semaphore is already set! 
*/ ccb->ccb_h.status |= CAM_SIM_QUEUED; /* timeout handling */ callout_reset_sbt(&gccb->gc_timeout, SBT_1MS * ccb->ccb_h.timeout, 0, iir_timeout, (caddr_t)gccb, 0); gdt->sc_copy_cmd(gdt, gccb); } static void iir_action( struct cam_sim *sim, union ccb *ccb ) { struct gdt_softc *gdt; int bus, target, lun; gdt = (struct gdt_softc *)cam_sim_softc( sim ); mtx_assert(&gdt->sc_lock, MA_OWNED); ccb->ccb_h.ccb_sim_ptr = sim; bus = cam_sim_bus(sim); target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; GDT_DPRINTF(GDT_D_CMD, ("iir_action(%p) func 0x%x cmd 0x%x bus %d target %d lun %d\n", gdt, ccb->ccb_h.func_code, ccb->csio.cdb_io.cdb_bytes[0], bus, target, lun)); ++gdt_stat.io_count_act; if (gdt_stat.io_count_act > gdt_stat.io_count_max) gdt_stat.io_count_max = gdt_stat.io_count_act; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: TAILQ_INSERT_TAIL(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe); ++gdt_stat.req_queue_act; if (gdt_stat.req_queue_act > gdt_stat.req_queue_max) gdt_stat.req_queue_max = gdt_stat.req_queue_act; gdt_next(gdt); break; case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; --gdt_stat.io_count_act; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; --gdt_stat.io_count_act; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; if (cts->type == CTS_TYPE_USER_SETTINGS) { spi->flags = CTS_SPI_FLAGS_DISC_ENB; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; spi->sync_period = 25; /* 10MHz */ if (spi->sync_period != 0) spi->sync_offset = 15; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; } else { ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; } --gdt_stat.io_count_act; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; u_int32_t secs_per_cylinder; ccg = &ccb->ccg; ccg->heads = gdt->sc_hdr[target].hd_heads; ccg->secs_per_track = gdt->sc_hdr[target].hd_secs; secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; --gdt_stat.io_count_act; xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_CMP; --gdt_stat.io_count_act; xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; --gdt_stat.io_count_act; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; cpi->hba_inquiry |= PI_WIDE_16; cpi->target_sprt = 1; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; if (bus == gdt->sc_virt_bus) cpi->max_target = GDT_MAX_HDRIVES - 1; else if (gdt->sc_class & GDT_FC) cpi->max_target = GDT_MAXID_FC - 1; else cpi->max_target = GDT_MAXID - 1; cpi->max_lun = 7; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = bus; cpi->initiator_id = (bus == gdt->sc_virt_bus ? 
127 : gdt->sc_bus_id[bus]); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); if (gdt->sc_vendor == INTEL_VENDOR_ID_IIR) strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN); else strncpy(cpi->hba_vid, "ICP vortex ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; --gdt_stat.io_count_act; xpt_done(ccb); break; } default: GDT_DPRINTF(GDT_D_INVALID, ("gdt_next(%p) cmd 0x%x invalid\n", gdt, ccb->ccb_h.func_code)); ccb->ccb_h.status = CAM_REQ_INVALID; --gdt_stat.io_count_act; xpt_done(ccb); break; } } static void iir_poll( struct cam_sim *sim ) { struct gdt_softc *gdt; gdt = (struct gdt_softc *)cam_sim_softc( sim ); GDT_DPRINTF(GDT_D_CMD, ("iir_poll sim %p gdt %p\n", sim, gdt)); iir_intr_locked(gdt); } static void iir_timeout(void *arg) { GDT_DPRINTF(GDT_D_TIMEOUT, ("iir_timeout(%p)\n", arg)); } static void iir_shutdown( void *arg, int howto ) { struct gdt_softc *gdt; struct gdt_ccb *gccb; gdt_ucmd_t *ucmd; int i; gdt = (struct gdt_softc *)arg; GDT_DPRINTF(GDT_D_CMD, ("iir_shutdown(%p, %d)\n", gdt, howto)); device_printf(gdt->sc_devnode, "Flushing all Host Drives. Please wait ... "); /* allocate ucmd buffer */ ucmd = malloc(sizeof(gdt_ucmd_t), M_GDTBUF, M_NOWAIT); if (ucmd == NULL) { printf("\n"); device_printf(gdt->sc_devnode, "iir_shutdown(): Cannot allocate resource\n"); return; } bzero(ucmd, sizeof(gdt_ucmd_t)); /* wait for pending IOs */ mtx_lock(&gdt->sc_lock); gdt->sc_state = GDT_SHUTDOWN; if ((gccb = SLIST_FIRST(&gdt->sc_pending_gccb)) != NULL) mtx_sleep(gccb, &gdt->sc_lock, PCATCH | PRIBIO, "iirshw", 100 * hz); /* flush */ for (i = 0; i < GDT_MAX_HDRIVES; ++i) { if (gdt->sc_hdr[i].hd_present) { ucmd->service = GDT_CACHESERVICE; ucmd->OpCode = GDT_FLUSH; ucmd->u.cache.DeviceNo = i; TAILQ_INSERT_TAIL(&gdt->sc_ucmd_queue, ucmd, links); ucmd->complete_flag = FALSE; gdt_next(gdt); if (!ucmd->complete_flag) mtx_sleep(ucmd, &gdt->sc_lock, PCATCH | PRIBIO, "iirshw", 10 * hz); } } mtx_unlock(&gdt->sc_lock); free(ucmd, M_GDTBUF); printf("Done.\n"); } void iir_intr(void *arg) { struct gdt_softc *gdt = arg; mtx_lock(&gdt->sc_lock); iir_intr_locked(gdt); mtx_unlock(&gdt->sc_lock); } int iir_intr_locked(struct gdt_softc *gdt) { struct gdt_intr_ctx ctx; struct gdt_ccb *gccb; gdt_ucmd_t *ucmd; u_int32_t cnt; GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p)\n", gdt)); mtx_assert(&gdt->sc_lock, MA_OWNED); /* If polling and we were not called from gdt_wait, just return */ if ((gdt->sc_state & GDT_POLLING) && !(gdt->sc_state & GDT_POLL_WAIT)) return (0); ctx.istatus = gdt->sc_get_status(gdt); if (ctx.istatus == 0x00) { gdt->sc_status = GDT_S_NO_STATUS; return (ctx.istatus); } gdt->sc_intr(gdt, &ctx); gdt->sc_status = ctx.cmd_status; gdt->sc_service = ctx.service; gdt->sc_info = ctx.info; gdt->sc_info2 = ctx.info2; if (ctx.istatus == GDT_ASYNCINDEX) { gdt_async_event(gdt, ctx.service); return (ctx.istatus); } if (ctx.istatus == GDT_SPEZINDEX) { GDT_DPRINTF(GDT_D_INVALID, ("%s: Service unknown or not initialized!\n", device_get_nameunit(gdt->sc_devnode))); gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver); gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum; gdt_store_event(GDT_ES_DRIVER, 4, &gdt->sc_dvr); return (ctx.istatus); } gccb = &gdt->sc_gccbs[ctx.istatus - 2]; ctx.service = gccb->gc_service; switch (gccb->gc_flags) { case GDT_GCF_UNUSED: GDT_DPRINTF(GDT_D_INVALID, ("%s: Index (%d) to unused command!\n",
device_get_nameunit(gdt->sc_devnode), ctx.istatus)); gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver); gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum; gdt->sc_dvr.eu.driver.index = ctx.istatus; gdt_store_event(GDT_ES_DRIVER, 1, &gdt->sc_dvr); gdt_free_ccb(gdt, gccb); break; case GDT_GCF_INTERNAL: break; case GDT_GCF_IOCTL: ucmd = gccb->gc_ucmd; if (gdt->sc_status == GDT_S_BSY) { GDT_DPRINTF(GDT_D_DEBUG, ("iir_intr(%p) ioctl: gccb %p busy\n", gdt, gccb)); TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links); } else { ucmd->status = gdt->sc_status; ucmd->info = gdt->sc_info; ucmd->complete_flag = TRUE; if (ucmd->service == GDT_CACHESERVICE) { if (ucmd->OpCode == GDT_IOCTL) { cnt = ucmd->u.ioctl.param_size; if (cnt != 0) bcopy(gccb->gc_scratch, ucmd->data, cnt); } else { cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE; if (cnt != 0) bcopy(gccb->gc_scratch, ucmd->data, cnt); } } else { cnt = ucmd->u.raw.sdlen; if (cnt != 0) bcopy(gccb->gc_scratch, ucmd->data, cnt); if (ucmd->u.raw.sense_len != 0) bcopy(gccb->gc_scratch, ucmd->data, cnt); } gdt_free_ccb(gdt, gccb); /* wakeup */ wakeup(ucmd); } gdt_next(gdt); break; default: gdt_free_ccb(gdt, gccb); gdt_sync_event(gdt, ctx.service, ctx.istatus, gccb); gdt_next(gdt); break; } return (ctx.istatus); } int gdt_async_event(struct gdt_softc *gdt, int service) { struct gdt_ccb *gccb; GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d)\n", gdt, service)); if (service == GDT_SCREENSERVICE) { if (gdt->sc_status == GDT_MSG_REQUEST) { while (gdt->sc_test_busy(gdt)) DELAY(1); gccb = gdt_get_ccb(gdt); if (gccb == NULL) { device_printf(gdt->sc_devnode, "No free command index found\n"); return (1); } bzero(gccb->gc_cmd, GDT_CMD_SZ); gccb->gc_service = service; gccb->gc_flags = GDT_GCF_SCREEN; gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX, gccb->gc_cmd_index); gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_READ); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE, GDT_MSG_INV_HANDLE); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR, gccb->gc_scratch_busbase); gdt->sc_set_sema0(gdt); gdt->sc_cmd_off = 0; gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ, sizeof(u_int32_t)); gdt->sc_cmd_cnt = 0; gdt->sc_copy_cmd(gdt, gccb); device_printf(gdt->sc_devnode, "[PCI %d/%d] ", gdt->sc_bus, gdt->sc_slot); gdt->sc_release_event(gdt); } } else { if ((gdt->sc_fw_vers & 0xff) >= 0x1a) { gdt->sc_dvr.size = 0; gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum; gdt->sc_dvr.eu.async.status = gdt->sc_status; /* severity and event_string already set! 
*/ } else { gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.async); gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum; gdt->sc_dvr.eu.async.service = service; gdt->sc_dvr.eu.async.status = gdt->sc_status; gdt->sc_dvr.eu.async.info = gdt->sc_info; *(u_int32_t *)gdt->sc_dvr.eu.async.scsi_coord = gdt->sc_info2; } gdt_store_event(GDT_ES_ASYNC, service, &gdt->sc_dvr); device_printf(gdt->sc_devnode, "%s\n", gdt->sc_dvr.event_string); } return (0); } int gdt_sync_event(struct gdt_softc *gdt, int service, u_int8_t index, struct gdt_ccb *gccb) { union ccb *ccb; GDT_DPRINTF(GDT_D_INTR, ("gdt_sync_event(%p, %d, %d, %p)\n", gdt,service,index,gccb)); ccb = gccb->gc_ccb; if (service == GDT_SCREENSERVICE) { u_int32_t msg_len; msg_len = gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_LEN); if (msg_len) if (!(gccb->gc_scratch[GDT_SCR_MSG_ANSWER] && gccb->gc_scratch[GDT_SCR_MSG_EXT])) { gccb->gc_scratch[GDT_SCR_MSG_TEXT + msg_len] = '\0'; printf("%s",&gccb->gc_scratch[GDT_SCR_MSG_TEXT]); } if (gccb->gc_scratch[GDT_SCR_MSG_EXT] && !gccb->gc_scratch[GDT_SCR_MSG_ANSWER]) { while (gdt->sc_test_busy(gdt)) DELAY(1); bzero(gccb->gc_cmd, GDT_CMD_SZ); gccb = gdt_get_ccb(gdt); if (gccb == NULL) { device_printf(gdt->sc_devnode, "No free command index found\n"); return (1); } gccb->gc_service = service; gccb->gc_flags = GDT_GCF_SCREEN; gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX, gccb->gc_cmd_index); gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_READ); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE, gccb->gc_scratch[GDT_SCR_MSG_HANDLE]); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR, gccb->gc_scratch_busbase); gdt->sc_set_sema0(gdt); gdt->sc_cmd_off = 0; gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ, sizeof(u_int32_t)); gdt->sc_cmd_cnt = 0; gdt->sc_copy_cmd(gdt, gccb); gdt->sc_release_event(gdt); return (0); } if (gccb->gc_scratch[GDT_SCR_MSG_ANSWER] && gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN)) { /* default answers (getchar() not possible) */ if (gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) == 1) { gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN, 0); gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 1); gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 0; } else { gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN, gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) - 2); gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 2); gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 1; gccb->gc_scratch[GDT_SCR_MSG_TEXT + 1] = 0; } gccb->gc_scratch[GDT_SCR_MSG_EXT] = 0; gccb->gc_scratch[GDT_SCR_MSG_ANSWER] = 0; while (gdt->sc_test_busy(gdt)) DELAY(1); bzero(gccb->gc_cmd, GDT_CMD_SZ); gccb = gdt_get_ccb(gdt); if (gccb == NULL) { device_printf(gdt->sc_devnode, "No free command index found\n"); return (1); } gccb->gc_service = service; gccb->gc_flags = GDT_GCF_SCREEN; gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX, gccb->gc_cmd_index); gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_WRITE); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE, gccb->gc_scratch[GDT_SCR_MSG_HANDLE]); gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR, gccb->gc_scratch_busbase); gdt->sc_set_sema0(gdt); gdt->sc_cmd_off = 0; gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ, sizeof(u_int32_t)); gdt->sc_cmd_cnt = 0; gdt->sc_copy_cmd(gdt, gccb); gdt->sc_release_event(gdt); return (0); } printf("\n"); return (0); } else { callout_stop(&gccb->gc_timeout); if (gdt->sc_status == GDT_S_BSY) { GDT_DPRINTF(GDT_D_DEBUG, ("gdt_sync_event(%p) gccb %p busy\n", gdt, gccb)); TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe); 
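When the controller reports GDT_S_BSY just above, the CCB goes back to the head of sc_ccb_queue so it is retried before any newer work is started. A minimal userspace sketch of this requeue-on-busy pattern, using the same sys/queue.h TAILQ macros (struct req and requeue_busy are hypothetical names):

#include <stdio.h>
#include <sys/queue.h>

struct req {
	int id;
	TAILQ_ENTRY(req) link;
};
TAILQ_HEAD(reqq, req);

/* A busy completion re-queues at the head, not the tail, so the
 * retried request keeps its place ahead of newer submissions. */
static void
requeue_busy(struct reqq *q, struct req *r)
{
	TAILQ_INSERT_HEAD(q, r, link);
}

int
main(void)
{
	struct reqq q = TAILQ_HEAD_INITIALIZER(q);
	struct req a = { .id = 1 }, b = { .id = 2 };
	struct req *r;

	TAILQ_INSERT_TAIL(&q, &a, link);
	requeue_busy(&q, &b);
	TAILQ_FOREACH(r, &q, link)
		printf("req %d\n", r->id);	/* prints 2, then 1 */
	return (0);
}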
++gdt_stat.req_queue_act; if (gdt_stat.req_queue_act > gdt_stat.req_queue_max) gdt_stat.req_queue_max = gdt_stat.req_queue_act; return (2); } bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap, (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(gdt->sc_buffer_dmat, gccb->gc_dmamap); ccb->csio.resid = 0; if (gdt->sc_status == GDT_S_OK) { ccb->ccb_h.status |= CAM_REQ_CMP; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; } else { /* error */ if (gccb->gc_service == GDT_CACHESERVICE) { struct scsi_sense_data *sense; ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; bzero(&ccb->csio.sense_data, ccb->csio.sense_len); sense = &ccb->csio.sense_data; scsi_set_sense_data(sense, /*sense_format*/ SSD_TYPE_NONE, /*current_error*/ 1, /*sense_key*/ SSD_KEY_NOT_READY, /*asc*/ 0x4, /*ascq*/ 0x01, SSD_ELEM_NONE); gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.sync); gdt->sc_dvr.eu.sync.ionode = gdt->sc_hanum; gdt->sc_dvr.eu.sync.service = service; gdt->sc_dvr.eu.sync.status = gdt->sc_status; gdt->sc_dvr.eu.sync.info = gdt->sc_info; gdt->sc_dvr.eu.sync.hostdrive = ccb->ccb_h.target_id; if (gdt->sc_status >= 0x8000) gdt_store_event(GDT_ES_SYNC, 0, &gdt->sc_dvr); else gdt_store_event(GDT_ES_SYNC, service, &gdt->sc_dvr); } else { /* raw service */ if (gdt->sc_status != GDT_S_RAW_SCSI || gdt->sc_info >= 0x100) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; } else { ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR|CAM_AUTOSNS_VALID; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->csio.scsi_status = gdt->sc_info; bcopy(gccb->gc_scratch, &ccb->csio.sense_data, ccb->csio.sense_len); } } } --gdt_stat.io_count_act; xpt_done(ccb); } return (0); } /* Controller event handling functions */ void gdt_store_event(u_int16_t source, u_int16_t idx, gdt_evt_data *evt) { gdt_evt_str *e; struct timeval tv; GDT_DPRINTF(GDT_D_MISC, ("gdt_store_event(%d, %d)\n", source, idx)); if (source == 0) /* no source -> no event */ return; mtx_lock(&elock); if (ebuffer[elastidx].event_source == source && ebuffer[elastidx].event_idx == idx && ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 && !memcmp((char *)&ebuffer[elastidx].event_data.eu, (char *)&evt->eu, evt->size)) || (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 && !strcmp((char *)&ebuffer[elastidx].event_data.event_string, (char *)&evt->event_string)))) { e = &ebuffer[elastidx]; getmicrotime(&tv); e->last_stamp = tv.tv_sec; ++e->same_count; } else { if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */ ++elastidx; if (elastidx == GDT_MAX_EVENTS) elastidx = 0; if (elastidx == eoldidx) { /* reached mark ? 
*/ ++eoldidx; if (eoldidx == GDT_MAX_EVENTS) eoldidx = 0; } } e = &ebuffer[elastidx]; e->event_source = source; e->event_idx = idx; getmicrotime(&tv); e->first_stamp = e->last_stamp = tv.tv_sec; e->same_count = 1; e->event_data = *evt; e->application = 0; } mtx_unlock(&elock); } int gdt_read_event(int handle, gdt_evt_str *estr) { gdt_evt_str *e; int eindex; GDT_DPRINTF(GDT_D_MISC, ("gdt_read_event(%d)\n", handle)); mtx_lock(&elock); if (handle == -1) eindex = eoldidx; else eindex = handle; estr->event_source = 0; if (eindex >= GDT_MAX_EVENTS) { mtx_unlock(&elock); return eindex; } e = &ebuffer[eindex]; if (e->event_source != 0) { if (eindex != elastidx) { if (++eindex == GDT_MAX_EVENTS) eindex = 0; } else { eindex = -1; } memcpy(estr, e, sizeof(gdt_evt_str)); } mtx_unlock(&elock); return eindex; } void gdt_readapp_event(u_int8_t application, gdt_evt_str *estr) { gdt_evt_str *e; int found = FALSE; int eindex; GDT_DPRINTF(GDT_D_MISC, ("gdt_readapp_event(%d)\n", application)); mtx_lock(&elock); eindex = eoldidx; for (;;) { e = &ebuffer[eindex]; if (e->event_source == 0) break; if ((e->application & application) == 0) { e->application |= application; found = TRUE; break; } if (eindex == elastidx) break; if (++eindex == GDT_MAX_EVENTS) eindex = 0; } if (found) memcpy(estr, e, sizeof(gdt_evt_str)); else estr->event_source = 0; mtx_unlock(&elock); } void gdt_clear_events() { GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events\n")); mtx_lock(&elock); eoldidx = elastidx = 0; ebuffer[0].event_source = 0; mtx_unlock(&elock); } Index: head/sys/dev/isci/isci_controller.c =================================================================== --- head/sys/dev/isci/isci_controller.c (revision 296890) +++ head/sys/dev/isci/isci_controller.c (revision 296891) @@ -1,832 +1,838 @@ /*- * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include void isci_action(struct cam_sim *sim, union ccb *ccb); void isci_poll(struct cam_sim *sim); #define ccb_sim_ptr sim_priv.entries[0].ptr /** * @brief This user callback will inform the user that the controller has * had a serious unexpected error. The user should note the error, * disable interrupts, and wait for current ongoing processing to * complete. Subsequently, the user should reset the controller. * * @param[in] controller This parameter specifies the controller that had * an error. * * @return none */ void scif_cb_controller_error(SCI_CONTROLLER_HANDLE_T controller, SCI_CONTROLLER_ERROR error) { isci_log_message(0, "ISCI", "scif_cb_controller_error: 0x%x\n", error); } /** * @brief This user callback will inform the user that the controller has * finished the start process. * * @param[in] controller This parameter specifies the controller that was * started. * @param[in] completion_status This parameter specifies the results of * the start operation. SCI_SUCCESS indicates successful * completion. * * @return none */ void scif_cb_controller_start_complete(SCI_CONTROLLER_HANDLE_T controller, SCI_STATUS completion_status) { uint32_t index; struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(controller); isci_controller->is_started = TRUE; /* Set bits for all domains. We will clear them one-by-one once * the domains complete discovery, or return error when calling * scif_domain_discover. Once all bits are clear, we will register * the controller with CAM. */ isci_controller->initial_discovery_mask = (1 << SCI_MAX_DOMAINS) - 1; for(index = 0; index < SCI_MAX_DOMAINS; index++) { SCI_STATUS status; SCI_DOMAIN_HANDLE_T domain = isci_controller->domain[index].sci_object; status = scif_domain_discover( domain, scif_domain_get_suggested_discover_timeout(domain), DEVICE_TIMEOUT ); if (status != SCI_SUCCESS) { isci_controller_domain_discovery_complete( isci_controller, &isci_controller->domain[index]); } } } /** * @brief This user callback will inform the user that the controller has * finished the stop process. Note that after the user calls * scif_controller_stop(), and before this controller stop * complete callback is received, the user should not expect any * callback from the framework, such as * scif_cb_domain_change_notification(). * * @param[in] controller This parameter specifies the controller that was * stopped. * @param[in] completion_status This parameter specifies the results of * the stop operation. SCI_SUCCESS indicates successful * completion. * * @return none */ void scif_cb_controller_stop_complete(SCI_CONTROLLER_HANDLE_T controller, SCI_STATUS completion_status) { struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(controller); isci_controller->is_started = FALSE; } static void isci_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) { SCI_PHYSICAL_ADDRESS *phys_addr = arg; *phys_addr = seg[0].ds_addr; } /** * @brief This method will be invoked to allocate memory dynamically. * * @param[in] controller This parameter represents the controller * object for which to allocate memory. * @param[out] mde This parameter represents the memory descriptor to * be filled in by the user that will reference the newly * allocated memory.
* * @return none */ void scif_cb_controller_allocate_memory(SCI_CONTROLLER_HANDLE_T controller, SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde) { struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(controller); /* * Note this routine is only used for buffers needed to translate * SCSI UNMAP commands to ATA DSM commands for SATA disks. * * We first try to pull a buffer from the controller's pool, and only * call contigmalloc if one isn't there. */ if (!sci_pool_empty(isci_controller->unmap_buffer_pool)) { sci_pool_get(isci_controller->unmap_buffer_pool, mde->virtual_address); } else mde->virtual_address = contigmalloc(PAGE_SIZE, M_ISCI, M_NOWAIT, 0, BUS_SPACE_MAXADDR, mde->constant_memory_alignment, 0); if (mde->virtual_address != NULL) bus_dmamap_load(isci_controller->buffer_dma_tag, NULL, mde->virtual_address, PAGE_SIZE, isci_single_map, &mde->physical_address, BUS_DMA_NOWAIT); } /** * @brief This method will be invoked to free memory that was allocated * dynamically. * * @param[in] controller This parameter represents the controller * object for which the memory was allocated. * @param[in] mde This parameter represents the memory descriptor * that references the memory to be freed. * * @return none */ void scif_cb_controller_free_memory(SCI_CONTROLLER_HANDLE_T controller, SCI_PHYSICAL_MEMORY_DESCRIPTOR_T * mde) { struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(controller); /* * Put the buffer back into the controller's buffer pool, rather * than invoking contigfree. This reduces the chance that no buffer * is available when the system is under memory pressure. */ sci_pool_put(isci_controller->unmap_buffer_pool, mde->virtual_address); } void isci_controller_construct(struct ISCI_CONTROLLER *controller, struct isci_softc *isci) { SCI_CONTROLLER_HANDLE_T scif_controller_handle; scif_library_allocate_controller(isci->sci_library_handle, &scif_controller_handle); scif_controller_construct(isci->sci_library_handle, scif_controller_handle, NULL); controller->isci = isci; controller->scif_controller_handle = scif_controller_handle; /* This allows us to later use * sci_object_get_association(scif_controller_handle) * inside of a callback routine to get our struct ISCI_CONTROLLER object */ sci_object_set_association(scif_controller_handle, (void *)controller); controller->is_started = FALSE; controller->is_frozen = FALSE; controller->release_queued_ccbs = FALSE; controller->sim = NULL; controller->initial_discovery_mask = 0; sci_fast_list_init(&controller->pending_device_reset_list); mtx_init(&controller->lock, "isci", NULL, MTX_DEF); uint32_t domain_index; for(domain_index = 0; domain_index < SCI_MAX_DOMAINS; domain_index++) { isci_domain_construct( &controller->domain[domain_index], domain_index, controller); } controller->timer_memory = malloc( sizeof(struct ISCI_TIMER) * SCI_MAX_TIMERS, M_ISCI, M_NOWAIT | M_ZERO); sci_pool_initialize(controller->timer_pool); struct ISCI_TIMER *timer = (struct ISCI_TIMER *) controller->timer_memory; for ( int i = 0; i < SCI_MAX_TIMERS; i++ ) { sci_pool_put(controller->timer_pool, timer++); } sci_pool_initialize(controller->unmap_buffer_pool); } static void isci_led_fault_func(void *priv, int onoff) { struct ISCI_PHY *phy = priv; /* map onoff to the fault LED */ phy->led_fault = onoff; scic_sgpio_update_led_state(phy->handle, 1 << phy->index, phy->led_fault, phy->led_locate, 0); } static void isci_led_locate_func(void *priv, int onoff) { struct ISCI_PHY *phy =
priv; /* map onoff to the locate LED */ phy->led_locate = onoff; scic_sgpio_update_led_state(phy->handle, 1 << phy->index, phy->led_fault, phy->led_locate, 0); } SCI_STATUS isci_controller_initialize(struct ISCI_CONTROLLER *controller) { SCIC_USER_PARAMETERS_T scic_user_parameters; SCI_CONTROLLER_HANDLE_T scic_controller_handle; char led_name[64]; unsigned long tunable; uint32_t io_shortage; uint32_t fail_on_timeout; int i; scic_controller_handle = scif_controller_get_scic_handle(controller->scif_controller_handle); if (controller->isci->oem_parameters_found == TRUE) { scic_oem_parameters_set( scic_controller_handle, &controller->oem_parameters, (uint8_t)(controller->oem_parameters_version)); } scic_user_parameters_get(scic_controller_handle, &scic_user_parameters); if (TUNABLE_ULONG_FETCH("hw.isci.no_outbound_task_timeout", &tunable)) scic_user_parameters.sds1.no_outbound_task_timeout = (uint8_t)tunable; if (TUNABLE_ULONG_FETCH("hw.isci.ssp_max_occupancy_timeout", &tunable)) scic_user_parameters.sds1.ssp_max_occupancy_timeout = (uint16_t)tunable; if (TUNABLE_ULONG_FETCH("hw.isci.stp_max_occupancy_timeout", &tunable)) scic_user_parameters.sds1.stp_max_occupancy_timeout = (uint16_t)tunable; if (TUNABLE_ULONG_FETCH("hw.isci.ssp_inactivity_timeout", &tunable)) scic_user_parameters.sds1.ssp_inactivity_timeout = (uint16_t)tunable; if (TUNABLE_ULONG_FETCH("hw.isci.stp_inactivity_timeout", &tunable)) scic_user_parameters.sds1.stp_inactivity_timeout = (uint16_t)tunable; if (TUNABLE_ULONG_FETCH("hw.isci.max_speed_generation", &tunable)) for (i = 0; i < SCI_MAX_PHYS; i++) scic_user_parameters.sds1.phys[i].max_speed_generation = (uint8_t)tunable; scic_user_parameters_set(scic_controller_handle, &scic_user_parameters); /* Scheduler bug in SCU requires SCIL to reserve some task contexts as a * workaround - one per domain. */ controller->queue_depth = SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS; if (TUNABLE_INT_FETCH("hw.isci.controller_queue_depth", &controller->queue_depth)) { controller->queue_depth = max(1, min(controller->queue_depth, SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS)); } /* Reserve one request so that we can ensure we have one available TC * to do internal device resets. */ controller->sim_queue_depth = controller->queue_depth - 1; /* Although we save one TC to do internal device resets, it is possible * we could end up using several TCs for simultaneous device resets * while at the same time having CAM fill our controller queue. To * simulate this condition, and how our driver handles it, we can set * this io_shortage parameter, which will tell CAM that we have a * larger queue depth than we really do. */ io_shortage = 0; TUNABLE_INT_FETCH("hw.isci.io_shortage", &io_shortage); controller->sim_queue_depth += io_shortage; fail_on_timeout = 1; TUNABLE_INT_FETCH("hw.isci.fail_on_task_timeout", &fail_on_timeout); controller->fail_on_task_timeout = fail_on_timeout; /* Attach to CAM using xpt_bus_register now, then immediately freeze * the simq. It will get released later when initial domain discovery * is complete.
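The freeze taken here pairs with initial_discovery_mask, set in scif_cb_controller_start_complete() earlier in this file: one bit per domain, cleared as each domain finishes discovery, with the simq released only once the mask hits zero. A self-contained sketch of that bookkeeping, where MAX_DOMAINS stands in for SCI_MAX_DOMAINS:

#include <stdio.h>
#include <stdint.h>

#define MAX_DOMAINS 4	/* stand-in for SCI_MAX_DOMAINS */

int
main(void)
{
	uint32_t mask = (1u << MAX_DOMAINS) - 1;	/* 0xf: all pending */
	int d;

	for (d = 0; d < MAX_DOMAINS; d++) {
		mask &= ~(1u << d);	/* domain d finished discovery */
		printf("domain %d done, mask 0x%x\n", d, mask);
	}
	if (mask == 0)
		printf("all domains discovered; release the simq\n");
	return (0);
}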
*/ controller->has_been_scanned = FALSE; mtx_lock(&controller->lock); isci_controller_attach_to_cam(controller); xpt_freeze_simq(controller->sim, 1); mtx_unlock(&controller->lock); for (i = 0; i < SCI_MAX_PHYS; i++) { controller->phys[i].handle = scic_controller_handle; controller->phys[i].index = i; /* fault */ controller->phys[i].led_fault = 0; sprintf(led_name, "isci.bus%d.port%d.fault", controller->index, i); controller->phys[i].cdev_fault = led_create(isci_led_fault_func, &controller->phys[i], led_name); /* locate */ controller->phys[i].led_locate = 0; sprintf(led_name, "isci.bus%d.port%d.locate", controller->index, i); controller->phys[i].cdev_locate = led_create(isci_led_locate_func, &controller->phys[i], led_name); } return (scif_controller_initialize(controller->scif_controller_handle)); } int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller) { int error; device_t device = controller->isci->device; uint32_t max_segment_size = isci_io_request_get_max_io_size(); uint32_t status = 0; struct ISCI_MEMORY *uncached_controller_memory = &controller->uncached_controller_memory; struct ISCI_MEMORY *cached_controller_memory = &controller->cached_controller_memory; struct ISCI_MEMORY *request_memory = &controller->request_memory; POINTER_UINT virtual_address; bus_addr_t physical_address; controller->mdl = sci_controller_get_memory_descriptor_list_handle( controller->scif_controller_handle); uncached_controller_memory->size = sci_mdl_decorator_get_memory_size( controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS); error = isci_allocate_dma_buffer(device, uncached_controller_memory); if (error != 0) return (error); sci_mdl_decorator_assign_memory( controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS, uncached_controller_memory->virtual_address, uncached_controller_memory->physical_address); cached_controller_memory->size = sci_mdl_decorator_get_memory_size( controller->mdl, SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS ); error = isci_allocate_dma_buffer(device, cached_controller_memory); if (error != 0) return (error); sci_mdl_decorator_assign_memory(controller->mdl, SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS, cached_controller_memory->virtual_address, cached_controller_memory->physical_address); request_memory->size = controller->queue_depth * isci_io_request_get_object_size(); error = isci_allocate_dma_buffer(device, request_memory); if (error != 0) return (error); /* For STP PIO testing, we want to ensure we can force multiple SGLs * since this has been a problem area in SCIL. This tunable parameter * will allow us to force DMA segments to a smaller size, ensuring * that even if a physically contiguous buffer is attached to this * I/O, the DMA subsystem will pass us multiple segments in our DMA * load callback. */ TUNABLE_INT_FETCH("hw.isci.max_segment_size", &max_segment_size); /* Create DMA tag for our I/O requests. Then we can create DMA maps based off * of this tag and store them in each of our ISCI_IO_REQUEST objects. This * will enable better performance than creating the DMA maps every time we get * an I/O.
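The request_memory region sized above is carved, in the loop below, into queue_depth fixed-size objects whose virtual and bus addresses advance in lockstep before each object is pushed into a free pool. A compact userspace sketch of that carve-and-pool pattern, with made-up sizes and a pretend bus address:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define QUEUE_DEPTH	4
#define OBJ_SIZE	64	/* stand-in for isci_request_get_object_size() */

int
main(void)
{
	uint8_t *base = calloc(QUEUE_DEPTH, OBJ_SIZE);
	void *pool[QUEUE_DEPTH];
	uint64_t phys = 0x100000;	/* pretend bus address of 'base' */
	int i;

	if (base == NULL)
		return (1);
	for (i = 0; i < QUEUE_DEPTH; i++) {
		/* virtual and "physical" cursors advance together */
		pool[i] = base + (size_t)i * OBJ_SIZE;
		printf("req %d: virt %p phys 0x%llx\n", i, pool[i],
		    (unsigned long long)(phys + (uint64_t)i * OBJ_SIZE));
	}
	free(base);
	return (0);
}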
*/ status = bus_dma_tag_create(bus_get_dma_tag(device), 0x1, 0x0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, isci_io_request_get_max_io_size(), SCI_MAX_SCATTER_GATHER_ELEMENTS, max_segment_size, 0, NULL, NULL, &controller->buffer_dma_tag); sci_pool_initialize(controller->request_pool); virtual_address = request_memory->virtual_address; physical_address = request_memory->physical_address; for (int i = 0; i < controller->queue_depth; i++) { struct ISCI_REQUEST *request = (struct ISCI_REQUEST *)virtual_address; isci_request_construct(request, controller->scif_controller_handle, controller->buffer_dma_tag, physical_address); sci_pool_put(controller->request_pool, request); virtual_address += isci_request_get_object_size(); physical_address += isci_request_get_object_size(); } uint32_t remote_device_size = sizeof(struct ISCI_REMOTE_DEVICE) + scif_remote_device_get_object_size(); controller->remote_device_memory = (uint8_t *) malloc( remote_device_size * SCI_MAX_REMOTE_DEVICES, M_ISCI, M_NOWAIT | M_ZERO); sci_pool_initialize(controller->remote_device_pool); uint8_t *remote_device_memory_ptr = controller->remote_device_memory; for (int i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { struct ISCI_REMOTE_DEVICE *remote_device = (struct ISCI_REMOTE_DEVICE *)remote_device_memory_ptr; controller->remote_device[i] = NULL; remote_device->index = i; remote_device->is_resetting = FALSE; remote_device->frozen_lun_mask = 0; sci_fast_list_element_init(remote_device, &remote_device->pending_device_reset_element); TAILQ_INIT(&remote_device->queued_ccbs); remote_device->release_queued_ccb = FALSE; remote_device->queued_ccb_in_progress = NULL; /* * For the first SCI_MAX_DOMAINS device objects, do not put * them in the pool, rather assign them to each domain. This * ensures that any device attached directly to port "i" will * always get CAM target id "i". */ if (i < SCI_MAX_DOMAINS) controller->domain[i].da_remote_device = remote_device; else sci_pool_put(controller->remote_device_pool, remote_device); remote_device_memory_ptr += remote_device_size; } return (0); } void isci_controller_start(void *controller_handle) { struct ISCI_CONTROLLER *controller = (struct ISCI_CONTROLLER *)controller_handle; SCI_CONTROLLER_HANDLE_T scif_controller_handle = controller->scif_controller_handle; scif_controller_start(scif_controller_handle, scif_controller_get_suggested_start_timeout(scif_controller_handle)); scic_controller_enable_interrupts( scif_controller_get_scic_handle(controller->scif_controller_handle)); } void isci_controller_domain_discovery_complete( struct ISCI_CONTROLLER *isci_controller, struct ISCI_DOMAIN *isci_domain) { if (!isci_controller->has_been_scanned) { /* Controller has not been scanned yet. We'll clear * the discovery bit for this domain, then check if all bits * are now clear. That would indicate that all domains are * done with discovery and we can then proceed with initial * scan. */ isci_controller->initial_discovery_mask &= ~(1 << isci_domain->index); if (isci_controller->initial_discovery_mask == 0) { struct isci_softc *driver = isci_controller->isci; uint8_t next_index = isci_controller->index + 1; isci_controller->has_been_scanned = TRUE; /* Unfreeze simq to allow initial scan to proceed. */ xpt_release_simq(isci_controller->sim, TRUE); #if __FreeBSD_version < 800000 /* When driver is loaded after boot, we need to * explicitly rescan here for versions <8.0, because * CAM only automatically scans new buses at boot * time. 
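The remote-device loop above pins the first SCI_MAX_DOMAINS objects to their domains, so a device attached directly to port i always surfaces as CAM target i, while the remaining objects form a shared pool for expander-attached devices. A tiny sketch of that split, with hypothetical counts:

#include <stdio.h>

#define MAX_DOMAINS	4	/* direct-attach ports */
#define MAX_DEVICES	8

int
main(void)
{
	int pool[MAX_DEVICES], pool_cnt = 0;
	int domain_dev[MAX_DOMAINS];
	int i;

	for (i = 0; i < MAX_DEVICES; i++) {
		if (i < MAX_DOMAINS)
			domain_dev[i] = i;	/* port i <-> target i */
		else
			pool[pool_cnt++] = i;	/* shared free pool */
	}
	printf("port 0 maps to target %d, %d devices pooled\n",
	    domain_dev[0], pool_cnt);
	return (0);
}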
*/ union ccb *ccb = xpt_alloc_ccb_nowait(); xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(isci_controller->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); xpt_rescan(ccb); #endif if (next_index < driver->controller_count) { /* There are more controllers that need to * start. So start the next one. */ isci_controller_start( &driver->controllers[next_index]); } else { /* All controllers have been started and completed discovery. * Disestablish the config hook, which will signal to the * kernel during boot that it is safe to try to find and * mount the root partition. */ config_intrhook_disestablish( &driver->config_hook); } } } int isci_controller_attach_to_cam(struct ISCI_CONTROLLER *controller) { struct isci_softc *isci = controller->isci; device_t parent = device_get_parent(isci->device); int unit = device_get_unit(isci->device); struct cam_devq *isci_devq = cam_simq_alloc(controller->sim_queue_depth); if(isci_devq == NULL) { isci_log_message(0, "ISCI", "isci_devq is NULL \n"); return (-1); } controller->sim = cam_sim_alloc(isci_action, isci_poll, "isci", controller, unit, &controller->lock, controller->sim_queue_depth, controller->sim_queue_depth, isci_devq); if(controller->sim == NULL) { isci_log_message(0, "ISCI", "cam_sim_alloc... fails\n"); cam_simq_free(isci_devq); return (-1); } if(xpt_bus_register(controller->sim, parent, controller->index) != CAM_SUCCESS) { isci_log_message(0, "ISCI", "xpt_bus_register...fails \n"); cam_sim_free(controller->sim, TRUE); mtx_unlock(&controller->lock); return (-1); } if(xpt_create_path(&controller->path, NULL, cam_sim_path(controller->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { isci_log_message(0, "ISCI", "xpt_create_path....fails\n"); xpt_bus_deregister(cam_sim_path(controller->sim)); cam_sim_free(controller->sim, TRUE); mtx_unlock(&controller->lock); return (-1); } return (0); } void isci_poll(struct cam_sim *sim) { struct ISCI_CONTROLLER *controller = (struct ISCI_CONTROLLER *)cam_sim_softc(sim); isci_interrupt_poll_handler(controller); } void isci_action(struct cam_sim *sim, union ccb *ccb) { struct ISCI_CONTROLLER *controller = (struct ISCI_CONTROLLER *)cam_sim_softc(sim); switch ( ccb->ccb_h.func_code ) { case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; int bus = cam_sim_bus(sim); ccb->ccb_h.ccb_sim_ptr = sim; cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN | PIM_UNMAPPED; cpi->hba_eng_cnt = 0; cpi->max_target = SCI_MAX_REMOTE_DEVICES - 1; cpi->max_lun = ISCI_MAX_LUN; #if __FreeBSD_version >= 800102 cpi->maxio = isci_io_request_get_max_io_size(); #endif cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = bus; cpi->initiator_id = SCI_MAX_REMOTE_DEVICES; cpi->base_transfer_speed = 300000; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); } break; case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *general_settings = &ccb->cts; struct ccb_trans_settings_sas *sas_settings = &general_settings->xport_specific.sas; struct ccb_trans_settings_scsi *scsi_settings = &general_settings->proto_specific.scsi; struct ISCI_REMOTE_DEVICE *remote_device; remote_device = controller->remote_device[ccb->ccb_h.target_id]; if (remote_device == NULL) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
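Status updates here and throughout isci_action() follow the cam_status discipline visible in the surrounding statements: the low completion-code bits and the high flag bits share one word, so the code bits are masked out before a new code is ORed in. An illustrative sketch; the numeric values are stand-ins, not quotes from cam.h:

#include <stdio.h>
#include <stdint.h>

#define CAM_STATUS_MASK		0x3f	/* low bits: completion code */
#define CAM_REQ_CMP		0x01	/* stand-in values */
#define CAM_DEV_NOT_THERE	0x08
#define CAM_SIM_QUEUED		0x40	/* high bits: flags */

int
main(void)
{
	uint32_t status = CAM_SIM_QUEUED | CAM_REQ_CMP;

	status &= ~CAM_SIM_QUEUED;	/* request leaves the SIM queue */
	status &= ~CAM_STATUS_MASK;	/* drop the old completion code */
	status |= CAM_DEV_NOT_THERE;	/* install the new one */
	printf("status 0x%02x\n", status);
	return (0);
}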
ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_DEV_NOT_THERE; xpt_done(ccb); break; } general_settings->protocol = PROTO_SCSI; general_settings->transport = XPORT_SAS; general_settings->protocol_version = SCSI_REV_SPC2; general_settings->transport_version = 0; scsi_settings->valid = CTS_SCSI_VALID_TQ; scsi_settings->flags = CTS_SCSI_FLAGS_TAG_ENB; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQ_CMP; sas_settings->bitrate = isci_remote_device_get_bitrate(remote_device); if (sas_settings->bitrate != 0) sas_settings->valid = CTS_SAS_VALID_SPEED; xpt_done(ccb); } break; case XPT_SCSI_IO: + if (ccb->ccb_h.flags & CAM_CDB_PHYS) { + ccb->ccb_h.status = CAM_REQ_INVALID; + xpt_done(ccb); + break; + } isci_io_request_execute_scsi_io(ccb, controller); break; #if __FreeBSD_version >= 900026 case XPT_SMP_IO: isci_io_request_execute_smp_io(ccb, controller); break; #endif case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(ccb); break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, /*extended*/1); xpt_done(ccb); break; case XPT_RESET_DEV: { struct ISCI_REMOTE_DEVICE *remote_device = controller->remote_device[ccb->ccb_h.target_id]; if (remote_device != NULL) isci_remote_device_reset(remote_device, ccb); else { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_DEV_NOT_THERE; xpt_done(ccb); } } break; case XPT_RESET_BUS: ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; default: isci_log_message(0, "ISCI", "Unhandled func_code 0x%x\n", ccb->ccb_h.func_code); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(ccb); break; } } /* * Unfortunately, SCIL doesn't cleanly handle retry conditions. * CAM_REQUEUE_REQ works only when no one is using the pass(4) interface. So * when SCIL denotes an I/O needs to be retried (typically because of mixing * tagged/non-tagged ATA commands, or running out of NCQ slots), we queue * these I/O internally. Once SCIL completes an I/O to this device, or we get * a ready notification, we will retry the first I/O on the queue. * Unfortunately, SCIL also doesn't cleanly handle starting the new I/O within * the context of the completion handler, so we need to retry these I/O after * the completion handler is done executing. 
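A sketch of the single-in-flight retry gate this comment describes, with queued_ccb_in_progress modeled as a plain pointer; all names and the fixed-size queue are hypothetical:

#include <stdio.h>
#include <stddef.h>

struct dev {
	int queue[8];
	int head, tail;
	int *in_progress;	/* gate: at most one retry in flight */
};

static void
try_retry(struct dev *d)
{
	if (d->in_progress != NULL || d->head == d->tail)
		return;		/* retry in flight, or nothing queued */
	d->in_progress = &d->queue[d->head++];
	printf("retrying command %d\n", *d->in_progress);
}

static void
complete_retry(struct dev *d)
{
	d->in_progress = NULL;	/* gate reopens... */
	try_retry(d);		/* ...so the next queued command starts */
}

int
main(void)
{
	struct dev d = { .queue = { 10, 11, 12 }, .head = 0, .tail = 3,
	    .in_progress = NULL };

	try_retry(&d);		/* starts 10 */
	complete_retry(&d);	/* finishes 10, starts 11 */
	complete_retry(&d);	/* finishes 11, starts 12 */
	return (0);
}

Keeping exactly one queued command in progress preserves the submission order that the retry queue exists to protect.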
*/ void isci_controller_release_queued_ccbs(struct ISCI_CONTROLLER *controller) { struct ISCI_REMOTE_DEVICE *dev; struct ccb_hdr *ccb_h; + uint8_t *ptr; int dev_idx; KASSERT(mtx_owned(&controller->lock), ("controller lock not owned")); controller->release_queued_ccbs = FALSE; for (dev_idx = 0; dev_idx < SCI_MAX_REMOTE_DEVICES; dev_idx++) { dev = controller->remote_device[dev_idx]; if (dev != NULL && dev->release_queued_ccb == TRUE && dev->queued_ccb_in_progress == NULL) { dev->release_queued_ccb = FALSE; ccb_h = TAILQ_FIRST(&dev->queued_ccbs); if (ccb_h == NULL) continue; - isci_log_message(1, "ISCI", "release %p %x\n", ccb_h, - ((union ccb *)ccb_h)->csio.cdb_io.cdb_bytes[0]); + ptr = scsiio_cdb_ptr(&((union ccb *)ccb_h)->csio); + isci_log_message(1, "ISCI", "release %p %x\n", ccb_h, *ptr); dev->queued_ccb_in_progress = (union ccb *)ccb_h; isci_io_request_execute_scsi_io( (union ccb *)ccb_h, controller); } } } Index: head/sys/dev/isci/isci_io_request.c =================================================================== --- head/sys/dev/isci/isci_io_request.c (revision 296890) +++ head/sys/dev/isci/isci_io_request.c (revision 296891) @@ -1,991 +1,991 @@ /*- * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include /** * @brief This user callback will inform the user that an IO request has * completed. * * @param[in] controller This parameter specifies the controller on * which the IO request is completing. * @param[in] remote_device This parameter specifies the remote device on * which this request is completing. * @param[in] io_request This parameter specifies the IO request that has * completed. * @param[in] completion_status This parameter specifies the results of * the IO request operation. SCI_IO_SUCCESS indicates * successful completion. 
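In the SCI_IO_FAILURE_RESPONSE_VALID path of the completion handler below, the device-reported sense length is clamped to the CCB's sense buffer and the shortfall is returned as sense_resid. A self-contained sketch of that clamp, with fabricated sense bytes:

#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int
main(void)
{
	/* Device reported more sense data than the buffer holds; clamp
	 * the copy and report the unused remainder as sense_resid. */
	unsigned char sense_buf[18];
	unsigned char response[32] = { 0x70, 0x00, 0x02 };	/* fabricated */
	unsigned reported = sizeof(response);
	unsigned sense_len = sizeof(sense_buf);
	unsigned n = MIN(sense_len, reported);

	memcpy(sense_buf, response, n);
	printf("copied %u bytes, sense_resid %u\n", n, sense_len - n);
	return (0);
}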
* * @return none */ void scif_cb_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller, SCI_REMOTE_DEVICE_HANDLE_T remote_device, SCI_IO_REQUEST_HANDLE_T io_request, SCI_IO_STATUS completion_status) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)sci_object_get_association(io_request); scif_controller_complete_io(scif_controller, remote_device, io_request); isci_io_request_complete(scif_controller, remote_device, isci_request, completion_status); } void isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller, SCI_REMOTE_DEVICE_HANDLE_T remote_device, struct ISCI_IO_REQUEST *isci_request, SCI_IO_STATUS completion_status) { struct ISCI_CONTROLLER *isci_controller; struct ISCI_REMOTE_DEVICE *isci_remote_device; union ccb *ccb; BOOL complete_ccb; + struct ccb_scsiio *csio; complete_ccb = TRUE; isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(scif_controller); isci_remote_device = (struct ISCI_REMOTE_DEVICE *) sci_object_get_association(remote_device); ccb = isci_request->ccb; - + csio = &ccb->csio; ccb->ccb_h.status &= ~CAM_STATUS_MASK; switch (completion_status) { case SCI_IO_SUCCESS: case SCI_IO_SUCCESS_COMPLETE_BEFORE_START: #if __FreeBSD_version >= 900026 if (ccb->ccb_h.func_code == XPT_SMP_IO) { void *smp_response = scif_io_request_get_response_iu_address( isci_request->sci_object); memcpy(ccb->smpio.smp_response, smp_response, ccb->smpio.smp_response_len); } #endif ccb->ccb_h.status |= CAM_REQ_CMP; break; case SCI_IO_SUCCESS_IO_DONE_EARLY: ccb->ccb_h.status |= CAM_REQ_CMP; ccb->csio.resid = ccb->csio.dxfer_len - scif_io_request_get_number_of_bytes_transferred( isci_request->sci_object); break; case SCI_IO_FAILURE_RESPONSE_VALID: { SCI_SSP_RESPONSE_IU_T * response_buffer; uint32_t sense_length; int error_code, sense_key, asc, ascq; - struct ccb_scsiio *csio = &ccb->csio; response_buffer = (SCI_SSP_RESPONSE_IU_T *) scif_io_request_get_response_iu_address( isci_request->sci_object); sense_length = sci_ssp_get_sense_data_length( response_buffer->sense_data_length); sense_length = MIN(csio->sense_len, sense_length); memcpy(&csio->sense_data, response_buffer->data, sense_length); csio->sense_resid = csio->sense_len - sense_length; csio->scsi_status = response_buffer->status; ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; scsi_extract_sense( &csio->sense_data, &error_code, &sense_key, &asc, &ascq ); isci_log_message(1, "ISCI", "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n", ccb->ccb_h.path_id, ccb->ccb_h.target_id, - ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0], + ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio), csio->scsi_status, sense_key, asc, ascq); break; } case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: isci_remote_device_reset(isci_remote_device, NULL); ccb->ccb_h.status |= CAM_REQ_TERMIO; isci_log_message(0, "ISCI", "isci: bus=%x target=%x lun=%x cdb[0]=%x remote device reset required\n", ccb->ccb_h.path_id, ccb->ccb_h.target_id, - ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]); + ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio)); break; case SCI_IO_FAILURE_TERMINATED: ccb->ccb_h.status |= CAM_REQ_TERMIO; isci_log_message(0, "ISCI", "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n", ccb->ccb_h.path_id, ccb->ccb_h.target_id, - ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]); + ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio)); break; case SCI_IO_FAILURE_INVALID_STATE: case SCI_IO_FAILURE_INSUFFICIENT_RESOURCES: complete_ccb = FALSE; break; case 
SCI_IO_FAILURE_INVALID_REMOTE_DEVICE: ccb->ccb_h.status |= CAM_DEV_NOT_THERE; break; case SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE: { struct ccb_relsim ccb_relsim; struct cam_path *path; xpt_create_path(&path, NULL, cam_sim_path(isci_controller->sim), isci_remote_device->index, 0); xpt_setup_ccb(&ccb_relsim.ccb_h, path, 5); ccb_relsim.ccb_h.func_code = XPT_REL_SIMQ; ccb_relsim.ccb_h.flags = CAM_DEV_QFREEZE; ccb_relsim.release_flags = RELSIM_ADJUST_OPENINGS; ccb_relsim.openings = scif_remote_device_get_max_queue_depth(remote_device); xpt_action((union ccb *)&ccb_relsim); xpt_free_path(path); complete_ccb = FALSE; } break; case SCI_IO_FAILURE: case SCI_IO_FAILURE_REQUIRES_SCSI_ABORT: case SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL: case SCI_IO_FAILURE_PROTOCOL_VIOLATION: case SCI_IO_FAILURE_INVALID_PARAMETER_VALUE: case SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR: default: isci_log_message(1, "ISCI", "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n", ccb->ccb_h.path_id, ccb->ccb_h.target_id, - ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0], + ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio), completion_status); ccb->ccb_h.status |= CAM_REQ_CMP_ERR; break; } callout_stop(&isci_request->parent.timer); bus_dmamap_sync(isci_request->parent.dma_tag, isci_request->parent.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(isci_request->parent.dma_tag, isci_request->parent.dma_map); isci_request->ccb = NULL; sci_pool_put(isci_controller->request_pool, (struct ISCI_REQUEST *)isci_request); if (complete_ccb) { if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { /* ccb will be completed with some type of non-success * status. So temporarily freeze the queue until the * upper layers can act on the status. The * CAM_DEV_QFRZN flag will then release the queue * after the status is acted upon. */ ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); } if (ccb->ccb_h.status & CAM_SIM_QUEUED) { KASSERT(ccb == isci_remote_device->queued_ccb_in_progress, ("multiple internally queued ccbs in flight")); TAILQ_REMOVE(&isci_remote_device->queued_ccbs, &ccb->ccb_h, sim_links.tqe); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; /* * This CCB that was in the queue was completed, so * set the in_progress pointer to NULL denoting that * we can retry another CCB from the queue. We only * allow one CCB at a time from the queue to be * in progress so that we can effectively maintain * ordering. */ isci_remote_device->queued_ccb_in_progress = NULL; } if (isci_remote_device->frozen_lun_mask != 0) { isci_remote_device_release_device_queue(isci_remote_device); } xpt_done(ccb); if (isci_controller->is_frozen == TRUE) { isci_controller->is_frozen = FALSE; xpt_release_simq(isci_controller->sim, TRUE); } } else { isci_remote_device_freeze_lun_queue(isci_remote_device, ccb->ccb_h.target_lun); if (ccb->ccb_h.status & CAM_SIM_QUEUED) { KASSERT(ccb == isci_remote_device->queued_ccb_in_progress, ("multiple internally queued ccbs in flight")); /* * Do nothing, CCB is already on the device's queue. * We leave it on the queue, to be retried again * next time a CCB on this device completes, or we * get a ready notification for this device. 
*/ isci_log_message(1, "ISCI", "already queued %p %x\n", - ccb, ccb->csio.cdb_io.cdb_bytes[0]); + ccb, scsiio_cdb_ptr(csio)); isci_remote_device->queued_ccb_in_progress = NULL; } else { isci_log_message(1, "ISCI", "queue %p %x\n", ccb, - ccb->csio.cdb_io.cdb_bytes[0]); + scsiio_cdb_ptr(csio)); ccb->ccb_h.status |= CAM_SIM_QUEUED; TAILQ_INSERT_TAIL(&isci_remote_device->queued_ccbs, &ccb->ccb_h, sim_links.tqe); } } } /** * @brief This callback method asks the user to provide the physical * address for the supplied virtual address when building an * io request object. * * @param[in] controller This parameter is the core controller object * handle. * @param[in] io_request This parameter is the io request object handle * for which the physical address is being requested. * @param[in] virtual_address This paramter is the virtual address which * is to be returned as a physical address. * @param[out] physical_address The physical address for the supplied virtual * address. * * @return None. */ void scic_cb_io_request_get_physical_address(SCI_CONTROLLER_HANDLE_T controller, SCI_IO_REQUEST_HANDLE_T io_request, void *virtual_address, SCI_PHYSICAL_ADDRESS *physical_address) { SCI_IO_REQUEST_HANDLE_T scif_request = sci_object_get_association(io_request); struct ISCI_REQUEST *isci_request = sci_object_get_association(scif_request); if(isci_request != NULL) { /* isci_request is not NULL, meaning this is a request initiated * by CAM or the isci layer (i.e. device reset for I/O * timeout). Therefore we can calculate the physical address * based on the address we stored in the struct ISCI_REQUEST * object. */ *physical_address = isci_request->physical_address + (uintptr_t)virtual_address - (uintptr_t)isci_request; } else { /* isci_request is NULL, meaning this is a request generated * internally by SCIL (i.e. for SMP requests or NCQ error * recovery). Therefore we calculate the physical address * based on the controller's uncached controller memory buffer, * since we know that this is what SCIL uses for internal * framework requests. */ SCI_CONTROLLER_HANDLE_T scif_controller = (SCI_CONTROLLER_HANDLE_T) sci_object_get_association(controller); struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller); U64 virt_addr_offset = (uintptr_t)virtual_address - (U64)isci_controller->uncached_controller_memory.virtual_address; *physical_address = isci_controller->uncached_controller_memory.physical_address + virt_addr_offset; } } /** * @brief This callback method asks the user to provide the address for * the command descriptor block (CDB) associated with this IO request. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * * @return This method returns the virtual address of the CDB. */ void * scif_cb_io_request_get_cdb_address(void * scif_user_io_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)scif_user_io_request; - return (isci_request->ccb->csio.cdb_io.cdb_bytes); + return (scsiio_cdb_ptr(&isci_request->ccb->csio)); } /** * @brief This callback method asks the user to provide the length of * the command descriptor block (CDB) associated with this IO request. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * * @return This method returns the length of the CDB. 
*/ uint32_t scif_cb_io_request_get_cdb_length(void * scif_user_io_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)scif_user_io_request; return (isci_request->ccb->csio.cdb_len); } /** * @brief This callback method asks the user to provide the Logical Unit (LUN) * associated with this IO request. * * @note The contents of the value returned from this callback are defined * by the protocol standard (e.g. T10 SAS specification). Please * refer to the transport command information unit description * in the associated standard. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * * @return This method returns the LUN associated with this request. */ uint32_t scif_cb_io_request_get_lun(void * scif_user_io_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)scif_user_io_request; return (isci_request->ccb->ccb_h.target_lun); } /** * @brief This callback method asks the user to provide the task attribute * associated with this IO request. * * @note The contents of the value returned from this callback are defined * by the protocol standard (e.g. T10 SAS specification). Please * refer to the transport command information unit description * in the associated standard. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * * @return This method returns the task attribute associated with this * IO request. */ uint32_t scif_cb_io_request_get_task_attribute(void * scif_user_io_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)scif_user_io_request; uint32_t task_attribute; if((isci_request->ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) switch(isci_request->ccb->csio.tag_action) { case MSG_HEAD_OF_Q_TAG: task_attribute = SCI_SAS_HEAD_OF_QUEUE_ATTRIBUTE; break; case MSG_ORDERED_Q_TAG: task_attribute = SCI_SAS_ORDERED_ATTRIBUTE; break; case MSG_ACA_TASK: task_attribute = SCI_SAS_ACA_ATTRIBUTE; break; default: task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE; break; } else task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE; return (task_attribute); } /** * @brief This callback method asks the user to provide the command priority * associated with this IO request. * * @note The contents of the value returned from this callback are defined * by the protocol standard (e.g. T10 SAS specification). Please * refer to the transport command information unit description * in the associated standard. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * * @return This method returns the command priority associated with this * IO request. */ uint32_t scif_cb_io_request_get_command_priority(void * scif_user_io_request) { return (0); } /** * @brief This method simply returns the virtual address associated * with the scsi_io and byte_offset supplied parameters. * * @note This callback is not utilized in the fast path. The expectation * is that this method is utilized for items such as SCSI to ATA * translation for commands like INQUIRY, READ CAPACITY, etc. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. 
* @param[in] byte_offset This parameter specifies the offset into the data * buffers pointed to by the SGL. The byte offset starts at 0 * and continues until the last byte pointed to be the last SGL * element. * * @return A virtual address pointer to the location specified by the * parameters. */ uint8_t * scif_cb_io_request_get_virtual_address_from_sgl(void * scif_user_io_request, uint32_t byte_offset) { struct ISCI_IO_REQUEST *isci_request; union ccb *ccb; isci_request = scif_user_io_request; ccb = isci_request->ccb; /* * This callback is only invoked for SCSI/ATA translation of * PIO commands such as INQUIRY and READ_CAPACITY, to allow * the driver to write the translated data directly into the * data buffer. It is never invoked for READ/WRITE commands. * The driver currently assumes only READ/WRITE commands will * be unmapped. * * As a safeguard against future changes to unmapped commands, * add an explicit panic here should the DATA_MASK != VADDR. * Otherwise, we would return some garbage pointer back to the * caller which would result in a panic or more subtle data * corruption later on. */ if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) panic("%s: requesting pointer into unmapped ccb", __func__); return (ccb->csio.data_ptr + byte_offset); } /** * @brief This callback method asks the user to provide the number of * bytes to be transfered as part of this request. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * * @return This method returns the number of payload data bytes to be * transfered for this IO request. */ uint32_t scif_cb_io_request_get_transfer_length(void * scif_user_io_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)scif_user_io_request; return (isci_request->ccb->csio.dxfer_len); } /** * @brief This callback method asks the user to provide the data direction * for this request. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * * @return This method returns the value of SCI_IO_REQUEST_DATA_OUT, * SCI_IO_REQUEST_DATA_IN, or SCI_IO_REQUEST_NO_DATA. */ SCI_IO_REQUEST_DATA_DIRECTION scif_cb_io_request_get_data_direction(void * scif_user_io_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)scif_user_io_request; switch (isci_request->ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: return (SCI_IO_REQUEST_DATA_IN); case CAM_DIR_OUT: return (SCI_IO_REQUEST_DATA_OUT); default: return (SCI_IO_REQUEST_NO_DATA); } } /** * @brief This callback method asks the user to provide the address * to where the next Scatter-Gather Element is located. * * Details regarding usage: * - Regarding the first SGE: the user should initialize an index, * or a pointer, prior to construction of the request that will * reference the very first scatter-gather element. This is * important since this method is called for every scatter-gather * element, including the first element. * - Regarding the last SGE: the user should return NULL from this * method when this method is called and the SGL has exhausted * all elements. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. 
* @param[in] current_sge_address This parameter specifies the address for * the current SGE (i.e. the one that has just processed). * @param[out] next_sge An address specifying the location for the next scatter * gather element to be processed. * * @return None. */ void scif_cb_io_request_get_next_sge(void * scif_user_io_request, void * current_sge_address, void ** next_sge) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)scif_user_io_request; if (isci_request->current_sge_index == isci_request->num_segments) *next_sge = NULL; else { bus_dma_segment_t *sge = &isci_request->sge[isci_request->current_sge_index]; isci_request->current_sge_index++; *next_sge = sge; } } /** * @brief This callback method asks the user to provide the contents of the * "address" field in the Scatter-Gather Element. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * @param[in] sge_address This parameter specifies the address for the * SGE from which to retrieve the address field. * * @return A physical address specifying the contents of the SGE's address * field. */ SCI_PHYSICAL_ADDRESS scif_cb_sge_get_address_field(void *scif_user_io_request, void *sge_address) { bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address; return ((SCI_PHYSICAL_ADDRESS)sge->ds_addr); } /** * @brief This callback method asks the user to provide the contents of the * "length" field in the Scatter-Gather Element. * * @param[in] scif_user_io_request This parameter points to the user's * IO request object. It is a cookie that allows the user to * provide the necessary information for this callback. * @param[in] sge_address This parameter specifies the address for the * SGE from which to retrieve the address field. * * @return This method returns the length field specified inside the SGE * referenced by the sge_address parameter. 
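scif_cb_io_request_get_next_sge() above is a cursor-style walk: one scatter-gather element per call, NULL once the list is exhausted. A userspace sketch of the same contract, with struct sge standing in for bus_dma_segment_t:

#include <stdio.h>
#include <stddef.h>

struct sge { unsigned long addr; unsigned len; };

/* Hand back one element per call and NULL at the end, as the
 * callback contract described above requires. */
static struct sge *
next_sge(struct sge *sgl, int nseg, int *cursor)
{
	if (*cursor == nseg)
		return (NULL);
	return (&sgl[(*cursor)++]);
}

int
main(void)
{
	struct sge sgl[] = { { 0x1000, 512 }, { 0x2000, 512 }, { 0x3000, 1024 } };
	int cursor = 0;
	struct sge *s;

	while ((s = next_sge(sgl, 3, &cursor)) != NULL)
		printf("sge addr 0x%lx len %u\n", s->addr, s->len);
	return (0);
}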
*/ uint32_t scif_cb_sge_get_length_field(void *scif_user_io_request, void *sge_address) { bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address; return ((uint32_t)sge->ds_len); } void isci_request_construct(struct ISCI_REQUEST *request, SCI_CONTROLLER_HANDLE_T scif_controller_handle, bus_dma_tag_t io_buffer_dma_tag, bus_addr_t physical_address) { request->controller_handle = scif_controller_handle; request->dma_tag = io_buffer_dma_tag; request->physical_address = physical_address; bus_dmamap_create(request->dma_tag, 0, &request->dma_map); callout_init(&request->timer, 1); } static void isci_io_request_construct(void *arg, bus_dma_segment_t *seg, int nseg, int error) { union ccb *ccb; struct ISCI_IO_REQUEST *io_request = (struct ISCI_IO_REQUEST *)arg; SCI_REMOTE_DEVICE_HANDLE_T *device = io_request->parent.remote_device_handle; SCI_STATUS status; io_request->num_segments = nseg; io_request->sge = seg; ccb = io_request->ccb; if (error != 0) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } status = scif_io_request_construct( io_request->parent.controller_handle, io_request->parent.remote_device_handle, SCI_CONTROLLER_INVALID_IO_TAG, (void *)io_request, (void *)((char*)io_request + sizeof(struct ISCI_IO_REQUEST)), &io_request->sci_object); if (status != SCI_SUCCESS) { isci_io_request_complete(io_request->parent.controller_handle, device, io_request, (SCI_IO_STATUS)status); return; } sci_object_set_association(io_request->sci_object, io_request); bus_dmamap_sync(io_request->parent.dma_tag, io_request->parent.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); status = (SCI_STATUS)scif_controller_start_io( io_request->parent.controller_handle, device, io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG); if (status != SCI_SUCCESS) { isci_io_request_complete(io_request->parent.controller_handle, device, io_request, (SCI_IO_STATUS)status); return; } if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) callout_reset_sbt(&io_request->parent.timer, SBT_1MS * ccb->ccb_h.timeout, 0, isci_io_request_timeout, io_request, 0); } void isci_io_request_execute_scsi_io(union ccb *ccb, struct ISCI_CONTROLLER *controller) { target_id_t target_id = ccb->ccb_h.target_id; struct ISCI_REQUEST *request; struct ISCI_IO_REQUEST *io_request; struct ISCI_REMOTE_DEVICE *device = controller->remote_device[target_id]; int error; if (device == NULL) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_DEV_NOT_THERE; xpt_done(ccb); return; } if (sci_pool_empty(controller->request_pool)) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQUEUE_REQ; xpt_freeze_simq(controller->sim, 1); controller->is_frozen = TRUE; xpt_done(ccb); return; } ASSERT(device->is_resetting == FALSE); sci_pool_get(controller->request_pool, request); io_request = (struct ISCI_IO_REQUEST *)request; io_request->ccb = ccb; io_request->current_sge_index = 0; io_request->parent.remote_device_handle = device->sci_object; error = bus_dmamap_load_ccb(io_request->parent.dma_tag, io_request->parent.dma_map, ccb, isci_io_request_construct, io_request, 0x0); /* A resource shortage from BUSDMA will be automatically * continued at a later point, pushing the CCB processing * forward, which will in turn unfreeze the simq. 
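/*
 * Illustrative sketch, not part of the original driver: one way a
 * consumer could drive the three SGE callbacks above to walk a request's
 * scatter list.  Only the callback names and their semantics come from
 * this file; the helper itself is hypothetical and assumes the driver's
 * declarations are in scope.
 */
static uint64_t
isci_example_sum_sgl(void *scif_user_io_request)
{
	void *sge = NULL;
	uint64_t total = 0;

	/*
	 * The driver tracks its position in current_sge_index, so the
	 * current-SGE argument is informational; the first call (after
	 * the index was reset to 0) yields the first element, and NULL
	 * signals exhaustion.
	 */
	scif_cb_io_request_get_next_sge(scif_user_io_request, NULL, &sge);
	while (sge != NULL) {
		total += scif_cb_sge_get_length_field(scif_user_io_request,
		    sge);
		scif_cb_io_request_get_next_sge(scif_user_io_request, sge,
		    &sge);
	}
	/* For a well-formed request this equals csio.dxfer_len. */
	return (total);
}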
*/ if (error == EINPROGRESS) { xpt_freeze_simq(controller->sim, 1); ccb->ccb_h.flags |= CAM_RELEASE_SIMQ; } } void isci_io_request_timeout(void *arg) { struct ISCI_IO_REQUEST *request = (struct ISCI_IO_REQUEST *)arg; struct ISCI_REMOTE_DEVICE *remote_device = (struct ISCI_REMOTE_DEVICE *) sci_object_get_association(request->parent.remote_device_handle); struct ISCI_CONTROLLER *controller = remote_device->domain->controller; mtx_lock(&controller->lock); isci_remote_device_reset(remote_device, NULL); mtx_unlock(&controller->lock); } #if __FreeBSD_version >= 900026 /** * @brief This callback method gets the size of and pointer to the buffer * (if any) containing the request buffer for an SMP request. * * @param[in] core_request This parameter specifies the SCI core's request * object associated with the SMP request. * @param[out] smp_request_buffer This parameter returns a pointer to the * payload portion of the SMP request - i.e. everything after * the SMP request header. * * @return Size of the request buffer in bytes. This does *not* include * the size of the SMP request header. */ static uint32_t smp_io_request_cb_get_request_buffer(SCI_IO_REQUEST_HANDLE_T core_request, uint8_t ** smp_request_buffer) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *) sci_object_get_association(sci_object_get_association(core_request)); *smp_request_buffer = isci_request->ccb->smpio.smp_request + sizeof(SMP_REQUEST_HEADER_T); return (isci_request->ccb->smpio.smp_request_len - sizeof(SMP_REQUEST_HEADER_T)); } /** * @brief This callback method gets the SMP function for an SMP request. * * @param[in] core_request This parameter specifies the SCI core's request * object associated with the SMP request. * * @return SMP function for the SMP request. */ static uint8_t smp_io_request_cb_get_function(SCI_IO_REQUEST_HANDLE_T core_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *) sci_object_get_association(sci_object_get_association(core_request)); SMP_REQUEST_HEADER_T *header = (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request; return (header->function); } /** * @brief This callback method gets the SMP frame type for an SMP request. * * @param[in] core_request This parameter specifies the SCI core's request * object associated with the SMP request. * * @return SMP frame type for the SMP request. */ static uint8_t smp_io_request_cb_get_frame_type(SCI_IO_REQUEST_HANDLE_T core_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *) sci_object_get_association(sci_object_get_association(core_request)); SMP_REQUEST_HEADER_T *header = (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request; return (header->smp_frame_type); } /** * @brief This callback method gets the allocated response length for an SMP request. * * @param[in] core_request This parameter specifies the SCI core's request * object associated with the SMP request. * * @return Allocated response length for the SMP request. 
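/*
 * Note (sketch; an assumption about code outside this excerpt): a timer
 * armed above with callout_reset_sbt() must be disarmed once the request
 * completes, otherwise isci_io_request_timeout() could fire on a finished
 * request.  A completion path would do roughly:
 */
static void
isci_example_disarm_timeout(struct ISCI_IO_REQUEST *io_request)
{
	/* Safe even if the callout already fired or was never armed. */
	callout_stop(&io_request->parent.timer);
}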
*/ static uint8_t smp_io_request_cb_get_allocated_response_length( SCI_IO_REQUEST_HANDLE_T core_request) { struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *) sci_object_get_association(sci_object_get_association(core_request)); SMP_REQUEST_HEADER_T *header = (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request; return (header->allocated_response_length); } static SCI_STATUS isci_smp_request_construct(struct ISCI_IO_REQUEST *request) { SCI_STATUS status; SCIC_SMP_PASSTHRU_REQUEST_CALLBACKS_T callbacks; status = scif_request_construct(request->parent.controller_handle, request->parent.remote_device_handle, SCI_CONTROLLER_INVALID_IO_TAG, (void *)request, (void *)((char*)request + sizeof(struct ISCI_IO_REQUEST)), &request->sci_object); if (status == SCI_SUCCESS) { callbacks.scic_cb_smp_passthru_get_request = &smp_io_request_cb_get_request_buffer; callbacks.scic_cb_smp_passthru_get_function = &smp_io_request_cb_get_function; callbacks.scic_cb_smp_passthru_get_frame_type = &smp_io_request_cb_get_frame_type; callbacks.scic_cb_smp_passthru_get_allocated_response_length = &smp_io_request_cb_get_allocated_response_length; /* create the smp passthrough part of the io request */ status = scic_io_request_construct_smp_pass_through( scif_io_request_get_scic_handle(request->sci_object), &callbacks); } return (status); } void isci_io_request_execute_smp_io(union ccb *ccb, struct ISCI_CONTROLLER *controller) { SCI_STATUS status; target_id_t target_id = ccb->ccb_h.target_id; struct ISCI_REQUEST *request; struct ISCI_IO_REQUEST *io_request; SCI_REMOTE_DEVICE_HANDLE_T smp_device_handle; struct ISCI_REMOTE_DEVICE *end_device = controller->remote_device[target_id]; /* SMP commands are sent to an end device, because SMP devices are not * exposed to the kernel. It is our responsibility to use this method * to get the SMP device that contains the specified end device. If * the device is direct-attached, the handle will come back NULL, and * we'll just fail the SMP_IO with DEV_NOT_THERE. 
*/ scif_remote_device_get_containing_device(end_device->sci_object, &smp_device_handle); if (smp_device_handle == NULL) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_DEV_NOT_THERE; xpt_done(ccb); return; } if (sci_pool_empty(controller->request_pool)) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQUEUE_REQ; xpt_freeze_simq(controller->sim, 1); controller->is_frozen = TRUE; xpt_done(ccb); return; } ASSERT(end_device->is_resetting == FALSE); sci_pool_get(controller->request_pool, request); io_request = (struct ISCI_IO_REQUEST *)request; io_request->ccb = ccb; io_request->parent.remote_device_handle = smp_device_handle; status = isci_smp_request_construct(io_request); if (status != SCI_SUCCESS) { isci_io_request_complete(controller->scif_controller_handle, smp_device_handle, io_request, (SCI_IO_STATUS)status); return; } sci_object_set_association(io_request->sci_object, io_request); status = (SCI_STATUS) scif_controller_start_io( controller->scif_controller_handle, smp_device_handle, io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG); if (status != SCI_SUCCESS) { isci_io_request_complete(controller->scif_controller_handle, smp_device_handle, io_request, (SCI_IO_STATUS)status); return; } if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) callout_reset_sbt(&io_request->parent.timer, SBT_1MS * ccb->ccb_h.timeout, 0, isci_io_request_timeout, request, 0); } #endif Index: head/sys/dev/ncr/ncr.c =================================================================== --- head/sys/dev/ncr/ncr.c (revision 296890) +++ head/sys/dev/ncr/ncr.c (revision 296891) @@ -1,7111 +1,7119 @@ /************************************************************************** ** ** ** Device driver for the NCR 53C8XX PCI-SCSI-Controller Family. ** **------------------------------------------------------------------------- ** ** Written for 386bsd and FreeBSD by ** Wolfgang Stanglmeier ** Stefan Esser ** **------------------------------------------------------------------------- */ /*- ** Copyright (c) 1994 Wolfgang Stanglmeier. All rights reserved. ** ** Redistribution and use in source and binary forms, with or without ** modification, are permitted provided that the following conditions ** are met: ** 1. Redistributions of source code must retain the above copyright ** notice, this list of conditions and the following disclaimer. ** 2. Redistributions in binary form must reproduce the above copyright ** notice, this list of conditions and the following disclaimer in the ** documentation and/or other materials provided with the distribution. ** 3. The name of the author may not be used to endorse or promote products ** derived from this software without specific prior written permission. ** ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
** *************************************************************************** */ #include __FBSDID("$FreeBSD$"); #define NCR_GETCC_WITHMSG #if defined (__FreeBSD__) && defined(_KERNEL) #include "opt_ncr.h" #endif /*========================================================== ** ** Configuration and Debugging ** ** May be overwritten in ** **========================================================== */ /* ** SCSI address of this device. ** The boot routines should have set it. ** If not, use this. */ #ifndef SCSI_NCR_MYADDR #define SCSI_NCR_MYADDR (7) #endif /* SCSI_NCR_MYADDR */ /* ** The default synchronous period factor ** (0=asynchronous) ** If maximum synchronous frequency is defined, use it instead. */ #ifndef SCSI_NCR_MAX_SYNC #ifndef SCSI_NCR_DFLT_SYNC #define SCSI_NCR_DFLT_SYNC (12) #endif /* SCSI_NCR_DFLT_SYNC */ #else #if SCSI_NCR_MAX_SYNC == 0 #define SCSI_NCR_DFLT_SYNC 0 #else #define SCSI_NCR_DFLT_SYNC (250000 / SCSI_NCR_MAX_SYNC) #endif #endif /* ** The minimal asynchronous pre-scaler period (ns) ** Shall be 40. */ #ifndef SCSI_NCR_MIN_ASYNC #define SCSI_NCR_MIN_ASYNC (40) #endif /* SCSI_NCR_MIN_ASYNC */ /* ** The maximal bus width (in log2 bytes) ** (0=8 bit, 1=16 bit) */ #ifndef SCSI_NCR_MAX_WIDE #define SCSI_NCR_MAX_WIDE (1) #endif /* SCSI_NCR_MAX_WIDE */ /*========================================================== ** ** Configuration and Debugging ** **========================================================== */ /* ** Number of targets supported by the driver. ** n permits target numbers 0..n-1. ** The default of 16 permits targets #0..#15, ** one of which is the host adapter itself. */ #define MAX_TARGET (16) /* ** Number of logical units supported by the driver. ** n enables logical unit numbers 0..n-1. ** Most SCSI devices require only one lun, ** so the default of 8 is more than enough. */ #ifndef MAX_LUN #define MAX_LUN (8) #endif /* MAX_LUN */ /* ** The maximum number of jobs scheduled for starting. ** There should be one slot per target, and one slot ** for each tag of each target in use. */ #define MAX_START (256) /* ** The maximum number of segments a transfer is split into. */ #define MAX_SCATTER (33) /* ** The maximum transfer length (should be >= 64k). ** MUST NOT be greater than (MAX_SCATTER-1) * PAGE_SIZE. */ #define MAX_SIZE ((MAX_SCATTER-1) * (long) PAGE_SIZE) /* ** other */ #define NCR_SNOOP_TIMEOUT (1000000) /*========================================================== ** ** Include files ** **========================================================== */ #include #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif #include #include #include #include #include #include #include #include #include #include /*========================================================== ** ** Debugging tags ** **========================================================== */ #define DEBUG_ALLOC (0x0001) #define DEBUG_PHASE (0x0002) #define DEBUG_POLL (0x0004) #define DEBUG_QUEUE (0x0008) #define DEBUG_RESULT (0x0010) #define DEBUG_SCATTER (0x0020) #define DEBUG_SCRIPT (0x0040) #define DEBUG_TINY (0x0080) #define DEBUG_TIMING (0x0100) #define DEBUG_NEGO (0x0200) #define DEBUG_TAGS (0x0400) #define DEBUG_FREEZE (0x0800) #define DEBUG_RESTART (0x1000) /* ** Enable/Disable debug messages. ** Can be changed at runtime too.
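/*
** Worked example of the SCSI_NCR_MAX_SYNC conversion above (illustrative
** only, not in the original source): the sync period factor is the
** transfer period in units of 4 ns, i.e. 250000 / freq[kHz].
*/
#if 0	/* e.g. a 20 MHz (Fast-20) ceiling: */
#define SCSI_NCR_MAX_SYNC 20000
/* => SCSI_NCR_DFLT_SYNC = 250000 / 20000 = 12, the built-in default */
#endif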
*/ #ifdef SCSI_NCR_DEBUG #define DEBUG_FLAGS ncr_debug #else /* SCSI_NCR_DEBUG */ #define SCSI_NCR_DEBUG 0 #define DEBUG_FLAGS 0 #endif /* SCSI_NCR_DEBUG */ /*========================================================== ** ** assert () ** **========================================================== ** ** modified copy from 386bsd:/usr/include/sys/assert.h ** **---------------------------------------------------------- */ #ifdef DIAGNOSTIC #define assert(expression) { \ KASSERT(expression, ("%s", #expression)); \ } #else #define assert(expression) { \ if (!(expression)) { \ (void)printf("assertion \"%s\" failed: " \ "file \"%s\", line %d\n", \ #expression, __FILE__, __LINE__); \ } \ } #endif /*========================================================== ** ** Access to the controller chip. ** **========================================================== */ #define INB(r) bus_read_1(np->reg_res, offsetof(struct ncr_reg, r)) #define INW(r) bus_read_2(np->reg_res, offsetof(struct ncr_reg, r)) #define INL(r) bus_read_4(np->reg_res, offsetof(struct ncr_reg, r)) #define OUTB(r, val) bus_write_1(np->reg_res, offsetof(struct ncr_reg, r), val) #define OUTW(r, val) bus_write_2(np->reg_res, offsetof(struct ncr_reg, r), val) #define OUTL(r, val) bus_write_4(np->reg_res, offsetof(struct ncr_reg, r), val) #define OUTL_OFF(o, val) bus_write_4(np->reg_res, o, val) #define INB_OFF(o) bus_read_1(np->reg_res, o) #define INW_OFF(o) bus_read_2(np->reg_res, o) #define INL_OFF(o) bus_read_4(np->reg_res, o) #define READSCRIPT_OFF(base, off) \ (base ? *((volatile u_int32_t *)((volatile char *)base + (off))) : \ bus_read_4(np->sram_res, off)) #define WRITESCRIPT_OFF(base, off, val) \ do { \ if (base) \ *((volatile u_int32_t *) \ ((volatile char *)base + (off))) = (val); \ else \ bus_write_4(np->sram_res, off, val); \ } while (0) #define READSCRIPT(r) \ READSCRIPT_OFF(np->script, offsetof(struct script, r)) #define WRITESCRIPT(r, val) \ WRITESCRIPT_OFF(np->script, offsetof(struct script, r), val) /* ** Set bit field ON, OFF */ #define OUTONB(r, m) OUTB(r, INB(r) | (m)) #define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m)) #define OUTONW(r, m) OUTW(r, INW(r) | (m)) #define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m)) #define OUTONL(r, m) OUTL(r, INL(r) | (m)) #define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m)) /*========================================================== ** ** Command control block states. 
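/*
** Illustrative expansion (a sketch, not in the original source) of the
** read-modify-write helpers above.  The register field and flag names
** below are examples from this driver; OUTONB(nc_scntl1, CRST) would be
** equivalent to:
*/
#if 0	/* assumes np and the register/flag definitions are in scope */
	{
		u_char v;

		v = bus_read_1(np->reg_res,
		    offsetof(struct ncr_reg, nc_scntl1));
		bus_write_1(np->reg_res,
		    offsetof(struct ncr_reg, nc_scntl1), v | CRST);
	}
#endif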
** **========================================================== */ #define HS_IDLE (0) #define HS_BUSY (1) #define HS_NEGOTIATE (2) /* sync/wide data transfer*/ #define HS_DISCONNECT (3) /* Disconnected by target */ #define HS_COMPLETE (4) #define HS_SEL_TIMEOUT (5) /* Selection timeout */ #define HS_RESET (6) /* SCSI reset */ #define HS_ABORTED (7) /* Transfer aborted */ #define HS_TIMEOUT (8) /* Software timeout */ #define HS_FAIL (9) /* SCSI or PCI bus errors */ #define HS_UNEXPECTED (10) /* Unexpected disconnect */ #define HS_STALL (11) /* QUEUE FULL or BUSY */ #define HS_DONEMASK (0xfc) /*========================================================== ** ** Software Interrupt Codes ** **========================================================== */ #define SIR_SENSE_RESTART (1) #define SIR_SENSE_FAILED (2) #define SIR_STALL_RESTART (3) #define SIR_STALL_QUEUE (4) #define SIR_NEGO_SYNC (5) #define SIR_NEGO_WIDE (6) #define SIR_NEGO_FAILED (7) #define SIR_NEGO_PROTO (8) #define SIR_REJECT_RECEIVED (9) #define SIR_REJECT_SENT (10) #define SIR_IGN_RESIDUE (11) #define SIR_MISSING_SAVE (12) #define SIR_MAX (12) /*========================================================== ** ** Extended error codes. ** xerr_status field of struct nccb. ** **========================================================== */ #define XE_OK (0) #define XE_EXTRA_DATA (1) /* unexpected data phase */ #define XE_BAD_PHASE (2) /* illegal phase (4/5) */ /*========================================================== ** ** Negotiation status. ** nego_status field of struct nccb. ** **========================================================== */ #define NS_SYNC (1) #define NS_WIDE (2) /*========================================================== ** ** XXX These are no longer used. Remove once the ** script is updated. ** "Special features" of targets. ** quirks field of struct tcb. ** actualquirks field of struct nccb. ** **========================================================== */ #define QUIRK_AUTOSAVE (0x01) #define QUIRK_NOMSG (0x02) #define QUIRK_NOSYNC (0x10) #define QUIRK_NOWIDE16 (0x20) #define QUIRK_NOTAGS (0x40) #define QUIRK_UPDATE (0x80) /*========================================================== ** ** Misc. ** **========================================================== */ #define CCB_MAGIC (0xf2691ad2) #define MAX_TAGS (32) /* hard limit */ /*========================================================== ** ** OS dependencies. ** **========================================================== */ #define PRINT_ADDR(ccb) xpt_print_path((ccb)->ccb_h.path) /*========================================================== ** ** Declaration of structs. ** **========================================================== */ struct tcb; struct lcb; struct nccb; struct ncb; struct script; typedef struct ncb * ncb_p; typedef struct tcb * tcb_p; typedef struct lcb * lcb_p; typedef struct nccb * nccb_p; struct link { ncrcmd l_cmd; ncrcmd l_paddr; }; struct usrcmd { u_long target; u_long lun; u_long data; u_long cmd; }; #define UC_SETSYNC 10 #define UC_SETTAGS 11 #define UC_SETDEBUG 12 #define UC_SETORDER 13 #define UC_SETWIDE 14 #define UC_SETFLAG 15 #define UF_TRACE (0x01) /*--------------------------------------- ** ** Timestamps for profiling ** **--------------------------------------- */ /* Type of the kernel variable `ticks'. XXX should be declared with the var. 
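/*
** Sketch (not in the original source): why HS_DONEMASK works.  Every
** "done" host status is >= HS_COMPLETE (4) and therefore intersects the
** mask 0xfc, while HS_IDLE..HS_DISCONNECT (0..3) do not:
*/
static int
hs_done(u_char host_status)
{
	return ((host_status & HS_DONEMASK) != 0);
}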
*/ typedef int ticks_t; struct tstamp { ticks_t start; ticks_t end; ticks_t select; ticks_t command; ticks_t data; ticks_t status; ticks_t disconnect; }; /* ** profiling data (per device) */ struct profile { u_long num_trans; u_long num_bytes; u_long num_disc; u_long num_break; u_long num_int; u_long num_fly; u_long ms_setup; u_long ms_data; u_long ms_disc; u_long ms_post; }; /*========================================================== ** ** Declaration of structs: target control block ** **========================================================== */ #define NCR_TRANS_CUR 0x01 /* Modify current negotiation status */ #define NCR_TRANS_ACTIVE 0x03 /* Assume this is the active target */ #define NCR_TRANS_GOAL 0x04 /* Modify negotiation goal */ #define NCR_TRANS_USER 0x08 /* Modify user negotiation settings */ struct ncr_transinfo { u_int8_t width; u_int8_t period; u_int8_t offset; }; struct ncr_target_tinfo { /* Hardware version of our sync settings */ u_int8_t disc_tag; #define NCR_CUR_DISCENB 0x01 #define NCR_CUR_TAGENB 0x02 #define NCR_USR_DISCENB 0x04 #define NCR_USR_TAGENB 0x08 u_int8_t sval; struct ncr_transinfo current; struct ncr_transinfo goal; struct ncr_transinfo user; /* Hardware version of our wide settings */ u_int8_t wval; }; struct tcb { /* ** during reselection the ncr jumps to this point ** with SFBR set to the encoded target number ** with bit 7 set. ** if it's not this target, jump to the next. ** ** JUMP IF (SFBR != #target#) ** @(next tcb) */ struct link jump_tcb; /* ** load the actual values for the sxfer and the scntl3 ** register (sync/wide mode). ** ** SCR_COPY (1); ** @(sval field of this tcb) ** @(sxfer register) ** SCR_COPY (1); ** @(wval field of this tcb) ** @(scntl3 register) */ ncrcmd getscr[6]; /* ** if next message is "identify" ** then load the message to SFBR, ** else load 0 to SFBR. ** ** CALL ** */ struct link call_lun; /* ** now look for the right lun. ** ** JUMP ** @(first nccb of this lun) */ struct link jump_lcb; /* ** pointer to interrupted getcc nccb */ nccb_p hold_cp; /* ** pointer to nccb used for negotiating. ** Avoid starting a nego for all queued commands ** when tagged command queuing is enabled. */ nccb_p nego_cp; /* ** statistical data */ u_long transfers; u_long bytes; /* ** user settable limits for sync transfer ** and tagged commands. */ struct ncr_target_tinfo tinfo; /* ** the lcb's of this tcb */ lcb_p lp[MAX_LUN]; }; /*========================================================== ** ** Declaration of structs: lun control block ** **========================================================== */ struct lcb { /* ** during reselection the ncr jumps to this point ** with SFBR set to the "Identify" message. ** if it's not this lun, jump to the next. ** ** JUMP IF (SFBR != #lun#) ** @(next lcb of this target) */ struct link jump_lcb; /* ** if next message is "simple tag", ** then load the tag to SFBR, ** else load 0 to SFBR. ** ** CALL ** */ struct link call_tag; /* ** now look for the right nccb. ** ** JUMP ** @(first nccb of this lun) */ struct link jump_nccb; /* ** start of the nccb chain */ nccb_p next_nccb; /* ** Control of tagged queueing */ u_char reqnccbs; u_char reqlink; u_char actlink; u_char usetags; u_char lasttag; }; /*========================================================== ** ** Declaration of structs: COMMAND control block ** **========================================================== ** ** This substructure is copied from the nccb to a ** global address after selection (or reselection) ** and copied back before disconnect.
** ** These fields are accessible to the script processor. ** **---------------------------------------------------------- */ struct head { /* ** Execution of a nccb starts at this point. ** It's a jump to the "SELECT" label ** of the script. ** ** After successful selection the script ** processor overwrites it with a jump to ** the IDLE label of the script. */ struct link launch; /* ** Saved data pointer. ** Points to the position in the script ** responsible for the actual transfer ** of data. ** It's written after reception of a ** "SAVE_DATA_POINTER" message. ** The goalpointer points after ** the last transfer command. */ u_int32_t savep; u_int32_t lastp; u_int32_t goalp; /* ** The virtual address of the nccb ** containing this header. */ nccb_p cp; /* ** space for some timestamps to gather ** profiling data about devices and this driver. */ struct tstamp stamp; /* ** status fields. */ u_char status[8]; }; /* ** The status bytes are used by the host and the script processor. ** ** The first four bytes are copied to the scratchb register ** (declared as scr0..scr3 in ncr_reg.h) just after the select/reselect, ** and copied back just after disconnecting. ** Inside the script the XX_REG are used. ** ** The last four bytes are used inside the script by "COPY" commands. ** Because source and destination must have the same alignment ** in a longword, the fields HAVE to be at the chosen offsets. ** xerr_st (4) 0 (0x34) scratcha ** sync_st (5) 1 (0x05) sxfer ** wide_st (7) 3 (0x03) scntl3 */ /* ** First four bytes (script) */ #define QU_REG scr0 #define HS_REG scr1 #define HS_PRT nc_scr1 #define SS_REG scr2 #define PS_REG scr3 /* ** First four bytes (host) */ #define actualquirks phys.header.status[0] #define host_status phys.header.status[1] #define s_status phys.header.status[2] #define parity_status phys.header.status[3] /* ** Last four bytes (script) */ #define xerr_st header.status[4] /* MUST be ==0 mod 4 */ #define sync_st header.status[5] /* MUST be ==1 mod 4 */ #define nego_st header.status[6] #define wide_st header.status[7] /* MUST be ==3 mod 4 */ /* ** Last four bytes (host) */ #define xerr_status phys.xerr_st #define sync_status phys.sync_st #define nego_status phys.nego_st #define wide_status phys.wide_st /*========================================================== ** ** Declaration of structs: Data structure block ** **========================================================== ** ** During execution of a nccb by the script processor, ** the DSA (data structure address) register points ** to this substructure of the nccb. ** This substructure contains the header with ** the script-processor-changeable data and ** data blocks for the indirect move commands. ** **---------------------------------------------------------- */ struct dsb { /* ** Header. ** Has to be the first entry, ** because it's jumped to by the ** script processor */ struct head header; /* ** Table data for Script */ struct scr_tblsel select; struct scr_tblmove smsg ; struct scr_tblmove smsg2 ; struct scr_tblmove cmd ; struct scr_tblmove scmd ; struct scr_tblmove sense ; struct scr_tblmove data [MAX_SCATTER]; }; /*========================================================== ** ** Declaration of structs: Command control block. ** **========================================================== ** ** During execution of a nccb by the script processor, ** the DSA (data structure address) register points ** to this substructure of the nccb.
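/*
** The "mod 4" offset rules stated above can be enforced at compile time.
** A sketch (not in the original driver) using the standard FreeBSD
** CTASSERT facility from <sys/systm.h>:
*/
CTASSERT(offsetof(struct head, status[4]) % 4 == 0);	/* xerr_st */
CTASSERT(offsetof(struct head, status[5]) % 4 == 1);	/* sync_st */
CTASSERT(offsetof(struct head, status[7]) % 4 == 3);	/* wide_st */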
** This substructure contains the header with ** the script-processor-changeable data and then ** data blocks for the indirect move commands. ** **---------------------------------------------------------- */ struct nccb { /* ** This filler ensures that the global header is ** cache line size aligned. */ ncrcmd filler[4]; /* ** during reselection the ncr jumps to this point. ** If a "SIMPLE_TAG" message was received, ** then SFBR is set to the tag. ** else SFBR is set to 0 ** If looking for another tag, jump to the next nccb. ** ** JUMP IF (SFBR != #TAG#) ** @(next nccb of this lun) */ struct link jump_nccb; /* ** After execution of this call, the return address ** (in the TEMP register) points to the following ** data structure block. ** So copy it to the DSA register, and start ** processing of this data structure. ** ** CALL ** */ struct link call_tmp; /* ** This is the data structure which is ** to be executed by the script processor. */ struct dsb phys; /* ** If a data transfer phase is terminated too early ** (after reception of a message (i.e. DISCONNECT)), ** we have to prepare a mini script to transfer ** the rest of the data. */ ncrcmd patch[8]; /* ** The general SCSI driver provides a ** pointer to a control block. */ union ccb *ccb; /* ** We prepare a message to be sent after selection, ** and a second one to be sent after getcc selection. ** Contents are IDENTIFY and SIMPLE_TAG. ** While negotiating sync or wide transfer, ** an SDTR or WDTR message is appended. */ u_char scsi_smsg [8]; u_char scsi_smsg2[8]; /* ** Lock this nccb. ** Flag is used while looking for a free nccb. */ u_long magic; /* ** Physical address of this instance of nccb */ u_long p_nccb; /* ** Completion time out for this job. ** It's set to time of start + allowed number of seconds. */ time_t tlimit; /* ** All nccbs of one hostadapter are chained. */ nccb_p link_nccb; /* ** All nccbs of one target/lun are chained. */ nccb_p next_nccb; /* ** Sense command */ u_char sensecmd[6]; /* ** Tag for this transfer. ** It's patched into jump_nccb. ** If it's not zero, a SIMPLE_TAG ** message is included in smsg. */ u_char tag; }; #define CCB_PHYS(cp,lbl) (cp->p_nccb + offsetof(struct nccb, lbl)) /*========================================================== ** ** Declaration of structs: NCR device descriptor ** **========================================================== */ struct ncb { /* ** The global header. ** Accessible to both the host and the ** script-processor. ** We assume it is cache line size aligned. */ struct head header; device_t dev; /*----------------------------------------------- ** Scripts .. **----------------------------------------------- ** ** During reselection the ncr jumps to this point. ** The SFBR register is loaded with the encoded target id. ** ** Jump to the first target. ** ** JUMP ** @(next tcb) */ struct link jump_tcb; /*----------------------------------------------- ** Configuration .. **----------------------------------------------- ** ** virtual and physical addresses ** of the 53c810 chip. */ int reg_rid; struct resource *reg_res; int sram_rid; struct resource *sram_res; struct resource *irq_res; void *irq_handle; /* ** Scripts instance virtual address. */ struct script *script; struct scripth *scripth; /* ** Scripts instance physical address. */ u_long p_script; u_long p_scripth; /* ** The SCSI address of the host adapter.
*/ u_char myaddr; /* ** timing parameters */ u_char minsync; /* Minimum sync period factor */ u_char maxsync; /* Maximum sync period factor */ u_char maxoffs; /* Max scsi offset */ u_char clock_divn; /* Number of clock divisors */ u_long clock_khz; /* SCSI clock frequency in KHz */ u_long features; /* Chip features map */ u_char multiplier; /* Clock multiplier (1,2,4) */ u_char maxburst; /* log base 2 of dwords burst */ /* ** BIOS supplied PCI bus options */ u_char rv_scntl3; u_char rv_dcntl; u_char rv_dmode; u_char rv_ctest3; u_char rv_ctest4; u_char rv_ctest5; u_char rv_gpcntl; u_char rv_stest2; /*----------------------------------------------- ** CAM SIM information for this instance **----------------------------------------------- */ struct cam_sim *sim; struct cam_path *path; /*----------------------------------------------- ** Job control **----------------------------------------------- ** ** Commands from user */ struct usrcmd user; /* ** Target data */ struct tcb target[MAX_TARGET]; /* ** Start queue. */ u_int32_t squeue [MAX_START]; u_short squeueput; /* ** Timeout handler */ time_t heartbeat; u_short ticks; u_short latetime; time_t lasttime; struct callout timer; /*----------------------------------------------- ** Debug and profiling **----------------------------------------------- ** ** register dump */ struct ncr_reg regdump; time_t regtime; /* ** Profiling data */ struct profile profile; u_long disc_phys; u_long disc_ref; /* ** Head of list of all nccbs for this controller. */ nccb_p link_nccb; /* ** message buffers. ** Should be longword aligned, ** because they're written with a ** COPY script command. */ u_char msgout[8]; u_char msgin [8]; u_int32_t lastmsg; /* ** Buffer for STATUS_IN phase. */ u_char scratch; /* ** controller chip dependent maximal transfer width. */ u_char maxwide; struct mtx lock; #ifdef NCR_IOMAPPED /* ** address of the ncr control registers in io space */ pci_port_t port; #endif }; #define NCB_SCRIPT_PHYS(np,lbl) (np->p_script + offsetof (struct script, lbl)) #define NCB_SCRIPTH_PHYS(np,lbl) (np->p_scripth + offsetof (struct scripth,lbl)) /*========================================================== ** ** ** Script for NCR-Processor. ** ** Use ncr_script_fill() to create the variable parts. ** Use ncr_script_copy_and_bind() to make a copy and ** bind to physical addresses. ** ** **========================================================== ** ** We have to know the offsets of all labels before ** we reach them (for forward jumps). ** Therefore we declare a struct here. ** If you make changes inside the script, ** DONT FORGET TO CHANGE THE LENGTHS HERE! ** **---------------------------------------------------------- */ /* ** Script fragments which are loaded into the on-board RAM ** of 825A, 875 and 895 chips. 
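/*
** Example use of the macros above (a sketch mirroring how the driver
** starts the script processor; the exact call site varies):
*/
#if 0	/* assumes np and the OUTL() accessor are in scope */
	/* Point the chip's DSP register at the START label. */
	OUTL(nc_dsp, NCB_SCRIPT_PHYS(np, start));
#endif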
*/ struct script { ncrcmd start [ 7]; ncrcmd start0 [ 2]; ncrcmd start1 [ 3]; ncrcmd startpos [ 1]; ncrcmd trysel [ 8]; ncrcmd skip [ 8]; ncrcmd skip2 [ 3]; ncrcmd idle [ 2]; ncrcmd select [ 18]; ncrcmd prepare [ 4]; ncrcmd loadpos [ 14]; ncrcmd prepare2 [ 24]; ncrcmd setmsg [ 5]; ncrcmd clrack [ 2]; ncrcmd dispatch [ 33]; ncrcmd no_data [ 17]; ncrcmd checkatn [ 10]; ncrcmd command [ 15]; ncrcmd status [ 27]; ncrcmd msg_in [ 26]; ncrcmd msg_bad [ 6]; ncrcmd complete [ 13]; ncrcmd cleanup [ 12]; ncrcmd cleanup0 [ 9]; ncrcmd signal [ 12]; ncrcmd save_dp [ 5]; ncrcmd restore_dp [ 5]; ncrcmd disconnect [ 12]; ncrcmd disconnect0 [ 5]; ncrcmd disconnect1 [ 23]; ncrcmd msg_out [ 9]; ncrcmd msg_out_done [ 7]; ncrcmd badgetcc [ 6]; ncrcmd reselect [ 8]; ncrcmd reselect1 [ 8]; ncrcmd reselect2 [ 8]; ncrcmd resel_tmp [ 5]; ncrcmd resel_lun [ 18]; ncrcmd resel_tag [ 24]; ncrcmd data_in [MAX_SCATTER * 4 + 7]; ncrcmd data_out [MAX_SCATTER * 4 + 7]; }; /* ** Script fragments which stay in main memory for all chips. */ struct scripth { ncrcmd tryloop [MAX_START*5+2]; ncrcmd msg_parity [ 6]; ncrcmd msg_reject [ 8]; ncrcmd msg_ign_residue [ 32]; ncrcmd msg_extended [ 18]; ncrcmd msg_ext_2 [ 18]; ncrcmd msg_wdtr [ 27]; ncrcmd msg_ext_3 [ 18]; ncrcmd msg_sdtr [ 27]; ncrcmd msg_out_abort [ 10]; ncrcmd getcc [ 4]; ncrcmd getcc1 [ 5]; #ifdef NCR_GETCC_WITHMSG ncrcmd getcc2 [ 29]; #else ncrcmd getcc2 [ 14]; #endif ncrcmd getcc3 [ 6]; ncrcmd aborttag [ 4]; ncrcmd abort [ 22]; ncrcmd snooptest [ 9]; ncrcmd snoopend [ 2]; }; /*========================================================== ** ** ** Function headers. ** ** **========================================================== */ #ifdef _KERNEL static nccb_p ncr_alloc_nccb(ncb_p np, u_long target, u_long lun); static void ncr_complete(ncb_p np, nccb_p cp); static int ncr_delta(int * from, int * to); static void ncr_exception(ncb_p np); static void ncr_free_nccb(ncb_p np, nccb_p cp); static void ncr_freeze_devq(ncb_p np, struct cam_path *path); static void ncr_selectclock(ncb_p np, u_char scntl3); static void ncr_getclock(ncb_p np, u_char multiplier); static nccb_p ncr_get_nccb(ncb_p np, u_long t,u_long l); #if 0 static u_int32_t ncr_info(int unit); #endif static void ncr_init(ncb_p np, char * msg, u_long code); static void ncr_intr(void *vnp); static void ncr_intr_locked(ncb_p np); static void ncr_int_ma(ncb_p np, u_char dstat); static void ncr_int_sir(ncb_p np); static void ncr_int_sto(ncb_p np); #if 0 static void ncr_min_phys(struct buf *bp); #endif static void ncr_poll(struct cam_sim *sim); static void ncb_profile(ncb_p np, nccb_p cp); static void ncr_script_copy_and_bind(ncb_p np, ncrcmd *src, ncrcmd *dst, int len); static void ncr_script_fill(struct script * scr, struct scripth *scrh); static int ncr_scatter(struct dsb* phys, vm_offset_t vaddr, vm_size_t datalen); static void ncr_getsync(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p); static void ncr_setsync(ncb_p np, nccb_p cp,u_char scntl3,u_char sxfer, u_char period); static void ncr_setwide(ncb_p np, nccb_p cp, u_char wide, u_char ack); static int ncr_show_msg(u_char * msg); static int ncr_snooptest(ncb_p np); static void ncr_action(struct cam_sim *sim, union ccb *ccb); static void ncr_timeout(void *arg); static void ncr_wakeup(ncb_p np, u_long code); static int ncr_probe(device_t dev); static int ncr_attach(device_t dev); #endif /* _KERNEL */ /*========================================================== ** ** ** Global static data. 
** ** **========================================================== */ #ifdef _KERNEL static int ncr_debug = SCSI_NCR_DEBUG; SYSCTL_INT(_debug, OID_AUTO, ncr_debug, CTLFLAG_RW, &ncr_debug, 0, ""); static int ncr_cache; /* to be aligned _NOT_ static */ /*========================================================== ** ** ** Global static data: auto configure ** ** **========================================================== */ #define NCR_810_ID (0x00011000ul) #define NCR_815_ID (0x00041000ul) #define NCR_820_ID (0x00021000ul) #define NCR_825_ID (0x00031000ul) #define NCR_860_ID (0x00061000ul) #define NCR_875_ID (0x000f1000ul) #define NCR_875_ID2 (0x008f1000ul) #define NCR_885_ID (0x000d1000ul) #define NCR_895_ID (0x000c1000ul) #define NCR_896_ID (0x000b1000ul) #define NCR_895A_ID (0x00121000ul) #define NCR_1510D_ID (0x000a1000ul) /*========================================================== ** ** ** Scripts for NCR-Processor. ** ** Use ncr_script_bind for binding to physical addresses. ** ** **========================================================== ** ** NADDR generates a reference to a field of the controller data. ** PADDR generates a reference to another part of the script. ** RADDR generates a reference to a script processor register. ** FADDR generates a reference to a script processor register ** with offset. ** **---------------------------------------------------------- */ #define RELOC_SOFTC 0x40000000 #define RELOC_LABEL 0x50000000 #define RELOC_REGISTER 0x60000000 #define RELOC_KVAR 0x70000000 #define RELOC_LABELH 0x80000000 #define RELOC_MASK 0xf0000000 #define NADDR(label) (RELOC_SOFTC | offsetof(struct ncb, label)) #define PADDR(label) (RELOC_LABEL | offsetof(struct script, label)) #define PADDRH(label) (RELOC_LABELH | offsetof(struct scripth, label)) #define RADDR(label) (RELOC_REGISTER | REG(label)) #define FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs))) #define KVAR(which) (RELOC_KVAR | (which)) #define KVAR_SECOND (0) #define KVAR_TICKS (1) #define KVAR_NCR_CACHE (2) #define SCRIPT_KVAR_FIRST (0) #define SCRIPT_KVAR_LAST (3) /* * Kernel variables referenced in the scripts. * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY. */ static volatile void *script_kvars[] = { &time_second, &ticks, &ncr_cache }; static struct script script0 = { /*--------------------------< START >-----------------------*/ { /* ** Claim to be still alive ... */ SCR_COPY (sizeof (((struct ncb *)0)->heartbeat)), KVAR (KVAR_SECOND), NADDR (heartbeat), /* ** Make data structure address invalid. ** clear SIGP. */ SCR_LOAD_REG (dsa, 0xff), 0, SCR_FROM_REG (ctest2), 0, }/*-------------------------< START0 >----------------------*/,{ /* ** Hook for interrupted GetConditionCode. ** Will be patched to ... IFTRUE by ** the interrupt handler. */ SCR_INT ^ IFFALSE (0), SIR_SENSE_RESTART, }/*-------------------------< START1 >----------------------*/,{ /* ** Hook for stalled start queue. ** Will be patched to IFTRUE by the interrupt handler. */ SCR_INT ^ IFFALSE (0), SIR_STALL_RESTART, /* ** Then jump to a certain point in tryloop. ** Due to the lack of indirect addressing the code ** is self modifying here. */ SCR_JUMP, }/*-------------------------< STARTPOS >--------------------*/,{ PADDRH(tryloop), }/*-------------------------< TRYSEL >----------------------*/,{ /* ** Now: ** DSA: Address of a Data Structure ** or Address of the IDLE-Label. ** ** TEMP: Address of a script, which tries to ** start the NEXT entry. ** ** Save the TEMP register into the SCRATCHA register. 
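/*
** Sketch of the binding step the RELOC_* tags above imply (simplified;
** the real ncr_script_copy_and_bind() also handles RELOC_REGISTER and
** RELOC_KVAR, omitted here for brevity):
*/
static ncrcmd
ncr_example_bind_word(ncb_p np, ncrcmd word)
{
	switch (word & RELOC_MASK) {
	case RELOC_SOFTC:	/* NADDR: field inside struct ncb */
		return (vtophys(np) + (word & ~RELOC_MASK));
	case RELOC_LABEL:	/* PADDR: label inside struct script */
		return (np->p_script + (word & ~RELOC_MASK));
	case RELOC_LABELH:	/* PADDRH: label inside struct scripth */
		return (np->p_scripth + (word & ~RELOC_MASK));
	default:		/* plain data, used as-is */
		return (word);
	}
}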
** Then copy the DSA to TEMP and RETURN. ** This is kind of an indirect jump. ** (The script processor has NO stack, so the ** CALL is actually a jump and link, and the ** RETURN is an indirect jump.) ** ** If the slot was empty, DSA contains the address ** of the IDLE part of this script. The processor ** jumps to IDLE and waits for a reselect. ** It will wake up and try the same slot again ** after the SIGP bit becomes set by the host. ** ** If the slot was not empty, DSA contains ** the address of the phys-part of a nccb. ** The processor jumps to this address. ** phys starts with head, ** head starts with launch, ** so actually the processor jumps to ** the launch part. ** If the entry is scheduled for execution, ** then launch contains a jump to SELECT. ** If it's not scheduled, it contains a jump to IDLE. */ SCR_COPY (4), RADDR (temp), RADDR (scratcha), SCR_COPY (4), RADDR (dsa), RADDR (temp), SCR_RETURN, 0 }/*-------------------------< SKIP >------------------------*/,{ /* ** This entry has been canceled. ** Next time use the next slot. */ SCR_COPY (4), RADDR (scratcha), PADDR (startpos), /* ** patch the launch field. ** should look like an idle process. */ SCR_COPY_F (4), RADDR (dsa), PADDR (skip2), SCR_COPY (8), PADDR (idle), }/*-------------------------< SKIP2 >-----------------------*/,{ 0, SCR_JUMP, PADDR(start), }/*-------------------------< IDLE >------------------------*/,{ /* ** Nothing to do? ** Wait for reselect. */ SCR_JUMP, PADDR(reselect), }/*-------------------------< SELECT >----------------------*/,{ /* ** DSA contains the address of a scheduled ** data structure. ** ** SCRATCHA contains the address of the script, ** which starts the next entry. ** ** Set Initiator mode. ** ** (Target mode is left as an exercise for the reader) */ SCR_CLR (SCR_TRG), 0, SCR_LOAD_REG (HS_REG, 0xff), 0, /* ** And try to select this target. */ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select), PADDR (reselect), /* ** Now there are 4 possibilities: ** ** (1) The ncr loses arbitration. ** This is ok, because it will try again, ** when the bus becomes idle. ** (But beware of the timeout function!) ** ** (2) The ncr is reselected. ** Then the script processor takes the jump ** to the RESELECT label. ** ** (3) The ncr completes the selection. ** Then it will execute the next statement. ** ** (4) There is a selection timeout. ** Then the ncr should interrupt the host and stop. ** Unfortunately, it seems to continue execution ** of the script. But it will fail with an ** IID-interrupt on the next WHEN. */ SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_IN)), 0, /* ** Send the IDENTIFY and SIMPLE_TAG messages ** (and the MSG_EXT_SDTR message) */ SCR_MOVE_TBL ^ SCR_MSG_OUT, offsetof (struct dsb, smsg), #ifdef undef /* XXX better fail than try to deal with this ... */ SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_OUT)), -16, #endif SCR_CLR (SCR_ATN), 0, SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), /* ** Selection complete. ** Next time use the next slot. */ SCR_COPY (4), RADDR (scratcha), PADDR (startpos), }/*-------------------------< PREPARE >----------------------*/,{ /* ** The ncr doesn't have an indirect load ** or store command. So we have to ** copy part of the control block to a ** fixed place, where we can access it. ** ** We patch the address part of a ** COPY command with the DSA-register. */ SCR_COPY_F (4), RADDR (dsa), PADDR (loadpos), /* ** then we do the actual copy. */ SCR_COPY (sizeof (struct head)), /* ** continued after the next label ...
*/ }/*-------------------------< LOADPOS >---------------------*/,{ 0, NADDR (header), /* ** Mark this nccb as not scheduled. */ SCR_COPY (8), PADDR (idle), NADDR (header.launch), /* ** Set a time stamp for this selection */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.select), /* ** load the savep (saved pointer) into ** the TEMP register (actual pointer) */ SCR_COPY (4), NADDR (header.savep), RADDR (temp), /* ** Initialize the status registers */ SCR_COPY (4), NADDR (header.status), RADDR (scr0), }/*-------------------------< PREPARE2 >---------------------*/,{ /* ** Load the synchronous mode register */ SCR_COPY (1), NADDR (sync_st), RADDR (sxfer), /* ** Load the wide mode and timing register */ SCR_COPY (1), NADDR (wide_st), RADDR (scntl3), /* ** Initialize the msgout buffer with a NOOP message. */ SCR_LOAD_REG (scratcha, MSG_NOOP), 0, SCR_COPY (1), RADDR (scratcha), NADDR (msgout), SCR_COPY (1), RADDR (scratcha), NADDR (msgin), /* ** Message in phase ? */ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** Extended or reject message ? */ SCR_FROM_REG (sbdl), 0, SCR_JUMP ^ IFTRUE (DATA (MSG_EXTENDED)), PADDR (msg_in), SCR_JUMP ^ IFTRUE (DATA (MSG_MESSAGE_REJECT)), PADDRH (msg_reject), /* ** normal processing */ SCR_JUMP, PADDR (dispatch), }/*-------------------------< SETMSG >----------------------*/,{ SCR_COPY (1), RADDR (scratcha), NADDR (msgout), SCR_SET (SCR_ATN), 0, }/*-------------------------< CLRACK >----------------------*/,{ /* ** Terminate possible pending message phase. */ SCR_CLR (SCR_ACK), 0, }/*-----------------------< DISPATCH >----------------------*/,{ SCR_FROM_REG (HS_REG), 0, SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), SIR_NEGO_FAILED, /* ** remove bogus output signals */ SCR_REG_REG (socl, SCR_AND, CACK|CATN), 0, SCR_RETURN ^ IFTRUE (WHEN (SCR_DATA_OUT)), 0, SCR_RETURN ^ IFTRUE (IF (SCR_DATA_IN)), 0, SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)), PADDR (msg_out), SCR_JUMP ^ IFTRUE (IF (SCR_MSG_IN)), PADDR (msg_in), SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)), PADDR (command), SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)), PADDR (status), /* ** Discard one illegal phase byte, if required. */ SCR_LOAD_REG (scratcha, XE_BAD_PHASE), 0, SCR_COPY (1), RADDR (scratcha), NADDR (xerr_st), SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_OUT)), 8, SCR_MOVE_ABS (1) ^ SCR_ILG_OUT, NADDR (scratch), SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_IN)), 8, SCR_MOVE_ABS (1) ^ SCR_ILG_IN, NADDR (scratch), SCR_JUMP, PADDR (dispatch), }/*-------------------------< NO_DATA >--------------------*/,{ /* ** The target wants to transfer too much data ** or in the wrong direction. ** Remember that in extended error. */ SCR_LOAD_REG (scratcha, XE_EXTRA_DATA), 0, SCR_COPY (1), RADDR (scratcha), NADDR (xerr_st), /* ** Discard one data byte, if required. */ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)), 8, SCR_MOVE_ABS (1) ^ SCR_DATA_OUT, NADDR (scratch), SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)), 8, SCR_MOVE_ABS (1) ^ SCR_DATA_IN, NADDR (scratch), /* ** .. and repeat as required. */ SCR_CALL, PADDR (dispatch), SCR_JUMP, PADDR (no_data), }/*-------------------------< CHECKATN >--------------------*/,{ /* ** If AAP (bit 1 of scntl0 register) is set ** and a parity error is detected, ** the script processor asserts ATN. ** ** The target should switch to a MSG_OUT phase ** to get the message. */ SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFFALSE (MASK (CATN, CATN)), PADDR (dispatch), /* ** count it */ SCR_REG_REG (PS_REG, SCR_ADD, 1), 0, /* ** Prepare a MSG_INITIATOR_DET_ERR message ** (initiator detected error).
** The target should retry the transfer. */ SCR_LOAD_REG (scratcha, MSG_INITIATOR_DET_ERR), 0, SCR_JUMP, PADDR (setmsg), }/*-------------------------< COMMAND >--------------------*/,{ /* ** If this is not a GETCC transfer ... */ SCR_FROM_REG (SS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFTRUE (DATA (SCSI_STATUS_CHECK_COND)), 28, /* ** ... set a timestamp ... */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.command), /* ** ... and send the command */ SCR_MOVE_TBL ^ SCR_COMMAND, offsetof (struct dsb, cmd), SCR_JUMP, PADDR (dispatch), /* ** Send the GETCC command */ /*>>>*/ SCR_MOVE_TBL ^ SCR_COMMAND, offsetof (struct dsb, scmd), SCR_JUMP, PADDR (dispatch), }/*-------------------------< STATUS >--------------------*/,{ /* ** set the timestamp. */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.status), /* ** If this is a GETCC transfer, */ SCR_FROM_REG (SS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (SCSI_STATUS_CHECK_COND)), 40, /* ** get the status */ SCR_MOVE_ABS (1) ^ SCR_STATUS, NADDR (scratch), /* ** Save status to scsi_status. ** Mark as complete. ** And wait for disconnect. */ SCR_TO_REG (SS_REG), 0, SCR_REG_REG (SS_REG, SCR_OR, SCSI_STATUS_SENSE), 0, SCR_LOAD_REG (HS_REG, HS_COMPLETE), 0, SCR_JUMP, PADDR (checkatn), /* ** If it was no GETCC transfer, ** save the status to scsi_status. */ /*>>>*/ SCR_MOVE_ABS (1) ^ SCR_STATUS, NADDR (scratch), SCR_TO_REG (SS_REG), 0, /* ** if it was no check condition ... */ SCR_JUMP ^ IFTRUE (DATA (SCSI_STATUS_CHECK_COND)), PADDR (checkatn), /* ** ... mark as complete. */ SCR_LOAD_REG (HS_REG, HS_COMPLETE), 0, SCR_JUMP, PADDR (checkatn), }/*-------------------------< MSG_IN >--------------------*/,{ /* ** Get the first byte of the message ** and save it to SCRATCHA. ** ** The script processor doesn't negate the ** ACK signal after this transfer. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[0]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, /* ** Parity was ok, handle this message. */ SCR_JUMP ^ IFTRUE (DATA (MSG_CMDCOMPLETE)), PADDR (complete), SCR_JUMP ^ IFTRUE (DATA (MSG_SAVEDATAPOINTER)), PADDR (save_dp), SCR_JUMP ^ IFTRUE (DATA (MSG_RESTOREPOINTERS)), PADDR (restore_dp), SCR_JUMP ^ IFTRUE (DATA (MSG_DISCONNECT)), PADDR (disconnect), SCR_JUMP ^ IFTRUE (DATA (MSG_EXTENDED)), PADDRH (msg_extended), SCR_JUMP ^ IFTRUE (DATA (MSG_NOOP)), PADDR (clrack), SCR_JUMP ^ IFTRUE (DATA (MSG_MESSAGE_REJECT)), PADDRH (msg_reject), SCR_JUMP ^ IFTRUE (DATA (MSG_IGN_WIDE_RESIDUE)), PADDRH (msg_ign_residue), /* ** Rest of the messages left as ** an exercise ... ** ** Unimplemented messages: ** fall through to MSG_BAD. */ }/*-------------------------< MSG_BAD >------------------*/,{ /* ** unimplemented message - reject it. */ SCR_INT, SIR_REJECT_SENT, SCR_LOAD_REG (scratcha, MSG_MESSAGE_REJECT), 0, SCR_JUMP, PADDR (setmsg), }/*-------------------------< COMPLETE >-----------------*/,{ /* ** Complete message. ** ** If it's not the get condition code, ** copy TEMP register to LASTP in header. */ SCR_FROM_REG (SS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFTRUE (MASK (SCSI_STATUS_SENSE, SCSI_STATUS_SENSE)), 12, SCR_COPY (4), RADDR (temp), NADDR (header.lastp), /*>>>*/ /* ** When we terminate the cycle by clearing ACK, ** the target may disconnect immediately. ** ** We don't want to be told of an ** "unexpected disconnect", ** so we disable this feature. */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, /* ** Terminate cycle ... 
*/ SCR_CLR (SCR_ACK|SCR_ATN), 0, /* ** ... and wait for the disconnect. */ SCR_WAIT_DISC, 0, }/*-------------------------< CLEANUP >-------------------*/,{ /* ** dsa: Pointer to nccb ** or xxxxxxFF (no nccb) ** ** HS_REG: Host-Status (<>0!) */ SCR_FROM_REG (dsa), 0, SCR_JUMP ^ IFTRUE (DATA (0xff)), PADDR (signal), /* ** dsa is valid. ** save the status registers */ SCR_COPY (4), RADDR (scr0), NADDR (header.status), /* ** and copy back the header to the nccb. */ SCR_COPY_F (4), RADDR (dsa), PADDR (cleanup0), SCR_COPY (sizeof (struct head)), NADDR (header), }/*-------------------------< CLEANUP0 >--------------------*/,{ 0, /* ** If command resulted in "check condition" ** status and is not yet completed, ** try to get the condition code. */ SCR_FROM_REG (HS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (0, HS_DONEMASK)), 16, SCR_FROM_REG (SS_REG), 0, SCR_JUMP ^ IFTRUE (DATA (SCSI_STATUS_CHECK_COND)), PADDRH(getcc2), }/*-------------------------< SIGNAL >----------------------*/,{ /* ** if status = queue full, ** reinsert in startqueue and stall queue. */ /*>>>*/ SCR_FROM_REG (SS_REG), 0, SCR_INT ^ IFTRUE (DATA (SCSI_STATUS_QUEUE_FULL)), SIR_STALL_QUEUE, /* ** And make the DSA register invalid. */ SCR_LOAD_REG (dsa, 0xff), /* invalid */ 0, /* ** if job completed ... */ SCR_FROM_REG (HS_REG), 0, /* ** ... signal completion to the host */ SCR_INT_FLY ^ IFFALSE (MASK (0, HS_DONEMASK)), 0, /* ** Auf zu neuen Schandtaten! */ SCR_JUMP, PADDR(start), }/*-------------------------< SAVE_DP >------------------*/,{ /* ** SAVE_DP message: ** Copy TEMP register to SAVEP in header. */ SCR_COPY (4), RADDR (temp), NADDR (header.savep), SCR_JUMP, PADDR (clrack), }/*-------------------------< RESTORE_DP >---------------*/,{ /* ** RESTORE_DP message: ** Copy SAVEP in header to TEMP register. */ SCR_COPY (4), NADDR (header.savep), RADDR (temp), SCR_JUMP, PADDR (clrack), }/*-------------------------< DISCONNECT >---------------*/,{ /* ** If QUIRK_AUTOSAVE is set, ** do a "save pointer" operation. */ SCR_FROM_REG (QU_REG), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (QUIRK_AUTOSAVE, QUIRK_AUTOSAVE)), 12, /* ** like SAVE_DP message: ** Copy TEMP register to SAVEP in header. */ SCR_COPY (4), RADDR (temp), NADDR (header.savep), /*>>>*/ /* ** Check if temp==savep or temp==goalp: ** if not, log a missing save pointer message. ** In fact, it's a comparison mod 256. ** ** Hmmm, I hadn't thought that I would be urged to ** write this kind of ugly self modifying code. ** ** It's unbelievable, but the ncr53c8xx isn't able ** to subtract one register from another. */ SCR_FROM_REG (temp), 0, /* ** You are not expected to understand this .. ** ** CAUTION: only little endian architectures supported! XXX */ SCR_COPY_F (1), NADDR (header.savep), PADDR (disconnect0), }/*-------------------------< DISCONNECT0 >--------------*/,{ /*<<<*/ SCR_JUMPR ^ IFTRUE (DATA (1)), 20, /* ** neither this */ SCR_COPY_F (1), NADDR (header.goalp), PADDR (disconnect1), }/*-------------------------< DISCONNECT1 >--------------*/,{ SCR_INT ^ IFFALSE (DATA (1)), SIR_MISSING_SAVE, /*>>>*/ /* ** DISCONNECTing ... ** ** disable the "unexpected disconnect" feature, ** and remove the ACK signal. */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, SCR_CLR (SCR_ACK|SCR_ATN), 0, /* ** Wait for the disconnect. */ SCR_WAIT_DISC, 0, /* ** Profiling: ** Set a time stamp, ** and count the disconnects. 
*/ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.disconnect), SCR_COPY (4), NADDR (disc_phys), RADDR (temp), SCR_REG_REG (temp, SCR_ADD, 0x01), 0, SCR_COPY (4), RADDR (temp), NADDR (disc_phys), /* ** Status is: DISCONNECTED. */ SCR_LOAD_REG (HS_REG, HS_DISCONNECT), 0, SCR_JUMP, PADDR (cleanup), }/*-------------------------< MSG_OUT >-------------------*/,{ /* ** The target requests a message. */ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, NADDR (msgout), SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), /* ** If it was no ABORT message ... */ SCR_JUMP ^ IFTRUE (DATA (MSG_ABORT)), PADDRH (msg_out_abort), /* ** ... wait for the next phase ** if it's a message out, send it again, ... */ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)), PADDR (msg_out), }/*-------------------------< MSG_OUT_DONE >--------------*/,{ /* ** ... else clear the message ... */ SCR_LOAD_REG (scratcha, MSG_NOOP), 0, SCR_COPY (4), RADDR (scratcha), NADDR (msgout), /* ** ... and process the next phase */ SCR_JUMP, PADDR (dispatch), }/*------------------------< BADGETCC >---------------------*/,{ /* ** If SIGP was set, clear it and try again. */ SCR_FROM_REG (ctest2), 0, SCR_JUMP ^ IFTRUE (MASK (CSIGP,CSIGP)), PADDRH (getcc2), SCR_INT, SIR_SENSE_FAILED, }/*-------------------------< RESELECT >--------------------*/,{ /* ** This NOP will be patched with LED OFF ** SCR_REG_REG (gpreg, SCR_OR, 0x01) */ SCR_NO_OP, 0, /* ** make the DSA invalid. */ SCR_LOAD_REG (dsa, 0xff), 0, SCR_CLR (SCR_TRG), 0, /* ** Sleep waiting for a reselection. ** If SIGP is set, special treatment. ** ** Zu allem bereit .. */ SCR_WAIT_RESEL, PADDR(reselect2), }/*-------------------------< RESELECT1 >--------------------*/,{ /* ** This NOP will be patched with LED ON ** SCR_REG_REG (gpreg, SCR_AND, 0xfe) */ SCR_NO_OP, 0, /* ** ... zu nichts zu gebrauchen ? ** ** load the target id into the SFBR ** and jump to the control block. ** ** Look at the declarations of ** - struct ncb ** - struct tcb ** - struct lcb ** - struct nccb ** to understand what's going on. */ SCR_REG_SFBR (ssid, SCR_AND, 0x8F), 0, SCR_TO_REG (sdid), 0, SCR_JUMP, NADDR (jump_tcb), }/*-------------------------< RESELECT2 >-------------------*/,{ /* ** This NOP will be patched with LED ON ** SCR_REG_REG (gpreg, SCR_AND, 0xfe) */ SCR_NO_OP, 0, /* ** If it's not connected :( ** -> interrupted by SIGP bit. ** Jump to start. */ SCR_FROM_REG (ctest2), 0, SCR_JUMP ^ IFTRUE (MASK (CSIGP,CSIGP)), PADDR (start), SCR_JUMP, PADDR (reselect), }/*-------------------------< RESEL_TMP >-------------------*/,{ /* ** The return address in TEMP ** is in fact the data structure address, ** so copy it to the DSA register. */ SCR_COPY (4), RADDR (temp), RADDR (dsa), SCR_JUMP, PADDR (prepare), }/*-------------------------< RESEL_LUN >-------------------*/,{ /* ** come back to this point ** to get an IDENTIFY message ** Wait for a msg_in phase. */ /*<<<*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), 48, /* ** message phase ** It's not a sony, it's a trick: ** read the data without acknowledging it. */ SCR_FROM_REG (sbdl), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (MSG_IDENTIFYFLAG, 0x98)), 32, /* ** It WAS an Identify message. ** get it and ack it! */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin), SCR_CLR (SCR_ACK), 0, /* ** Mask out the lun. */ SCR_REG_REG (sfbr, SCR_AND, 0x07), 0, SCR_RETURN, 0, /* ** No message phase or no IDENTIFY message: ** return 0. 
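/*
** What the self-modifying DISCONNECT/DISCONNECT0/DISCONNECT1 sequence
** above computes, restated in C (a sketch, not in the original source;
** as the comment says, the script compares only the low byte of each
** pointer, i.e. mod 256):
*/
static int
ncr_example_missing_save(u_char temp_lo, u_char savep_lo, u_char goalp_lo)
{
	/* SIR_MISSING_SAVE is raised when TEMP matches neither pointer. */
	return (temp_lo != savep_lo && temp_lo != goalp_lo);
}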
*/
/*>>>*/	SCR_LOAD_SFBR (0), 0,
	SCR_RETURN, 0,
}/*-------------------------< RESEL_TAG >-------------------*/,{
	/*
	**	come back to this point
	**	to get a SIMPLE_TAG message
	**	Wait for a MSG_IN phase.
	*/
/*<<<*/	SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), 64,
	/*
	**	message phase
	**	It's a trick - read the data
	**	without acknowledging it.
	*/
	SCR_FROM_REG (sbdl), 0,
/*<<<*/	SCR_JUMPR ^ IFFALSE (DATA (MSG_SIMPLE_Q_TAG)), 48,
	/*
	**	It WAS a SIMPLE_TAG message.
	**	get it and ack it!
	*/
	SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin),
	SCR_CLR (SCR_ACK), 0,
	/*
	**	Wait for the second byte (the tag)
	*/
/*<<<*/	SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), 24,
	/*
	**	Get it and ack it!
	*/
	SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin),
	SCR_CLR (SCR_ACK|SCR_CARRY), 0,
	SCR_RETURN, 0,
	/*
	**	No message phase or no SIMPLE_TAG message
	**	or no second byte: return 0.
	*/
/*>>>*/	SCR_LOAD_SFBR (0), 0,
	SCR_SET (SCR_CARRY), 0,
	SCR_RETURN, 0,
}/*-------------------------< DATA_IN >--------------------*/,{
/*
**	Because the size depends on the
**	#define MAX_SCATTER parameter,
**	it is filled in at runtime.
**
**	SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
**		PADDR (no_data),
**	SCR_COPY (sizeof (ticks)),
**		KVAR (KVAR_TICKS),
**		NADDR (header.stamp.data),
**	SCR_MOVE_TBL ^ SCR_DATA_IN,
**		offsetof (struct dsb, data[ 0]),
**
**	##===========< i=1; i<MAX_SCATTER >=========
**	||	SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)),
**	||		PADDR (checkatn),
**	||	SCR_MOVE_TBL ^ SCR_DATA_IN,
**	||		offsetof (struct dsb, data[ i]),
**	##==========================================
**
**	SCR_CALL,
**		PADDR (checkatn),
**	SCR_JUMP,
**		PADDR (no_data),
*/
0
}/*-------------------------< DATA_OUT >-------------------*/,{
/*
**	Because the size depends on the
**	#define MAX_SCATTER parameter,
**	it is filled in at runtime.
**
**	SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_OUT)),
**		PADDR (no_data),
**	SCR_COPY (sizeof (ticks)),
**		KVAR (KVAR_TICKS),
**		NADDR (header.stamp.data),
**	SCR_MOVE_TBL ^ SCR_DATA_OUT,
**		offsetof (struct dsb, data[ 0]),
**
**	##===========< i=1; i<MAX_SCATTER >=========
**	||	SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)),
**	||		PADDR (dispatch),
**	||	SCR_MOVE_TBL ^ SCR_DATA_OUT,
**	||		offsetof (struct dsb, data[ i]),
**	##==========================================
**
**	SCR_CALL,
**		PADDR (dispatch),
**	SCR_JUMP,
**		PADDR (no_data),
**
**---------------------------------------------------------
*/
(u_long)0
}/*--------------------------------------------------------*/
};

static struct scripth scripth0 = {
/*-------------------------< TRYLOOP >---------------------*/{
/*
**	Load an entry of the start queue into dsa
**	and try to start it by jumping to TRYSEL.
**
**	Because the size depends on the
**	#define MAX_START parameter, it is filled
**	in at runtime.
**
**-----------------------------------------------------------
**
**	##===========< I=0; i<MAX_START >===========
**	||	SCR_COPY (4),
**	||		NADDR (squeue[i]),
**	||		RADDR (dsa),
**	||	SCR_CALL,
**	||		PADDR (trysel),
**	##==========================================
**
**	SCR_JUMP,
**		PADDRH(tryloop),
**
**-----------------------------------------------------------
*/
0
}/*-------------------------< MSG_PARITY >---------------*/,{
	/*
	**	count it
	*/
	SCR_REG_REG (PS_REG, SCR_ADD, 0x01), 0,
	/*
	**	send a "message parity error" message.
	*/
	SCR_LOAD_REG (scratcha, MSG_PARITY_ERROR), 0,
	SCR_JUMP, PADDR (setmsg),
}/*-------------------------< MSG_MESSAGE_REJECT >---------------*/,{
	/*
	**	If a negotiation was in progress,
	**	negotiation failed.
*/ SCR_FROM_REG (HS_REG), 0, SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), SIR_NEGO_FAILED, /* ** else make host log this message */ SCR_INT ^ IFFALSE (DATA (HS_NEGOTIATE)), SIR_REJECT_RECEIVED, SCR_JUMP, PADDR (clrack), }/*-------------------------< MSG_IGN_RESIDUE >----------*/,{ /* ** Terminate cycle */ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get residue size. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[1]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, /* ** Size is 0 .. ignore message. */ SCR_JUMP ^ IFTRUE (DATA (0)), PADDR (clrack), /* ** Size is not 1 .. have to interrupt. */ /*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (1)), 40, /* ** Check for residue byte in swide register */ SCR_FROM_REG (scntl2), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)), 16, /* ** There IS data in the swide register. ** Discard it. */ SCR_REG_REG (scntl2, SCR_OR, WSR), 0, SCR_JUMP, PADDR (clrack), /* ** Load again the size to the sfbr register. */ /*>>>*/ SCR_FROM_REG (scratcha), 0, /*>>>*/ SCR_INT, SIR_IGN_RESIDUE, SCR_JUMP, PADDR (clrack), }/*-------------------------< MSG_EXTENDED >-------------*/,{ /* ** Terminate cycle */ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get length. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[1]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, /* */ SCR_JUMP ^ IFTRUE (DATA (3)), PADDRH (msg_ext_3), SCR_JUMP ^ IFFALSE (DATA (2)), PADDR (msg_bad), }/*-------------------------< MSG_EXT_2 >----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get extended message code. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[2]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, SCR_JUMP ^ IFTRUE (DATA (MSG_EXT_WDTR)), PADDRH (msg_wdtr), /* ** unknown extended message */ SCR_JUMP, PADDR (msg_bad) }/*-------------------------< MSG_WDTR >-----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get data bus width */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[3]), SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), /* ** let the host do the real work. */ SCR_INT, SIR_NEGO_WIDE, /* ** let the target fetch our answer. */ SCR_SET (SCR_ATN), 0, SCR_CLR (SCR_ACK), 0, SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)), SIR_NEGO_PROTO, /* ** Send the MSG_EXT_WDTR */ SCR_MOVE_ABS (4) ^ SCR_MSG_OUT, NADDR (msgout), SCR_CLR (SCR_ATN), 0, SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), SCR_JUMP, PADDR (msg_out_done), }/*-------------------------< MSG_EXT_3 >----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get extended message code. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[2]), /* ** Check for message parity error. 
*/ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, SCR_JUMP ^ IFTRUE (DATA (MSG_EXT_SDTR)), PADDRH (msg_sdtr), /* ** unknown extended message */ SCR_JUMP, PADDR (msg_bad) }/*-------------------------< MSG_SDTR >-----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get period and offset */ SCR_MOVE_ABS (2) ^ SCR_MSG_IN, NADDR (msgin[3]), SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), /* ** let the host do the real work. */ SCR_INT, SIR_NEGO_SYNC, /* ** let the target fetch our answer. */ SCR_SET (SCR_ATN), 0, SCR_CLR (SCR_ACK), 0, SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)), SIR_NEGO_PROTO, /* ** Send the MSG_EXT_SDTR */ SCR_MOVE_ABS (5) ^ SCR_MSG_OUT, NADDR (msgout), SCR_CLR (SCR_ATN), 0, SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), SCR_JUMP, PADDR (msg_out_done), }/*-------------------------< MSG_OUT_ABORT >-------------*/,{ /* ** After ABORT message, ** ** expect an immediate disconnect, ... */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, SCR_CLR (SCR_ACK|SCR_ATN), 0, SCR_WAIT_DISC, 0, /* ** ... and set the status to "ABORTED" */ SCR_LOAD_REG (HS_REG, HS_ABORTED), 0, SCR_JUMP, PADDR (cleanup), }/*-------------------------< GETCC >-----------------------*/,{ /* ** The ncr doesn't have an indirect load ** or store command. So we have to ** copy part of the control block to a ** fixed place, where we can modify it. ** ** We patch the address part of a COPY command ** with the address of the dsa register ... */ SCR_COPY_F (4), RADDR (dsa), PADDRH (getcc1), /* ** ... then we do the actual copy. */ SCR_COPY (sizeof (struct head)), }/*-------------------------< GETCC1 >----------------------*/,{ 0, NADDR (header), /* ** Initialize the status registers */ SCR_COPY (4), NADDR (header.status), RADDR (scr0), }/*-------------------------< GETCC2 >----------------------*/,{ /* ** Get the condition code from a target. ** ** DSA points to a data structure. ** Set TEMP to the script location ** that receives the condition code. ** ** Because there is no script command ** to load a longword into a register, ** we use a CALL command. */ /*<<<*/ SCR_CALLR, 24, /* ** Get the condition code. */ SCR_MOVE_TBL ^ SCR_DATA_IN, offsetof (struct dsb, sense), /* ** No data phase may follow! */ SCR_CALL, PADDR (checkatn), SCR_JUMP, PADDR (no_data), /*>>>*/ /* ** The CALL jumps to this point. ** Prepare for a RESTORE_POINTER message. ** Save the TEMP register into the saved pointer. */ SCR_COPY (4), RADDR (temp), NADDR (header.savep), /* ** Load scratcha, because in case of a selection timeout, ** the host will expect a new value for startpos in ** the scratcha register. */ SCR_COPY (4), PADDR (startpos), RADDR (scratcha), #ifdef NCR_GETCC_WITHMSG /* ** If QUIRK_NOMSG is set, select without ATN. ** and don't send a message. */ SCR_FROM_REG (QU_REG), 0, SCR_JUMP ^ IFTRUE (MASK (QUIRK_NOMSG, QUIRK_NOMSG)), PADDRH(getcc3), /* ** Then try to connect to the target. ** If we are reselected, special treatment ** of the current job is required before ** accepting the reselection. */ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select), PADDR(badgetcc), /* ** Send the IDENTIFY message. ** In case of short transfer, remove ATN. */ SCR_MOVE_TBL ^ SCR_MSG_OUT, offsetof (struct dsb, smsg2), SCR_CLR (SCR_ATN), 0, /* ** save the first byte of the message. 
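**
**	A C model of the GETCC trick above (illustrative sketch only):
**	the script processor has no indirect load, so the driver's
**	script first patches the source operand of a COPY instruction
**	with the value of the dsa register, then executes that COPY.
*/
#if 0	/* illustrative sketch only */
struct copy_insn_sketch {
	u_long	cmd;		/* SCR_COPY (sizeof (struct head)) */
	u_long	src;		/* patched at runtime with dsa */
	u_long	dst;		/* NADDR (header) */
};

static void
getcc_indirect_copy_sketch(struct copy_insn_sketch *insn, u_long dsa)
{
	insn->src = dsa;	/* self-modify: COPY now reads from *dsa */
	/* the chip then executes *insn, copying the nccb header */
}
#endif
/*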
*/
	SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg),
	SCR_JUMP, PADDR (prepare2),
#endif
}/*-------------------------< GETCC3 >----------------------*/,{
	/*
	**	Try to connect to the target.
	**	If we are reselected, special treatment
	**	of the current job is required before
	**	accepting the reselection.
	**
	**	Silly target won't accept a message.
	**	Select without ATN.
	*/
	SCR_SEL_TBL ^ offsetof (struct dsb, select), PADDR(badgetcc),
	/*
	**	Force error if selection timeout
	*/
	SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_IN)), 0,
	/*
	**	don't negotiate.
	*/
	SCR_JUMP, PADDR (prepare2),
}/*-------------------------< ABORTTAG >-------------------*/,{
	/*
	**	Abort a bad reselection.
	**	Set the message to ABORT vs. ABORT_TAG
	*/
	SCR_LOAD_REG (scratcha, MSG_ABORT_TAG), 0,
	SCR_JUMPR ^ IFFALSE (CARRYSET), 8,
}/*-------------------------< ABORT >----------------------*/,{
	SCR_LOAD_REG (scratcha, MSG_ABORT), 0,
	SCR_COPY (1), RADDR (scratcha), NADDR (msgout),
	SCR_SET (SCR_ATN), 0,
	SCR_CLR (SCR_ACK), 0,
	/*
	**	and send it.
	**	we expect an immediate disconnect
	*/
	SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0,
	SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, NADDR (msgout),
	SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg),
	SCR_CLR (SCR_ACK|SCR_ATN), 0,
	SCR_WAIT_DISC, 0,
	SCR_JUMP, PADDR (start),
}/*-------------------------< SNOOPTEST >-------------------*/,{
	/*
	**	Read the variable.
	*/
	SCR_COPY (4), KVAR (KVAR_NCR_CACHE), RADDR (scratcha),
	/*
	**	Write the variable.
	*/
	SCR_COPY (4), RADDR (temp), KVAR (KVAR_NCR_CACHE),
	/*
	**	Read back the variable.
	*/
	SCR_COPY (4), KVAR (KVAR_NCR_CACHE), RADDR (temp),
}/*-------------------------< SNOOPEND >-------------------*/,{
	/*
	**	And stop.
	*/
	SCR_INT, 99,
}/*--------------------------------------------------------*/
};

/*==========================================================
**
**
**	Fill in #define dependent parts of the script
**
**
**==========================================================
*/

static void ncr_script_fill (struct script * scr, struct scripth * scrh)
{
	int	i;
	ncrcmd	*p;

	p = scrh->tryloop;
	for (i=0; i<MAX_START; i++) {
		*p++ =SCR_COPY (4);
		*p++ =NADDR (squeue[i]);
		*p++ =RADDR (dsa);
		*p++ =SCR_CALL;
		*p++ =PADDR (trysel);
	};
	*p++ =SCR_JUMP;
	*p++ =PADDRH(tryloop);

	assert ((char *)p == (char *)scrh->tryloop + sizeof (scrh->tryloop));

	p = scr->data_in;

	*p++ =SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN));
	*p++ =PADDR (no_data);
	*p++ =SCR_COPY (sizeof (ticks));
	*p++ =(ncrcmd) KVAR (KVAR_TICKS);
	*p++ =NADDR (header.stamp.data);
	*p++ =SCR_MOVE_TBL ^ SCR_DATA_IN;
	*p++ =offsetof (struct dsb, data[ 0]);

	for (i=1; i<MAX_SCATTER; i++) {
		*p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN));
		*p++ =PADDR (checkatn);
		*p++ =SCR_MOVE_TBL ^ SCR_DATA_IN;
		*p++ =offsetof (struct dsb, data[ i]);
	};

	*p++ =SCR_CALL;
	*p++ =PADDR (checkatn);
	*p++ =SCR_JUMP;
	*p++ =PADDR (no_data);

	assert ((char *)p == (char *)scr->data_in + sizeof (scr->data_in));

	p = scr->data_out;

	*p++ =SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_OUT));
	*p++ =PADDR (no_data);
	*p++ =SCR_COPY (sizeof (ticks));
	*p++ =(ncrcmd) KVAR (KVAR_TICKS);
	*p++ =NADDR (header.stamp.data);
	*p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT;
	*p++ =offsetof (struct dsb, data[ 0]);

	for (i=1; i<MAX_SCATTER; i++) {
		*p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT));
		*p++ =PADDR (dispatch);
		*p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT;
		*p++ =offsetof (struct dsb, data[ i]);
	};

	*p++ =SCR_CALL;
	*p++ =PADDR (dispatch);
	*p++ =SCR_JUMP;
	*p++ =PADDR (no_data);

	assert ((char *)p == (char *)scr->data_out + sizeof (scr->data_out));
}

/*==========================================================
**
**
**	Copy and rebind a script.
**
**
**==========================================================
*/

static void ncr_script_copy_and_bind (ncb_p np, ncrcmd *src, ncrcmd *dst, int len)
{
	ncrcmd  opcode, new, old, tmp1, tmp2;
	ncrcmd	*start, *end;
	int relocs, offset;

	start = src;
	end = src + len/4;
	offset = 0;

	while (src < end) {
		opcode = *src++;
		WRITESCRIPT_OFF(dst, offset, opcode);
		offset += 4;

		/*
		**	If we forget to change the length
		**	in struct script, a field will be
		**	padded with 0. This is an illegal
		**	command.
*/
		if (opcode == 0) {
			device_printf(np->dev, "ERROR0 IN SCRIPT at %d.\n",
			    (int)(src - start - 1));
			DELAY (1000000);
		};

		if (DEBUG_FLAGS & DEBUG_SCRIPT)
			printf ("%p: <%x>\n", (src-1), (unsigned)opcode);

		/*
		**	We don't have to decode ALL commands
		*/
		switch (opcode >> 28) {

		case 0xc:
			/*
			**	COPY has TWO arguments.
			*/
			relocs = 2;
			tmp1 = src[0];
			if ((tmp1 & RELOC_MASK) == RELOC_KVAR)
				tmp1 = 0;
			tmp2 = src[1];
			if ((tmp2 & RELOC_MASK) == RELOC_KVAR)
				tmp2 = 0;
			if ((tmp1 ^ tmp2) & 3) {
				device_printf(np->dev,
				    "ERROR1 IN SCRIPT at %d.\n",
				    (int)(src - start - 1));
				DELAY (1000000);
			}
			/*
			**	If PREFETCH feature not enabled, remove
			**	the NO FLUSH bit if present.
			*/
			if ((opcode & SCR_NO_FLUSH) && !(np->features&FE_PFEN))
				WRITESCRIPT_OFF(dst, offset - 4,
				    (opcode & ~SCR_NO_FLUSH));
			break;

		case 0x0:
			/*
			**	MOVE (absolute address)
			*/
			relocs = 1;
			break;

		case 0x8:
			/*
			**	JUMP / CALL
			**	don't relocate if relative :-)
			*/
			if (opcode & 0x00800000)
				relocs = 0;
			else
				relocs = 1;
			break;

		case 0x4:
		case 0x5:
		case 0x6:
		case 0x7:
			relocs = 1;
			break;

		default:
			relocs = 0;
			break;
		};

		if (relocs) {
			while (relocs--) {
				old = *src++;

				switch (old & RELOC_MASK) {
				case RELOC_REGISTER:
					new = (old & ~RELOC_MASK) + rman_get_start(np->reg_res);
					break;
				case RELOC_LABEL:
					new = (old & ~RELOC_MASK) + np->p_script;
					break;
				case RELOC_LABELH:
					new = (old & ~RELOC_MASK) + np->p_scripth;
					break;
				case RELOC_SOFTC:
					new = (old & ~RELOC_MASK) + vtophys(np);
					break;
				case RELOC_KVAR:
					if (((old & ~RELOC_MASK) < SCRIPT_KVAR_FIRST) ||
					    ((old & ~RELOC_MASK) > SCRIPT_KVAR_LAST))
						panic("ncr KVAR out of range");
					new = vtophys(script_kvars[old & ~RELOC_MASK]);
					break;
				case 0:
					/* Don't relocate a 0 address. */
					if (old == 0) {
						new = old;
						break;
					}
					/* FALLTHROUGH */
				default:
					panic("ncr_script_copy_and_bind: weird relocation %x @ %d\n",
					    old, (int)(src - start));
					break;
				}

				WRITESCRIPT_OFF(dst, offset, new);
				offset += 4;
			}
		} else {
			WRITESCRIPT_OFF(dst, offset, *src++);
			offset += 4;
		}

	};
}

/*==========================================================
**
**
**	Auto configuration.
**
**
**==========================================================
*/

#if 0
/*----------------------------------------------------------
**
**	Reduce the transfer length to the max value
**	we can transfer safely.
**
**	Reading a block greater than MAX_SIZE from the
**	raw (character) device exercises a memory leak
**	in the vm subsystem. This is common to ALL devices.
**	We have submitted a description of this bug to
**	<FreeBSD-bugs@freefall.cdrom.com>.
**	It should be fixed in the current release.
**
**----------------------------------------------------------
*/

void ncr_min_phys (struct buf *bp)
{
	if ((unsigned long)bp->b_bcount > MAX_SIZE)
		bp->b_bcount = MAX_SIZE;
}
#endif

#if 0
/*----------------------------------------------------------
**
**	Maximal number of outstanding requests per target.
**
**----------------------------------------------------------
*/

u_int32_t ncr_info (int unit)
{
	return (1);	/* may be changed later */
}
#endif

/*----------------------------------------------------------
**
**	NCR chip devices table and chip lookup function.
**	Feature bits are defined in ncrreg.h. Is it the
**	right place?
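**
**	Sketch (illustrative only, not part of the driver) of the
**	relocation scheme handled in ncr_script_copy_and_bind() above:
**	the high bits of a script operand tag what the remaining bits
**	are relative to, and binding adds the matching base.  The bases
**	passed in here are stand-ins.
*/
#if 0	/* illustrative sketch only */
static u_long
reloc_bind_sketch(u_long old, u_long reg_pa, u_long script_pa, u_long softc_pa)
{
	switch (old & RELOC_MASK) {
	case RELOC_REGISTER:	/* offset from the chip register window */
		return ((old & ~RELOC_MASK) + reg_pa);
	case RELOC_LABEL:	/* offset into the bound main script */
		return ((old & ~RELOC_MASK) + script_pa);
	case RELOC_SOFTC:	/* offset into the controller softc */
		return ((old & ~RELOC_MASK) + softc_pa);
	default:		/* RELOC_LABELH and RELOC_KVAR are handled
				   analogously; plain values are unchanged */
		return (old);
	}
}
#endif
/*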
**
**----------------------------------------------------------
*/

typedef struct {
	unsigned long	device_id;
	unsigned short	minrevid;
	char	       *name;
	unsigned char	maxburst;
	unsigned char	maxoffs;
	unsigned char	clock_divn;
	unsigned int	features;
} ncr_chip;

static ncr_chip ncr_chip_table[] = {
 {NCR_810_ID, 0x00, "ncr 53c810 fast10 scsi", 4, 8, 4,
 FE_ERL}
 ,
 {NCR_810_ID, 0x10, "ncr 53c810a fast10 scsi", 4, 8, 4,
 FE_ERL|FE_LDSTR|FE_PFEN|FE_BOF}
 ,
 {NCR_815_ID, 0x00, "ncr 53c815 fast10 scsi", 4, 8, 4,
 FE_ERL|FE_BOF}
 ,
 {NCR_820_ID, 0x00, "ncr 53c820 fast10 wide scsi", 4, 8, 4,
 FE_WIDE|FE_ERL}
 ,
 {NCR_825_ID, 0x00, "ncr 53c825 fast10 wide scsi", 4, 8, 4,
 FE_WIDE|FE_ERL|FE_BOF}
 ,
 {NCR_825_ID, 0x10, "ncr 53c825a fast10 wide scsi", 7, 8, 4,
 FE_WIDE|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}
 ,
 {NCR_860_ID, 0x00, "ncr 53c860 fast20 scsi", 4, 8, 5,
 FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_LDSTR|FE_PFEN}
 ,
 {NCR_875_ID, 0x00, "ncr 53c875 fast20 wide scsi", 7, 16, 5,
 FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}
 ,
 {NCR_875_ID, 0x02, "ncr 53c875 fast20 wide scsi", 7, 16, 5,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}
 ,
 {NCR_875_ID2, 0x00, "ncr 53c875j fast20 wide scsi", 7, 16, 5,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}
 ,
 {NCR_885_ID, 0x00, "ncr 53c885 fast20 wide scsi", 7, 16, 5,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}
 ,
 {NCR_895_ID, 0x00, "ncr 53c895 fast40 wide scsi", 7, 31, 7,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}
 ,
 {NCR_896_ID, 0x00, "ncr 53c896 fast40 wide scsi", 7, 31, 7,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}
 ,
 {NCR_895A_ID, 0x00, "ncr 53c895a fast40 wide scsi", 7, 31, 7,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}
 ,
 {NCR_1510D_ID, 0x00, "ncr 53c1510d fast40 wide scsi", 7, 31, 7,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}
};

static int ncr_chip_lookup(u_long device_id, u_char revision_id)
{
	int i, found;

	found = -1;
	for (i = 0; i < sizeof(ncr_chip_table)/sizeof(ncr_chip_table[0]); i++) {
		if (device_id == ncr_chip_table[i].device_id &&
		    ncr_chip_table[i].minrevid <= revision_id) {
			if (found < 0 ||
			    ncr_chip_table[found].minrevid
			      < ncr_chip_table[i].minrevid) {
				found = i;
			}
		}
	}
	return found;
}

/*----------------------------------------------------------
**
**	Probe the host adapter.
**
**----------------------------------------------------------
*/

static int ncr_probe (device_t dev)
{
	int i;

	i = ncr_chip_lookup(pci_get_devid(dev), pci_get_revid(dev));
	if (i >= 0) {
		device_set_desc(dev, ncr_chip_table[i].name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

/*==========================================================
**
**	NCR chip clock divisor table.
**	Divisors are multiplied by 10,000,000 in order to make
**	the calculations simpler.
**
**==========================================================
*/

#define _5M 5000000
static u_long div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};

/*===============================================================
**
**	NCR chips allow burst lengths of 2, 4, 8, 16, 32, 64, 128
**	transfers. 32, 64 and 128 are only supported by 875 and 895
**	chips. We use log base 2 (burst length) as internal code,
**	with value 0 meaning "burst disabled".
**
**===============================================================
*/

/*
 * Burst length from burst code.
 */
#define burst_length(bc) (!(bc))? 0 : 1 << (bc)

/*
 * Burst code from io register bits.
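 *
 * Example (illustrative only): with the log2 encoding above, code 0
 * means burst disabled and code 4 means 1 << 4 = 16 transfers; the
 * 875/895 parts go up to code 7 (128 transfers).
 */
#if 0	/* illustrative sketch only */
static int
burst_length_demo(void)
{
	/* burst code 4 decodes to 1 << 4 == 16 transfers */
	return (burst_length(4));
}
#endif
/*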
*/
#define burst_code(dmode, ctest4, ctest5) \
	(ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1

/*
 * Set initial io register bits from burst code.
 */
static void
ncr_init_burst(ncb_p np, u_char bc)
{
	np->rv_ctest4	&= ~0x80;
	np->rv_dmode	&= ~(0x3 << 6);
	np->rv_ctest5	&= ~0x4;

	if (!bc) {
		np->rv_ctest4	|= 0x80;
	} else {
		--bc;
		np->rv_dmode	|= ((bc & 0x3) << 6);
		np->rv_ctest5	|= (bc & 0x4);
	}
}

/*==========================================================
**
**
**	Auto configuration:  attach and init a host adapter.
**
**
**==========================================================
*/

static int
ncr_attach (device_t dev)
{
	ncb_p np = (struct ncb*) device_get_softc(dev);
	u_char	 rev = 0;
	u_long	 period;
	int	 i, rid;
	u_int8_t usrsync;
	u_int8_t usrwide;
	struct cam_devq *devq;

	/*
	**	allocate and initialize structures.
	*/

	np->dev = dev;
	mtx_init(&np->lock, "ncr", NULL, MTX_DEF);
	callout_init_mtx(&np->timer, &np->lock, 0);

	/*
	**	Try to map the controller chip to
	**	virtual and physical memory.
	*/

	np->reg_rid = PCIR_BAR(1);
	np->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &np->reg_rid, RF_ACTIVE);
	if (!np->reg_res) {
		device_printf(dev, "could not map memory\n");
		return ENXIO;
	}

	/*
	**	Now the INB INW INL OUTB OUTW OUTL macros
	**	can be used safely.
	*/

#ifdef NCR_IOMAPPED
	/*
	**	Try to map the controller chip into iospace.
	*/

	if (!pci_map_port (config_id, 0x10, &np->port))
		return;
#endif

	/*
	**	Save some controller register default values
	*/

	np->rv_scntl3 = INB(nc_scntl3) & 0x77;
	np->rv_dmode  = INB(nc_dmode)  & 0xce;
	np->rv_dcntl  = INB(nc_dcntl)  & 0xa9;
	np->rv_ctest3 = INB(nc_ctest3) & 0x01;
	np->rv_ctest4 = INB(nc_ctest4) & 0x88;
	np->rv_ctest5 = INB(nc_ctest5) & 0x24;
	np->rv_gpcntl = INB(nc_gpcntl);
	np->rv_stest2 = INB(nc_stest2) & 0x20;

	if (bootverbose >= 2) {
		printf ("\tBIOS values: SCNTL3:%02x DMODE:%02x DCNTL:%02x\n",
			np->rv_scntl3, np->rv_dmode, np->rv_dcntl);
		printf ("\t             CTEST3:%02x CTEST4:%02x CTEST5:%02x\n",
			np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
	}

	np->rv_dcntl |= NOCOM;

	/*
	**	Do chip dependent initialization.
	*/

	rev = pci_get_revid(dev);

	/*
	**	Get chip features from chips table.
	*/

	i = ncr_chip_lookup(pci_get_devid(dev), rev);

	if (i >= 0) {
		np->maxburst	= ncr_chip_table[i].maxburst;
		np->maxoffs	= ncr_chip_table[i].maxoffs;
		np->clock_divn	= ncr_chip_table[i].clock_divn;
		np->features	= ncr_chip_table[i].features;
	} else {	/* Shouldn't happen if probe() is ok */
		np->maxburst	= 4;
		np->maxoffs	= 8;
		np->clock_divn	= 4;
		np->features	= FE_ERL;
	}

	np->maxwide	= np->features & FE_WIDE ? 1 : 0;
	np->clock_khz	= np->features & FE_CLK80 ? 80000 : 40000;
	if	(np->features & FE_QUAD)	np->multiplier = 4;
	else if	(np->features & FE_DBLR)	np->multiplier = 2;
	else					np->multiplier = 1;

	/*
	**	Get the frequency of the chip's clock.
	**	Find the right value for scntl3.
	*/

	if (np->features & (FE_ULTRA|FE_ULTRA2))
		ncr_getclock(np, np->multiplier);

#ifdef NCR_TEKRAM_EEPROM
	if (bootverbose) {
		device_printf(dev, "Tekram EEPROM read %s\n",
			read_tekram_eeprom (np, NULL) ?
			"succeeded" : "failed");
	}
#endif /* NCR_TEKRAM_EEPROM */

	/*
	 * If scntl3 != 0, we assume BIOS is present.
	 */
	if (np->rv_scntl3)
		np->features |= FE_BIOS;

	/*
	 * Divisor to be used for async (timer pre-scaler).
	 */
	i = np->clock_divn - 1;
	while (--i >= 0) {
		if (10ul * SCSI_NCR_MIN_ASYNC * np->clock_khz > div_10M[i]) {
			++i;
			break;
		}
	}
	np->rv_scntl3 = i+1;

	/*
	 * Minimum synchronous period factor supported by the chip.
	 * Btw, 'period' is in tenths of nanoseconds.
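	 *
	 * Worked example (illustrative): with an 80 MHz clock
	 * (clock_khz == 80000) the computation below gives
	 * period = (4 * 10000000 + 80000 - 1) / 80000 ~= 500 tenths of
	 * ns, i.e. 50 ns, hence np->minsync = 12 (Fast-20).  A plain
	 * 40 MHz part gives 1000 tenths (100 ns) and minsync 25 (Fast-10).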
*/ period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz; if (period <= 250) np->minsync = 10; else if (period <= 303) np->minsync = 11; else if (period <= 500) np->minsync = 12; else np->minsync = (period + 40 - 1) / 40; /* * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). */ if (np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2))) np->minsync = 25; else if (np->minsync < 12 && !(np->features & FE_ULTRA2)) np->minsync = 12; /* * Maximum synchronous period factor supported by the chip. */ period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); np->maxsync = period > 2540 ? 254 : period / 10; /* * Now, some features available with Symbios compatible boards. * LED support through GPIO0 and DIFF support. */ #ifdef SCSI_NCR_SYMBIOS_COMPAT if (!(np->rv_gpcntl & 0x01)) np->features |= FE_LED0; #if 0 /* Not safe enough without NVRAM support or user settable option */ if (!(INB(nc_gpreg) & 0x08)) np->features |= FE_DIFF; #endif #endif /* SCSI_NCR_SYMBIOS_COMPAT */ /* * Prepare initial IO registers settings. * Trust BIOS only if we believe we have one and if we want to. */ #ifdef SCSI_NCR_TRUST_BIOS if (!(np->features & FE_BIOS)) { #else if (1) { #endif np->rv_dmode = 0; np->rv_dcntl = NOCOM; np->rv_ctest3 = 0; np->rv_ctest4 = MPEE; np->rv_ctest5 = 0; np->rv_stest2 = 0; if (np->features & FE_ERL) np->rv_dmode |= ERL; /* Enable Read Line */ if (np->features & FE_BOF) np->rv_dmode |= BOF; /* Burst Opcode Fetch */ if (np->features & FE_ERMP) np->rv_dmode |= ERMP; /* Enable Read Multiple */ if (np->features & FE_CLSE) np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ if (np->features & FE_WRIE) np->rv_ctest3 |= WRIE; /* Write and Invalidate */ if (np->features & FE_PFEN) np->rv_dcntl |= PFEN; /* Prefetch Enable */ if (np->features & FE_DFS) np->rv_ctest5 |= DFS; /* Dma Fifo Size */ if (np->features & FE_DIFF) np->rv_stest2 |= 0x20; /* Differential mode */ ncr_init_burst(np, np->maxburst); /* Max dwords burst length */ } else { np->maxburst = burst_code(np->rv_dmode, np->rv_ctest4, np->rv_ctest5); } /* ** Get on-chip SRAM address, if supported */ if ((np->features & FE_RAM) && sizeof(struct script) <= 4096) { np->sram_rid = PCIR_BAR(2); np->sram_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &np->sram_rid, RF_ACTIVE); } /* ** Allocate structure for script relocation. */ if (np->sram_res != NULL) { np->script = NULL; np->p_script = rman_get_start(np->sram_res); } else if (sizeof (struct script) > PAGE_SIZE) { np->script = (struct script*) contigmalloc (round_page(sizeof (struct script)), M_DEVBUF, M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0); } else { np->script = (struct script *) malloc (sizeof (struct script), M_DEVBUF, M_WAITOK); } if (sizeof (struct scripth) > PAGE_SIZE) { np->scripth = (struct scripth*) contigmalloc (round_page(sizeof (struct scripth)), M_DEVBUF, M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0); } else { np->scripth = (struct scripth *) malloc (sizeof (struct scripth), M_DEVBUF, M_WAITOK); } #ifdef SCSI_NCR_PCI_CONFIG_FIXUP /* ** If cache line size is enabled, check PCI config space and ** try to fix it up if necessary. 
*/
#ifdef PCIR_CACHELNSZ	/* To be sure that new PCI stuff is present */
	{
		u_char cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
		u_short command  = pci_read_config(dev, PCIR_COMMAND, 2);

		if (!cachelnsz) {
			cachelnsz = 8;
			device_printf(dev,
			    "setting PCI cache line size register to %d.\n",
			    (int)cachelnsz);
			pci_write_config(dev, PCIR_CACHELNSZ, cachelnsz, 1);
		}

		if (!(command & PCIM_CMD_MWRICEN)) {
			command |= PCIM_CMD_MWRICEN;
			device_printf(dev,
			    "setting PCI command write and invalidate.\n");
			pci_write_config(dev, PCIR_COMMAND, command, 2);
		}
	}
#endif /* PCIR_CACHELNSZ */
#endif /* SCSI_NCR_PCI_CONFIG_FIXUP */

	/* Initialize per-target user settings */
	usrsync = 0;
	if (SCSI_NCR_DFLT_SYNC) {
		usrsync = SCSI_NCR_DFLT_SYNC;
		if (usrsync > np->maxsync)
			usrsync = np->maxsync;
		if (usrsync < np->minsync)
			usrsync = np->minsync;
	};

	usrwide = (SCSI_NCR_MAX_WIDE);
	if (usrwide > np->maxwide) usrwide=np->maxwide;

	for (i=0;i<MAX_TARGET;i++) {
		tcb_p tp = &np->target[i];

		tp->tinfo.user.period = usrsync;
		tp->tinfo.user.offset = usrsync != 0 ? np->maxoffs : 0;
		tp->tinfo.user.width = usrwide;
		tp->tinfo.disc_tag = NCR_CUR_DISCENB
				   | NCR_CUR_TAGENB
				   | NCR_USR_DISCENB
				   | NCR_USR_TAGENB;
	}

	/*
	**	Bells and whistles   ;-)
	*/
	if (bootverbose)
		device_printf(dev,
	    "minsync=%d, maxsync=%d, maxoffs=%d, %d dwords burst, %s dma fifo\n",
		np->minsync, np->maxsync, np->maxoffs,
		burst_length(np->maxburst),
		(np->rv_ctest5 & DFS) ? "large" : "normal");

	/*
	**	Print some complementary information that can be helpful.
	*/
	if (bootverbose)
		device_printf(dev, "%s, %s IRQ driver%s\n",
			np->rv_stest2 & 0x20 ? "differential" : "single-ended",
			np->rv_dcntl & IRQM ? "totem pole" : "open drain",
			np->sram_res ? ", using on-chip SRAM" : "");

	/*
	**	Patch scripts to physical addresses
	*/
	ncr_script_fill (&script0, &scripth0);

	if (np->script)
		np->p_script = vtophys(np->script);
	np->p_scripth = vtophys(np->scripth);

	ncr_script_copy_and_bind (np, (ncrcmd *) &script0,
			(ncrcmd *) np->script, sizeof(struct script));
	ncr_script_copy_and_bind (np, (ncrcmd *) &scripth0,
			(ncrcmd *) np->scripth, sizeof(struct scripth));

	/*
	**	Patch the script for LED support.
	*/

	if (np->features & FE_LED0) {
		WRITESCRIPT(reselect[0],  SCR_REG_REG(gpreg, SCR_OR,  0x01));
		WRITESCRIPT(reselect1[0], SCR_REG_REG(gpreg, SCR_AND, 0xfe));
		WRITESCRIPT(reselect2[0], SCR_REG_REG(gpreg, SCR_AND, 0xfe));
	}

	/*
	**	init data structure
	*/

	np->jump_tcb.l_cmd	= SCR_JUMP;
	np->jump_tcb.l_paddr	= NCB_SCRIPTH_PHYS (np, abort);

	/*
	**	Get SCSI addr of host adapter (set by bios?).
	*/

	np->myaddr = INB(nc_scid) & 0x07;
	if (!np->myaddr) np->myaddr = SCSI_NCR_MYADDR;

#ifdef NCR_DUMP_REG
	/*
	**	Log the initial register contents
	*/
	{
		int reg;
		for (reg=0; reg<256; reg+=4) {
			if (reg%16==0) printf ("reg[%2x]", reg);
			printf (" %08x", (int)pci_conf_read (config_id, reg));
			if (reg%16==12) printf ("\n");
		}
	}
#endif /* NCR_DUMP_REG */

	/*
	**	Reset chip.
	*/

	OUTB (nc_istat,  SRST);
	DELAY (1000);
	OUTB (nc_istat,  0   );

	/*
	**	Now check the cache handling of the pci chipset.
	*/

	if (ncr_snooptest (np)) {
		printf ("CACHE INCORRECTLY CONFIGURED.\n");
		return EINVAL;
	};

	/*
	**	Install the interrupt handler.
	*/

	rid = 0;
	np->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (np->irq_res == NULL) {
		device_printf(dev,
			"interruptless mode: reduced performance.\n");
	} else {
		bus_setup_intr(dev, np->irq_res,
		    INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL,
		    ncr_intr, np, &np->irq_handle);
	}

	/*
	**	Create the device queue.
We only allow MAX_START-1 concurrent ** transactions so we can be sure to have one element free in our ** start queue to reset to the idle loop. */ devq = cam_simq_alloc(MAX_START - 1); if (devq == NULL) return ENOMEM; /* ** Now tell the generic SCSI layer ** about our bus. */ np->sim = cam_sim_alloc(ncr_action, ncr_poll, "ncr", np, device_get_unit(dev), &np->lock, 1, MAX_TAGS, devq); if (np->sim == NULL) { cam_simq_free(devq); return ENOMEM; } mtx_lock(&np->lock); if (xpt_bus_register(np->sim, dev, 0) != CAM_SUCCESS) { cam_sim_free(np->sim, /*free_devq*/ TRUE); mtx_unlock(&np->lock); return ENOMEM; } if (xpt_create_path(&np->path, /*periph*/NULL, cam_sim_path(np->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(np->sim)); cam_sim_free(np->sim, /*free_devq*/TRUE); mtx_unlock(&np->lock); return ENOMEM; } /* ** start the timeout daemon */ ncr_timeout (np); np->lasttime=0; mtx_unlock(&np->lock); return 0; } /*========================================================== ** ** ** Process pending device interrupts. ** ** **========================================================== */ static void ncr_intr(vnp) void *vnp; { ncb_p np = vnp; mtx_lock(&np->lock); ncr_intr_locked(np); mtx_unlock(&np->lock); } static void ncr_intr_locked(ncb_p np) { if (DEBUG_FLAGS & DEBUG_TINY) printf ("["); if (INB(nc_istat) & (INTF|SIP|DIP)) { /* ** Repeat until no outstanding ints */ do { ncr_exception (np); } while (INB(nc_istat) & (INTF|SIP|DIP)); np->ticks = 100; }; if (DEBUG_FLAGS & DEBUG_TINY) printf ("]\n"); } /*========================================================== ** ** ** Start execution of a SCSI command. ** This is called from the generic SCSI driver. ** ** **========================================================== */ static void ncr_action (struct cam_sim *sim, union ccb *ccb) { ncb_p np; np = (ncb_p) cam_sim_softc(sim); mtx_assert(&np->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ { nccb_p cp; lcb_p lp; tcb_p tp; struct ccb_scsiio *csio; u_int8_t *msgptr; u_int msglen; u_int msglen2; int segments; u_int8_t nego; u_int8_t idmsg; int qidx; tp = &np->target[ccb->ccb_h.target_id]; csio = &ccb->csio; /* + * Make sure we support this request. We can't do + * PHYS pointers. + */ + if (ccb->ccb_h.flags & CAM_CDB_PHYS) { + ccb->ccb_h.status = CAM_REQ_INVALID; + xpt_done(ccb); + return; + } + + /* * Last time we need to check if this CCB needs to * be aborted. */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { xpt_done(ccb); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; /*--------------------------------------------------- ** ** Assign an nccb / bind ccb ** **---------------------------------------------------- */ cp = ncr_get_nccb (np, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); if (cp == NULL) { /* XXX JGibbs - Freeze SIMQ */ ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; }; cp->ccb = ccb; /*--------------------------------------------------- ** ** timestamp ** **---------------------------------------------------- */ /* ** XXX JGibbs - Isn't this expensive ** enough to be conditionalized?? 
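**
**	Sketch (an assumption -- see scsiio_cdb_ptr() in cam_ccb.h for
**	the authoritative definition) of the accessor used further down
**	to pick the CDB, which the CAM_CDB_PHYS rejection above keeps
**	virtual:
*/
#if 0	/* illustrative sketch only; the real inline lives in cam_ccb.h */
static __inline u_int8_t *
scsiio_cdb_ptr_sketch(struct ccb_scsiio *csio)
{
	return ((csio->ccb_h.flags & CAM_CDB_POINTER) ?
	    csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes);
}
#endif
/*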
*/ bzero (&cp->phys.header.stamp, sizeof (struct tstamp)); cp->phys.header.stamp.start = ticks; nego = 0; if (tp->nego_cp == NULL) { if (tp->tinfo.current.width != tp->tinfo.goal.width) { tp->nego_cp = cp; nego = NS_WIDE; } else if ((tp->tinfo.current.period != tp->tinfo.goal.period) || (tp->tinfo.current.offset != tp->tinfo.goal.offset)) { tp->nego_cp = cp; nego = NS_SYNC; }; }; /*--------------------------------------------------- ** ** choose a new tag ... ** **---------------------------------------------------- */ lp = tp->lp[ccb->ccb_h.target_lun]; if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0 && (ccb->csio.tag_action != CAM_TAG_ACTION_NONE) && (nego == 0)) { /* ** assign a tag to this nccb */ while (!cp->tag) { nccb_p cp2 = lp->next_nccb; lp->lasttag = lp->lasttag % 255 + 1; while (cp2 && cp2->tag != lp->lasttag) cp2 = cp2->next_nccb; if (cp2) continue; cp->tag=lp->lasttag; if (DEBUG_FLAGS & DEBUG_TAGS) { PRINT_ADDR(ccb); printf ("using tag #%d.\n", cp->tag); }; }; } else { cp->tag=0; }; /*---------------------------------------------------- ** ** Build the identify / tag / sdtr message ** **---------------------------------------------------- */ idmsg = MSG_IDENTIFYFLAG | ccb->ccb_h.target_lun; if (tp->tinfo.disc_tag & NCR_CUR_DISCENB) idmsg |= MSG_IDENTIFY_DISCFLAG; msgptr = cp->scsi_smsg; msglen = 0; msgptr[msglen++] = idmsg; if (cp->tag) { msgptr[msglen++] = ccb->csio.tag_action; msgptr[msglen++] = cp->tag; } switch (nego) { case NS_SYNC: msgptr[msglen++] = MSG_EXTENDED; msgptr[msglen++] = MSG_EXT_SDTR_LEN; msgptr[msglen++] = MSG_EXT_SDTR; msgptr[msglen++] = tp->tinfo.goal.period; msgptr[msglen++] = tp->tinfo.goal.offset; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(ccb); printf ("sync msgout: "); ncr_show_msg (&cp->scsi_smsg [msglen-5]); printf (".\n"); }; break; case NS_WIDE: msgptr[msglen++] = MSG_EXTENDED; msgptr[msglen++] = MSG_EXT_WDTR_LEN; msgptr[msglen++] = MSG_EXT_WDTR; msgptr[msglen++] = tp->tinfo.goal.width; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(ccb); printf ("wide msgout: "); ncr_show_msg (&cp->scsi_smsg [msglen-4]); printf (".\n"); }; break; }; /*---------------------------------------------------- ** ** Build the identify message for getcc. 
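**
**	Sketch of the tag allocation above (illustrative only): lasttag
**	cycles through 1..255 and a tag is accepted only when no other
**	nccb on the lun already carries it.  Like the loop above, this
**	assumes at least one of the 255 tags is free.
*/
#if 0	/* illustrative sketch only */
static u_char
next_free_tag_sketch(lcb_p lp)
{
	nccb_p cp2;

	for (;;) {
		lp->lasttag = lp->lasttag % 255 + 1;	/* cycle 1..255 */
		for (cp2 = lp->next_nccb;
		     cp2 && cp2->tag != lp->lasttag;
		     cp2 = cp2->next_nccb)
			;
		if (cp2 == NULL)
			return (lp->lasttag);	/* nobody holds it */
	}
}
#endif
/*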
** **---------------------------------------------------- */ cp->scsi_smsg2 [0] = idmsg; msglen2 = 1; /*---------------------------------------------------- ** ** Build the data descriptors ** **---------------------------------------------------- */ /* XXX JGibbs - Handle other types of I/O */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { segments = ncr_scatter(&cp->phys, (vm_offset_t)csio->data_ptr, (vm_size_t)csio->dxfer_len); if (segments < 0) { ccb->ccb_h.status = CAM_REQ_TOO_BIG; ncr_free_nccb(np, cp); xpt_done(ccb); return; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cp->phys.header.savep = NCB_SCRIPT_PHYS (np, data_in); cp->phys.header.goalp = cp->phys.header.savep +20 +segments*16; } else { /* CAM_DIR_OUT */ cp->phys.header.savep = NCB_SCRIPT_PHYS (np, data_out); cp->phys.header.goalp = cp->phys.header.savep +20 +segments*16; } } else { cp->phys.header.savep = NCB_SCRIPT_PHYS (np, no_data); cp->phys.header.goalp = cp->phys.header.savep; } cp->phys.header.lastp = cp->phys.header.savep; /*---------------------------------------------------- ** ** fill in nccb ** **---------------------------------------------------- ** ** ** physical -> virtual backlink ** Generic SCSI command */ cp->phys.header.cp = cp; /* ** Startqueue */ cp->phys.header.launch.l_paddr = NCB_SCRIPT_PHYS (np, select); cp->phys.header.launch.l_cmd = SCR_JUMP; /* ** select */ cp->phys.select.sel_id = ccb->ccb_h.target_id; cp->phys.select.sel_scntl3 = tp->tinfo.wval; cp->phys.select.sel_sxfer = tp->tinfo.sval; /* ** message */ cp->phys.smsg.addr = CCB_PHYS (cp, scsi_smsg); cp->phys.smsg.size = msglen; cp->phys.smsg2.addr = CCB_PHYS (cp, scsi_smsg2); cp->phys.smsg2.size = msglen2; /* ** command */ - /* XXX JGibbs - Support other command types */ - cp->phys.cmd.addr = vtophys (csio->cdb_io.cdb_bytes); + cp->phys.cmd.addr = vtophys (scsiio_cdb_ptr(csio)); cp->phys.cmd.size = csio->cdb_len; /* ** sense command */ cp->phys.scmd.addr = CCB_PHYS (cp, sensecmd); cp->phys.scmd.size = 6; /* ** patch requested size into sense command */ cp->sensecmd[0] = 0x03; cp->sensecmd[1] = ccb->ccb_h.target_lun << 5; - cp->sensecmd[4] = sizeof(struct scsi_sense_data); cp->sensecmd[4] = csio->sense_len; /* ** sense data */ cp->phys.sense.addr = vtophys (&csio->sense_data); cp->phys.sense.size = csio->sense_len; /* ** status */ cp->actualquirks = QUIRK_NOMSG; cp->host_status = nego ? HS_NEGOTIATE : HS_BUSY; cp->s_status = SCSI_STATUS_ILLEGAL; cp->parity_status = 0; cp->xerr_status = XE_OK; cp->sync_status = tp->tinfo.sval; cp->nego_status = nego; cp->wide_status = tp->tinfo.wval; /*---------------------------------------------------- ** ** Critical region: start this job. ** **---------------------------------------------------- */ /* ** reselect pattern and activate this job. */ cp->jump_nccb.l_cmd = (SCR_JUMP ^ IFFALSE (DATA (cp->tag))); cp->tlimit = time_second + ccb->ccb_h.timeout / 1000 + 2; cp->magic = CCB_MAGIC; /* ** insert into start queue. */ qidx = np->squeueput + 1; if (qidx >= MAX_START) qidx = 0; np->squeue [qidx ] = NCB_SCRIPT_PHYS (np, idle); np->squeue [np->squeueput] = CCB_PHYS (cp, phys); np->squeueput = qidx; if(DEBUG_FLAGS & DEBUG_QUEUE) device_printf(np->dev, "queuepos=%d tryoffset=%d.\n", np->squeueput, (unsigned)(READSCRIPT(startpos[0]) - (NCB_SCRIPTH_PHYS (np, tryloop)))); /* ** Script processor may be waiting for reselect. ** Wake it up. 
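**
**	Sketch of the start queue insertion above (illustrative only).
**	The ordering matters because the script scans the ring
**	concurrently: the new end-of-queue marker is written before the
**	job is published in the previous slot.
*/
#if 0	/* illustrative sketch only */
static void
squeue_insert_sketch(ncb_p np, nccb_p cp)
{
	int qidx = np->squeueput + 1;

	if (qidx >= MAX_START)
		qidx = 0;
	/* terminate the ring at the new slot first ... */
	np->squeue[qidx] = NCB_SCRIPT_PHYS (np, idle);
	/* ... then publish the job where the script will look next */
	np->squeue[np->squeueput] = CCB_PHYS (cp, phys);
	np->squeueput = qidx;
}
#endif
/*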
*/ OUTB (nc_istat, SIGP); break; } case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ case XPT_EN_LUN: /* Enable LUN as a target */ case XPT_TARGET_IO: /* Execute target I/O request */ case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; tcb_p tp; u_int update_type; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; update_type = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) update_type |= NCR_TRANS_GOAL; if (cts->type == CTS_TYPE_USER_SETTINGS) update_type |= NCR_TRANS_USER; tp = &np->target[ccb->ccb_h.target_id]; /* Tag and disc enables */ if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if (update_type & NCR_TRANS_GOAL) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) tp->tinfo.disc_tag |= NCR_CUR_DISCENB; else tp->tinfo.disc_tag &= ~NCR_CUR_DISCENB; } if (update_type & NCR_TRANS_USER) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) tp->tinfo.disc_tag |= NCR_USR_DISCENB; else tp->tinfo.disc_tag &= ~NCR_USR_DISCENB; } } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if (update_type & NCR_TRANS_GOAL) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) tp->tinfo.disc_tag |= NCR_CUR_TAGENB; else tp->tinfo.disc_tag &= ~NCR_CUR_TAGENB; } if (update_type & NCR_TRANS_USER) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) tp->tinfo.disc_tag |= NCR_USR_TAGENB; else tp->tinfo.disc_tag &= ~NCR_USR_TAGENB; } } /* Filter bus width and sync negotiation settings */ if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { if (spi->bus_width > np->maxwide) spi->bus_width = np->maxwide; } if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { if (spi->sync_period != 0 && (spi->sync_period < np->minsync)) spi->sync_period = np->minsync; } if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) { if (spi->sync_offset == 0) spi->sync_period = 0; if (spi->sync_offset > np->maxoffs) spi->sync_offset = np->maxoffs; } } if ((update_type & NCR_TRANS_USER) != 0) { if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) tp->tinfo.user.period = spi->sync_period; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) tp->tinfo.user.offset = spi->sync_offset; if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) tp->tinfo.user.width = spi->bus_width; } if ((update_type & NCR_TRANS_GOAL) != 0) { if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) tp->tinfo.goal.period = spi->sync_period; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) tp->tinfo.goal.offset = spi->sync_offset; if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) tp->tinfo.goal.width = spi->bus_width; } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct ncr_transinfo *tinfo; tcb_p tp = &np->target[ccb->ccb_h.target_id]; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { tinfo = &tp->tinfo.current; if (tp->tinfo.disc_tag & NCR_CUR_DISCENB) 
spi->flags |= CTS_SPI_FLAGS_DISC_ENB; else spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (tp->tinfo.disc_tag & NCR_CUR_TAGENB) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; else scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } else { tinfo = &tp->tinfo.user; if (tp->tinfo.disc_tag & NCR_USR_DISCENB) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; else spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (tp->tinfo.disc_tag & NCR_USR_TAGENB) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; else scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } spi->sync_period = tinfo->period; spi->sync_offset = tinfo->offset; spi->bus_width = tinfo->width; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { /* XXX JGibbs - I'm sure the NCR uses a different strategy, * but it should be able to deal with Adaptec * geometry too. */ cam_calc_geometry(&ccb->ccg, /*extended*/1); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { OUTB (nc_scntl1, CRST); ccb->ccb_h.status = CAM_REQ_CMP; DELAY(10000); /* Wait until our interrupt handler sees it */ xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; if ((np->features & FE_WIDE) != 0) cpi->hba_inquiry |= PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = (np->features & FE_WIDE) ? 15 : 7; cpi->max_lun = MAX_LUN - 1; cpi->initiator_id = np->myaddr; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Symbios", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } /*========================================================== ** ** ** Complete execution of a SCSI command. ** Signal completion to the generic SCSI driver. ** ** **========================================================== */ static void ncr_complete (ncb_p np, nccb_p cp) { union ccb *ccb; tcb_p tp; /* ** Sanity check */ if (!cp || (cp->magic!=CCB_MAGIC) || !cp->ccb) return; cp->magic = 1; cp->tlimit= 0; /* ** No Reselect anymore. */ cp->jump_nccb.l_cmd = (SCR_JUMP); /* ** No starting. */ cp->phys.header.launch.l_paddr= NCB_SCRIPT_PHYS (np, idle); /* ** timestamp */ ncb_profile (np, cp); if (DEBUG_FLAGS & DEBUG_TINY) printf ("CCB=%x STAT=%x/%x\n", (int)(intptr_t)cp & 0xfff, cp->host_status,cp->s_status); ccb = cp->ccb; cp->ccb = NULL; tp = &np->target[ccb->ccb_h.target_id]; /* ** We do not queue more than 1 nccb per target ** with negotiation at any time. If this nccb was ** used for negotiation, clear this info in the tcb. */ if (cp == tp->nego_cp) tp->nego_cp = NULL; /* ** Check for parity errors. */ /* XXX JGibbs - What about reporting them??? */ if (cp->parity_status) { PRINT_ADDR(ccb); printf ("%d parity error(s), fallback.\n", cp->parity_status); /* ** fallback to asynch transfer. */ tp->tinfo.goal.period = 0; tp->tinfo.goal.offset = 0; }; /* ** Check for extended errors. 
*/ if (cp->xerr_status != XE_OK) { PRINT_ADDR(ccb); switch (cp->xerr_status) { case XE_EXTRA_DATA: printf ("extraneous data discarded.\n"); break; case XE_BAD_PHASE: printf ("illegal scsi phase (4/5).\n"); break; default: printf ("extended error %d.\n", cp->xerr_status); break; }; if (cp->host_status==HS_COMPLETE) cp->host_status = HS_FAIL; }; /* ** Check the status. */ if (cp->host_status == HS_COMPLETE) { if (cp->s_status == SCSI_STATUS_OK) { /* ** All went well. */ /* XXX JGibbs - Properly calculate residual */ tp->bytes += ccb->csio.dxfer_len; tp->transfers ++; ccb->ccb_h.status = CAM_REQ_CMP; } else if ((cp->s_status & SCSI_STATUS_SENSE) != 0) { /* * XXX Could be TERMIO too. Should record * original status. */ ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; cp->s_status &= ~SCSI_STATUS_SENSE; if (cp->s_status == SCSI_STATUS_OK) { ccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR; } else { ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; } } else { ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->csio.scsi_status = cp->s_status; } } else if (cp->host_status == HS_SEL_TIMEOUT) { /* ** Device failed selection */ ccb->ccb_h.status = CAM_SEL_TIMEOUT; } else if (cp->host_status == HS_TIMEOUT) { /* ** No response */ ccb->ccb_h.status = CAM_CMD_TIMEOUT; } else if (cp->host_status == HS_STALL) { ccb->ccb_h.status = CAM_REQUEUE_REQ; } else { /* ** Other protocol messes */ PRINT_ADDR(ccb); printf ("COMMAND FAILED (%x %x) @%p.\n", cp->host_status, cp->s_status, cp); ccb->ccb_h.status = CAM_CMD_TIMEOUT; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } /* ** Free this nccb */ ncr_free_nccb (np, cp); /* ** signal completion to generic driver. */ xpt_done (ccb); } /*========================================================== ** ** ** Signal all (or one) control block done. ** ** **========================================================== */ static void ncr_wakeup (ncb_p np, u_long code) { /* ** Starting at the default nccb and following ** the links, complete all jobs with a ** host_status greater than "disconnect". ** ** If the "code" parameter is not zero, ** complete all jobs that are not IDLE. */ nccb_p cp = np->link_nccb; while (cp) { switch (cp->host_status) { case HS_IDLE: break; case HS_DISCONNECT: if(DEBUG_FLAGS & DEBUG_TINY) printf ("D"); /* FALLTHROUGH */ case HS_BUSY: case HS_NEGOTIATE: if (!code) break; cp->host_status = code; /* FALLTHROUGH */ default: ncr_complete (np, cp); break; }; cp = cp -> link_nccb; }; } static void ncr_freeze_devq (ncb_p np, struct cam_path *path) { nccb_p cp; int i; int count; int firstskip; /* ** Starting at the first nccb and following ** the links, complete all jobs that match ** the passed in path and are in the start queue. 
*/
	cp = np->link_nccb;
	count = 0;
	firstskip = 0;
	while (cp) {
		switch (cp->host_status) {
		case HS_BUSY:
		case HS_NEGOTIATE:
			if ((cp->phys.header.launch.l_paddr
			    == NCB_SCRIPT_PHYS (np, select))
			 && (xpt_path_comp(path, cp->ccb->ccb_h.path) >= 0)) {

				/* Mark for removal from the start queue */
				for (i = 1; i < MAX_START; i++) {
					int idx;

					idx = np->squeueput - i;
					if (idx < 0)
						idx = MAX_START + idx;
					if (np->squeue[idx]
					 == CCB_PHYS(cp, phys)) {
						np->squeue[idx] =
						    NCB_SCRIPT_PHYS (np, skip);
						if (i > firstskip)
							firstskip = i;
						break;
					}
				}
				cp->host_status=HS_STALL;
				ncr_complete (np, cp);
				count++;
			}
			break;
		default:
			break;
		}
		cp = cp->link_nccb;
	}

	if (count > 0) {
		int j;
		int bidx;

		/* Compress the start queue */
		j = 0;
		bidx = np->squeueput;
		i = np->squeueput - firstskip;
		if (i < 0)
			i = MAX_START + i;
		for (;;) {
			bidx = i - j;
			if (bidx < 0)
				bidx = MAX_START + bidx;
			if (np->squeue[i] == NCB_SCRIPT_PHYS (np, skip)) {
				j++;
			} else if (j != 0) {

				np->squeue[bidx] = np->squeue[i];
				if (np->squeue[bidx]
				 == NCB_SCRIPT_PHYS(np, idle))
					break;
			}
			i = (i + 1) % MAX_START;
		}
		np->squeueput = bidx;
	}
}

/*==========================================================
**
**
**	Start NCR chip.
**
**
**==========================================================
*/

static void ncr_init(ncb_p np, char * msg, u_long code)
{
	int	i;

	/*
	**	Reset chip.
	*/

	OUTB (nc_istat,  SRST);
	DELAY (1000);
	OUTB (nc_istat, 0);

	/*
	**	Message.
	*/

	if (msg)
		device_printf(np->dev, "restart (%s).\n", msg);

	/*
	**	Clear Start Queue
	*/

	for (i=0;i<MAX_START;i++)
		np->squeue [i] = NCB_SCRIPT_PHYS (np, idle);

	/*
	**	Start at first entry.
	*/

	np->squeueput = 0;
	WRITESCRIPT(startpos[0], NCB_SCRIPTH_PHYS (np, tryloop));
	WRITESCRIPT(start0  [0], SCR_INT ^ IFFALSE (0));

	/*
	**	Wakeup all pending jobs.
	*/

	ncr_wakeup (np, code);

	/*
	**	Init chip.
	*/

	OUTB (nc_istat,  0x00   );	/*  Remove Reset, abort ...	      */
	OUTB (nc_scntl0, 0xca   );	/*  full arb., ena parity, par->ATN   */
	OUTB (nc_scntl1, 0x00	);	/*  odd parity, and remove CRST!!     */
	ncr_selectclock(np, np->rv_scntl3);	/* Select SCSI clock */
	OUTB (nc_scid  , RRE|np->myaddr);  /*  host adapter SCSI address     */
	OUTW (nc_respid, 1ul<<np->myaddr); /*  id to respond to		      */
	OUTB (nc_istat , SIGP	);	/*  Signal Process		      */
	OUTB (nc_dmode , np->rv_dmode);	/* XXX modify burstlen ??? */
	OUTB (nc_dcntl , np->rv_dcntl);
	OUTB (nc_ctest3, np->rv_ctest3);
	OUTB (nc_ctest5, np->rv_ctest5);
	OUTB (nc_ctest4, np->rv_ctest4);/*  enable master parity checking    */
	OUTB (nc_stest2, np->rv_stest2|EXT); /* Extended Sreq/Sack filtering */
	OUTB (nc_stest3, TE     );	/* TolerANT enable */
	OUTB (nc_stime0, 0x0b	);	/* HTH = disabled, STO = 0.1 sec. */

	if (bootverbose >= 2) {
		printf ("\tACTUAL values:SCNTL3:%02x DMODE:%02x  DCNTL:%02x\n",
			np->rv_scntl3, np->rv_dmode, np->rv_dcntl);
		printf ("\t              CTEST3:%02x CTEST4:%02x CTEST5:%02x\n",
			np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
	}

	/*
	**	Enable GPIO0 pin for writing if LED support.
	*/

	if (np->features & FE_LED0) {
		OUTOFFB (nc_gpcntl, 0x01);
	}

	/*
	**	Fill in target structure.
	*/

	for (i=0;i<MAX_TARGET;i++) {
		tcb_p tp = &np->target[i];

		tp->tinfo.sval    = 0;
		tp->tinfo.wval    = np->rv_scntl3;

		tp->tinfo.current.period = 0;
		tp->tinfo.current.offset = 0;
		tp->tinfo.current.width  = MSG_EXT_WDTR_BUS_8_BIT;
	}

	/*
	**	enable ints
	*/

	OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST);
	OUTB (nc_dien , MDPE|BF|ABRT|SSI|SIR|IID);

	/*
	**	Start script processor.
*/

	OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start));

	/*
	 * Notify the XPT of the event
	 */
	if (code == HS_RESET)
		xpt_async(AC_BUS_RESET, np->path, NULL);
}

static void
ncr_poll(struct cam_sim *sim)
{
	ncr_intr_locked(cam_sim_softc(sim));
}

/*==========================================================
**
**	Get clock factor and sync divisor for a given
**	synchronous factor period.
**	Returns the clock factor (in sxfer) and scntl3
**	synchronous divisor field.
**
**==========================================================
*/

static void ncr_getsync(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p)
{
	u_long	clk = np->clock_khz;	/* SCSI clock frequency in kHz	*/
	int	div = np->clock_divn;	/* Number of divisors supported	*/
	u_long	fak;			/* Sync factor in sxfer		*/
	u_long	per;			/* Period in tenths of ns	*/
	u_long	kpc;			/* (per * clk)			*/

	/*
	**	Compute the synchronous period in tenths of nano-seconds
	*/
	if	(sfac <= 10)	per = 250;
	else if	(sfac == 11)	per = 303;
	else if	(sfac == 12)	per = 500;
	else			per = 40 * sfac;

	/*
	**	Look for the greatest clock divisor that allows an
	**	input speed faster than the period.
	*/
	kpc = per * clk;
	while (--div >= 0)
		if (kpc >= (div_10M[div] * 4)) break;

	/*
	**	Calculate the lowest clock factor that allows an output
	**	speed not faster than the period.
	*/
	fak = (kpc - 1) / div_10M[div] + 1;

#if 0	/* You can #if 1 if you think this optimization is useful */

	per = (fak * div_10M[div]) / clk;

	/*
	**	Why not try the immediately lower divisor and choose
	**	the one that allows the fastest output speed?
	**	We don't want the input speed to be much greater than
	**	the output speed.
	*/
	if (div >= 1 && fak < 6) {
		u_long fak2, per2;
		fak2 = (kpc - 1) / div_10M[div-1] + 1;
		per2 = (fak2 * div_10M[div-1]) / clk;
		if (per2 < per && fak2 <= 6) {
			fak = fak2;
			per = per2;
			--div;
		}
	}
#endif

	if (fak < 4) fak = 4;	/* Should never happen, too bad ... */

	/*
	**	Compute and return sync parameters for the ncr
	*/
	*fakp		= fak - 4;
	*scntl3p	= ((div+1) << 4) + (sfac < 25 ? 0x80 : 0);
}

/*==========================================================
**
**	Switch sync mode for current job and its target
**
**==========================================================
*/

static void
ncr_setsync(ncb_p np, nccb_p cp, u_char scntl3, u_char sxfer, u_char period)
{
	union	ccb *ccb;
	struct	ccb_trans_settings neg;
	tcb_p	tp;
	int	div;
	u_int	target = INB (nc_sdid) & 0x0f;
	u_int	period_10ns;

	assert (cp);
	if (!cp) return;

	ccb = cp->ccb;
	assert (ccb);
	if (!ccb) return;
	assert (target == ccb->ccb_h.target_id);

	tp = &np->target[target];

	if (!scntl3 || !(sxfer & 0x1f))
		scntl3 = np->rv_scntl3;
	scntl3 = (scntl3 & 0xf0) | (tp->tinfo.wval & EWS)
	       | (np->rv_scntl3 & 0x07);

	/*
	**	Deduce the value of controller sync period from scntl3.
	**	period is in tenths of nano-seconds.
	*/

	div = ((scntl3 >> 4) & 0x7);
	if ((sxfer & 0x1f) && div)
		period_10ns = (((sxfer>>5)+4)*div_10M[div-1])/np->clock_khz;
	else
		period_10ns = 0;

	tp->tinfo.goal.period	= period;
	tp->tinfo.goal.offset	= sxfer & 0x1f;
	tp->tinfo.current.period= period;
	tp->tinfo.current.offset= sxfer & 0x1f;

	/*
	**	 Stop there if sync parameters are unchanged
	*/
	if (tp->tinfo.sval == sxfer && tp->tinfo.wval == scntl3) return;
	tp->tinfo.sval = sxfer;
	tp->tinfo.wval = scntl3;

	if (sxfer & 0x1f) {
		/*
		**	Disable extended Sreq/Sack filtering
		*/
		if (period_10ns <= 2000) OUTOFFB (nc_stest2, EXT);
	}

	/*
	**	Tell the SCSI layer about the
	**	new transfer parameters.
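	**
	**	Worked example for ncr_getsync() above (illustrative): a
	**	40 MHz part (clk == 40000, clock_divn == 4) asked for
	**	factor 25 wants a 1000 (tenths of ns) period, so
	**	kpc == 1000 * 40000.  The divisor loop stops at div 0
	**	(4 * div_10M[0] == kpc), fak comes out as 4, and the chip
	**	runs synchronous at clk/4 == 10 MHz, i.e. Fast-10.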
*/ memset(&neg, 0, sizeof (neg)); neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; neg.xport_specific.spi.sync_period = period; neg.xport_specific.spi.sync_offset = sxfer & 0x1f; neg.xport_specific.spi.valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg); /* ** set actual value and sync_status */ OUTB (nc_sxfer, sxfer); np->sync_st = sxfer; OUTB (nc_scntl3, scntl3); np->wide_st = scntl3; /* ** patch ALL nccbs of this target. */ for (cp = np->link_nccb; cp; cp = cp->link_nccb) { if (!cp->ccb) continue; if (cp->ccb->ccb_h.target_id != target) continue; cp->sync_status = sxfer; cp->wide_status = scntl3; }; } /*========================================================== ** ** Switch wide mode for current job and its target ** SCSI specs say: a SCSI device that accepts a WDTR ** message shall reset the synchronous agreement to ** asynchronous mode. ** **========================================================== */ static void ncr_setwide (ncb_p np, nccb_p cp, u_char wide, u_char ack) { union ccb *ccb; struct ccb_trans_settings neg; u_int target = INB (nc_sdid) & 0x0f; tcb_p tp; u_char scntl3; u_char sxfer; assert (cp); if (!cp) return; ccb = cp->ccb; assert (ccb); if (!ccb) return; assert (target == ccb->ccb_h.target_id); tp = &np->target[target]; tp->tinfo.current.width = wide; tp->tinfo.goal.width = wide; tp->tinfo.current.period = 0; tp->tinfo.current.offset = 0; scntl3 = (tp->tinfo.wval & (~EWS)) | (wide ? EWS : 0); sxfer = ack ? 0 : tp->tinfo.sval; /* ** Stop there if sync/wide parameters are unchanged */ if (tp->tinfo.sval == sxfer && tp->tinfo.wval == scntl3) return; tp->tinfo.sval = sxfer; tp->tinfo.wval = scntl3; /* Tell the SCSI layer about the new transfer params */ memset(&neg, 0, sizeof (neg)); neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; neg.xport_specific.spi.bus_width = (scntl3 & EWS) ? MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT; neg.xport_specific.spi.sync_period = 0; neg.xport_specific.spi.sync_offset = 0; neg.xport_specific.spi.valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH; xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg); /* ** set actual value and sync_status */ OUTB (nc_sxfer, sxfer); np->sync_st = sxfer; OUTB (nc_scntl3, scntl3); np->wide_st = scntl3; /* ** patch ALL nccbs of this target. */ for (cp = np->link_nccb; cp; cp = cp->link_nccb) { if (!cp->ccb) continue; if (cp->ccb->ccb_h.target_id != target) continue; cp->sync_status = sxfer; cp->wide_status = scntl3; }; } /*========================================================== ** ** ** ncr timeout handler. ** ** **========================================================== ** ** Misused to keep the driver running when ** interrupts are not configured correctly. ** **---------------------------------------------------------- */ static void ncr_timeout (void *arg) { ncb_p np = arg; time_t thistime = time_second; ticks_t step = np->ticks; u_long count = 0; long signed t; nccb_p cp; mtx_assert(&np->lock, MA_OWNED); if (np->lasttime != thistime) { np->lasttime = thistime; /*---------------------------------------------------- ** ** handle ncr chip timeouts ** ** Assumption: ** We have a chance to arbitrate for the ** SCSI bus at least every 10 seconds. 
** **---------------------------------------------------- */ t = thistime - np->heartbeat; if (t<2) np->latetime=0; else np->latetime++; if (np->latetime>2) { /* ** If there are no requests, the script ** processor will sleep on SEL_WAIT_RESEL. ** But we have to check whether it died. ** Let's try to wake it up. */ OUTB (nc_istat, SIGP); }; /*---------------------------------------------------- ** ** handle nccb timeouts ** **---------------------------------------------------- */ for (cp=np->link_nccb; cp; cp=cp->link_nccb) { /* ** look for timed out nccbs. */ if (!cp->host_status) continue; count++; if (cp->tlimit > thistime) continue; /* ** Disable reselect. ** Remove it from startqueue. */ cp->jump_nccb.l_cmd = (SCR_JUMP); if (cp->phys.header.launch.l_paddr == NCB_SCRIPT_PHYS (np, select)) { device_printf(np->dev, "timeout nccb=%p (skip)\n", cp); cp->phys.header.launch.l_paddr = NCB_SCRIPT_PHYS (np, skip); }; switch (cp->host_status) { case HS_BUSY: case HS_NEGOTIATE: /* FALLTHROUGH */ case HS_DISCONNECT: cp->host_status=HS_TIMEOUT; }; cp->tag = 0; /* ** wakeup this nccb. */ ncr_complete (np, cp); }; } callout_reset(&np->timer, step ? step : 1, ncr_timeout, np); if (INB(nc_istat) & (INTF|SIP|DIP)) { /* ** Process pending interrupts. */ if (DEBUG_FLAGS & DEBUG_TINY) printf ("{"); ncr_exception (np); if (DEBUG_FLAGS & DEBUG_TINY) printf ("}"); }; } /*========================================================== ** ** log message for real hard errors ** ** "ncr0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc)." ** " reg: r0 r1 r2 r3 r4 r5 r6 ..... rf." ** ** exception register: ** ds: dstat ** si: sist ** ** SCSI bus lines: ** so: control lines as driven by NCR. ** si: control lines as seen by NCR. ** sd: scsi data lines as seen by NCR. ** ** wide/fastmode: ** sxfer: (see the manual) ** scntl3: (see the manual) ** ** current script command: ** dsp: script address (relative to start of script). ** dbc: first word of script command. ** ** First 16 registers of the chip: ** r0..rf ** **========================================================== */ static void ncr_log_hard_error(ncb_p np, u_short sist, u_char dstat) { u_int32_t dsp; int script_ofs; int script_size; char *script_name; u_char *script_base; int i; dsp = INL (nc_dsp); if (np->p_script < dsp && dsp <= np->p_script + sizeof(struct script)) { script_ofs = dsp - np->p_script; script_size = sizeof(struct script); script_base = (u_char *) np->script; script_name = "script"; } else if (np->p_scripth < dsp && dsp <= np->p_scripth + sizeof(struct scripth)) { script_ofs = dsp - np->p_scripth; script_size = sizeof(struct scripth); script_base = (u_char *) np->scripth; script_name = "scripth"; } else { script_ofs = dsp; script_size = 0; script_base = 0; script_name = "mem"; } device_printf(np->dev, "%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n", (unsigned)INB (nc_sdid)&0x0f, dstat, sist, (unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl), (unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer),(unsigned)INB (nc_scntl3), script_name, script_ofs, (unsigned)INL (nc_dbc)); if (((script_ofs & 3) == 0) && (unsigned)script_ofs < script_size) { device_printf(np->dev, "script cmd = %08x\n", (int)READSCRIPT_OFF(script_base, script_ofs)); } device_printf(np->dev, "regdump:"); for (i=0; i<16;i++) printf (" %02x", (unsigned)INB_OFF(i)); printf (".\n"); } /*========================================================== ** ** ** ncr chip exception handler.
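**
** Each "normal case" test below follows the same idiom, sketched
** here with illustrative placeholder masks: the condition of
** interest must be the ONLY one pending, e.g.
**
**	if ((sist & STO) && !(sist & OTHER_SIST_BITS) &&
**	    !(dstat & OTHER_DSTAT_BITS))
**		ncr_int_sto (np);
**
** so that any combination of conditions falls through to the
** hard error handling at the end.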
** ** **========================================================== */ static void ncr_exception (ncb_p np) { u_char istat, dstat; u_short sist; /* ** interrupt on the fly? */ while ((istat = INB (nc_istat)) & INTF) { if (DEBUG_FLAGS & DEBUG_TINY) printf ("F "); OUTB (nc_istat, INTF); np->profile.num_fly++; ncr_wakeup (np, 0); }; if (!(istat & (SIP|DIP))) { return; } /* ** Steinbach's Guideline for Systems Programming: ** Never test for an error condition you don't know how to handle. */ sist = (istat & SIP) ? INW (nc_sist) : 0; dstat = (istat & DIP) ? INB (nc_dstat) : 0; np->profile.num_int++; if (DEBUG_FLAGS & DEBUG_TINY) printf ("<%d|%x:%x|%x:%x>", INB(nc_scr0), dstat,sist, (unsigned)INL(nc_dsp), (unsigned)INL(nc_dbc)); if ((dstat==DFE) && (sist==PAR)) return; /*========================================================== ** ** First the normal cases. ** **========================================================== */ /*------------------------------------------- ** SCSI reset **------------------------------------------- */ if (sist & RST) { ncr_init (np, bootverbose ? "scsi reset" : NULL, HS_RESET); return; }; /*------------------------------------------- ** selection timeout ** ** IID excluded from dstat mask! ** (chip bug) **------------------------------------------- */ if ((sist & STO) && !(sist & (GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR))) { ncr_int_sto (np); return; }; /*------------------------------------------- ** Phase mismatch. **------------------------------------------- */ if ((sist & MA) && !(sist & (STO|GEN|HTH|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR|IID))) { ncr_int_ma (np, dstat); return; }; /*---------------------------------------- ** move command with length 0 **---------------------------------------- */ if ((dstat & IID) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR)) && ((INL(nc_dbc) & 0xf8000000) == SCR_MOVE_TBL)) { /* ** Target wants more data than available. ** The "no_data" script will do it. */ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, no_data)); return; }; /*------------------------------------------- ** Programmed interrupt **------------------------------------------- */ if ((dstat & SIR) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|IID)) && (INB(nc_dsps) <= SIR_MAX)) { ncr_int_sir (np); return; }; /*======================================== ** log message for real hard errors **======================================== */ ncr_log_hard_error(np, sist, dstat); /*======================================== ** do the register dump **======================================== */ if (time_second - np->regtime > 10) { int i; np->regtime = time_second; for (i=0; i<sizeof(np->regdump); i++) ((volatile char*)&np->regdump)[i] = INB_OFF(i); np->regdump.nc_dstat = dstat; np->regdump.nc_sist = sist; }; /*---------------------------------------- ** clean up the dma fifo **---------------------------------------- */ if ( (INB(nc_sstat0) & (ILF|ORF|OLF) ) || (INB(nc_sstat1) & (FF3210) ) || (INB(nc_sstat2) & (ILF1|ORF1|OLF1)) || /* wide ..
*/ !(dstat & DFE)) { device_printf(np->dev, "have to clear fifos.\n"); OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ } /*---------------------------------------- ** handshake timeout **---------------------------------------- */ if (sist & HTH) { device_printf(np->dev, "handshake timeout\n"); OUTB (nc_scntl1, CRST); DELAY (1000); OUTB (nc_scntl1, 0x00); OUTB (nc_scr0, HS_FAIL); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, cleanup)); return; } /*---------------------------------------- ** unexpected disconnect **---------------------------------------- */ if ((sist & UDC) && !(sist & (STO|GEN|HTH|MA|SGE|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR|IID))) { OUTB (nc_scr0, HS_UNEXPECTED); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, cleanup)); return; }; /*---------------------------------------- ** cannot disconnect **---------------------------------------- */ if ((dstat & IID) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR)) && ((INL(nc_dbc) & 0xf8000000) == SCR_WAIT_DISC)) { /* ** Unexpected data cycle while waiting for disconnect. */ if (INB(nc_sstat2) & LDSC) { /* ** It's an early reconnect. ** Let's continue ... */ OUTB (nc_dcntl, np->rv_dcntl | STD); /* ** info message */ device_printf(np->dev, "INFO: LDSC while IID.\n"); return; }; device_printf(np->dev, "target %d doesn't release the bus.\n", INB (nc_sdid)&0x0f); /* ** return without restarting the NCR. ** timeout will do the real work. */ return; }; /*---------------------------------------- ** single step **---------------------------------------- */ if ((dstat & SSI) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR|IID))) { OUTB (nc_dcntl, np->rv_dcntl | STD); return; }; /* ** @RECOVER@ HTH, SGE, ABRT. ** ** We should try to recover from these interrupts. ** They may occur if there are problems with synch transfers, or ** if targets are switched on or off while the driver is running. */ if (sist & SGE) { /* clear scsi offsets */ OUTB (nc_ctest3, np->rv_ctest3 | CLF); } /* ** Freeze controller to be able to read the messages. */ if (DEBUG_FLAGS & DEBUG_FREEZE) { int i; unsigned char val; for (i=0; i<0x60; i++) { switch (i%16) { case 0: device_printf(np->dev, "reg[%d0]: ", i / 16); break; case 4: case 8: case 12: printf (" "); break; }; val = bus_read_1(np->reg_res, i); printf (" %x%x", val/16, val%16); if (i%16==15) printf (".\n"); }; callout_stop(&np->timer); device_printf(np->dev, "halted!\n"); /* ** don't restart controller ... */ OUTB (nc_istat, SRST); return; }; #ifdef NCR_FREEZE /* ** Freeze system to be able to read the messages. */ printf ("ncr: fatal error: system halted - press reset to reboot ..."); for (;;); #endif /* ** sorry, have to kill ALL jobs ... */ ncr_init (np, "fatal error", HS_FAIL); } /*========================================================== ** ** ncr chip exception handler for selection timeout ** **========================================================== ** ** There seems to be a bug in the 53c810. ** Although a STO-Interrupt is pending, ** it continues executing script commands. ** But it will fail and interrupt (IID) on ** the next instruction where it's looking ** for a valid phase. ** **---------------------------------------------------------- */ static void ncr_int_sto (ncb_p np) { u_long dsa, scratcha, diff; nccb_p cp; if (DEBUG_FLAGS & DEBUG_TINY) printf ("T"); /* ** look for nccb and set the status. 
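**
** The lookup uses the driver's usual convention: nc_dsa holds the
** physical address of the active nccb, so the wakeup chain is
** walked until CCB_PHYS (cp, phys) matches. The start queue repair
** that follows relies on tryloop entries being 20 bytes (five
** script words) apart, hence the (diff % 20) sanity check.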
*/ dsa = INL (nc_dsa); cp = np->link_nccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_nccb; if (cp) { cp->host_status = HS_SEL_TIMEOUT; ncr_complete (np, cp); }; /* ** repair start queue */ scratcha = INL (nc_scratcha); diff = scratcha - NCB_SCRIPTH_PHYS (np, tryloop); /* assert ((diff <= MAX_START * 20) && !(diff % 20));*/ if ((diff <= MAX_START * 20) && !(diff % 20)) { WRITESCRIPT(startpos[0], scratcha); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start)); return; }; ncr_init (np, "selection timeout", HS_FAIL); } /*========================================================== ** ** ** ncr chip exception handler for phase errors. ** ** **========================================================== ** ** We have to construct a new transfer descriptor, ** to transfer the rest of the current block. ** **---------------------------------------------------------- */ static void ncr_int_ma (ncb_p np, u_char dstat) { u_int32_t dbc; u_int32_t rest; u_int32_t dsa; u_int32_t dsp; u_int32_t nxtdsp; volatile void *vdsp_base; size_t vdsp_off; u_int32_t oadr, olen; u_int32_t *tblp, *newcmd; u_char cmd, sbcl, ss0, ss2, ctest5; u_short delta; nccb_p cp; dsp = INL (nc_dsp); dsa = INL (nc_dsa); dbc = INL (nc_dbc); ss0 = INB (nc_sstat0); ss2 = INB (nc_sstat2); sbcl= INB (nc_sbcl); cmd = dbc >> 24; rest= dbc & 0xffffff; ctest5 = (np->rv_ctest5 & DFS) ? INB (nc_ctest5) : 0; if (ctest5 & DFS) delta=(((ctest5<<8) | (INB (nc_dfifo) & 0xff)) - rest) & 0x3ff; else delta=(INB (nc_dfifo) - rest) & 0x7f; /* ** The data in the dma fifo has not been transferred to ** the target -> add the amount to the rest ** and clear the data. ** Check the sstat2 register in case of wide transfer. */ if (!(dstat & DFE)) rest += delta; if (ss0 & OLF) rest++; if (ss0 & ORF) rest++; if (INB(nc_scntl3) & EWS) { if (ss2 & OLF1) rest++; if (ss2 & ORF1) rest++; }; OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ /* ** locate matching cp */ cp = np->link_nccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_nccb; if (!cp) { device_printf(np->dev, "SCSI phase error fixup: CCB already dequeued (%p)\n", (void *)np->header.cp); return; } if (cp != np->header.cp) { device_printf(np->dev, "SCSI phase error fixup: CCB address mismatch " "(%p != %p) np->nccb = %p\n", (void *)cp, (void *)np->header.cp, (void *)np->link_nccb); /* return;*/ } /* ** find the interrupted script command, ** and the address at which to continue. */ if (dsp == vtophys (&cp->patch[2])) { vdsp_base = cp; vdsp_off = offsetof(struct nccb, patch[0]); nxtdsp = READSCRIPT_OFF(vdsp_base, vdsp_off + 3*4); } else if (dsp == vtophys (&cp->patch[6])) { vdsp_base = cp; vdsp_off = offsetof(struct nccb, patch[4]); nxtdsp = READSCRIPT_OFF(vdsp_base, vdsp_off + 3*4); } else if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) { vdsp_base = np->script; vdsp_off = dsp - np->p_script - 8; nxtdsp = dsp; } else { vdsp_base = np->scripth; vdsp_off = dsp - np->p_scripth - 8; nxtdsp = dsp; }; /* ** log the information */ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) { printf ("P%x%x ",cmd&7, sbcl&7); printf ("RL=%d D=%d SS0=%x ", (unsigned) rest, (unsigned) delta, ss0); }; if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("\nCP=%p CP2=%p DSP=%x NXT=%x VDSP=%p CMD=%x ", cp, np->header.cp, dsp, nxtdsp, (volatile char*)vdsp_base+vdsp_off, cmd); }; /* ** get the old start address and length.
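**
** Worked example (numbers illustrative): if the interrupted MOVE
** described olen = 4096 bytes at oadr and rest = 512 bytes were
** still outstanding, the patch built below amounts to
**
**	newcmd[0] = ((cmd & 0x0f) << 24) | 512;	/* move the residue */
**	newcmd[1] = oadr + 3584;		/* where it stopped */
**	newcmd[2] = SCR_JUMP;			/* then resume the  */
**	newcmd[3] = nxtdsp;			/* original script  */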
*/ oadr = READSCRIPT_OFF(vdsp_base, vdsp_off + 1*4); if (cmd & 0x10) { /* Table indirect */ tblp = (u_int32_t *) ((char*) &cp->phys + oadr); olen = tblp[0]; oadr = tblp[1]; } else { tblp = (u_int32_t *) 0; olen = READSCRIPT_OFF(vdsp_base, vdsp_off) & 0xffffff; }; if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("OCMD=%x\nTBLP=%p OLEN=%lx OADR=%lx\n", (unsigned) (READSCRIPT_OFF(vdsp_base, vdsp_off) >> 24), (void *) tblp, (u_long) olen, (u_long) oadr); }; /* ** if the old phase was not a data phase, leave here. */ if (cmd != (READSCRIPT_OFF(vdsp_base, vdsp_off) >> 24)) { PRINT_ADDR(cp->ccb); printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n", (unsigned)cmd, (unsigned)READSCRIPT_OFF(vdsp_base, vdsp_off) >> 24); return; } if (cmd & 0x06) { PRINT_ADDR(cp->ccb); printf ("phase change %x-%x %d@%08x resid=%d.\n", cmd&7, sbcl&7, (unsigned)olen, (unsigned)oadr, (unsigned)rest); OUTB (nc_dcntl, np->rv_dcntl | STD); return; }; /* ** choose the correct patch area. ** if savep points to one, choose the other. */ newcmd = cp->patch; if (cp->phys.header.savep == vtophys (newcmd)) newcmd+=4; /* ** fill in the commands */ newcmd[0] = ((cmd & 0x0f) << 24) | rest; newcmd[1] = oadr + olen - rest; newcmd[2] = SCR_JUMP; newcmd[3] = nxtdsp; if (DEBUG_FLAGS & DEBUG_PHASE) { PRINT_ADDR(cp->ccb); printf ("newcmd[%d] %x %x %x %x.\n", (int)(newcmd - cp->patch), (unsigned)newcmd[0], (unsigned)newcmd[1], (unsigned)newcmd[2], (unsigned)newcmd[3]); } /* ** fake the return address (to the patch), ** and restart the script processor at the dispatcher. */ np->profile.num_break++; OUTL (nc_temp, vtophys (newcmd)); if ((cmd & 7) == 0) OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, dispatch)); else OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, checkatn)); } /*========================================================== ** ** ** ncr chip exception handler for programmed interrupts. ** ** **========================================================== */ static int ncr_show_msg (u_char * msg) { u_char i; printf ("%x",*msg); if (*msg==MSG_EXTENDED) { for (i=1;i<8;i++) { if (i-1>msg[1]) break; printf ("-%x",msg[i]); }; return (i+1); } else if ((*msg & 0xf0) == 0x20) { printf ("-%x",msg[1]); return (2); }; return (1); } static void ncr_int_sir (ncb_p np) { u_char scntl3; u_char chg, ofs, per, fak, wide; u_char num = INB (nc_dsps); nccb_p cp=0; u_long dsa; u_int target = INB (nc_sdid) & 0x0f; tcb_p tp = &np->target[target]; int i; if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num); switch (num) { case SIR_SENSE_RESTART: case SIR_STALL_RESTART: break; default: /* ** lookup the nccb */ dsa = INL (nc_dsa); cp = np->link_nccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_nccb; assert (cp); if (!cp) goto out; assert (cp == np->header.cp); if (cp != np->header.cp) goto out; } switch (num) { /*-------------------------------------------------------------------- ** ** Processing of interrupted getcc selects ** **-------------------------------------------------------------------- */ case SIR_SENSE_RESTART: /*------------------------------------------ ** Script processor is idle.
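** (A command whose sense fetch was interrupted is parked in
** tp->hold_cp with host_status HS_BUSY and s_status CHECK
** CONDITION; the scan below restarts the first such job found.)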
** Look for interrupted "check cond" **------------------------------------------ */ if (DEBUG_FLAGS & DEBUG_RESTART) device_printf(np->dev, "int#%d", num); cp = (nccb_p) 0; for (i=0; i<MAX_TARGET; i++) { tp = &np->target[i]; if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+"); cp = tp->hold_cp; if (!cp) continue; if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+"); if ((cp->host_status==HS_BUSY) && (cp->s_status==SCSI_STATUS_CHECK_COND)) break; if (DEBUG_FLAGS & DEBUG_RESTART) printf ("- (remove)"); tp->hold_cp = cp = (nccb_p) 0; }; if (cp) { if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+ restart job ..\n"); OUTL (nc_dsa, CCB_PHYS (cp, phys)); OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, getcc)); return; }; /* ** no job, resume normal processing */ if (DEBUG_FLAGS & DEBUG_RESTART) printf (" -- remove trap\n"); WRITESCRIPT(start0[0], SCR_INT ^ IFFALSE (0)); break; case SIR_SENSE_FAILED: /*------------------------------------------- ** While trying to select for ** getting the condition code, ** a target reselected us. **------------------------------------------- */ if (DEBUG_FLAGS & DEBUG_RESTART) { PRINT_ADDR(cp->ccb); printf ("in getcc reselect by t%d.\n", INB(nc_ssid) & 0x0f); } /* ** Mark this job */ cp->host_status = HS_BUSY; cp->s_status = SCSI_STATUS_CHECK_COND; np->target[cp->ccb->ccb_h.target_id].hold_cp = cp; /* ** And patch code to restart it. */ WRITESCRIPT(start0[0], SCR_INT); break; /*----------------------------------------------------------------------------- ** ** What you always wanted to know about transfer mode negotiation ... ** ** We try to negotiate sync and wide transfer only after ** a successful inquiry command. We look at byte 7 of the ** inquiry data to determine the capabilities of the target. ** ** When we try to negotiate, we append the negotiation message ** to the identify and (maybe) simple tag message. ** The host status field is set to HS_NEGOTIATE to mark this ** situation. ** ** If the target doesn't answer this message immediately ** (as required by the standard), the SIR_NEGO_FAIL interrupt ** will be raised eventually. ** The handler removes the HS_NEGOTIATE status, and sets the ** negotiated value to the default (async / nowide). ** ** If we receive a matching answer immediately, we check it ** for validity, and set the values. ** ** If we receive a Reject message immediately, we assume the ** negotiation has failed, and fall back to standard values. ** ** If we receive a negotiation message while not in HS_NEGOTIATE ** state, it's a target-initiated negotiation. We prepare a ** (hopefully) valid answer, set our parameters, and send back ** this answer to the target. ** ** If the target doesn't fetch the answer (no message out phase), ** we assume the negotiation has failed, and fall back to default ** settings. ** ** When we set the values, we adjust them in all nccbs belonging ** to this target, in the controller's register, and in the "phys" ** field of the controller's struct ncb. ** ** Possible cases: hs sir msg_in value send goto ** We try to negotiate: ** -> target doesn't msgin NEG FAIL noop defa. - dispatch ** -> target rejected our msg NEG FAIL reject defa. - dispatch ** -> target answered (ok) NEG SYNC sdtr set - clrack ** -> target answered (!ok) NEG SYNC sdtr defa. REJ--->msg_bad ** -> target answered (ok) NEG WIDE wdtr set - clrack ** -> target answered (!ok) NEG WIDE wdtr defa. REJ--->msg_bad ** -> any other msgin NEG FAIL noop defa.
- dispatch ** ** Target tries to negotiate: ** -> incoming message --- SYNC sdtr set SDTR - ** -> incoming message --- WIDE wdtr set WDTR - ** We sent our answer: ** -> target doesn't msgout --- PROTO ? defa. - dispatch ** **----------------------------------------------------------------------------- */ case SIR_NEGO_FAILED: /*------------------------------------------------------- ** ** Negotiation failed. ** Target doesn't send an answer message, ** or target rejected our message. ** ** Remove negotiation request. ** **------------------------------------------------------- */ OUTB (HS_PRT, HS_BUSY); /* FALLTHROUGH */ case SIR_NEGO_PROTO: /*------------------------------------------------------- ** ** Negotiation failed. ** Target doesn't fetch the answer message. ** **------------------------------------------------------- */ if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("negotiation failed sir=%x status=%x.\n", num, cp->nego_status); }; /* ** any error in negotiation: ** fall back to default mode. */ switch (cp->nego_status) { case NS_SYNC: ncr_setsync (np, cp, 0, 0xe0, 0); break; case NS_WIDE: ncr_setwide (np, cp, 0, 0); break; }; np->msgin [0] = MSG_NOOP; np->msgout[0] = MSG_NOOP; cp->nego_status = 0; OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, dispatch)); break; case SIR_NEGO_SYNC: /* ** Synchronous request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("sync msgin: "); (void) ncr_show_msg (np->msgin); printf (".\n"); }; /* ** get requested values. */ chg = 0; per = np->msgin[3]; ofs = np->msgin[4]; if (ofs==0) per=255; /* ** check values against driver limits. */ if (per < np->minsync) {chg = 1; per = np->minsync;} if (per < tp->tinfo.user.period) {chg = 1; per = tp->tinfo.user.period;} if (ofs > tp->tinfo.user.offset) {chg = 1; ofs = tp->tinfo.user.offset;} /* ** Check against controller limits. */ fak = 7; scntl3 = 0; if (ofs != 0) { ncr_getsync(np, per, &fak, &scntl3); if (fak > 7) { chg = 1; ofs = 0; } } if (ofs == 0) { fak = 7; per = 0; scntl3 = 0; } if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("sync: per=%d scntl3=0x%x ofs=%d fak=%d chg=%d.\n", per, scntl3, ofs, fak, chg); } if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); switch (cp->nego_status) { case NS_SYNC: /* ** This was an answer message */ if (chg) { /* ** Answer wasn't acceptable. */ ncr_setsync (np, cp, 0, 0xe0, 0); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad)); } else { /* ** Answer is ok. */ ncr_setsync (np,cp,scntl3,(fak<<5)|ofs, per); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack)); }; return; case NS_WIDE: ncr_setwide (np, cp, 0, 0); break; }; }; /* ** It was a request. Set value and ** prepare an answer message */ ncr_setsync (np, cp, scntl3, (fak<<5)|ofs, per); np->msgout[0] = MSG_EXTENDED; np->msgout[1] = 3; np->msgout[2] = MSG_EXT_SDTR; np->msgout[3] = per; np->msgout[4] = ofs; cp->nego_status = NS_SYNC; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("sync msgout: "); (void) ncr_show_msg (np->msgout); printf (".\n"); } if (!ofs) { OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad)); return; } np->msgin [0] = MSG_NOOP; break; case SIR_NEGO_WIDE: /* ** Wide request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("wide msgin: "); (void) ncr_show_msg (np->msgin); printf (".\n"); }; /* ** get requested values. */ chg = 0; wide = np->msgin[3]; /* ** check values against driver limits. 
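**
** For example (illustrative): if the target requests a 16 bit
** transfer (wide = 1) but the user limit is 8 bit
** (tinfo.user.width = 0), the clamp below sets chg = 1 and
** wide = 0; a target-initiated WDTR is then answered with an
** 8 bit agreement, while an unacceptable answer to our own
** request ends in msg_bad.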
*/ if (wide > tp->tinfo.user.width) {chg = 1; wide = tp->tinfo.user.width;} if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("wide: wide=%d chg=%d.\n", wide, chg); } if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); switch (cp->nego_status) { case NS_WIDE: /* ** This was an answer message */ if (chg) { /* ** Answer wasn't acceptable. */ ncr_setwide (np, cp, 0, 1); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad)); } else { /* ** Answer is ok. */ ncr_setwide (np, cp, wide, 1); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack)); }; return; case NS_SYNC: ncr_setsync (np, cp, 0, 0xe0, 0); break; }; }; /* ** It was a request, set value and ** prepare an answer message */ ncr_setwide (np, cp, wide, 1); np->msgout[0] = MSG_EXTENDED; np->msgout[1] = 2; np->msgout[2] = MSG_EXT_WDTR; np->msgout[3] = wide; np->msgin [0] = MSG_NOOP; cp->nego_status = NS_WIDE; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("wide msgout: "); (void) ncr_show_msg (np->msgout); printf (".\n"); } break; /*-------------------------------------------------------------------- ** ** Processing of special messages ** **-------------------------------------------------------------------- */ case SIR_REJECT_RECEIVED: /*----------------------------------------------- ** ** We received a MSG_MESSAGE_REJECT message. ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_MESSAGE_REJECT received (%x:%x).\n", (unsigned)np->lastmsg, np->msgout[0]); break; case SIR_REJECT_SENT: /*----------------------------------------------- ** ** We received an unknown message ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_MESSAGE_REJECT sent for "); (void) ncr_show_msg (np->msgin); printf (".\n"); break; /*-------------------------------------------------------------------- ** ** Processing of special messages ** **-------------------------------------------------------------------- */ case SIR_IGN_RESIDUE: /*----------------------------------------------- ** ** We received an IGNORE RESIDUE message, ** which couldn't be handled by the script. ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_IGN_WIDE_RESIDUE received, but not yet implemented.\n"); break; case SIR_MISSING_SAVE: /*----------------------------------------------- ** ** We received a DISCONNECT message, ** but the data pointer hadn't been saved beforehand. ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_DISCONNECT received, but datapointer not saved:\n" "\tdata=%x save=%x goal=%x.\n", (unsigned) INL (nc_temp), (unsigned) np->header.savep, (unsigned) np->header.goalp); break; /*-------------------------------------------------------------------- ** ** Processing of a "SCSI_STATUS_QUEUE_FULL" status. ** ** XXX JGibbs - We should do the same thing for BUSY status. ** ** The current command has been rejected, ** because there are too many in the command queue. ** We have started too many commands for that target. ** **-------------------------------------------------------------------- */ case SIR_STALL_QUEUE: cp->xerr_status = XE_OK; cp->host_status = HS_COMPLETE; cp->s_status = SCSI_STATUS_QUEUE_FULL; ncr_freeze_devq(np, cp->ccb->ccb_h.path); ncr_complete(np, cp); /* FALLTHROUGH */ case SIR_STALL_RESTART: /*----------------------------------------------- ** ** Enable selecting again, ** if NO disconnected jobs. ** **----------------------------------------------- */ /* ** Look for a disconnected job.
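** (Illustrative sequence: after a QUEUE FULL the device queue is
** frozen and the command completed above; selections stay blocked
** while any job is still disconnected, and only when the scan
** below finds none is the SCR_INT trap in start1 removed again.)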
*/ cp = np->link_nccb; while (cp && cp->host_status != HS_DISCONNECT) cp = cp->link_nccb; /* ** if there is one, ... */ if (cp) { /* ** wait for reselection */ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, reselect)); return; }; /* ** else remove the interrupt. */ device_printf(np->dev, "queue empty.\n"); WRITESCRIPT(start1[0], SCR_INT ^ IFFALSE (0)); break; }; out: OUTB (nc_dcntl, np->rv_dcntl | STD); } /*========================================================== ** ** ** Acquire a control block ** ** **========================================================== */ static nccb_p ncr_get_nccb (ncb_p np, u_long target, u_long lun) { lcb_p lp; nccb_p cp = NULL; /* ** Lun structure available? */ lp = np->target[target].lp[lun]; if (lp) { cp = lp->next_nccb; /* ** Look for free CCB */ while (cp && cp->magic) { cp = cp->next_nccb; } } /* ** if nothing available, create one. */ if (cp == NULL) cp = ncr_alloc_nccb(np, target, lun); if (cp != NULL) { if (cp->magic) { device_printf(np->dev, "Bogus free cp found\n"); return (NULL); } cp->magic = 1; } return (cp); } /*========================================================== ** ** ** Release one control block ** ** **========================================================== */ static void ncr_free_nccb (ncb_p np, nccb_p cp) { /* ** sanity */ assert (cp != NULL); cp->host_status = HS_IDLE; cp->magic = 0; } /*========================================================== ** ** ** Allocation of resources for Targets/Luns/Tags. ** ** **========================================================== */ static nccb_p ncr_alloc_nccb (ncb_p np, u_long target, u_long lun) { tcb_p tp; lcb_p lp; nccb_p cp; assert (np != NULL); if (target>=MAX_TARGET) return(NULL); if (lun >=MAX_LUN ) return(NULL); tp=&np->target[target]; if (!tp->jump_tcb.l_cmd) { /* ** initialize it. */ tp->jump_tcb.l_cmd = (SCR_JUMP^IFFALSE (DATA (0x80 + target))); tp->jump_tcb.l_paddr = np->jump_tcb.l_paddr; tp->getscr[0] = (np->features & FE_PFEN)? SCR_COPY(1) : SCR_COPY_F(1); tp->getscr[1] = vtophys (&tp->tinfo.sval); tp->getscr[2] = rman_get_start(np->reg_res) + offsetof (struct ncr_reg, nc_sxfer); tp->getscr[3] = (np->features & FE_PFEN)?
SCR_COPY(1) : SCR_COPY_F(1); tp->getscr[4] = vtophys (&tp->tinfo.wval); tp->getscr[5] = rman_get_start(np->reg_res) + offsetof (struct ncr_reg, nc_scntl3); assert (((offsetof(struct ncr_reg, nc_sxfer) ^ (offsetof(struct tcb ,tinfo) + offsetof(struct ncr_target_tinfo, sval))) & 3) == 0); assert (((offsetof(struct ncr_reg, nc_scntl3) ^ (offsetof(struct tcb, tinfo) + offsetof(struct ncr_target_tinfo, wval))) &3) == 0); tp->call_lun.l_cmd = (SCR_CALL); tp->call_lun.l_paddr = NCB_SCRIPT_PHYS (np, resel_lun); tp->jump_lcb.l_cmd = (SCR_JUMP); tp->jump_lcb.l_paddr = NCB_SCRIPTH_PHYS (np, abort); np->jump_tcb.l_paddr = vtophys (&tp->jump_tcb); } /* ** Logical unit control block */ lp = tp->lp[lun]; if (!lp) { /* ** Allocate an lcb */ lp = (lcb_p) malloc (sizeof (struct lcb), M_DEVBUF, M_NOWAIT | M_ZERO); if (!lp) return(NULL); /* ** Initialize it */ lp->jump_lcb.l_cmd = (SCR_JUMP ^ IFFALSE (DATA (lun))); lp->jump_lcb.l_paddr = tp->jump_lcb.l_paddr; lp->call_tag.l_cmd = (SCR_CALL); lp->call_tag.l_paddr = NCB_SCRIPT_PHYS (np, resel_tag); lp->jump_nccb.l_cmd = (SCR_JUMP); lp->jump_nccb.l_paddr = NCB_SCRIPTH_PHYS (np, aborttag); lp->actlink = 1; /* ** Chain into LUN list */ tp->jump_lcb.l_paddr = vtophys (&lp->jump_lcb); tp->lp[lun] = lp; } /* ** Allocate an nccb */ cp = (nccb_p) malloc (sizeof (struct nccb), M_DEVBUF, M_NOWAIT|M_ZERO); if (!cp) return (NULL); if (DEBUG_FLAGS & DEBUG_ALLOC) { printf ("new nccb @%p.\n", cp); } /* ** Fill in physical addresses */ cp->p_nccb = vtophys (cp); /* ** Chain into reselect list */ cp->jump_nccb.l_cmd = SCR_JUMP; cp->jump_nccb.l_paddr = lp->jump_nccb.l_paddr; lp->jump_nccb.l_paddr = CCB_PHYS (cp, jump_nccb); cp->call_tmp.l_cmd = SCR_CALL; cp->call_tmp.l_paddr = NCB_SCRIPT_PHYS (np, resel_tmp); /* ** Chain into wakeup list */ cp->link_nccb = np->link_nccb; np->link_nccb = cp; /* ** Chain into CCB list */ cp->next_nccb = lp->next_nccb; lp->next_nccb = cp; return (cp); } /*========================================================== ** ** ** Build Scatter Gather Block ** ** **========================================================== ** ** The transfer area may be scattered among ** several non-adjacent physical pages. ** ** We may use MAX_SCATTER blocks. ** **---------------------------------------------------------- */ static int ncr_scatter (struct dsb* phys, vm_offset_t vaddr, vm_size_t datalen) { u_long paddr, pnext; u_short segment = 0; u_long segsize, segaddr; u_long size, csize = 0; u_long chunk = MAX_SIZE; int free; bzero (&phys->data, sizeof (phys->data)); if (!datalen) return (0); paddr = vtophys (vaddr); /* ** insert extra break points at a distance of chunk. ** We try to reduce the number of interrupts caused ** by unexpected phase changes due to disconnects. ** A typical hard disk may disconnect before ANY block. ** If we wanted to avoid unexpected phase changes at all, ** we would have to use a break point every 512 bytes. ** Of course the number of scatter/gather blocks is ** limited. */ free = MAX_SCATTER - 1; if (vaddr & PAGE_MASK) free -= datalen / PAGE_SIZE; if (free>1) while ((chunk * free >= 2 * datalen) && (chunk>=1024)) chunk /= 2; if(DEBUG_FLAGS & DEBUG_SCATTER) printf("ncr?:\tscattering virtual=%p size=%d chunk=%d.\n", (void *) vaddr, (unsigned) datalen, (unsigned) chunk); /* ** Build data descriptors.
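**
** Worked example (sizes illustrative): for datalen = 8 KB with
** free = 16 spare segments, the halving loop above stops at
** chunk = 512, so the transfer is cut into sixteen 512 byte
** descriptors and an unexpected disconnect costs at most one
** phase mismatch fixup per 512 byte boundary.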
*/ while (datalen && (segment < MAX_SCATTER)) { /* ** this segment is empty */ segsize = 0; segaddr = paddr; pnext = paddr; if (!csize) csize = chunk; while ((datalen) && (paddr == pnext) && (csize)) { /* ** continue this segment */ pnext = (paddr & (~PAGE_MASK)) + PAGE_SIZE; /* ** Compute max size */ size = pnext - paddr; /* page size */ if (size > datalen) size = datalen; /* data size */ if (size > csize ) size = csize ; /* chunksize */ segsize += size; vaddr += size; csize -= size; datalen -= size; paddr = vtophys (vaddr); }; if(DEBUG_FLAGS & DEBUG_SCATTER) printf ("\tseg #%d addr=%x size=%d (rest=%d).\n", segment, (unsigned) segaddr, (unsigned) segsize, (unsigned) datalen); phys->data[segment].addr = segaddr; phys->data[segment].size = segsize; segment++; } if (datalen) { printf("ncr?: scatter/gather failed (residue=%d).\n", (unsigned) datalen); return (-1); }; return (segment); } /*========================================================== ** ** ** Test the pci bus snoop logic :-( ** ** Has to be called with interrupts disabled. ** ** **========================================================== */ #ifndef NCR_IOMAPPED static int ncr_regtest (struct ncb* np) { register volatile u_int32_t data; /* ** ncr registers may NOT be cached. ** write 0xffffffff to a read-only register area, ** and try to read it back. */ data = 0xffffffff; OUTL_OFF(offsetof(struct ncr_reg, nc_dstat), data); data = INL_OFF(offsetof(struct ncr_reg, nc_dstat)); #if 1 if (data == 0xffffffff) { #else if ((data & 0xe2f0fffd) != 0x02000080) { #endif printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n", (unsigned) data); return (0x10); }; return (0); } #endif static int ncr_snooptest (struct ncb* np) { u_int32_t ncr_rd, ncr_wr, ncr_bk, host_rd, host_wr, pc; int i, err=0; #ifndef NCR_IOMAPPED err |= ncr_regtest (np); if (err) return (err); #endif /* ** init */ pc = NCB_SCRIPTH_PHYS (np, snooptest); host_wr = 1; ncr_wr = 2; /* ** Set memory and register. */ ncr_cache = host_wr; OUTL (nc_temp, ncr_wr); /* ** Start script (exchange values) */ OUTL (nc_dsp, pc); /* ** Wait 'til done (with timeout) */ for (i=0; i<NCR_SNOOP_TIMEOUT; i++) if (INB(nc_istat) & (INTF|SIP|DIP)) break; /* ** Save termination position. */ pc = INL (nc_dsp); /* ** Read memory and register. */ host_rd = ncr_cache; ncr_rd = INL (nc_scratcha); ncr_bk = INL (nc_temp); /* ** check for timeout */ if (i>=NCR_SNOOP_TIMEOUT) { printf ("CACHE TEST FAILED: timeout.\n"); return (0x20); }; /* ** Check termination position. */ if (pc != NCB_SCRIPTH_PHYS (np, snoopend)+8) { printf ("CACHE TEST FAILED: script execution failed.\n"); printf ("start=%08lx, pc=%08lx, end=%08lx\n", (u_long) NCB_SCRIPTH_PHYS (np, snooptest), (u_long) pc, (u_long) NCB_SCRIPTH_PHYS (np, snoopend) +8); return (0x40); }; /* ** Show results. */ if (host_wr != ncr_rd) { printf ("CACHE TEST FAILED: host wrote %d, ncr read %d.\n", (int) host_wr, (int) ncr_rd); err |= 1; }; if (host_rd != ncr_wr) { printf ("CACHE TEST FAILED: ncr wrote %d, host read %d.\n", (int) ncr_wr, (int) host_rd); err |= 2; }; if (ncr_bk != ncr_wr) { printf ("CACHE TEST FAILED: ncr wrote %d, read back %d.\n", (int) ncr_wr, (int) ncr_bk); err |= 4; }; return (err); } /*========================================================== ** ** ** Profiling the driver's and the targets' performance. ** ** **========================================================== */ /* ** Compute the difference in milliseconds.
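** For instance (illustrative), with hz = 1000 and tick stamps
** *from = 100 and *to = 350, the helper below yields
** (350 - 100) * 1000 / 1000 = 250 ms.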
**/ static int ncr_delta (int *from, int *to) { if (!*from) return (-1); if (!*to) return (-2); return ((*to - *from) * 1000 / hz); } #define PROFILE cp->phys.header.stamp static void ncb_profile (ncb_p np, nccb_p cp) { int co, da, st, en, di, se, post,work,disc; u_long diff; PROFILE.end = ticks; st = ncr_delta (&PROFILE.start,&PROFILE.status); if (st<0) return; /* status not reached */ da = ncr_delta (&PROFILE.start,&PROFILE.data); if (da<0) return; /* No data transfer phase */ co = ncr_delta (&PROFILE.start,&PROFILE.command); if (co<0) return; /* command not executed */ en = ncr_delta (&PROFILE.start,&PROFILE.end), di = ncr_delta (&PROFILE.start,&PROFILE.disconnect), se = ncr_delta (&PROFILE.start,&PROFILE.select); post = en - st; /* ** @PROFILE@ Disconnect time invalid if multiple disconnects */ if (di>=0) disc = se-di; else disc = 0; work = (st - co) - disc; diff = (np->disc_phys - np->disc_ref) & 0xff; np->disc_ref += diff; np->profile.num_trans += 1; if (cp->ccb) np->profile.num_bytes += cp->ccb->csio.dxfer_len; np->profile.num_disc += diff; np->profile.ms_setup += co; np->profile.ms_data += work; np->profile.ms_disc += disc; np->profile.ms_post += post; } #undef PROFILE /*========================================================== ** ** Determine the ncr's clock frequency. ** This is essential for the negotiation ** of the synchronous transfer rate. ** **========================================================== ** ** Note: we have to return the correct value. ** THERE IS NO SAFE DEFAULT VALUE. ** ** Most NCR/SYMBIOS boards are delivered with a 40 MHz clock. ** 53C860 and 53C875 rev. 1 support fast20 transfers but ** do not have a clock doubler and so are provided with an ** 80 MHz clock. All other fast20 boards incorporate a doubler ** and so should be delivered with a 40 MHz clock. ** The future fast40 chips (895/895) use a 40 MHz base clock ** and provide a clock quadrupler (160 MHz). The code below ** tries to deal as cleverly as possible with all this stuff. ** **---------------------------------------------------------- */ /* * Select NCR SCSI clock frequency */ static void ncr_selectclock(ncb_p np, u_char scntl3) { if (np->multiplier < 2) { OUTB(nc_scntl3, scntl3); return; } if (bootverbose >= 2) device_printf(np->dev, "enabling clock multiplier\n"); OUTB(nc_stest1, DBLEN); /* Enable clock multiplier */ if (np->multiplier > 2) { /* Poll bit 5 of stest4 for quadrupler */ int i = 20; while (!(INB(nc_stest4) & LCKFRQ) && --i > 0) DELAY(20); if (!i) device_printf(np->dev, "the chip cannot lock the frequency\n"); } else /* Wait 20 micro-seconds for doubler */ DELAY(20); OUTB(nc_stest3, HSC); /* Halt the scsi clock */ OUTB(nc_scntl3, scntl3); OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */ OUTB(nc_stest3, 0x00); /* Restart scsi clock */ } /* * calculate NCR SCSI clock frequency (in KHz) */ static unsigned ncrgetfreq (ncb_p np, int gen) { int ms = 0; /* * Measure GEN timer delay in order * to calculate SCSI clock frequency * * This code will never execute too * many loop iterations (if DELAY is * reasonably correct). It could get * too low a delay (too high a freq.) * if the CPU is slow executing the * loop for some reason (an NMI, for * example). For this reason we will, * if multiple measurements are to be * performed, trust the higher delay * (lower frequency returned).
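*
* Illustrative numbers, using the conversion below: with gen = 11,
* a 40000 kHz clock is expected to stretch the measurement to
* ms = (1 << 11) * 4440 / 40000, roughly 227 ms, while an 80 MHz
* part finishes in about half that time.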
*/ OUTB (nc_stest1, 0); /* make sure clock doubler is OFF */ OUTW (nc_sien , 0); /* mask all scsi interrupts */ (void) INW (nc_sist); /* clear pending scsi interrupt */ OUTB (nc_dien , 0); /* mask all dma interrupts */ (void) INW (nc_sist); /* another one, just to be sure :) */ OUTB (nc_scntl3, 4); /* set pre-scaler to divide by 3 */ OUTB (nc_stime1, 0); /* disable general purpose timer */ OUTB (nc_stime1, gen); /* set to nominal delay of (1<<gen) * 125us */ while (!(INW(nc_sist) & GEN) && ms++ < 1000) DELAY(1000); /* count ms */ OUTB (nc_stime1, 0); /* disable general purpose timer */ if (bootverbose >= 2) printf ("\tDelay (GEN=%d): %u msec\n", gen, ms); /* * adjust for prescaler, and convert into KHz */ return ms ? ((1 << gen) * 4440) / ms : 0; } static void ncr_getclock (ncb_p np, u_char multiplier) { unsigned char scntl3; unsigned char stest1; scntl3 = INB(nc_scntl3); stest1 = INB(nc_stest1); np->multiplier = 1; if (multiplier > 1) { np->multiplier = multiplier; np->clock_khz = 40000 * multiplier; } else { if ((scntl3 & 7) == 0) { unsigned f1, f2; /* throw away first result */ (void) ncrgetfreq (np, 11); f1 = ncrgetfreq (np, 11); f2 = ncrgetfreq (np, 11); if (bootverbose >= 2) printf ("\tNCR clock is %uKHz, %uKHz\n", f1, f2); if (f1 > f2) f1 = f2; /* trust lower result */ if (f1 > 45000) { scntl3 = 5; /* >45MHz: assume 80MHz */ } else { scntl3 = 3; /* <45MHz: assume 40MHz */ } } else if ((scntl3 & 7) == 5) np->clock_khz = 80000; /* Probably an 875 rev. 1? */ } } /*=========================================================================*/ #ifdef NCR_TEKRAM_EEPROM struct tekram_eeprom_dev { u_char devmode; #define TKR_PARCHK 0x01 #define TKR_TRYSYNC 0x02 #define TKR_ENDISC 0x04 #define TKR_STARTUNIT 0x08 #define TKR_USETAGS 0x10 #define TKR_TRYWIDE 0x20 u_char syncparam; /* max. sync transfer rate (table ?) */ u_char filler1; u_char filler2; }; struct tekram_eeprom { struct tekram_eeprom_dev dev[16]; u_char adaptid; u_char adaptmode; #define TKR_ADPT_GT2DRV 0x01 #define TKR_ADPT_GT1GB 0x02 #define TKR_ADPT_RSTBUS 0x04 #define TKR_ADPT_ACTNEG 0x08 #define TKR_ADPT_NOSEEK 0x10 #define TKR_ADPT_MORLUN 0x20 u_char delay; /* unit ? ( table ??? ) */ u_char tags; /* use 4 times as many ...
*/ u_char filler[60]; }; static void tekram_write_bit (ncb_p np, int bit) { u_char val = 0x10 + ((bit & 1) << 1); DELAY(10); OUTB (nc_gpreg, val); DELAY(10); OUTB (nc_gpreg, val | 0x04); DELAY(10); OUTB (nc_gpreg, val); DELAY(10); } static int tekram_read_bit (ncb_p np) { OUTB (nc_gpreg, 0x10); DELAY(10); OUTB (nc_gpreg, 0x14); DELAY(10); return INB (nc_gpreg) & 1; } static u_short read_tekram_eeprom_reg (ncb_p np, int reg) { int bit; u_short result = 0; int cmd = 0x80 | reg; OUTB (nc_gpreg, 0x10); tekram_write_bit (np, 1); for (bit = 7; bit >= 0; bit--) { tekram_write_bit (np, cmd >> bit); } for (bit = 0; bit < 16; bit++) { result <<= 1; result |= tekram_read_bit (np); } OUTB (nc_gpreg, 0x00); return result; } static int read_tekram_eeprom(ncb_p np, struct tekram_eeprom *buffer) { u_short *p = (u_short *) buffer; u_short sum = 0; int i; if (INB (nc_gpcntl) != 0x09) { return 0; } for (i = 0; i < 64; i++) { u_short val; if((i&0x0f) == 0) printf ("%02x:", i*2); val = read_tekram_eeprom_reg (np, i); if (p) *p++ = val; sum += val; if((i&0x01) == 0x00) printf (" "); printf ("%02x%02x", val & 0xff, (val >> 8) & 0xff); if((i&0x0f) == 0x0f) printf ("\n"); } printf ("Sum = %04x\n", sum); return sum == 0x1234; } #endif /* NCR_TEKRAM_EEPROM */ static device_method_t ncr_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ncr_probe), DEVMETHOD(device_attach, ncr_attach), { 0, 0 } }; static driver_t ncr_driver = { "ncr", ncr_methods, sizeof(struct ncb), }; static devclass_t ncr_devclass; DRIVER_MODULE(ncr, pci, ncr_driver, ncr_devclass, 0, 0); MODULE_DEPEND(ncr, cam, 1, 1, 1); MODULE_DEPEND(ncr, pci, 1, 1, 1); /*=========================================================================*/ #endif /* _KERNEL */ Index: head/sys/dev/ppbus/vpo.c =================================================================== --- head/sys/dev/ppbus/vpo.c (revision 296890) +++ head/sys/dev/ppbus/vpo.c (revision 296891) @@ -1,432 +1,438 @@ /*- * Copyright (c) 1997, 1998, 1999 Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_vpo.h" #include #include #include "ppbus_if.h" struct vpo_sense { struct scsi_sense cmd; unsigned int stat; unsigned int count; }; struct vpo_data { device_t vpo_dev; int vpo_stat; int vpo_count; int vpo_error; int vpo_isplus; struct cam_sim *sim; struct vpo_sense vpo_sense; struct vpoio_data vpo_io; /* interface to low level functions */ }; #define DEVTOSOFTC(dev) \ ((struct vpo_data *)device_get_softc(dev)) /* cam related functions */ static void vpo_action(struct cam_sim *sim, union ccb *ccb); static void vpo_poll(struct cam_sim *sim); static void vpo_identify(driver_t *driver, device_t parent) { device_t dev; dev = device_find_child(parent, "vpo", -1); if (!dev) BUS_ADD_CHILD(parent, 0, "vpo", -1); } /* * vpo_probe() */ static int vpo_probe(device_t dev) { device_t ppbus = device_get_parent(dev); struct vpo_data *vpo; int error; vpo = DEVTOSOFTC(dev); vpo->vpo_dev = dev; /* check ZIP before ZIP+ or imm_probe() will send controls to * the printer or whatever else is connected to the port */ ppb_lock(ppbus); if ((error = vpoio_probe(dev, &vpo->vpo_io)) == 0) { vpo->vpo_isplus = 0; device_set_desc(dev, "Iomega VPI0 Parallel to SCSI interface"); } else if ((error = imm_probe(dev, &vpo->vpo_io)) == 0) { vpo->vpo_isplus = 1; device_set_desc(dev, "Iomega Matchmaker Parallel to SCSI interface"); } else { ppb_unlock(ppbus); return (error); } ppb_unlock(ppbus); return (0); } /* * vpo_attach() */ static int vpo_attach(device_t dev) { struct vpo_data *vpo = DEVTOSOFTC(dev); device_t ppbus = device_get_parent(dev); struct ppb_data *ppb = device_get_softc(ppbus); /* XXX: layering */ struct cam_devq *devq; int error; /* low level attachment */ if (vpo->vpo_isplus) { if ((error = imm_attach(&vpo->vpo_io))) return (error); } else { if ((error = vpoio_attach(&vpo->vpo_io))) return (error); } /* ** Now tell the generic SCSI layer ** about our bus. */ devq = cam_simq_alloc(/*maxopenings*/1); /* XXX What about low-level detach on error?
*/ if (devq == NULL) return (ENXIO); vpo->sim = cam_sim_alloc(vpo_action, vpo_poll, "vpo", vpo, device_get_unit(dev), ppb->ppc_lock, /*untagged*/1, /*tagged*/0, devq); if (vpo->sim == NULL) { cam_simq_free(devq); return (ENXIO); } ppb_lock(ppbus); if (xpt_bus_register(vpo->sim, dev, /*bus*/0) != CAM_SUCCESS) { cam_sim_free(vpo->sim, /*free_devq*/TRUE); ppb_unlock(ppbus); return (ENXIO); } ppb_unlock(ppbus); return (0); } /* * vpo_intr() */ static void vpo_intr(struct vpo_data *vpo, struct ccb_scsiio *csio) { int errno; /* error in errno.h */ #ifdef VP0_DEBUG int i; #endif + uint8_t *ptr; + ptr = scsiio_cdb_ptr(csio); if (vpo->vpo_isplus) { errno = imm_do_scsi(&vpo->vpo_io, VP0_INITIATOR, csio->ccb_h.target_id, - (char *)&csio->cdb_io.cdb_bytes, csio->cdb_len, + ptr, csio->cdb_len, (char *)csio->data_ptr, csio->dxfer_len, &vpo->vpo_stat, &vpo->vpo_count, &vpo->vpo_error); } else { errno = vpoio_do_scsi(&vpo->vpo_io, VP0_INITIATOR, csio->ccb_h.target_id, - (char *)&csio->cdb_io.cdb_bytes, csio->cdb_len, + ptr, csio->cdb_len, (char *)csio->data_ptr, csio->dxfer_len, &vpo->vpo_stat, &vpo->vpo_count, &vpo->vpo_error); } #ifdef VP0_DEBUG printf("vpo_do_scsi = %d, status = 0x%x, count = %d, vpo_error = %d\n", errno, vpo->vpo_stat, vpo->vpo_count, vpo->vpo_error); /* dump of command */ for (i=0; i<csio->cdb_len; i++) - printf("%x ", ((char *)&csio->cdb_io.cdb_bytes)[i]); + printf("%x ", ((char *)ptr)[i]); printf("\n"); #endif if (errno) { /* connection to ppbus interrupted */ csio->ccb_h.status = CAM_CMD_TIMEOUT; return; } /* if a timeout occurred, no sense */ if (vpo->vpo_error) { if (vpo->vpo_error != VP0_ESELECT_TIMEOUT) device_printf(vpo->vpo_dev, "VP0 error/timeout (%d)\n", vpo->vpo_error); csio->ccb_h.status = CAM_CMD_TIMEOUT; return; } /* check scsi status */ if (vpo->vpo_stat != SCSI_STATUS_OK) { csio->scsi_status = vpo->vpo_stat; /* check if we have to sense the drive */ if ((vpo->vpo_stat & SCSI_STATUS_CHECK_COND) != 0) { vpo->vpo_sense.cmd.opcode = REQUEST_SENSE; vpo->vpo_sense.cmd.length = csio->sense_len; vpo->vpo_sense.cmd.control = 0; if (vpo->vpo_isplus) { errno = imm_do_scsi(&vpo->vpo_io, VP0_INITIATOR, csio->ccb_h.target_id, (char *)&vpo->vpo_sense.cmd, sizeof(vpo->vpo_sense.cmd), (char *)&csio->sense_data, csio->sense_len, &vpo->vpo_sense.stat, &vpo->vpo_sense.count, &vpo->vpo_error); } else { errno = vpoio_do_scsi(&vpo->vpo_io, VP0_INITIATOR, csio->ccb_h.target_id, (char *)&vpo->vpo_sense.cmd, sizeof(vpo->vpo_sense.cmd), (char *)&csio->sense_data, csio->sense_len, &vpo->vpo_sense.stat, &vpo->vpo_sense.count, &vpo->vpo_error); } #ifdef VP0_DEBUG printf("(sense) vpo_do_scsi = %d, status = 0x%x, count = %d, vpo_error = %d\n", errno, vpo->vpo_sense.stat, vpo->vpo_sense.count, vpo->vpo_error); #endif /* check sense return status */ if (errno == 0 && vpo->vpo_sense.stat == SCSI_STATUS_OK) { /* sense ok */ csio->ccb_h.status = CAM_AUTOSNS_VALID | CAM_SCSI_STATUS_ERROR; csio->sense_resid = csio->sense_len - vpo->vpo_sense.count; #ifdef VP0_DEBUG /* dump of sense info */ printf("(sense) "); for (i=0; i<vpo->vpo_sense.count; i++) printf("%x ", ((char *)&csio->sense_data)[i]); printf("\n"); #endif } else { /* sense failed */ csio->ccb_h.status = CAM_AUTOSENSE_FAIL; } } else { /* no sense */ csio->ccb_h.status = CAM_SCSI_STATUS_ERROR; } return; } csio->resid = csio->dxfer_len - vpo->vpo_count; csio->ccb_h.status = CAM_REQ_CMP; } static void vpo_action(struct cam_sim *sim, union ccb *ccb) { struct vpo_data *vpo = (struct vpo_data *)sim->softc; ppb_assert_locked(device_get_parent(vpo->vpo_dev)); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct ccb_scsiio *csio; csio = &ccb->csio; + if (ccb->ccb_h.flags & CAM_CDB_PHYS) { + ccb->ccb_h.status = CAM_REQ_INVALID; + xpt_done(ccb); + break; + } #ifdef VP0_DEBUG device_printf(vpo->vpo_dev, "XPT_SCSI_IO (0x%x) request\n", - csio->cdb_io.cdb_bytes[0]); + scsiio_cdb_ptr(csio)); #endif - vpo_intr(vpo, csio); xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; ccg = &ccb->ccg; #ifdef VP0_DEBUG device_printf(vpo->vpo_dev, "XPT_CALC_GEOMETRY (bs=%d,vs=%jd,c=%d,h=%d,spt=%d) request\n", ccg->block_size, (intmax_t)ccg->volume_size, ccg->cylinders, ccg->heads, ccg->secs_per_track); #endif ccg->heads = 64; ccg->secs_per_track = 32; ccg->cylinders = ccg->volume_size / (ccg->heads * ccg->secs_per_track); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { #ifdef VP0_DEBUG device_printf(vpo->vpo_dev, "XPT_RESET_BUS request\n"); #endif if (vpo->vpo_isplus) { if (imm_reset_bus(&vpo->vpo_io)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); return; } } else { if (vpoio_reset_bus(&vpo->vpo_io)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); return; } } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; #ifdef VP0_DEBUG device_printf(vpo->vpo_dev, "XPT_PATH_INQ request\n"); #endif cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = 0; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 7; cpi->max_lun = 0; cpi->initiator_id = VP0_INITIATOR; cpi->bus_id = sim->bus_id; cpi->base_transfer_speed = 93; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Iomega", HBA_IDLEN); strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); cpi->unit_number = sim->unit_number; cpi->transport = XPORT_PPB; cpi->transport_version = 0; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } return; } static void vpo_poll(struct cam_sim *sim) { /* The ZIP is actually always polled through vpo_action(). */ } static devclass_t vpo_devclass; static device_method_t vpo_methods[] = { /* device interface */ DEVMETHOD(device_identify, vpo_identify), DEVMETHOD(device_probe, vpo_probe), DEVMETHOD(device_attach, vpo_attach), { 0, 0 } }; static driver_t vpo_driver = { "vpo", vpo_methods, sizeof(struct vpo_data), }; DRIVER_MODULE(vpo, ppbus, vpo_driver, vpo_devclass, 0, 0); MODULE_DEPEND(vpo, ppbus, 1, 1, 1); MODULE_DEPEND(vpo, cam, 1, 1, 1);
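The vpo changes above lean on the scsiio_cdb_ptr() helper introduced
alongside them in cam_ccb.h. A minimal sketch of what that helper
presumably looks like (an illustration, not a line quoted from the
header): it merely selects between the two CDB representations based
on CAM_CDB_POINTER, which is why a SIM that cannot translate physical
addresses only needs the CAM_CDB_PHYS rejection added to vpo_action()
above.

static __inline uint8_t *
scsiio_cdb_ptr(struct ccb_scsiio *csio)
{
	/* the CDB is either stored inline or referenced by pointer */
	return ((csio->ccb_h.flags & CAM_CDB_POINTER) ?
	    csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes);
}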