Index: stable/10/sys/cam/cam_ccb.h
===================================================================
--- stable/10/sys/cam/cam_ccb.h	(revision 312849)
+++ stable/10/sys/cam/cam_ccb.h	(revision 312850)
@@ -1,1373 +1,1380 @@
 /*-
  * Data structures and definitions for CAM Control Blocks (CCBs).
  *
  * Copyright (c) 1997, 1998 Justin T. Gibbs.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions, and the following disclaimer,
  *    without modification, immediately at the beginning of the file.
  * 2. The name of the author may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 #ifndef _CAM_CAM_CCB_H
 #define _CAM_CAM_CCB_H 1
 
 #include <sys/queue.h>
 #include <sys/cdefs.h>
 #include <sys/time.h>
 #include <sys/limits.h>
 #ifndef _KERNEL
 #include <sys/callout.h>
 #endif
 #include <cam/cam_debug.h>
 #include <cam/scsi/scsi_all.h>
 #include <cam/ata/ata_all.h>
 
 /* General allocation length definitions for CCB structures */
 #define	IOCDBLEN	CAM_MAX_CDBLEN	/* Space for CDB bytes/pointer */
 #define	VUHBALEN	14		/* Vendor Unique HBA length */
 #define	SIM_IDLEN	16		/* ASCII string len for SIM ID */
 #define	HBA_IDLEN	16		/* ASCII string len for HBA ID */
 #define	DEV_IDLEN	16		/* ASCII string len for device names */
 #define CCB_PERIPH_PRIV_SIZE 	2	/* size of peripheral private area */
 #define CCB_SIM_PRIV_SIZE 	2	/* size of sim private area */
 
 /* Struct definitions for CAM control blocks */
 
 /* Common CCB header */
 /* CAM CCB flags */
 typedef enum {
 	CAM_CDB_POINTER		= 0x00000001,/* The CDB field is a pointer    */
 	CAM_QUEUE_ENABLE	= 0x00000002,/* SIM queue actions are enabled */
 	CAM_CDB_LINKED		= 0x00000004,/* CCB contains a linked CDB     */
 	CAM_NEGOTIATE		= 0x00000008,/*
 					      * Perform transport negotiation
 					      * with this command.
 					      */
 	CAM_DATA_ISPHYS		= 0x00000010,/* Data type with physical addrs */
 	CAM_DIS_AUTOSENSE	= 0x00000020,/* Disable autosense feature     */
 	CAM_DIR_BOTH		= 0x00000000,/* Data direction (00:IN/OUT)    */
 	CAM_DIR_IN		= 0x00000040,/* Data direction (01:DATA IN)   */
 	CAM_DIR_OUT		= 0x00000080,/* Data direction (10:DATA OUT)  */
 	CAM_DIR_NONE		= 0x000000C0,/* Data direction (11:no data)   */
 	CAM_DIR_MASK		= 0x000000C0,/* Data direction Mask	      */
 	CAM_DATA_VADDR		= 0x00000000,/* Data type (000:Virtual)       */
 	CAM_DATA_PADDR		= 0x00000010,/* Data type (001:Physical)      */
 	CAM_DATA_SG		= 0x00040000,/* Data type (010:sglist)        */
 	CAM_DATA_SG_PADDR	= 0x00040010,/* Data type (011:sglist phys)   */
 	CAM_DATA_BIO		= 0x00200000,/* Data type (100:bio)           */
 	CAM_DATA_MASK		= 0x00240010,/* Data type mask                */
 	CAM_SOFT_RST_OP		= 0x00000100,/* Use Soft reset alternative    */
 	CAM_ENG_SYNC		= 0x00000200,/* Flush resid bytes on complete */
 	CAM_DEV_QFRZDIS		= 0x00000400,/* Disable DEV Q freezing	      */
 	CAM_DEV_QFREEZE		= 0x00000800,/* Freeze DEV Q on execution     */
 	CAM_HIGH_POWER		= 0x00001000,/* Command takes a lot of power  */
 	CAM_SENSE_PTR		= 0x00002000,/* Sense data is a pointer	      */
 	CAM_SENSE_PHYS		= 0x00004000,/* Sense pointer is physical addr*/
 	CAM_TAG_ACTION_VALID	= 0x00008000,/* Use the tag action in this ccb*/
 	CAM_PASS_ERR_RECOVER	= 0x00010000,/* Pass driver does err. recovery*/
 	CAM_DIS_DISCONNECT	= 0x00020000,/* Disable disconnect	      */
 	CAM_MSG_BUF_PHYS	= 0x00080000,/* Message buffer ptr is physical*/
 	CAM_SNS_BUF_PHYS	= 0x00100000,/* Autosense data ptr is physical*/
 	CAM_CDB_PHYS		= 0x00400000,/* CDB pointer is physical     */
 	CAM_ENG_SGLIST		= 0x00800000,/* SG list is for the HBA engine */
 
 /* Phase cognizant mode flags */
 	CAM_DIS_AUTOSRP		= 0x01000000,/* Disable autosave/restore ptrs */
 	CAM_DIS_AUTODISC	= 0x02000000,/* Disable auto disconnect	      */
 	CAM_TGT_CCB_AVAIL	= 0x04000000,/* Target CCB available	      */
 	CAM_TGT_PHASE_MODE	= 0x08000000,/* The SIM runs in phase mode    */
 	CAM_MSGB_VALID		= 0x10000000,/* Message buffer valid	      */
 	CAM_STATUS_VALID	= 0x20000000,/* Status buffer valid	      */
 	CAM_DATAB_VALID		= 0x40000000,/* Data buffer valid	      */
 
 /* Host target Mode flags */
 	CAM_SEND_SENSE		= 0x08000000,/* Send sense data with status   */
 	CAM_TERM_IO		= 0x10000000,/* Terminate I/O Message sup.    */
 	CAM_DISCONNECT		= 0x20000000,/* Disconnects are mandatory     */
 	CAM_SEND_STATUS		= 0x40000000,/* Send status after data phase  */
 
 	CAM_UNLOCKED		= 0x80000000 /* Call callback without lock.   */
 } ccb_flags;
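 
 /*
  * The direction and data-type values above are multi-bit field
  * encodings, not independent flag bits, so they must be compared
  * after masking.  A minimal sketch (hypothetical helpers, not part
  * of the CAM API):
  */
 #if 0
 static __inline int
 ccb_dir_is_in(u_int32_t flags)
 {
 	return ((flags & CAM_DIR_MASK) == CAM_DIR_IN);
 }
 
 static __inline int
 ccb_data_is_sglist(u_int32_t flags)
 {
 	u_int32_t t = flags & CAM_DATA_MASK;
 
 	return (t == CAM_DATA_SG || t == CAM_DATA_SG_PADDR);
 }
 #endif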
 
 typedef enum {
 	CAM_EXTLUN_VALID	= 0x00000001,/* 64bit lun field is valid      */
 	CAM_USER_DATA_ADDR	= 0x00000002,/* Userspace data pointers */
 	CAM_SG_FORMAT_IOVEC	= 0x00000004,/* iovec instead of busdma S/G*/
 	CAM_UNMAPPED_BUF	= 0x00000008 /* use unmapped I/O */
 } ccb_xflags;
 
 /* XPT Opcodes for xpt_action */
 typedef enum {
 /* Function code flags are bits greater than 0xff */
 	XPT_FC_QUEUED		= 0x100,
 				/* Non-immediate function code */
 	XPT_FC_USER_CCB		= 0x200,
 	XPT_FC_XPT_ONLY		= 0x400,
 				/* Only for the transport layer device */
 	XPT_FC_DEV_QUEUED	= 0x800 | XPT_FC_QUEUED,
 				/* Passes through the device queues */
 /* Common function commands: 0x00->0x0F */
 	XPT_NOOP 		= 0x00,
 				/* Execute Nothing */
 	XPT_SCSI_IO		= 0x01 | XPT_FC_DEV_QUEUED,
 				/* Execute the requested I/O operation */
 	XPT_GDEV_TYPE		= 0x02,
 				/* Get type information for specified device */
 	XPT_GDEVLIST		= 0x03,
 				/* Get a list of peripheral devices */
 	XPT_PATH_INQ		= 0x04,
 				/* Path routing inquiry */
 	XPT_REL_SIMQ		= 0x05,
 				/* Release a frozen device queue */
 	XPT_SASYNC_CB		= 0x06,
 				/* Set Asynchronous Callback Parameters */
 	XPT_SDEV_TYPE		= 0x07,
 				/* Set device type information */
 	XPT_SCAN_BUS		= 0x08 | XPT_FC_QUEUED | XPT_FC_USER_CCB
 				       | XPT_FC_XPT_ONLY,
 				/* (Re)Scan the SCSI Bus */
 	XPT_DEV_MATCH		= 0x09 | XPT_FC_XPT_ONLY,
 				/* Get EDT entries matching the given pattern */
 	XPT_DEBUG		= 0x0a,
 				/* Turn on debugging for a bus, target or lun */
 	XPT_PATH_STATS		= 0x0b,
 				/* Path statistics (error counts, etc.) */
 	XPT_GDEV_STATS		= 0x0c,
 				/* Device statistics (error counts, etc.) */
 	XPT_DEV_ADVINFO		= 0x0e,
 				/* Get/Set Device advanced information */
 	XPT_ASYNC		= 0x0f | XPT_FC_QUEUED | XPT_FC_USER_CCB
 				       | XPT_FC_XPT_ONLY,
 				/* Asynchronous event */
 /* SCSI Control Functions: 0x10->0x1F */
 	XPT_ABORT		= 0x10,
 				/* Abort the specified CCB */
 	XPT_RESET_BUS		= 0x11 | XPT_FC_XPT_ONLY,
 				/* Reset the specified SCSI bus */
 	XPT_RESET_DEV		= 0x12 | XPT_FC_DEV_QUEUED,
 				/* Bus Device Reset the specified SCSI device */
 	XPT_TERM_IO		= 0x13,
 				/* Terminate the I/O process */
 	XPT_SCAN_LUN		= 0x14 | XPT_FC_QUEUED | XPT_FC_USER_CCB
 				       | XPT_FC_XPT_ONLY,
 				/* Scan Logical Unit */
 	XPT_GET_TRAN_SETTINGS	= 0x15,
 				/*
 				 * Get default/user transfer settings
 				 * for the target
 				 */
 	XPT_SET_TRAN_SETTINGS	= 0x16,
 				/*
 				 * Set transfer rate/width
 				 * negotiation settings
 				 */
 	XPT_CALC_GEOMETRY	= 0x17,
 				/*
 				 * Calculate the geometry parameters for
 				 * a device given the sector size and
 				 * volume size.
 				 */
 	XPT_ATA_IO		= 0x18 | XPT_FC_DEV_QUEUED,
 				/* Execute the requested ATA I/O operation */
 
 	XPT_GET_SIM_KNOB	= 0x18,
 				/*
 				 * Get SIM specific knob values.
 				 */
 
 	XPT_SET_SIM_KNOB	= 0x19,
 				/*
 				 * Set SIM specific knob values.
 				 */
 
 	XPT_SMP_IO		= 0x1b | XPT_FC_DEV_QUEUED,
 				/* Serial Management Protocol */
 
 	XPT_SCAN_TGT		= 0x1E | XPT_FC_QUEUED | XPT_FC_USER_CCB
 				       | XPT_FC_XPT_ONLY,
 				/* Scan Target */
 
 /* HBA engine commands 0x20->0x2F */
 	XPT_ENG_INQ		= 0x20 | XPT_FC_XPT_ONLY,
 				/* HBA engine feature inquiry */
 	XPT_ENG_EXEC		= 0x21 | XPT_FC_DEV_QUEUED,
 				/* HBA execute engine request */
 
 /* Target mode commands: 0x30->0x3F */
 	XPT_EN_LUN		= 0x30,
 				/* Enable LUN as a target */
 	XPT_TARGET_IO		= 0x31 | XPT_FC_DEV_QUEUED,
 				/* Execute target I/O request */
 	XPT_ACCEPT_TARGET_IO	= 0x32 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
 				/* Accept Host Target Mode CDB */
 	XPT_CONT_TARGET_IO	= 0x33 | XPT_FC_DEV_QUEUED,
 				/* Continue Host Target I/O Connection */
 	XPT_IMMED_NOTIFY	= 0x34 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
 				/* Notify Host Target driver of event (obsolete) */
 	XPT_NOTIFY_ACK		= 0x35,
 				/* Acknowledgement of event (obsolete) */
 	XPT_IMMEDIATE_NOTIFY	= 0x36 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
 				/* Notify Host Target driver of event */
 	XPT_NOTIFY_ACKNOWLEDGE	= 0x37 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
 				/* Acknowledgement of event */
 	XPT_REPROBE_LUN		= 0x38 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
 				/* Query device capacity and notify GEOM */
 
 /* Vendor Unique codes: 0x80->0x8F */
 	XPT_VUNIQUE		= 0x80
 } xpt_opcode;
 
 #define XPT_FC_GROUP_MASK		0xF0
 #define XPT_FC_GROUP(op) ((op) & XPT_FC_GROUP_MASK)
 #define XPT_FC_GROUP_COMMON		0x00
 #define XPT_FC_GROUP_SCSI_CONTROL	0x10
 #define XPT_FC_GROUP_HBA_ENGINE		0x20
 #define XPT_FC_GROUP_TMODE		0x30
 #define XPT_FC_GROUP_VENDOR_UNIQUE	0x80
 
 #define XPT_FC_IS_DEV_QUEUED(ccb) 	\
     (((ccb)->ccb_h.func_code & XPT_FC_DEV_QUEUED) == XPT_FC_DEV_QUEUED)
 #define XPT_FC_IS_QUEUED(ccb) 	\
     (((ccb)->ccb_h.func_code & XPT_FC_QUEUED) != 0)
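 
 /*
  * A minimal dispatch sketch using the group macros above (the
  * handler names are hypothetical; CAM_REQ_INVALID comes from cam.h):
  */
 #if 0
 static void
 example_dispatch(union ccb *ccb)
 {
 	switch (XPT_FC_GROUP(ccb->ccb_h.func_code)) {
 	case XPT_FC_GROUP_COMMON:	/* 0x00-0x0f */
 	case XPT_FC_GROUP_SCSI_CONTROL:	/* 0x10-0x1f */
 		example_initiator_action(ccb);
 		break;
 	case XPT_FC_GROUP_TMODE:	/* 0x30-0x3f */
 		example_target_action(ccb);
 		break;
 	default:
 		ccb->ccb_h.status = CAM_REQ_INVALID;
 		break;
 	}
 }
 #endif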
 
 typedef enum {
 	PROTO_UNKNOWN,
 	PROTO_UNSPECIFIED,
 	PROTO_SCSI,	/* Small Computer System Interface */
 	PROTO_ATA,	/* AT Attachment */
 	PROTO_ATAPI,	/* AT Attachment Packetized Interface */
 	PROTO_SATAPM,	/* SATA Port Multiplier */
 	PROTO_SEMB,	/* SATA Enclosure Management Bridge */
 } cam_proto;
 
 typedef enum {
 	XPORT_UNKNOWN,
 	XPORT_UNSPECIFIED,
 	XPORT_SPI,	/* SCSI Parallel Interface */
 	XPORT_FC,	/* Fibre Channel */
 	XPORT_SSA,	/* Serial Storage Architecture */
 	XPORT_USB,	/* Universal Serial Bus */
 	XPORT_PPB,	/* Parallel Port Bus */
 	XPORT_ATA,	/* AT Attachment */
 	XPORT_SAS,	/* Serial Attached SCSI */
 	XPORT_SATA,	/* Serial AT Attachment */
 	XPORT_ISCSI,	/* iSCSI */
 	XPORT_SRP,	/* SCSI RDMA Protocol */
 } cam_xport;
 
 #define XPORT_IS_ATA(t)		((t) == XPORT_ATA || (t) == XPORT_SATA)
 #define XPORT_IS_SCSI(t)	((t) != XPORT_UNKNOWN && \
 				 (t) != XPORT_UNSPECIFIED && \
 				 !XPORT_IS_ATA(t))
 #define XPORT_DEVSTAT_TYPE(t)	(XPORT_IS_ATA(t) ? DEVSTAT_TYPE_IF_IDE : \
 				 XPORT_IS_SCSI(t) ? DEVSTAT_TYPE_IF_SCSI : \
 				 DEVSTAT_TYPE_IF_OTHER)
 
 #define PROTO_VERSION_UNKNOWN (UINT_MAX - 1)
 #define PROTO_VERSION_UNSPECIFIED UINT_MAX
 #define XPORT_VERSION_UNKNOWN (UINT_MAX - 1)
 #define XPORT_VERSION_UNSPECIFIED UINT_MAX
 
 typedef union {
 	LIST_ENTRY(ccb_hdr) le;
 	SLIST_ENTRY(ccb_hdr) sle;
 	TAILQ_ENTRY(ccb_hdr) tqe;
 	STAILQ_ENTRY(ccb_hdr) stqe;
 } camq_entry;
 
 typedef union {
 	void		*ptr;
 	u_long		field;
 	u_int8_t	bytes[sizeof(uintptr_t)];
 } ccb_priv_entry;
 
 typedef union {
 	ccb_priv_entry	entries[CCB_PERIPH_PRIV_SIZE];
 	u_int8_t	bytes[CCB_PERIPH_PRIV_SIZE * sizeof(ccb_priv_entry)];
 } ccb_ppriv_area;
 
 typedef union {
 	ccb_priv_entry	entries[CCB_SIM_PRIV_SIZE];
 	u_int8_t	bytes[CCB_SIM_PRIV_SIZE * sizeof(ccb_priv_entry)];
 } ccb_spriv_area;
 
 typedef struct {
 	struct timeval	*etime;
 	uintptr_t	sim_data;
 	uintptr_t	periph_data;
 } ccb_qos_area;
 
 struct ccb_hdr {
 	cam_pinfo	pinfo;		/* Info for priority scheduling */
 	camq_entry	xpt_links;	/* For chaining in the XPT layer */	
 	camq_entry	sim_links;	/* For chaining in the SIM layer */	
 	camq_entry	periph_links;	/* For chaining in the type driver */
 	u_int32_t	retry_count;
 	void		(*cbfcnp)(struct cam_periph *, union ccb *);
 					/* Callback on completion function */
 	xpt_opcode	func_code;	/* XPT function code */
 	u_int32_t	status;		/* Status returned by CAM subsystem */
 	struct		cam_path *path;	/* Compiled path for this ccb */
 	path_id_t	path_id;	/* Path ID for the request */
 	target_id_t	target_id;	/* Target device ID */
 	lun_id_t	target_lun;	/* Target LUN number */
 	lun64_id_t	ext_lun;	/* 64bit extended/multi-level LUNs */
 	u_int32_t	flags;		/* ccb_flags */
 	u_int32_t	xflags;		/* Extended flags */
 	ccb_ppriv_area	periph_priv;
 	ccb_spriv_area	sim_priv;
 	ccb_qos_area	qos;
 	u_int32_t	timeout;	/* Hard timeout value in mseconds */
 	struct timeval	softtimeout;	/* Soft timeout value in sec + usec */
 };
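 
 /*
  * The private areas let peripheral drivers and SIMs carry per-request
  * state across the asynchronous completion.  A minimal sketch of the
  * common idiom ("ccb" and "softc" are assumed to be in scope; softc
  * is hypothetical driver state):
  */
 #if 0
 	/* On submission: */
 	ccb->ccb_h.periph_priv.entries[0].ptr = softc;
 	/* In the completion callback: */
 	softc = ccb->ccb_h.periph_priv.entries[0].ptr;
 #endif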
 
 /* Get Device Information CCB */
 struct ccb_getdev {
 	struct	  ccb_hdr ccb_h;
 	cam_proto protocol;
 	struct scsi_inquiry_data inq_data;
 	struct ata_params ident_data;
 	u_int8_t  serial_num[252];
 	u_int8_t  inq_flags;
 	u_int8_t  serial_num_len;
 };
 
 /* Device Statistics CCB */
 struct ccb_getdevstats {
 	struct	ccb_hdr	ccb_h;
 	int	dev_openings;	/* Space left for more work on device*/	
 	int	dev_active;	/* Transactions running on the device */
 	int	allocated;	/* CCBs allocated for the device */
 	int	queued;		/* CCBs queued to be sent to the device */
 	int	held;		/*
 				 * CCBs held by peripheral drivers
 				 * for this device
 				 */
 	int	maxtags;	/*
 				 * Boundary conditions for number of
 				 * tagged operations
 				 */
 	int	mintags;
 	struct	timeval last_reset;	/* Time of last bus reset/loop init */
 };
 
 typedef enum {
 	CAM_GDEVLIST_LAST_DEVICE,
 	CAM_GDEVLIST_LIST_CHANGED,
 	CAM_GDEVLIST_MORE_DEVS,
 	CAM_GDEVLIST_ERROR
 } ccb_getdevlist_status_e;
 
 struct ccb_getdevlist {
 	struct ccb_hdr		ccb_h;
 	char 			periph_name[DEV_IDLEN];
 	u_int32_t		unit_number;
 	unsigned int		generation;
 	u_int32_t		index;
 	ccb_getdevlist_status_e	status;
 };
 
 typedef enum {
 	PERIPH_MATCH_NONE	= 0x000,
 	PERIPH_MATCH_PATH	= 0x001,
 	PERIPH_MATCH_TARGET	= 0x002,
 	PERIPH_MATCH_LUN	= 0x004,
 	PERIPH_MATCH_NAME	= 0x008,
 	PERIPH_MATCH_UNIT	= 0x010,
 	PERIPH_MATCH_ANY	= 0x01f
 } periph_pattern_flags;
 
 struct periph_match_pattern {
 	char			periph_name[DEV_IDLEN];
 	u_int32_t		unit_number;
 	path_id_t		path_id;
 	target_id_t		target_id;
 	lun_id_t		target_lun;
 	periph_pattern_flags	flags;
 };
 
 typedef enum {
 	DEV_MATCH_NONE		= 0x000,
 	DEV_MATCH_PATH		= 0x001,
 	DEV_MATCH_TARGET	= 0x002,
 	DEV_MATCH_LUN		= 0x004,
 	DEV_MATCH_INQUIRY	= 0x008,
 	DEV_MATCH_DEVID		= 0x010,
 	DEV_MATCH_ANY		= 0x00f
 } dev_pattern_flags;
 
 struct device_id_match_pattern {
 	uint8_t id_len;
 	uint8_t id[256];
 };
 
 struct device_match_pattern {
 	path_id_t					path_id;
 	target_id_t					target_id;
 	lun_id_t					target_lun;
 	dev_pattern_flags				flags;
 	union {
 		struct scsi_static_inquiry_pattern	inq_pat;
 		struct device_id_match_pattern		devid_pat;
 	} data;	
 };
 
 typedef enum {
 	BUS_MATCH_NONE		= 0x000,
 	BUS_MATCH_PATH		= 0x001,
 	BUS_MATCH_NAME		= 0x002,
 	BUS_MATCH_UNIT		= 0x004,
 	BUS_MATCH_BUS_ID	= 0x008,
 	BUS_MATCH_ANY		= 0x00f
 } bus_pattern_flags;
 
 struct bus_match_pattern {
 	path_id_t		path_id;
 	char			dev_name[DEV_IDLEN];
 	u_int32_t		unit_number;
 	u_int32_t		bus_id;
 	bus_pattern_flags	flags;
 };
 
 union match_pattern {
 	struct periph_match_pattern	periph_pattern;
 	struct device_match_pattern	device_pattern;
 	struct bus_match_pattern	bus_pattern;
 };
 
 typedef enum {
 	DEV_MATCH_PERIPH,
 	DEV_MATCH_DEVICE,
 	DEV_MATCH_BUS
 } dev_match_type;
 
 struct dev_match_pattern {
 	dev_match_type		type;
 	union match_pattern	pattern;
 };
 
 struct periph_match_result {
 	char			periph_name[DEV_IDLEN];
 	u_int32_t		unit_number;
 	path_id_t		path_id;
 	target_id_t		target_id;
 	lun_id_t		target_lun;
 };
 
 typedef enum {
 	DEV_RESULT_NOFLAG		= 0x00,
 	DEV_RESULT_UNCONFIGURED		= 0x01
 } dev_result_flags;
 
 struct device_match_result {
 	path_id_t			path_id;
 	target_id_t			target_id;
 	lun_id_t			target_lun;
 	cam_proto			protocol;
 	struct scsi_inquiry_data	inq_data;
 	struct ata_params		ident_data;
 	dev_result_flags		flags;
 };
 
 struct bus_match_result {
 	path_id_t	path_id;
 	char		dev_name[DEV_IDLEN];
 	u_int32_t	unit_number;
 	u_int32_t	bus_id;
 };
 
 union match_result {
 	struct periph_match_result	periph_result;
 	struct device_match_result	device_result;
 	struct bus_match_result		bus_result;
 };
 
 struct dev_match_result {
 	dev_match_type		type;
 	union match_result	result;
 };
 
 typedef enum {
 	CAM_DEV_MATCH_LAST,
 	CAM_DEV_MATCH_MORE,
 	CAM_DEV_MATCH_LIST_CHANGED,
 	CAM_DEV_MATCH_SIZE_ERROR,
 	CAM_DEV_MATCH_ERROR
 } ccb_dev_match_status;
 
 typedef enum {
 	CAM_DEV_POS_NONE	= 0x000,
 	CAM_DEV_POS_BUS		= 0x001,
 	CAM_DEV_POS_TARGET	= 0x002,
 	CAM_DEV_POS_DEVICE	= 0x004,
 	CAM_DEV_POS_PERIPH	= 0x008,
 	CAM_DEV_POS_PDPTR	= 0x010,
 	CAM_DEV_POS_TYPEMASK	= 0xf00,
 	CAM_DEV_POS_EDT		= 0x100,
 	CAM_DEV_POS_PDRV	= 0x200
 } dev_pos_type;
 
 struct ccb_dm_cookie {
 	void 	*bus;
 	void	*target;	
 	void	*device;
 	void	*periph;
 	void	*pdrv;
 };
 
 struct ccb_dev_position {
 	u_int			generations[4];
 #define	CAM_BUS_GENERATION	0x00
 #define CAM_TARGET_GENERATION	0x01
 #define CAM_DEV_GENERATION	0x02
 #define CAM_PERIPH_GENERATION	0x03
 	dev_pos_type		position_type;
 	struct ccb_dm_cookie	cookie;
 };
 
 struct ccb_dev_match {
 	struct ccb_hdr			ccb_h;
 	ccb_dev_match_status		status;
 	u_int32_t			num_patterns;
 	u_int32_t			pattern_buf_len;
 	struct dev_match_pattern	*patterns;
 	u_int32_t			num_matches;
 	u_int32_t			match_buf_len;
 	struct dev_match_result		*matches;
 	struct ccb_dev_position		pos;
 };
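 
 /*
  * A minimal sketch of filling an XPT_DEV_MATCH request that walks the
  * whole device tree (zero patterns matches everything, as camcontrol
  * does for "devlist"; the CCB and result buffer are assumed to be
  * supplied by the caller):
  */
 #if 0
 static void
 example_fill_devmatch(struct ccb_dev_match *cdm,
     struct dev_match_result *res, u_int32_t res_size)
 {
 	cdm->ccb_h.func_code = XPT_DEV_MATCH;
 	cdm->num_patterns = 0;		/* no patterns: match all */
 	cdm->pattern_buf_len = 0;
 	cdm->patterns = NULL;
 	cdm->num_matches = 0;
 	cdm->match_buf_len = res_size;
 	cdm->matches = res;
 	/*
 	 * After dispatch, a status of CAM_DEV_MATCH_MORE means the
 	 * buffer filled; resubmit with "pos" intact to page onward.
 	 */
 }
 #endif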
 
 /*
  * Definitions for the path inquiry CCB fields.
  */
 #define CAM_VERSION	0x18	/* Hex value for current version */
 
 typedef enum {
 	PI_MDP_ABLE	= 0x80,	/* Supports MDP message */
 	PI_WIDE_32	= 0x40,	/* Supports 32 bit wide SCSI */
 	PI_WIDE_16	= 0x20, /* Supports 16 bit wide SCSI */
 	PI_SDTR_ABLE	= 0x10,	/* Supports SDTR message */
 	PI_LINKED_CDB	= 0x08, /* Supports linked CDBs */
 	PI_SATAPM	= 0x04,	/* Supports SATA PM */
 	PI_TAG_ABLE	= 0x02,	/* Supports tag queue messages */
 	PI_SOFT_RST	= 0x01	/* Supports soft reset alternative */
 } pi_inqflag;
 
 typedef enum {
 	PIT_PROCESSOR	= 0x80,	/* Target mode processor mode */
 	PIT_PHASE	= 0x40,	/* Target mode phase cog. mode */
 	PIT_DISCONNECT	= 0x20,	/* Disconnects supported in target mode */
 	PIT_TERM_IO	= 0x10,	/* Terminate I/O message supported in TM */
 	PIT_GRP_6	= 0x08,	/* Group 6 commands supported */
 	PIT_GRP_7	= 0x04	/* Group 7 commands supported */
 } pi_tmflag;
 
 typedef enum {
 	PIM_EXTLUNS	= 0x100,/* 64bit extended LUNs supported */
 	PIM_SCANHILO	= 0x80,	/* Bus scans from high ID to low ID */
 	PIM_NOREMOVE	= 0x40,	/* Removable devices not included in scan */
 	PIM_NOINITIATOR	= 0x20,	/* Initiator role not supported. */
 	PIM_NOBUSRESET	= 0x10,	/* User has disabled initial BUS RESET */
 	PIM_NO_6_BYTE	= 0x08,	/* Do not send 6-byte commands */
 	PIM_SEQSCAN	= 0x04,	/* Do bus scans sequentially, not in parallel */
 	PIM_UNMAPPED	= 0x02,
 	PIM_NOSCAN	= 0x01	/* SIM does its own scanning */
 } pi_miscflag;
 
 /* Path Inquiry CCB */
 struct ccb_pathinq_settings_spi {
 	u_int8_t ppr_options;
 };
 
 struct ccb_pathinq_settings_fc {
 	u_int64_t wwnn;		/* world wide node name */
 	u_int64_t wwpn;		/* world wide port name */
 	u_int32_t port;		/* 24 bit port id, if known */
 	u_int32_t bitrate;	/* Mbps */
 };
 
 struct ccb_pathinq_settings_sas {
 	u_int32_t bitrate;	/* Mbps */
 };
 #define	PATHINQ_SETTINGS_SIZE	128
 
 struct ccb_pathinq {
 	struct 	    ccb_hdr ccb_h;
 	u_int8_t    version_num;	/* Version number for the SIM/HBA */
 	u_int8_t    hba_inquiry;	/* Mimic of INQ byte 7 for the HBA */
 	u_int16_t   target_sprt;	/* Flags for target mode support */
 	u_int32_t   hba_misc;		/* Misc HBA features */
 	u_int16_t   hba_eng_cnt;	/* HBA engine count */
 					/* Vendor Unique capabilities */
 	u_int8_t    vuhba_flags[VUHBALEN];
 	u_int32_t   max_target;		/* Maximum supported Target */
 	u_int32_t   max_lun;		/* Maximum supported Lun */
 	u_int32_t   async_flags;	/* Installed Async handlers */
 	path_id_t   hpath_id;		/* Highest Path ID in the subsystem */
 	target_id_t initiator_id;	/* ID of the HBA on the SCSI bus */
 	char	    sim_vid[SIM_IDLEN];	/* Vendor ID of the SIM */
 	char	    hba_vid[HBA_IDLEN];	/* Vendor ID of the HBA */
 	char 	    dev_name[DEV_IDLEN];/* Device name for SIM */
 	u_int32_t   unit_number;	/* Unit number for SIM */
 	u_int32_t   bus_id;		/* Bus ID for SIM */
 	u_int32_t   base_transfer_speed;/* Base bus speed in KB/sec */
 	cam_proto   protocol;
 	u_int	    protocol_version;
 	cam_xport   transport;
 	u_int	    transport_version;
 	union {
 		struct ccb_pathinq_settings_spi spi;
 		struct ccb_pathinq_settings_fc fc;
 		struct ccb_pathinq_settings_sas sas;
 		char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE];
 	} xport_specific;
 	u_int		maxio;		/* Max supported I/O size, in bytes. */
 	u_int16_t	hba_vendor;	/* HBA vendor ID */
 	u_int16_t	hba_device;	/* HBA device ID */
 	u_int16_t	hba_subvendor;	/* HBA subvendor ID */
 	u_int16_t	hba_subdevice;	/* HBA subdevice ID */
 };
 
 /* Path Statistics CCB */
 struct ccb_pathstats {
 	struct	ccb_hdr	ccb_h;
 	struct	timeval last_reset;	/* Time of last bus reset/loop init */
 };
 
 typedef enum {
 	SMP_FLAG_NONE		= 0x00,
 	SMP_FLAG_REQ_SG		= 0x01,
 	SMP_FLAG_RSP_SG		= 0x02
 } ccb_smp_pass_flags;
 
 /*
  * Serial Management Protocol CCB
  * XXX Currently the semantics for this CCB are that it is executed either
  * by the addressed device, or that device's parent (i.e. an expander for
  * any device on an expander) if the addressed device doesn't support SMP.
  * Later, once we have the ability to probe SMP-only devices and put them
  * in CAM's topology, the CCB will only be executed by the addressed device
  * if possible.
  */
 struct ccb_smpio {
 	struct ccb_hdr		ccb_h;
 	uint8_t			*smp_request;
 	int			smp_request_len;
 	uint16_t		smp_request_sglist_cnt;
 	uint8_t			*smp_response;
 	int			smp_response_len;
 	uint16_t		smp_response_sglist_cnt;
 	ccb_smp_pass_flags	flags;
 };
 
 typedef union {
 	u_int8_t *sense_ptr;		/*
 					 * Pointer to storage
 					 * for sense information
 					 */
 	                                /* Storage Area for sense information */
 	struct	 scsi_sense_data sense_buf;
 } sense_t;
 
 typedef union {
 	u_int8_t  *cdb_ptr;		/* Pointer to the CDB bytes to send */
 					/* Area for the CDB send */
 	u_int8_t  cdb_bytes[IOCDBLEN];
 } cdb_t;
 
 /*
  * SCSI I/O Request CCB used for the XPT_SCSI_IO and XPT_CONT_TARGET_IO
  * function codes.
  */
 struct ccb_scsiio {
 	struct	   ccb_hdr ccb_h;
 	union	   ccb *next_ccb;	/* Ptr for next CCB for action */
 	u_int8_t   *req_map;		/* Ptr to mapping info */
 	u_int8_t   *data_ptr;		/* Ptr to the data buf/SG list */
 	u_int32_t  dxfer_len;		/* Data transfer length */
 					/* Autosense storage */	
 	struct     scsi_sense_data sense_data;
 	u_int8_t   sense_len;		/* Number of bytes to autosense */
 	u_int8_t   cdb_len;		/* Number of bytes for the CDB */
 	u_int16_t  sglist_cnt;		/* Number of SG list entries */
 	u_int8_t   scsi_status;		/* Returned SCSI status */
 	u_int8_t   sense_resid;		/* Autosense resid length: 2's comp */
 	u_int32_t  resid;		/* Transfer residual length: 2's comp */
 	cdb_t	   cdb_io;		/* Union for CDB bytes/pointer */
 	u_int8_t   *msg_ptr;		/* Pointer to the message buffer */
 	u_int16_t  msg_len;		/* Number of bytes for the Message */
 	u_int8_t   tag_action;		/* What to do for tag queueing */
 	/*
 	 * The tag action should be either the define below (to send a
 	 * non-tagged transaction) or one of the defined scsi tag messages
 	 * from scsi_message.h.
 	 */
 #define		CAM_TAG_ACTION_NONE	0x00
 	u_int	   tag_id;		/* tag id from initiator (target mode) */
 	u_int	   init_id;		/* initiator id of who selected */
 };
 
+static __inline uint8_t *
+scsiio_cdb_ptr(struct ccb_scsiio *ccb)
+{
+	return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
+	    ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes);
+}
+
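 /*
  * A minimal usage sketch: consumers should reach the CDB through the
  * accessor above so that both the inline-bytes and pointer
  * (CAM_CDB_POINTER) layouts are handled uniformly:
  */
 #if 0
 static u_int8_t
 example_cdb_opcode(struct ccb_scsiio *csio)
 {
 	return (scsiio_cdb_ptr(csio)[0]);
 }
 #endif
 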
 /*
  * ATA I/O Request CCB used for the XPT_ATA_IO function code.
  */
 struct ccb_ataio {
 	struct	   ccb_hdr ccb_h;
 	union	   ccb *next_ccb;	/* Ptr for next CCB for action */
 	struct ata_cmd	cmd;		/* ATA command register set */
 	struct ata_res	res;		/* ATA result register set */
 	u_int8_t   *data_ptr;		/* Ptr to the data buf/SG list */
 	u_int32_t  dxfer_len;		/* Data transfer length */
 	u_int32_t  resid;		/* Transfer residual length: 2's comp */
 	u_int8_t   tag_action;		/* What to do for tag queueing */
 	/*
 	 * The tag action should be either the define below (to send a
 	 * non-tagged transaction) or one of the defined scsi tag messages
 	 * from scsi_message.h.
 	 */
 #define		CAM_TAG_ACTION_NONE	0x00
 	u_int	   tag_id;		/* tag id from initiator (target mode) */
 	u_int	   init_id;		/* initiator id of who selected */
 };
 
 struct ccb_accept_tio {
 	struct	   ccb_hdr ccb_h;
 	cdb_t	   cdb_io;		/* Union for CDB bytes/pointer */
 	u_int8_t   cdb_len;		/* Number of bytes for the CDB */
 	u_int8_t   tag_action;		/* What to do for tag queueing */
 	u_int8_t   sense_len;		/* Number of bytes of Sense Data */
 	u_int      tag_id;		/* tag id from initiator (target mode) */
 	u_int      init_id;		/* initiator id of who selected */
 	struct     scsi_sense_data sense_data;
 };
 
 static __inline uint8_t *
 atio_cdb_ptr(struct ccb_accept_tio *ccb)
 {
 	return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
 	    ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes);
 }
 
 /* Release SIM Queue */
 struct ccb_relsim {
 	struct ccb_hdr ccb_h;
 	u_int32_t      release_flags;
 #define RELSIM_ADJUST_OPENINGS		0x01
 #define RELSIM_RELEASE_AFTER_TIMEOUT	0x02
 #define RELSIM_RELEASE_AFTER_CMDCMPLT	0x04
 #define RELSIM_RELEASE_AFTER_QEMPTY	0x08
 	u_int32_t      openings;
 	u_int32_t      release_timeout;	/* Abstract argument. */
 	u_int32_t      qfrozen_cnt;
 };
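 
 /*
  * A minimal sketch of releasing a frozen SIM queue while raising the
  * opening count (assumes the CCB header was initialized, e.g. with
  * xpt_setup_ccb(); xpt_action() dispatches it):
  */
 #if 0
 static void
 example_relsim(struct ccb_relsim *crs)
 {
 	crs->ccb_h.func_code = XPT_REL_SIMQ;
 	crs->release_flags = RELSIM_ADJUST_OPENINGS;
 	crs->openings = 32;		/* hypothetical new value */
 	xpt_action((union ccb *)crs);
 }
 #endif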
 
 /*
  * Definitions for the asynchronous callback CCB fields.
  */
 typedef enum {
 	AC_UNIT_ATTENTION	= 0x4000,/* Device reported UNIT ATTENTION */
 	AC_ADVINFO_CHANGED	= 0x2000,/* Advanced info might have changed */
 	AC_CONTRACT		= 0x1000,/* A contractual callback */
 	AC_GETDEV_CHANGED	= 0x800,/* Getdev info might have changed */
 	AC_INQ_CHANGED		= 0x400,/* Inquiry info might have changed */
 	AC_TRANSFER_NEG		= 0x200,/* New transfer settings in effect */
 	AC_LOST_DEVICE		= 0x100,/* A device went away */
 	AC_FOUND_DEVICE		= 0x080,/* A new device was found */
 	AC_PATH_DEREGISTERED	= 0x040,/* A path has de-registered */
 	AC_PATH_REGISTERED	= 0x020,/* A new path has been registered */
 	AC_SENT_BDR		= 0x010,/* A BDR message was sent to target */
 	AC_SCSI_AEN		= 0x008,/* A SCSI AEN has been received */
 	AC_UNSOL_RESEL		= 0x002,/* Unsolicited reselection occurred */
 	AC_BUS_RESET		= 0x001	/* A SCSI bus reset occurred */
 } ac_code;
 
 typedef void ac_callback_t (void *softc, u_int32_t code,
 			    struct cam_path *path, void *args);
 
 /*
  * Generic Asynchronous callbacks.
  *
  * Generic arguments passed back, which are then interpreted according
  * to a per-system contract number.
  */
 #define	AC_CONTRACT_DATA_MAX (128 - sizeof (u_int64_t))
 struct ac_contract {
 	u_int64_t	contract_number;
 	u_int8_t	contract_data[AC_CONTRACT_DATA_MAX];
 };
 
 #define	AC_CONTRACT_DEV_CHG	1
 struct ac_device_changed {
 	u_int64_t	wwpn;
 	u_int32_t	port;
 	target_id_t	target;
 	u_int8_t	arrived;
 };
 
 /* Set Asynchronous Callback CCB */
 struct ccb_setasync {
 	struct ccb_hdr	 ccb_h;
 	u_int32_t	 event_enable;	/* Async Event enables */	
 	ac_callback_t	*callback;
 	void		*callback_arg;
 };
 
 /* Set Device Type CCB */
 struct ccb_setdev {
 	struct	   ccb_hdr ccb_h;
 	u_int8_t   dev_type;	/* Value for dev type field in EDT */
 };
 
 /* SCSI Control Functions */
 
 /* Abort XPT request CCB */
 struct ccb_abort {
 	struct 	ccb_hdr ccb_h;
 	union	ccb *abort_ccb;	/* Pointer to CCB to abort */
 };
 
 /* Reset SCSI Bus CCB */
 struct ccb_resetbus {
 	struct	ccb_hdr ccb_h;
 };
 
 /* Reset SCSI Device CCB */
 struct ccb_resetdev {
 	struct	ccb_hdr ccb_h;
 };
 
 /* Terminate I/O Process Request CCB */
 struct ccb_termio {
 	struct	ccb_hdr ccb_h;
 	union	ccb *termio_ccb;	/* Pointer to CCB to terminate */
 };
 
 typedef enum {
 	CTS_TYPE_CURRENT_SETTINGS,
 	CTS_TYPE_USER_SETTINGS
 } cts_type;
 
 struct ccb_trans_settings_scsi
 {
 	u_int	valid;	/* Which fields to honor */
 #define	CTS_SCSI_VALID_TQ		0x01
 	u_int	flags;
 #define	CTS_SCSI_FLAGS_TAG_ENB		0x01
 };
 
 struct ccb_trans_settings_ata
 {
 	u_int	valid;	/* Which fields to honor */
 #define	CTS_ATA_VALID_TQ		0x01
 	u_int	flags;
 #define	CTS_ATA_FLAGS_TAG_ENB		0x01
 };
 
 struct ccb_trans_settings_spi
 {
 	u_int	  valid;	/* Which fields to honor */
 #define	CTS_SPI_VALID_SYNC_RATE		0x01
 #define	CTS_SPI_VALID_SYNC_OFFSET	0x02
 #define	CTS_SPI_VALID_BUS_WIDTH		0x04
 #define	CTS_SPI_VALID_DISC		0x08
 #define CTS_SPI_VALID_PPR_OPTIONS	0x10
 	u_int	flags;
 #define	CTS_SPI_FLAGS_DISC_ENB		0x01
 	u_int	sync_period;
 	u_int	sync_offset;
 	u_int	bus_width;
 	u_int	ppr_options;
 };
 
 struct ccb_trans_settings_fc {
 	u_int     	valid;		/* Which fields to honor */
 #define	CTS_FC_VALID_WWNN		0x8000
 #define	CTS_FC_VALID_WWPN		0x4000
 #define	CTS_FC_VALID_PORT		0x2000
 #define	CTS_FC_VALID_SPEED		0x1000
 	u_int64_t	wwnn;		/* world wide node name */
 	u_int64_t 	wwpn;		/* world wide port name */
 	u_int32_t 	port;		/* 24 bit port id, if known */
 	u_int32_t 	bitrate;	/* Mbps */
 };
 
 struct ccb_trans_settings_sas {
 	u_int     	valid;		/* Which fields to honor */
 #define	CTS_SAS_VALID_SPEED		0x1000
 	u_int32_t 	bitrate;	/* Mbps */
 };
 
 struct ccb_trans_settings_pata {
 	u_int     	valid;		/* Which fields to honor */
 #define	CTS_ATA_VALID_MODE		0x01
 #define	CTS_ATA_VALID_BYTECOUNT		0x02
 #define	CTS_ATA_VALID_ATAPI		0x20
 #define	CTS_ATA_VALID_CAPS		0x40
 	int		mode;		/* Mode */
 	u_int 		bytecount;	/* Length of PIO transaction */
 	u_int 		atapi;		/* Length of ATAPI CDB */
 	u_int 		caps;		/* Device and host SATA caps. */
 #define	CTS_ATA_CAPS_H			0x0000ffff
 #define	CTS_ATA_CAPS_H_DMA48		0x00000001 /* 48-bit DMA */
 #define	CTS_ATA_CAPS_D			0xffff0000
 };
 
 struct ccb_trans_settings_sata {
 	u_int     	valid;		/* Which fields to honor */
 #define	CTS_SATA_VALID_MODE		0x01
 #define	CTS_SATA_VALID_BYTECOUNT	0x02
 #define	CTS_SATA_VALID_REVISION		0x04
 #define	CTS_SATA_VALID_PM		0x08
 #define	CTS_SATA_VALID_TAGS		0x10
 #define	CTS_SATA_VALID_ATAPI		0x20
 #define	CTS_SATA_VALID_CAPS		0x40
 	int		mode;		/* Legacy PATA mode */
 	u_int 		bytecount;	/* Length of PIO transaction */
 	int		revision;	/* SATA revision */
 	u_int 		pm_present;	/* PM is present (XPT->SIM) */
 	u_int 		tags;		/* Number of allowed tags */
 	u_int 		atapi;		/* Length of ATAPI CDB */
 	u_int 		caps;		/* Device and host SATA caps. */
 #define	CTS_SATA_CAPS_H			0x0000ffff
 #define	CTS_SATA_CAPS_H_PMREQ		0x00000001
 #define	CTS_SATA_CAPS_H_APST		0x00000002
 #define	CTS_SATA_CAPS_H_DMAAA		0x00000010 /* Auto-activation */
 #define	CTS_SATA_CAPS_H_AN		0x00000020 /* Async. notification */
 #define	CTS_SATA_CAPS_D			0xffff0000
 #define	CTS_SATA_CAPS_D_PMREQ		0x00010000
 #define	CTS_SATA_CAPS_D_APST		0x00020000
 };
 
 /* Get/Set transfer rate/width/disconnection/tag queueing settings */
 struct ccb_trans_settings {
 	struct	  ccb_hdr ccb_h;
 	cts_type  type;		/* Current or User settings */
 	cam_proto protocol;
 	u_int	  protocol_version;
 	cam_xport transport;
 	u_int	  transport_version;
 	union {
 		u_int  valid;	/* Which fields to honor */
 		struct ccb_trans_settings_ata ata;
 		struct ccb_trans_settings_scsi scsi;
 	} proto_specific;
 	union {
 		u_int  valid;	/* Which fields to honor */
 		struct ccb_trans_settings_spi spi;
 		struct ccb_trans_settings_fc fc;
 		struct ccb_trans_settings_sas sas;
 		struct ccb_trans_settings_pata ata;
 		struct ccb_trans_settings_sata sata;
 	} xport_specific;
 };
 
 
 /*
  * Calculate the geometry parameters for a device
  * given the block size and volume size in blocks.
  */
 struct ccb_calc_geometry {
 	struct	  ccb_hdr ccb_h;
 	u_int32_t block_size;
 	u_int64_t volume_size;
 	u_int32_t cylinders;		
 	u_int8_t  heads;
 	u_int8_t  secs_per_track;
 };
 
 /*
  * Set or get SIM (and transport) specific knobs
  */
 
 #define	KNOB_VALID_ADDRESS	0x1
 #define	KNOB_VALID_ROLE		0x2
 
 
 #define	KNOB_ROLE_NONE		0x0
 #define	KNOB_ROLE_INITIATOR	0x1
 #define	KNOB_ROLE_TARGET	0x2
 #define	KNOB_ROLE_BOTH		0x3
 
 struct ccb_sim_knob_settings_spi {
 	u_int		valid;
 	u_int		initiator_id;
 	u_int		role;
 };
 
 struct ccb_sim_knob_settings_fc {
 	u_int		valid;
 	u_int64_t	wwnn;		/* world wide node name */
 	u_int64_t 	wwpn;		/* world wide port name */
 	u_int		role;
 };
 
 struct ccb_sim_knob_settings_sas {
 	u_int		valid;
 	u_int64_t	wwnn;		/* world wide node name */
 	u_int		role;
 };
 #define	KNOB_SETTINGS_SIZE	128
 
 struct ccb_sim_knob {
 	struct	  ccb_hdr ccb_h;
 	union {
 		u_int  valid;	/* Which fields to honor */
 		struct ccb_sim_knob_settings_spi spi;
 		struct ccb_sim_knob_settings_fc fc;
 		struct ccb_sim_knob_settings_sas sas;
 		char pad[KNOB_SETTINGS_SIZE];
 	} xport_specific;
 };
 
 /*
  * Rescan the given bus, or bus/target/lun
  */
 struct ccb_rescan {
 	struct	ccb_hdr ccb_h;
 	cam_flags	flags;
 };
 
 /*
  * Turn on debugging for the given bus, bus/target, or bus/target/lun.
  */
 struct ccb_debug {
 	struct	ccb_hdr ccb_h;
 	cam_debug_flags flags;
 };
 
 /* Target mode structures. */
 
 struct ccb_en_lun {
 	struct	  ccb_hdr ccb_h;
 	u_int16_t grp6_len;		/* Group 6 VU CDB length */
 	u_int16_t grp7_len;		/* Group 7 VU CDB length */
 	u_int8_t  enable;
 };
 
 /* old, barely used immediate notify, binary compatibility */
 struct ccb_immed_notify {
 	struct	  ccb_hdr ccb_h;
 	struct    scsi_sense_data sense_data;
 	u_int8_t  sense_len;		/* Number of bytes in sense buffer */
 	u_int8_t  initiator_id;		/* Id of initiator that selected */
 	u_int8_t  message_args[7];	/* Message Arguments */
 };
 
 struct ccb_notify_ack {
 	struct	  ccb_hdr ccb_h;
 	u_int16_t seq_id;		/* Sequence identifier */
 	u_int8_t  event;		/* Event flags */
 };
 
 struct ccb_immediate_notify {
 	struct    ccb_hdr ccb_h;
 	u_int     tag_id;		/* Tag for immediate notify */
 	u_int     seq_id;		/* Tag for target of notify */
 	u_int     initiator_id;		/* Initiator Identifier */
 	u_int     arg;			/* Function specific */
 };
 
 struct ccb_notify_acknowledge {
 	struct    ccb_hdr ccb_h;
 	u_int     tag_id;		/* Tag for immediate notify */
 	u_int     seq_id;		/* Tag for target of notify */
 	u_int     initiator_id;		/* Initiator Identifier */
 	u_int     arg;			/* Response information */
 	/*
 	 * Lower byte of arg is one of RESPONSE CODE values defined below
 	 * (subset of response codes from SPL-4 and FCP-4 specifications),
 	 * upper 3 bytes are code-specific ADDITIONAL RESPONSE INFORMATION.
 	 */
 #define	CAM_RSP_TMF_COMPLETE		0x00
 #define	CAM_RSP_TMF_REJECTED		0x04
 #define	CAM_RSP_TMF_FAILED		0x05
 #define	CAM_RSP_TMF_SUCCEEDED		0x08
 #define	CAM_RSP_TMF_INCORRECT_LUN	0x09
 };
 
 /* HBA engine structures. */
 
 typedef enum {
 	EIT_BUFFER,	/* Engine type: buffer memory */
 	EIT_LOSSLESS,	/* Engine type: lossless compression */
 	EIT_LOSSY,	/* Engine type: lossy compression */
 	EIT_ENCRYPT	/* Engine type: encryption */
 } ei_type;
 
 typedef enum {
 	EAD_VUNIQUE,	/* Engine algorithm ID: vendor unique */
 	EAD_LZ1V1,	/* Engine algorithm ID: LZ1 var.1 */
 	EAD_LZ2V1,	/* Engine algorithm ID: LZ2 var.1 */
 	EAD_LZ2V2	/* Engine algorithm ID: LZ2 var.2 */
 } ei_algo;
 
 struct ccb_eng_inq {
 	struct	  ccb_hdr ccb_h;
 	u_int16_t eng_num;	/* The engine number for this inquiry */
 	ei_type   eng_type;	/* Returned engine type */
 	ei_algo   eng_algo;	/* Returned engine algorithm type */
 	u_int32_t eng_memeory;	/* Returned engine memory size */
 };
 
 struct ccb_eng_exec {	/* This structure must match SCSIIO size */
 	struct	  ccb_hdr ccb_h;
 	u_int8_t  *pdrv_ptr;	/* Ptr used by the peripheral driver */
 	u_int8_t  *req_map;	/* Ptr for mapping info on the req. */
 	u_int8_t  *data_ptr;	/* Pointer to the data buf/SG list */
 	u_int32_t dxfer_len;	/* Data transfer length */
 	u_int8_t  *engdata_ptr;	/* Pointer to the engine buffer data */
 	u_int16_t sglist_cnt;	/* Num of scatter gather list entries */
 	u_int32_t dmax_len;	/* Destination data maximum length */
 	u_int32_t dest_len;	/* Destination data length */
 	int32_t	  src_resid;	/* Source residual length: 2's comp */
 	u_int32_t timeout;	/* Timeout value */
 	u_int16_t eng_num;	/* Engine number for this request */
 	u_int16_t vu_flags;	/* Vendor Unique flags */
 };
 
 /*
  * Definitions for the timeout field in the SCSI I/O CCB.
  */
 #define	CAM_TIME_DEFAULT	0x00000000	/* Use SIM default value */
 #define	CAM_TIME_INFINITY	0xFFFFFFFF	/* Infinite timeout */
 
 #define	CAM_SUCCESS	0	/* For signaling general success */
 #define	CAM_FAILURE	1	/* For signaling general failure */
 
 #define CAM_FALSE	0
 #define CAM_TRUE	1
 
 #define XPT_CCB_INVALID	-1	/* for signaling a bad CCB to free */
 
 /*
  * CCB for working with advanced device information.  This operates in a fashion
  * similar to XPT_GDEV_TYPE.  Specify the target in ccb_h, the buffer
  * type requested, and provide a buffer size/buffer to write to.  If the
  * buffer is too small, provsiz will be larger than bufsiz.
  */
 struct ccb_dev_advinfo {
 	struct ccb_hdr ccb_h;
 	uint32_t flags;
 #define	CDAI_FLAG_NONE		0x0	/* No flags set */
 #define	CDAI_FLAG_STORE		0x1	/* If set, action becomes store */
 	uint32_t buftype;		/* IN: Type of data being requested */
 	/* NB: buftype is interpreted on a per-transport basis */
 #define	CDAI_TYPE_SCSI_DEVID	1
 #define	CDAI_TYPE_SERIAL_NUM	2
 #define	CDAI_TYPE_PHYS_PATH	3
 #define	CDAI_TYPE_RCAPLONG	4
 #define	CDAI_TYPE_EXT_INQ	5
 	off_t bufsiz;			/* IN: Size of external buffer */
 #define	CAM_SCSI_DEVID_MAXLEN	65536	/* length in buffer is an uint16_t */
 	off_t provsiz;			/* OUT: Size required/used */
 	uint8_t *buf;			/* IN/OUT: Buffer for requested data */
 };
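 
 /*
  * A minimal sketch of reading a serial number via XPT_DEV_ADVINFO
  * (CCB header setup and dispatch via xpt_action() assumed; returns
  * the size the transport reported):
  */
 #if 0
 static off_t
 example_get_serial(struct ccb_dev_advinfo *cdai, uint8_t *buf,
     off_t buflen)
 {
 	cdai->ccb_h.func_code = XPT_DEV_ADVINFO;
 	cdai->flags = CDAI_FLAG_NONE;
 	cdai->buftype = CDAI_TYPE_SERIAL_NUM;
 	cdai->bufsiz = buflen;
 	cdai->buf = buf;
 	xpt_action((union ccb *)cdai);
 	/* If provsiz > bufsiz, the supplied buffer was too small. */
 	return (cdai->provsiz);
 }
 #endif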
 
 /*
  * CCB for sending async events
  */
 struct ccb_async {
 	struct ccb_hdr ccb_h;
 	uint32_t async_code;
 	off_t async_arg_size;
 	void *async_arg_ptr;
 };
 
 /*
  * Union of all CCB types for kernel space allocation.  This union should
  * never be used for manipulating CCBs - its only use is for the allocation
  * and deallocation of raw CCB space and is the return type of xpt_ccb_alloc
  * and the argument to xpt_ccb_free.
  */
 union ccb {
 	struct	ccb_hdr			ccb_h;	/* For convenience */
 	struct	ccb_scsiio		csio;
 	struct	ccb_getdev		cgd;
 	struct	ccb_getdevlist		cgdl;
 	struct	ccb_pathinq		cpi;
 	struct	ccb_relsim		crs;
 	struct	ccb_setasync		csa;
 	struct	ccb_setdev		csd;
 	struct	ccb_pathstats		cpis;
 	struct	ccb_getdevstats		cgds;
 	struct	ccb_dev_match		cdm;
 	struct	ccb_trans_settings	cts;
 	struct	ccb_calc_geometry	ccg;	
 	struct	ccb_sim_knob		knob;	
 	struct	ccb_abort		cab;
 	struct	ccb_resetbus		crb;
 	struct	ccb_resetdev		crd;
 	struct	ccb_termio		tio;
 	struct	ccb_accept_tio		atio;
 	struct	ccb_scsiio		ctio;
 	struct	ccb_en_lun		cel;
 	struct	ccb_immed_notify	cin;
 	struct	ccb_notify_ack		cna;
 	struct	ccb_immediate_notify	cin1;
 	struct	ccb_notify_acknowledge	cna2;
 	struct	ccb_eng_inq		cei;
 	struct	ccb_eng_exec		cee;
 	struct	ccb_smpio		smpio;
 	struct 	ccb_rescan		crcn;
 	struct  ccb_debug		cdbg;
 	struct	ccb_ataio		ataio;
 	struct	ccb_dev_advinfo		cdai;
 	struct	ccb_async		casync;
 };
 
 #define CCB_CLEAR_ALL_EXCEPT_HDR(ccbp)			\
 	bzero((char *)(ccbp) + sizeof((ccbp)->ccb_h),	\
 	    sizeof(*(ccbp)) - sizeof((ccbp)->ccb_h))
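 
 /*
  * A minimal sketch of recycling a CCB: the header (compiled path,
  * priority, queue links) survives while the body is zeroed before
  * the CCB is filled for its next use ("ccb" assumed in scope):
  */
 #if 0
 	CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio);
 #endif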
 
 __BEGIN_DECLS
 static __inline void
 cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries,
 	      void (*cbfcnp)(struct cam_periph *, union ccb *),
 	      u_int32_t flags, u_int8_t tag_action,
 	      u_int8_t *data_ptr, u_int32_t dxfer_len,
 	      u_int8_t sense_len, u_int8_t cdb_len,
 	      u_int32_t timeout);
 
 static __inline void
 cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries,
 	      void (*cbfcnp)(struct cam_periph *, union ccb *),
 	      u_int32_t flags, u_int tag_action, u_int tag_id,
 	      u_int init_id, u_int scsi_status, u_int8_t *data_ptr,
 	      u_int32_t dxfer_len, u_int32_t timeout);
 
 static __inline void
 cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries,
 	      void (*cbfcnp)(struct cam_periph *, union ccb *),
 	      u_int32_t flags, u_int tag_action,
 	      u_int8_t *data_ptr, u_int32_t dxfer_len,
 	      u_int32_t timeout);
 
 static __inline void
 cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries, 
 	       void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags,
 	       uint8_t *smp_request, int smp_request_len,
 	       uint8_t *smp_response, int smp_response_len,
 	       uint32_t timeout);
 
 static __inline void
 cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries,
 	      void (*cbfcnp)(struct cam_periph *, union ccb *),
 	      u_int32_t flags, u_int8_t tag_action,
 	      u_int8_t *data_ptr, u_int32_t dxfer_len,
 	      u_int8_t sense_len, u_int8_t cdb_len,
 	      u_int32_t timeout)
 {
 	csio->ccb_h.func_code = XPT_SCSI_IO;
 	csio->ccb_h.flags = flags;
 	csio->ccb_h.xflags = 0;
 	csio->ccb_h.retry_count = retries;	
 	csio->ccb_h.cbfcnp = cbfcnp;
 	csio->ccb_h.timeout = timeout;
 	csio->data_ptr = data_ptr;
 	csio->dxfer_len = dxfer_len;
 	csio->sense_len = sense_len;
 	csio->cdb_len = cdb_len;
 	csio->tag_action = tag_action;
 }
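 
 /*
  * A minimal sketch of issuing a TEST UNIT READY through
  * cam_fill_csio() ("done_cb" is a hypothetical completion callback;
  * SSD_FULL_SIZE and MSG_SIMPLE_Q_TAG come from scsi_all.h and
  * scsi_message.h respectively):
  */
 #if 0
 static void
 example_tur(struct ccb_scsiio *csio)
 {
 	cam_fill_csio(csio,
 	    /*retries*/3,
 	    /*cbfcnp*/done_cb,
 	    /*flags*/CAM_DIR_NONE,
 	    /*tag_action*/MSG_SIMPLE_Q_TAG,
 	    /*data_ptr*/NULL,
 	    /*dxfer_len*/0,
 	    /*sense_len*/SSD_FULL_SIZE,
 	    /*cdb_len*/6,
 	    /*timeout*/5000);
 	memset(csio->cdb_io.cdb_bytes, 0, 6);	/* TEST UNIT READY = 0x00 */
 	xpt_action((union ccb *)csio);
 }
 #endif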
 
 static __inline void
 cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries,
 	      void (*cbfcnp)(struct cam_periph *, union ccb *),
 	      u_int32_t flags, u_int tag_action, u_int tag_id,
 	      u_int init_id, u_int scsi_status, u_int8_t *data_ptr,
 	      u_int32_t dxfer_len, u_int32_t timeout)
 {
 	csio->ccb_h.func_code = XPT_CONT_TARGET_IO;
 	csio->ccb_h.flags = flags;
 	csio->ccb_h.xflags = 0;
 	csio->ccb_h.retry_count = retries;	
 	csio->ccb_h.cbfcnp = cbfcnp;
 	csio->ccb_h.timeout = timeout;
 	csio->data_ptr = data_ptr;
 	csio->dxfer_len = dxfer_len;
 	csio->scsi_status = scsi_status;
 	csio->tag_action = tag_action;
 	csio->tag_id = tag_id;
 	csio->init_id = init_id;
 }
 
 static __inline void
 cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries,
 	      void (*cbfcnp)(struct cam_periph *, union ccb *),
 	      u_int32_t flags, u_int tag_action,
 	      u_int8_t *data_ptr, u_int32_t dxfer_len,
 	      u_int32_t timeout)
 {
 	ataio->ccb_h.func_code = XPT_ATA_IO;
 	ataio->ccb_h.flags = flags;
 	ataio->ccb_h.retry_count = retries;
 	ataio->ccb_h.cbfcnp = cbfcnp;
 	ataio->ccb_h.timeout = timeout;
 	ataio->data_ptr = data_ptr;
 	ataio->dxfer_len = dxfer_len;
 	ataio->tag_action = tag_action;
 }
 
 static __inline void
 cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries, 
 	       void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags,
 	       uint8_t *smp_request, int smp_request_len,
 	       uint8_t *smp_response, int smp_response_len,
 	       uint32_t timeout)
 {
 #ifdef _KERNEL
 	KASSERT((flags & CAM_DIR_MASK) == CAM_DIR_BOTH,
 		("direction != CAM_DIR_BOTH"));
 	KASSERT((smp_request != NULL) && (smp_response != NULL),
 		("need valid request and response buffers"));
 	KASSERT((smp_request_len != 0) && (smp_response_len != 0),
 		("need non-zero request and response lengths"));
 #endif /*_KERNEL*/
 	smpio->ccb_h.func_code = XPT_SMP_IO;
 	smpio->ccb_h.flags = flags;
 	smpio->ccb_h.retry_count = retries;
 	smpio->ccb_h.cbfcnp = cbfcnp;
 	smpio->ccb_h.timeout = timeout;
 	smpio->smp_request = smp_request;
 	smpio->smp_request_len = smp_request_len;
 	smpio->smp_response = smp_response;
 	smpio->smp_response_len = smp_response_len;
 }
 
 static __inline void
 cam_set_ccbstatus(union ccb *ccb, cam_status status)
 {
 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
 	ccb->ccb_h.status |= status;
 }
 
 static __inline cam_status
 cam_ccb_status(union ccb *ccb)
 {
 	return ((cam_status)(ccb->ccb_h.status & CAM_STATUS_MASK));
 }
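 
 /*
  * A minimal completion-check sketch using the accessor above
  * (CAM_REQ_CMP, from cam.h, is the "request completed" status;
  * "ccb" assumed in scope):
  */
 #if 0
 	if (cam_ccb_status(ccb) != CAM_REQ_CMP) {
 		/* Inspect ccb_h.status; possibly retry or fail. */
 	}
 #endif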
 
 void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
 
 __END_DECLS
 
 #endif /* _CAM_CAM_CCB_H */
Index: stable/10/sys/dev/arcmsr/arcmsr.c
===================================================================
--- stable/10/sys/dev/arcmsr/arcmsr.c	(revision 312849)
+++ stable/10/sys/dev/arcmsr/arcmsr.c	(revision 312850)
@@ -1,4562 +1,4569 @@
 /*
 ********************************************************************************
 **        OS    : FreeBSD
 **   FILE NAME  : arcmsr.c
 **        BY    : Erich Chen, Ching Huang
 **   Description: SCSI RAID Device Driver for 
 **                ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x)
 **                SATA/SAS RAID HOST Adapter
 ********************************************************************************
 ********************************************************************************
 **
 ** Copyright (C) 2002 - 2012, Areca Technology Corporation All rights reserved.
 **
 ** Redistribution and use in source and binary forms, with or without
 ** modification, are permitted provided that the following conditions
 ** are met:
 ** 1. Redistributions of source code must retain the above copyright
 **    notice, this list of conditions and the following disclaimer.
 ** 2. Redistributions in binary form must reproduce the above copyright
 **    notice, this list of conditions and the following disclaimer in the
 **    documentation and/or other materials provided with the distribution.
 ** 3. The name of the author may not be used to endorse or promote products
 **    derived from this software without specific prior written permission.
 **
 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 
 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT
 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 ********************************************************************************
 ** History
 **
 **    REV#         DATE         NAME        DESCRIPTION
 ** 1.00.00.00   03/31/2004  Erich Chen      First release
 ** 1.20.00.02   11/29/2004  Erich Chen      bug fix with arcmsr_bus_reset when PHY error
 ** 1.20.00.03   04/19/2005  Erich Chen      add SATA 24 Ports adapter type support
 **                                          clean unused function
 ** 1.20.00.12   09/12/2005  Erich Chen      bug fix with abort command handling, 
 **                                          firmware version check 
 **                                          and firmware update notify for hardware bug fix
 **                                          handling of non-zero high part physical address 
 **                                          of srb resource 
 ** 1.20.00.13   08/18/2006  Erich Chen      remove pending srb and report busy
 **                                          add iop message xfer 
 **                                          with scsi pass-through command
 **                                          add new device id of sas raid adapters 
 **                                          code fit for SPARC64 & PPC 
 ** 1.20.00.14   02/05/2007  Erich Chen      bug fix for incorrect ccb_h.status report
 **                                          that caused g_vfs_done() read/write errors
 ** 1.20.00.15   10/10/2007  Erich Chen      support new RAID adapter type ARC120x
 ** 1.20.00.16   10/10/2009  Erich Chen      Bug fix for RAID adapter type ARC120x
 **                                          bus_dmamem_alloc() with BUS_DMA_ZERO
 ** 1.20.00.17   07/15/2010  Ching Huang     Added support ARC1880
 **                                          report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed,
 **                                          prevent cam_periph_error removing all LUN devices of one Target id
 **                                          for any one LUN device failed
 ** 1.20.00.18   10/14/2010  Ching Huang     Fixed "inquiry data fails comparison at DV1 step"
 **              10/25/2010  Ching Huang     Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B
 ** 1.20.00.19   11/11/2010  Ching Huang     Fixed arcmsr driver prevent arcsas support for Areca SAS HBA ARC13x0
 ** 1.20.00.20   12/08/2010  Ching Huang     Avoid calling atomic_set_int function
 ** 1.20.00.21   02/08/2011  Ching Huang     Implement I/O request timeout
 **              02/14/2011  Ching Huang     Modified pktRequestCount
 ** 1.20.00.21   03/03/2011  Ching Huang     if a command timeout, then wait its ccb back before free it
 ** 1.20.00.22   07/04/2011  Ching Huang     Fixed multiple MTX panic
 ** 1.20.00.23   10/28/2011  Ching Huang     Added TIMEOUT_DELAY in case too many HDDs need to start 
 ** 1.20.00.23   11/08/2011  Ching Huang     Added report device transfer speed 
 ** 1.20.00.23   01/30/2012  Ching Huang     Fixed Request requeued and Retrying command
 ** 1.20.00.24   06/11/2012  Ching Huang     Fixed return sense data condition
 ** 1.20.00.25   08/17/2012  Ching Huang     Fixed hotplug device no function on type A adapter
 ** 1.20.00.26   12/14/2012  Ching Huang     Added support ARC1214,1224,1264,1284
 ** 1.20.00.27   05/06/2013  Ching Huang     Fixed outstanding cmd full on ARC-12x4
 ** 1.20.00.28   09/13/2013  Ching Huang     Removed recursive mutex in arcmsr_abort_dr_ccbs
 ** 1.20.00.29   12/18/2013  Ching Huang     Change simq allocation number, support ARC1883
 ** 1.30.00.00   11/30/2015  Ching Huang     Added support ARC1203
 ******************************************************************************************
 */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #if 0
 #define ARCMSR_DEBUG1			1
 #endif
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/malloc.h>
 #include <sys/kernel.h>
 #include <sys/bus.h>
 #include <sys/queue.h>
 #include <sys/stat.h>
 #include <sys/devicestat.h>
 #include <sys/kthread.h>
 #include <sys/module.h>
 #include <sys/proc.h>
 #include <sys/lock.h>
 #include <sys/sysctl.h>
 #include <sys/poll.h>
 #include <sys/ioccom.h>
 #include <vm/vm.h>
 #include <vm/vm_param.h>
 #include <vm/pmap.h>
 
 #include <isa/rtc.h>
 
 #include <machine/bus.h>
 #include <machine/resource.h>
 #include <machine/atomic.h>
 #include <sys/conf.h>
 #include <sys/rman.h>
 
 #include <cam/cam.h>
 #include <cam/cam_ccb.h>
 #include <cam/cam_sim.h>
 #include <cam/cam_periph.h>
 #include <cam/cam_xpt_periph.h>
 #include <cam/cam_xpt_sim.h>
 #include <cam/cam_debug.h>
 #include <cam/scsi/scsi_all.h>
 #include <cam/scsi/scsi_message.h>
 /*
 **************************************************************************
 **************************************************************************
 */
 #if __FreeBSD_version >= 500005
 	#include <sys/selinfo.h>
 	#include <sys/mutex.h>
 	#include <sys/endian.h>
 	#include <dev/pci/pcivar.h>
 	#include <dev/pci/pcireg.h>
 #else
 	#include <sys/select.h>
 	#include <pci/pcivar.h>
 	#include <pci/pcireg.h>
 #endif
 
 #if !defined(CAM_NEW_TRAN_CODE) && __FreeBSD_version >= 700025
 #define	CAM_NEW_TRAN_CODE	1
 #endif
 
 #if __FreeBSD_version > 500000
 #define arcmsr_callout_init(a)	callout_init(a, /*mpsafe*/1);
 #else
 #define arcmsr_callout_init(a)	callout_init(a);
 #endif
 
 #define ARCMSR_DRIVER_VERSION	"arcmsr version 1.30.00.00 2015-11-30"
 #include <dev/arcmsr/arcmsr.h>
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_free_srb(struct CommandControlBlock *srb);
 static struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb);
 static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb);
 static int arcmsr_probe(device_t dev);
 static int arcmsr_attach(device_t dev);
 static int arcmsr_detach(device_t dev);
 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
 static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
 static int arcmsr_shutdown(device_t dev);
 static void arcmsr_interrupt(struct AdapterControlBlock *acb);
 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
 static void arcmsr_free_resource(struct AdapterControlBlock *acb);
 static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
 static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer);
 static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb);
 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
 static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
 static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg);
 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb);
 static int arcmsr_resume(device_t dev);
 static int arcmsr_suspend(device_t dev);
 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
 static void arcmsr_polling_devmap(void *arg);
 static void arcmsr_srb_timeout(void *arg);
 static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb);
 #ifdef ARCMSR_DEBUG1
 static void arcmsr_dump_data(struct AdapterControlBlock *acb);
 #endif
 /*
 **************************************************************************
 **************************************************************************
 */
 static void UDELAY(u_int32_t us) { DELAY(us); }
 /*
 **************************************************************************
 **************************************************************************
 */
 static bus_dmamap_callback_t arcmsr_map_free_srb;
 static bus_dmamap_callback_t arcmsr_execute_srb;
 /*
 **************************************************************************
 **************************************************************************
 */
 static d_open_t	arcmsr_open;
 static d_close_t arcmsr_close;
 static d_ioctl_t arcmsr_ioctl;
 
 static device_method_t arcmsr_methods[]={
 	DEVMETHOD(device_probe,		arcmsr_probe),
 	DEVMETHOD(device_attach,	arcmsr_attach),
 	DEVMETHOD(device_detach,	arcmsr_detach),
 	DEVMETHOD(device_shutdown,	arcmsr_shutdown),
 	DEVMETHOD(device_suspend,	arcmsr_suspend),
 	DEVMETHOD(device_resume,	arcmsr_resume),
 
 #if __FreeBSD_version >= 803000
 	DEVMETHOD_END
 #else
 	{ 0, 0 }
 #endif
 };
 
 static driver_t arcmsr_driver={
 	"arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
 };
 
 static devclass_t arcmsr_devclass;
 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, 0, 0);
 MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
 MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
 #ifndef BUS_DMA_COHERENT		
 	#define	BUS_DMA_COHERENT	0x04	/* hint: map memory in a coherent way */
 #endif
 #if __FreeBSD_version >= 501000
 static struct cdevsw arcmsr_cdevsw={
 	#if __FreeBSD_version >= 503000
 		.d_version = D_VERSION, 
 	#endif
 	#if (__FreeBSD_version>=503000 && __FreeBSD_version<600034)
 		.d_flags   = D_NEEDGIANT, 
 	#endif
 		.d_open    = arcmsr_open, 	/* open     */
 		.d_close   = arcmsr_close, 	/* close    */
 		.d_ioctl   = arcmsr_ioctl, 	/* ioctl    */
 		.d_name    = "arcmsr", 		/* name     */
 	};
 #else
 	#define ARCMSR_CDEV_MAJOR	180
 
 static struct cdevsw arcmsr_cdevsw = {
 		arcmsr_open,			/* open     */
 		arcmsr_close,			/* close    */
 		noread,				/* read     */
 		nowrite,			/* write    */
 		arcmsr_ioctl,			/* ioctl    */
 		nopoll,				/* poll     */
 		nommap,				/* mmap     */
 		nostrategy,			/* strategy */
 		"arcmsr",			/* name     */
 		ARCMSR_CDEV_MAJOR,		/* major    */
 		nodump,				/* dump     */
 		nopsize,			/* psize    */
 		0				/* flags    */
 	};
 #endif
 /*
 **************************************************************************
 **************************************************************************
 */
 #if	__FreeBSD_version < 500005
 	static int arcmsr_open(dev_t dev, int flags, int fmt, struct proc *proc)
 #else
 	#if	__FreeBSD_version < 503000
 	static int arcmsr_open(dev_t dev, int flags, int fmt, struct thread *proc)
 	#else
 	static int arcmsr_open(struct cdev *dev, int flags, int fmt, struct thread *proc)
 	#endif 
 #endif
 {
 	#if	__FreeBSD_version < 503000
 		struct AdapterControlBlock *acb = dev->si_drv1;
 	#else
 		int	unit = dev2unit(dev);
 		struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit);
 	#endif
 	if(acb == NULL) {
 		return ENXIO;
 	}
 	return (0);
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 #if	__FreeBSD_version < 500005
 	static int arcmsr_close(dev_t dev, int flags, int fmt, struct proc *proc)
 #else
 	#if	__FreeBSD_version < 503000
 	static int arcmsr_close(dev_t dev, int flags, int fmt, struct thread *proc)
 	#else
 	static int arcmsr_close(struct cdev *dev, int flags, int fmt, struct thread *proc)
 	#endif 
 #endif
 {
 	#if	__FreeBSD_version < 503000
 		struct AdapterControlBlock *acb = dev->si_drv1;
 	#else
 		int	unit = dev2unit(dev);
 		struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit);
 	#endif
 	if(acb == NULL) {
 		return ENXIO;
 	}
 	return 0;
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 #if	__FreeBSD_version < 500005
 	static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct proc *proc)
 #else
 	#if	__FreeBSD_version < 503000
 	static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc)
 	#else
 	static int arcmsr_ioctl(struct cdev *dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc)
 	#endif 
 #endif
 {
 	#if	__FreeBSD_version < 503000
 		struct AdapterControlBlock *acb = dev->si_drv1;
 	#else
 		int	unit = dev2unit(dev);
 		struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit);
 	#endif
 	
 	if(acb == NULL) {
 		return ENXIO;
 	}
 	return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb)
 {
 	u_int32_t intmask_org = 0;
 
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			/* disable all outbound interrupt */
 			intmask_org = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */
 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 			/* disable all outbound interrupt */
 			intmask_org = READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask)
 						& (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
 			WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask, 0); /* disable all interrupt */
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 			/* disable all outbound interrupt */
 			intmask_org = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask); /* disable outbound message0 int */
 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			/* disable all outbound interrupt */
 			intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable); /* disable outbound message0 int */
 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE);
 		}
 		break;
 	}
 	return (intmask_org);
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org)
 {
 	u_int32_t mask;
 
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			/* enable outbound Post Queue, outbound doorbell Interrupt */
 			mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask);
 			acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
 		}
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 			/* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
 			mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
 			WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
 			acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 			/* enable outbound Post Queue, outbound doorbell Interrupt */
 			mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask);
 			acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			/* enable outbound Post Queue, outbound doorbell Interrupt */
 			mask = ARCMSR_HBDMU_ALL_INT_ENABLE;
 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | mask);
 			CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable);
 			acb->outbound_int_enable = mask;
 		}
 		break;
 	}
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
 {
 	u_int32_t Index;
 	u_int8_t Retries = 0x00;
 
 	do {
 		for(Index=0; Index < 100; Index++) {
 			if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/
 				return TRUE;
 			}
 			UDELAY(10000);
 		}/*max 1 second*/
 	}while(Retries++ < 20);/*max 20 sec*/
 	return (FALSE);
 }
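 /*
 **********************************************************************
 ** Timing note: arcmsr_hba_wait_msgint_ready() above and its HBB/HBC/
 ** HBD siblings below all share the same budget:
 **
 **	inner loop: 100 iterations x UDELAY(10000 us) ~  1 second
 **	outer loop: up to 20 retries                  ~ 20 seconds
 **
 ** so a single handshake may block for about 20 seconds, and the cache
 ** flush helpers further down retry it up to 30 times (~10 minutes).
 **********************************************************************
 */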
 /*
 **********************************************************************
 **********************************************************************
 */
 static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
 {
 	u_int32_t Index;
 	u_int8_t Retries = 0x00;
 	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 
 	do {
 		for(Index=0; Index < 100; Index++) {
 			if(READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
 				WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
 				WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
 				return TRUE;
 			}
 			UDELAY(10000);
 		}/*max 1 second*/
 	}while(Retries++ < 20);/*max 20 sec*/
 	return (FALSE);
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
 {
 	u_int32_t Index;
 	u_int8_t Retries = 0x00;
 
 	do {
 		for(Index=0; Index < 100; Index++) {
 			if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/
 				return TRUE;
 			}
 			UDELAY(10000);
 		}/*max 1 second*/
 	}while(Retries++ < 20);/*max 20 sec*/
 	return (FALSE);
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static u_int8_t arcmsr_hbd_wait_msgint_ready(struct AdapterControlBlock *acb)
 {
 	u_int32_t Index;
 	u_int8_t Retries = 0x00;
 
 	do {
 		for(Index=0; Index < 100; Index++) {
 			if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) {
 				CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);/*clear interrupt*/
 				return TRUE;
 			}
 			UDELAY(10000);
 		}/*max 1 second*/
 	}while(Retries++ < 20);/*max 20 sec*/
 	return (FALSE);
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
 {
 	int retry_count = 30;/* wait up to 10 minutes (30 x ~20 s) for the adapter cache flush */
 
 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
 	do {
 		if(arcmsr_hba_wait_msgint_ready(acb)) {
 			break;
 		} else {
 			retry_count--;
 		}
 	}while(retry_count != 0);
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
 {
 	int retry_count = 30;/* wait up to 10 minutes (30 x ~20 s) for the adapter cache flush */
 	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 
 	WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
 	do {
 		if(arcmsr_hbb_wait_msgint_ready(acb)) {
 			break;
 		} else {
 			retry_count--;
 		}
 	}while(retry_count != 0);
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
 {
 	int retry_count = 30;/* wait up to 10 minutes (30 x ~20 s) for the adapter cache flush */
 
 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
 	do {
 		if(arcmsr_hbc_wait_msgint_ready(acb)) {
 			break;
 		} else {
 			retry_count--;
 		}
 	}while(retry_count != 0);
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static void arcmsr_flush_hbd_cache(struct AdapterControlBlock *acb)
 {
 	int retry_count = 30; /* wait up to 10 minutes (30 x ~20 s) for the adapter cache flush */
 
 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
 	do {
 		if(arcmsr_hbd_wait_msgint_ready(acb)) {
 			break;
 		} else {
 			retry_count--;
 		}
 	}while(retry_count != 0);
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			arcmsr_flush_hba_cache(acb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			arcmsr_flush_hbb_cache(acb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 			arcmsr_flush_hbc_cache(acb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			arcmsr_flush_hbd_cache(acb);
 		}
 		break;
 	}
 }
 /*
 *******************************************************************************
 *******************************************************************************
 */
 static int arcmsr_suspend(device_t dev)
 {
 	struct AdapterControlBlock	*acb = device_get_softc(dev);
 
 	/* flush controller */
 	arcmsr_iop_parking(acb);
 	/* disable all outbound interrupt */
 	arcmsr_disable_allintr(acb);
 	return(0);
 }
 /*
 *******************************************************************************
 *******************************************************************************
 */
 static int arcmsr_resume(device_t dev)
 {
 	struct AdapterControlBlock	*acb = device_get_softc(dev);
 
 	arcmsr_iop_init(acb);
 	return(0);
 }
 /*
 *********************************************************************************
 *********************************************************************************
 */
 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg)
 {
 	struct AdapterControlBlock *acb;
 	u_int8_t target_id, target_lun;
 	struct cam_sim *sim;
 
 	sim = (struct cam_sim *) cb_arg;
 	acb =(struct AdapterControlBlock *) cam_sim_softc(sim);
 	switch (code) {
 	case AC_LOST_DEVICE:
 		target_id = xpt_path_target_id(path);
 		target_lun = xpt_path_lun_id(path);
 		if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) {
 			break;
 		}
 	//	printf("%s:scsi id=%d lun=%d device lost \n", device_get_name(acb->pci_dev), target_id, target_lun);
 		break;
 	default:
 		break;
 	}
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
 {
 	union ccb *pccb = srb->pccb;
 
 	pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
 	pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
 	if(pccb->csio.sense_len) {
 		memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
 		memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
 			get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
 		((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */
 		pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
 	}
 }
 /*
 *********************************************************************
 *********************************************************************
 */
 static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
 {
 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
 		printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
 	}
 }
 /*
 *********************************************************************
 *********************************************************************
 */
 static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
 {
 	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 	WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
 		printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
 	}
 }
 /*
 *********************************************************************
 *********************************************************************
 */
 static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb)
 {
 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
 		printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
 	}
 }
 /*
 *********************************************************************
 *********************************************************************
 */
 static void arcmsr_abort_hbd_allcmd(struct AdapterControlBlock *acb)
 {
 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
 	if(!arcmsr_hbd_wait_msgint_ready(acb)) {
 		printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
 	}
 }
 /*
 *********************************************************************
 *********************************************************************
 */
 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			arcmsr_abort_hba_allcmd(acb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			arcmsr_abort_hbb_allcmd(acb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 			arcmsr_abort_hbc_allcmd(acb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			arcmsr_abort_hbd_allcmd(acb);
 		}
 		break;
 	}
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
 {
 	struct AdapterControlBlock *acb = srb->acb;
 	union ccb *pccb = srb->pccb;
 
 	if(srb->srb_flags & SRB_FLAG_TIMER_START)
 		callout_stop(&srb->ccb_callout);
 	if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
 		bus_dmasync_op_t op;
 
 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
 			op = BUS_DMASYNC_POSTREAD;
 		} else {
 			op = BUS_DMASYNC_POSTWRITE;
 		}
 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
 		bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
 	}
 	if(stand_flag == 1) {
 		atomic_subtract_int(&acb->srboutstandingcount, 1);
 		if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) &&
 		    (acb->srboutstandingcount < (acb->maxOutstanding - 10))) {
 			acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN;
 			pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
 		}
 	}
 	if(srb->srb_state != ARCMSR_SRB_TIMEOUT)
 		arcmsr_free_srb(srb);
 	acb->pktReturnCount++;
 	xpt_done(pccb);
 }
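 /*
 **********************************************************************
 ** Data-buffer DMA lifecycle (sketch; dmat/map stand for
 ** acb->dm_segs_dmat and srb->dm_segs_dmamap): arcmsr_build_srb()
 ** below issues the matching PRE sync before the command is posted,
 ** and the completion path above undoes it, e.g. for a DATA IN
 ** transfer:
 **
 **	bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREREAD);   before post
 **	    ... firmware DMAs data into the buffer ...
 **	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);  on completion
 **	bus_dmamap_unload(dmat, map);
 **********************************************************************
 */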
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
 {
 	int target, lun;
 
 	target = srb->pccb->ccb_h.target_id;
 	lun = srb->pccb->ccb_h.target_lun;
 	if(error == FALSE) {
 		if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
 			acb->devstate[target][lun] = ARECA_RAID_GOOD;
 		}
 		srb->pccb->ccb_h.status |= CAM_REQ_CMP;
 		arcmsr_srb_complete(srb, 1);
 	} else {
 		switch(srb->arcmsr_cdb.DeviceStatus) {
 		case ARCMSR_DEV_SELECT_TIMEOUT: {
 				if(acb->devstate[target][lun] == ARECA_RAID_GOOD) {
 					printf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun);
 				}
 				acb->devstate[target][lun] = ARECA_RAID_GONE;
 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
 				arcmsr_srb_complete(srb, 1);
 			}
 			break;
 		case ARCMSR_DEV_ABORTED:
 		case ARCMSR_DEV_INIT_FAIL: {
 				acb->devstate[target][lun] = ARECA_RAID_GONE;
 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
 				arcmsr_srb_complete(srb, 1);
 			}
 			break;
 		case SCSISTAT_CHECK_CONDITION: {
 				acb->devstate[target][lun] = ARECA_RAID_GOOD;
 				arcmsr_report_sense_info(srb);
 				arcmsr_srb_complete(srb, 1);
 			}
 			break;
 		default:
 			printf("arcmsr%d: scsi id=%d lun=%d isr got command error done,but got unknown DeviceStatus=0x%x \n"
 					, acb->pci_unit, target, lun ,srb->arcmsr_cdb.DeviceStatus);
 			acb->devstate[target][lun] = ARECA_RAID_GONE;
 			srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
 			/*unknown error or crc error just for retry*/
 			arcmsr_srb_complete(srb, 1);
 			break;
 		}
 	}
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
 {
 	struct CommandControlBlock *srb;
 
 	/* check if command done with no error*/
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_C:
 	case ACB_ADAPTER_TYPE_D:
 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0)); /*frame must be 32-byte aligned*/
 		break;
 	case ACB_ADAPTER_TYPE_A:
 	case ACB_ADAPTER_TYPE_B:
 	default:
 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32-byte aligned*/
 		break;
 	}
 	if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
 		if(srb->srb_state == ARCMSR_SRB_TIMEOUT) {
 			arcmsr_free_srb(srb);
 			printf("arcmsr%d: srb='%p' return srb has been timeouted\n", acb->pci_unit, srb);
 			return;
 		}
 		printf("arcmsr%d: return srb has been completed\n"
 			"srb='%p' srb_state=0x%x outstanding srb count=%d \n",
 			acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount);
 		return;
 	}
 	arcmsr_report_srb_state(acb, srb, error);
 }
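 /*
 **************************************************************************
 ** Reply-tag decoding (sketch): SRB frames are 32-byte aligned, so the
 ** low five bits of a frame address are free to carry status.  For
 ** types A/B the posted tag evidently holds the frame address
 ** pre-shifted right by five, hence the (flag_srb << 5) above; types
 ** C/D post the address itself with flags in bits 0-4, hence the
 ** (flag_srb & 0xFFFFFFE0) mask.
 **************************************************************************
 */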
 /*
 **************************************************************************
 **************************************************************************
 */
 static void	arcmsr_srb_timeout(void *arg)
 {
 	struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
 	struct AdapterControlBlock *acb;
 	int target, lun;
 	u_int8_t cmd;
 
 	target = srb->pccb->ccb_h.target_id;
 	lun = srb->pccb->ccb_h.target_lun;
 	acb = srb->acb;
 	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
 	if(srb->srb_state == ARCMSR_SRB_START)
 	{
-		cmd = srb->pccb->csio.cdb_io.cdb_bytes[0];
+		cmd = scsiio_cdb_ptr(&srb->pccb->csio)[0];
 		srb->srb_state = ARCMSR_SRB_TIMEOUT;
 		srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT;
 		arcmsr_srb_complete(srb, 1);
 		printf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command time out!\n",
 				 acb->pci_unit, target, lun, cmd, srb);
 	}
 	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
 #ifdef ARCMSR_DEBUG1
 	arcmsr_dump_data(acb);
 #endif
 }
 
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
 {
 	int i=0;
 	u_int32_t flag_srb;
 	u_int16_t error;
 
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			u_int32_t outbound_intstatus;
 
 			/*clear and abort all outbound posted Q*/
 			outbound_intstatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
 			while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
 				error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
 				arcmsr_drain_donequeue(acb, flag_srb, error);
 			}
 		}
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
 
 			/*clear all outbound posted Q*/
 			WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
 			for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
 				if((flag_srb = phbbmu->done_qbuffer[i]) != 0) {
 					phbbmu->done_qbuffer[i] = 0;
 					error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
 					arcmsr_drain_donequeue(acb, flag_srb, error);
 				}
 				phbbmu->post_qbuffer[i] = 0;
 			}/*drain reply FIFO*/
 			phbbmu->doneq_index = 0;
 			phbbmu->postq_index = 0;
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 
 			while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
 				flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
 				error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
 				arcmsr_drain_donequeue(acb, flag_srb, error);
 			}
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			arcmsr_hbd_postqueue_isr(acb);
 		}
 		break;
 	}
 }
 /*
 ****************************************************************************
 ****************************************************************************
 */
 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
 {
 	struct CommandControlBlock *srb;
 	u_int32_t intmask_org;
 	u_int32_t i=0;
 
 	if(acb->srboutstandingcount>0) {
 		/* disable all outbound interrupt */
 		intmask_org = arcmsr_disable_allintr(acb);
 		/*clear and abort all outbound posted Q*/
 		arcmsr_done4abort_postqueue(acb);
 		/* talk to iop 331 outstanding command aborted*/
 		arcmsr_abort_allcmd(acb);
 		for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
 			srb = acb->psrb_pool[i];
 			if(srb->srb_state == ARCMSR_SRB_START) {
 				srb->srb_state = ARCMSR_SRB_ABORTED;
 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
 				arcmsr_srb_complete(srb, 1);
 				printf("arcmsr%d: scsi id=%d lun=%d srb='%p' aborted\n"
 						, acb->pci_unit, srb->pccb->ccb_h.target_id
 						, srb->pccb->ccb_h.target_lun, srb);
 			}
 		}
 		/* enable all outbound interrupt */
 		arcmsr_enable_allintr(acb, intmask_org);
 	}
 	acb->srboutstandingcount = 0;
 	acb->workingsrb_doneindex = 0;
 	acb->workingsrb_startindex = 0;
 	acb->pktRequestCount = 0;
 	acb->pktReturnCount = 0;
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_build_srb(struct CommandControlBlock *srb, 
 		bus_dma_segment_t *dm_segs, u_int32_t nseg)
 {
 	struct ARCMSR_CDB *arcmsr_cdb = &srb->arcmsr_cdb;
 	u_int8_t *psge = (u_int8_t *)&arcmsr_cdb->u;
 	u_int32_t address_lo, address_hi;
 	union ccb *pccb = srb->pccb;
 	struct ccb_scsiio *pcsio = &pccb->csio;
 	u_int32_t arccdbsize = 0x30;
 
 	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
 	arcmsr_cdb->Bus = 0;
 	arcmsr_cdb->TargetID = pccb->ccb_h.target_id;
 	arcmsr_cdb->LUN = pccb->ccb_h.target_lun;
 	arcmsr_cdb->Function = 1;
 	arcmsr_cdb->CdbLength = (u_int8_t)pcsio->cdb_len;
-	bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
+	bcopy(scsiio_cdb_ptr(pcsio), arcmsr_cdb->Cdb, pcsio->cdb_len);
 	if(nseg != 0) {
 		struct AdapterControlBlock *acb = srb->acb;
 		bus_dmasync_op_t op;	
 		u_int32_t length, i, cdb_sgcount = 0;
 
 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
 			op = BUS_DMASYNC_PREREAD;
 		} else {
 			op = BUS_DMASYNC_PREWRITE;
 			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
 			srb->srb_flags |= SRB_FLAG_WRITE;
 		}
 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
 		for(i=0; i < nseg; i++) {
 			/* Get the physical address of the current data pointer */
 			length = arcmsr_htole32(dm_segs[i].ds_len);
 			address_lo = arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
 			address_hi = arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
 			if(address_hi == 0) {
 				struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
 				pdma_sg->address = address_lo;
 				pdma_sg->length = length;
 				psge += sizeof(struct SG32ENTRY);
 				arccdbsize += sizeof(struct SG32ENTRY);
 			} else {
 				u_int32_t sg64s_size = 0, tmplength = length;
 
 				while(1) {
 					u_int64_t span4G, length0;
 					struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
 
 					span4G = (u_int64_t)address_lo + tmplength;
 					pdma_sg->addresshigh = address_hi;
 					pdma_sg->address = address_lo;
 					if(span4G > 0x100000000) {
 						/*see if cross 4G boundary*/
 						length0 = 0x100000000-address_lo;
 						pdma_sg->length = (u_int32_t)length0 | IS_SG64_ADDR;
 						address_hi = address_hi+1;
 						address_lo = 0;
 						tmplength = tmplength - (u_int32_t)length0;
 						sg64s_size += sizeof(struct SG64ENTRY);
 						psge += sizeof(struct SG64ENTRY);
 						cdb_sgcount++;
 					} else {
 						pdma_sg->length = tmplength | IS_SG64_ADDR;
 						sg64s_size += sizeof(struct SG64ENTRY);
 						psge += sizeof(struct SG64ENTRY);
 						break;
 					}
 				}
 				arccdbsize += sg64s_size;
 			}
 			cdb_sgcount++;
 		}
 		arcmsr_cdb->sgcount = (u_int8_t)cdb_sgcount;
 		arcmsr_cdb->DataLength = pcsio->dxfer_len;
 		if( arccdbsize > 256) {
 			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
 		}
 	} else {
 		arcmsr_cdb->DataLength = 0;
 	}
 	srb->arc_cdb_size = arccdbsize;
 	arcmsr_cdb->msgPages = (arccdbsize/256) + ((arccdbsize % 256) ? 1 : 0);
 }
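 /*
 **********************************************************************
 ** S/G 4G-boundary split, worked example (hypothetical addresses): a
 ** 64-bit segment with address_hi=1, address_lo=0xFFFFF000 and length
 ** 0x3000 would wrap its low 32 bits, so the loop above emits two
 ** SG64ENTRYs instead of one:
 **
 **	entry 0: 0x00000001_FFFFF000, length 0x1000  (up to the 4G line)
 **	entry 1: 0x00000002_00000000, length 0x2000  (the remainder)
 **********************************************************************
 */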
 /*
 **************************************************************************
 **************************************************************************
 */ 
 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
 {
 	u_int32_t cdb_phyaddr_low = (u_int32_t) srb->cdb_phyaddr_low;
 	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&srb->arcmsr_cdb;
 
 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
 	atomic_add_int(&acb->srboutstandingcount, 1);
 	srb->srb_state = ARCMSR_SRB_START;
 
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
 			} else {
 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low);
 			}
 		}
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 			int ending_index, index;
 
 			index = phbbmu->postq_index;
 			ending_index = ((index+1) % ARCMSR_MAX_HBB_POSTQUEUE);
 			phbbmu->post_qbuffer[ending_index] = 0;
 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
 				phbbmu->post_qbuffer[index] = cdb_phyaddr_low | ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
 			} else {
 				phbbmu->post_qbuffer[index] = cdb_phyaddr_low;
 			}
 			index++;
 			index %= ARCMSR_MAX_HBB_POSTQUEUE;     /* wrap back to 0 past the last slot */
 			phbbmu->postq_index = index;
 			WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 			u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;
 
 			arc_cdb_size = (srb->arc_cdb_size > 0x300) ? 0x300 : srb->arc_cdb_size;
 			ccb_post_stamp = (cdb_phyaddr_low | ((arc_cdb_size-1) >> 6) | 1);
 			cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
 			if(cdb_phyaddr_hi32)
 			{
 				CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32);
 				CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
 			}
 			else
 			{
 				CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
 			}
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
 			u_int16_t index_stripped;
 			u_int16_t postq_index;
 			struct InBound_SRB *pinbound_srb;
 
 			ARCMSR_LOCK_ACQUIRE(&acb->postDone_lock);
 			postq_index = phbdmu->postq_index;
 			pinbound_srb = (struct InBound_SRB *)&phbdmu->post_qbuffer[postq_index & 0xFF];
 			pinbound_srb->addressHigh = srb->cdb_phyaddr_high;
 			pinbound_srb->addressLow = srb->cdb_phyaddr_low;
 			pinbound_srb->length = srb->arc_cdb_size >> 2;
 			arcmsr_cdb->Context = srb->cdb_phyaddr_low;
 			if (postq_index & 0x4000) {
 				index_stripped = postq_index & 0xFF;
 				index_stripped += 1;
 				index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
 				phbdmu->postq_index = index_stripped ? (index_stripped | 0x4000) : index_stripped;
 			} else {
 				index_stripped = postq_index;
 				index_stripped += 1;
 				index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
 				phbdmu->postq_index = index_stripped ? index_stripped : (index_stripped | 0x4000);
 			}
 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inboundlist_write_pointer, postq_index);
 			ARCMSR_LOCK_RELEASE(&acb->postDone_lock);
 		}
 		break;
 	}
 }
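 /*
 **************************************************************************
 ** HBD queue-index encoding (sketch): postq_index keeps the real slot
 ** number in its low byte and appears to use bit 0x4000 as a wrap
 ** toggle that flips each time the index passes ARCMSR_MAX_HBD_POSTQUEUE,
 ** letting driver and firmware distinguish a full queue from an empty
 ** one when the raw slot numbers match:
 **
 **	slot    = postq_index & 0xFF;
 **	wrapped = (postq_index & 0x4000) != 0;
 **************************************************************************
 */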
 /*
 ************************************************************************
 ************************************************************************
 */
 static struct QBUFFER *arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
 {
 	struct QBUFFER *qbuffer=NULL;
 
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;
 
 			qbuffer = (struct QBUFFER *)&phbamu->message_rbuffer;
 		}
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 
 			qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 			struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;
 
 			qbuffer = (struct QBUFFER *)&phbcmu->message_rbuffer;
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
 
 			qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_rbuffer;
 		}
 		break;
 	}
 	return(qbuffer);
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static struct QBUFFER *arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb)
 {
 	struct QBUFFER *qbuffer = NULL;
 
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;
 
 			qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
 		}
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 
 			qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 			struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;
 
 			qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
 
 			qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_wbuffer;
 		}
 		break;
 	}
 	return(qbuffer);
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			/* let IOP know data has been read */
 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 			/* let IOP know data has been read */
 			WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 			/* let IOP know data has been read */
 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			/* let IOP know data has been read */
 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ);
 		}
 		break;
 	}
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			/*
 			** push inbound doorbell tell iop, driver data write ok 
 			** and wait reply on next hwinterrupt for next Qbuffer post
 			*/
 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 			/*
 			** push inbound doorbell tell iop, driver data write ok 
 			** and wait reply on next hwinterrupt for next Qbuffer post
 			*/
 			WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 			/*
 			** push inbound doorbell tell iop, driver data write ok 
 			** and wait reply on next hwinterrupt for next Qbuffer post
 			*/
 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			/*
 			** push inbound doorbell tell iop, driver data write ok 
 			** and wait reply on next hwinterrupt for next Qbuffer post
 			*/
 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_IN_READY);
 		}
 		break;
 	}
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
 {
 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
 	CHIP_REG_WRITE32(HBA_MessageUnit, 
 		0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
 		printf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
 			, acb->pci_unit);
 	}
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
 {
 	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
 	WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
 		printf( "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
 			, acb->pci_unit);
 	}
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb)
 {
 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
 		printf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n", acb->pci_unit);
 	}
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static void arcmsr_stop_hbd_bgrb(struct AdapterControlBlock *acb)
 {
 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
 	if(!arcmsr_hbd_wait_msgint_ready(acb)) {
 		printf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n", acb->pci_unit);
 	}
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			arcmsr_stop_hba_bgrb(acb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			arcmsr_stop_hbb_bgrb(acb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 			arcmsr_stop_hbc_bgrb(acb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			arcmsr_stop_hbd_bgrb(acb);
 		}
 		break;
 	}
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static void arcmsr_poll(struct cam_sim *psim)
 {
 	struct AdapterControlBlock *acb;
 	int	mutex;
 
 	acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
 	mutex = mtx_owned(&acb->isr_lock);
 	if( mutex == 0 )
 		ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
 	arcmsr_interrupt(acb);
 	if( mutex == 0 )
 		ARCMSR_LOCK_RELEASE(&acb->isr_lock);
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static u_int32_t arcmsr_Read_iop_rqbuffer_data_D(struct AdapterControlBlock *acb,
 	struct QBUFFER *prbuffer) {
 
 	u_int8_t *pQbuffer;
 	u_int8_t *buf1 = 0;
 	u_int32_t *iop_data, *buf2 = 0;
 	u_int32_t iop_len, data_len;
 
 	iop_data = (u_int32_t *)prbuffer->data;
 	iop_len = (u_int32_t)prbuffer->data_len;
 	if ( iop_len > 0 )
 	{
 		buf1 = malloc(128, M_DEVBUF, M_NOWAIT | M_ZERO);
 		buf2 = (u_int32_t *)buf1;
 		if( buf1 == NULL)
 			return (0);
 		data_len = iop_len;
 		while(data_len >= 4)
 		{
 			*buf2++ = *iop_data++;
 			data_len -= 4;
 		}
 		if(data_len)
 			*buf2 = *iop_data;
 		buf2 = (u_int32_t *)buf1;
 	}
 	while (iop_len > 0) {
 		pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex];
 		*pQbuffer = *buf1;
 		acb->rqbuf_lastindex++;
 		/* wrap back to 0 at the end of the ring */
 		acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
 		buf1++;
 		iop_len--;
 	}
 	if(buf2)
 		free( (u_int8_t *)buf2, M_DEVBUF);
 	/* let IOP know data has been read */
 	arcmsr_iop_message_read(acb);
 	return (1);
 }
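 /*
 **************************************************************************
 ** The _D variants stage the byte stream through a small malloc'd
 ** bounce buffer and move it in u_int32_t units, apparently so the
 ** type C/D message-unit windows are only ever touched with aligned
 ** 32-bit accesses; despite the _D suffix, both helpers serve type C
 ** and type D adapters (see the ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D
 ** dispatch below).
 **************************************************************************
 */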
 /*
 **************************************************************************
 **************************************************************************
 */
 static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
 	struct QBUFFER *prbuffer) {
 
 	u_int8_t *pQbuffer;
 	u_int8_t *iop_data;
 	u_int32_t iop_len;
 
 	if(acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
 		return(arcmsr_Read_iop_rqbuffer_data_D(acb, prbuffer));
 	}
 	iop_data = (u_int8_t *)prbuffer->data;
 	iop_len = (u_int32_t)prbuffer->data_len;
 	while (iop_len > 0) {
 		pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex];
 		*pQbuffer = *iop_data;
 		acb->rqbuf_lastindex++;
 		/* wrap back to 0 at the end of the ring */
 		acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
 		iop_data++;
 		iop_len--;
 	}
 	/* let IOP know data has been read */
 	arcmsr_iop_message_read(acb);
 	return (1);
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
 {
 	struct QBUFFER *prbuffer;
 	int my_empty_len;
 
 	/*check whether this IOP data would overflow my rqbuffer*/
 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
 	prbuffer = arcmsr_get_iop_rqbuffer(acb);
 	my_empty_len = (acb->rqbuf_firstindex - acb->rqbuf_lastindex - 1) &
 		(ARCMSR_MAX_QBUFFER-1);
 	if(my_empty_len >= prbuffer->data_len) {
 		if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
 			acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
 	} else {
 		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
 	}
 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
 }
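 /*
 **********************************************************************
 ** rqbuffer free-space math (sketch, assuming ARCMSR_MAX_QBUFFER is a
 ** power of two): one slot stays unused so that full and empty states
 ** remain distinguishable,
 **
 **	free = (firstindex - lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1);
 **
 ** e.g. first == last       (empty) -> ARCMSR_MAX_QBUFFER - 1 free
 **      last  == first - 1  (full)  -> 0 free
 **********************************************************************
 */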
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_Write_data_2iop_wqbuffer_D(struct AdapterControlBlock *acb)
 {
 	u_int8_t *pQbuffer;
 	struct QBUFFER *pwbuffer;
 	u_int8_t *buf1 = 0;
 	u_int32_t *iop_data, *buf2 = 0;
 	u_int32_t allxfer_len = 0, data_len;
 
 	if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
 		buf1 = malloc(128, M_DEVBUF, M_NOWAIT | M_ZERO);
 		buf2 = (u_int32_t *)buf1;
 		if( buf1 == NULL)
 			return;
 
 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
 		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
 		iop_data = (u_int32_t *)pwbuffer->data;
 		while((acb->wqbuf_firstindex != acb->wqbuf_lastindex) 
 			&& (allxfer_len < 124)) {
 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
 			*buf1 = *pQbuffer;
 			acb->wqbuf_firstindex++;
 			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
 			buf1++;
 			allxfer_len++;
 		}
 		pwbuffer->data_len = allxfer_len;
 		data_len = allxfer_len;
 		buf1 = (u_int8_t *)buf2;
 		while(data_len >= 4)
 		{
 			*iop_data++ = *buf2++;
 			data_len -= 4;
 		}
 		if(data_len)
 			*iop_data = *buf2;
 		free( buf1, M_DEVBUF);
 		arcmsr_iop_message_wrote(acb);
 	}
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb)
 {
 	u_int8_t *pQbuffer;
 	struct QBUFFER *pwbuffer;
 	u_int8_t *iop_data;
 	int32_t allxfer_len=0;
 
 	if(acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
 		arcmsr_Write_data_2iop_wqbuffer_D(acb);
 		return;
 	}
 	if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
 		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
 		iop_data = (u_int8_t *)pwbuffer->data;
 		while((acb->wqbuf_firstindex != acb->wqbuf_lastindex) 
 			&& (allxfer_len < 124)) {
 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
 			*iop_data = *pQbuffer;
 			acb->wqbuf_firstindex++;
 			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
 			iop_data++;
 			allxfer_len++;
 		}
 		pwbuffer->data_len = allxfer_len;
 		arcmsr_iop_message_wrote(acb);
 	}
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
 {
 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
 	/*
 	*****************************************************************
 	**   check if there are any mail packages from the user-space
 	**   program in my post bag; now is the time to send them to
 	**   Areca's firmware
 	*****************************************************************
 	*/
 	if(acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
 		arcmsr_Write_data_2iop_wqbuffer(acb);
 	}
 	if(acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
 	}
 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
 {
 /*
 	if (ccb->ccb_h.status != CAM_REQ_CMP)
 		printf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x,"
 			"failure status=%x\n", ccb->ccb_h.target_id,
 			ccb->ccb_h.target_lun, ccb->ccb_h.status);
 	else
 		printf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
 */
 	xpt_free_path(ccb->ccb_h.path);
 	xpt_free_ccb(ccb);
 }
 
 static void	arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun)
 {
 	struct cam_path     *path;
 	union ccb           *ccb;
 
 	if ((ccb = (union ccb *)xpt_alloc_ccb_nowait()) == NULL)
 		return;
 	if (xpt_create_path(&path, NULL, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP)
 	{
 		xpt_free_ccb(ccb);
 		return;
 	}
 /*	printf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */
 	bzero(ccb, sizeof(union ccb));
 	xpt_setup_ccb(&ccb->ccb_h, path, 5);
 	ccb->ccb_h.func_code = XPT_SCAN_LUN;
 	ccb->ccb_h.cbfcnp = arcmsr_rescanLun_cb;
 	ccb->crcn.flags = CAM_FLAG_NONE;
 	xpt_action(ccb);
 }
 
 
 static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
 {
 	struct CommandControlBlock *srb;
 	u_int32_t intmask_org;
 	int i;
 
 	/* disable all outbound interrupts */
 	intmask_org = arcmsr_disable_allintr(acb);
 	for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++)
 	{
 		srb = acb->psrb_pool[i];
 		if (srb->srb_state == ARCMSR_SRB_START)
 		{
 			if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun))
 			{
 				srb->srb_state = ARCMSR_SRB_ABORTED;
 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
 				arcmsr_srb_complete(srb, 1);
 				printf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb);
 			}
 		}
 	}
 	/* enable outbound Post Queue, outbound doorbell Interrupt */
 	arcmsr_enable_allintr(acb, intmask_org);
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_dr_handle(struct AdapterControlBlock *acb) {
 	u_int32_t	devicemap;
 	u_int32_t	target, lun;
 	u_int32_t	deviceMapCurrent[4]={0};
 	u_int8_t	*pDevMap;
 
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A:
 		devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
 		for (target = 0; target < 4; target++) 
 		{
 			deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0],  devicemap);
 			devicemap += 4;
 		}
 		break;
 
 	case ACB_ADAPTER_TYPE_B:
 		devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
 		for (target = 0; target < 4; target++) 
 		{
 			deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1],  devicemap);
 			devicemap += 4;
 		}
 		break;
 
 	case ACB_ADAPTER_TYPE_C:
 		devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
 		for (target = 0; target < 4; target++) 
 		{
 			deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0],  devicemap);
 			devicemap += 4;
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D:
 		devicemap = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
 		for (target = 0; target < 4; target++) 
 		{
 			deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0],  devicemap);
 			devicemap += 4;
 		}
 		break;
 	}
 
 	if(acb->acb_flags & ACB_F_BUS_HANG_ON)
 	{
 		acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
 	}
 	/* 
 	** adapter posted CONFIG message 
 	** copy the new map, note if there are differences with the current map
 	*/
 	pDevMap = (u_int8_t	*)&deviceMapCurrent[0];
 	for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) 
 	{
 		if (*pDevMap != acb->device_map[target])
 		{
 			u_int8_t difference, bit_check;
 
 			difference = *pDevMap ^ acb->device_map[target];
 			for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++)
 			{
 				bit_check = (1 << lun);		/*check bit from 0....31*/
 				if(difference & bit_check)
 				{
 					if(acb->device_map[target] & bit_check)
 					{/* unit departed */
 						printf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun);
 						arcmsr_abort_dr_ccbs(acb, target, lun);
 						arcmsr_rescan_lun(acb, target, lun);
 						acb->devstate[target][lun] = ARECA_RAID_GONE;
 					}
 					else
 					{/* unit arrived */
 						printf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun);
 						arcmsr_rescan_lun(acb, target, lun);
 						acb->devstate[target][lun] = ARECA_RAID_GOOD;
 					}
 				}
 			}
 /*			printf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */
 			acb->device_map[target] = *pDevMap;
 		}
 		pDevMap++;
 	}
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) {
 	u_int32_t outbound_message;
 
 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
 	outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]);
 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
 		arcmsr_dr_handle( acb );
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) {
 	u_int32_t outbound_message;
 	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 
 	/* clear interrupts */
 	WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
 	outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]);
 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
 		arcmsr_dr_handle( acb );
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) {
 	u_int32_t outbound_message;
 
 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
 	outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]);
 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
 		arcmsr_dr_handle( acb );
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_hbd_message_isr(struct AdapterControlBlock *acb) {
 	u_int32_t outbound_message;
 
 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);
 	outbound_message = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[0]);
 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
 		arcmsr_dr_handle( acb );
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
 {
 	u_int32_t doorbell_status;
 
 	/*
 	*******************************************************************
 	**  Maybe here we need to check whether wrqbuffer_lock is held
 	**  DOORBELL: ding! dong!
 	**  check if there is any mail we need to fetch from the firmware
 	*******************************************************************
 	*/
 	doorbell_status = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
 	if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
 		arcmsr_iop2drv_data_wrote_handle(acb);
 	}
 	if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
 		arcmsr_iop2drv_data_read_handle(acb);
 	}
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
 {
 	u_int32_t doorbell_status;
 
 	/*
 	*******************************************************************
 	**  Maybe here we need to check whether wrqbuffer_lock is held
 	**  DOORBELL: ding! dong!
 	**  check if there is any mail we need to fetch from the firmware
 	*******************************************************************
 	*/
 	doorbell_status = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, doorbell_status); /* clear doorbell interrupt */
 	if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
 		arcmsr_iop2drv_data_wrote_handle(acb);
 	}
 	if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
 		arcmsr_iop2drv_data_read_handle(acb);
 	}
 	if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
 		arcmsr_hbc_message_isr(acb);    /* messenger of "driver to iop commands" */
 	}
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_hbd_doorbell_isr(struct AdapterControlBlock *acb)
 {
 	u_int32_t doorbell_status;
 
 	/*
 	*******************************************************************
 	**  Maybe here we need to check whether wrqbuffer_lock is held
 	**  DOORBELL: ding! dong!
 	**  check if there is any mail we need to fetch from the firmware
 	*******************************************************************
 	*/
 	doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE;
 	if(doorbell_status)
 		CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
 	while( doorbell_status & ARCMSR_HBDMU_F0_DOORBELL_CAUSE ) {
 		if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_WRITE_OK) {
 			arcmsr_iop2drv_data_wrote_handle(acb);
 		}
 		if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_READ_OK) {
 			arcmsr_iop2drv_data_read_handle(acb);
 		}
 		if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) {
 			arcmsr_hbd_message_isr(acb);    /* messenger of "driver to iop commands" */
 		}
 		doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE;
 		if(doorbell_status)
 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */
 	}
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
 {
 	u_int32_t flag_srb;
 	u_int16_t error;
 
 	/*
 	*****************************************************************************
 	**               areca cdb command done
 	*****************************************************************************
 	*/
 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, 
 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 	while((flag_srb = CHIP_REG_READ32(HBA_MessageUnit, 
 		0, outbound_queueport)) != 0xFFFFFFFF) {
 		/* check whether the command completed without error */
 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
 		arcmsr_drain_donequeue(acb, flag_srb, error);
 	}	/*drain reply FIFO*/
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
 {
 	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 	u_int32_t flag_srb;
 	int index;
 	u_int16_t error;
 
 	/*
 	*****************************************************************************
 	**               areca cdb command done
 	*****************************************************************************
 	*/
 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, 
 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 	index = phbbmu->doneq_index;
 	while((flag_srb = phbbmu->done_qbuffer[index]) != 0) {
 		phbbmu->done_qbuffer[index] = 0;
 		index++;
 		index %= ARCMSR_MAX_HBB_POSTQUEUE;	/* wrap back to 0 past the last index */
 		phbbmu->doneq_index = index;
 		/* check whether the command completed without error */
 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
 		arcmsr_drain_donequeue(acb, flag_srb, error);
 	}	/*drain reply FIFO*/
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
 {
 	u_int32_t flag_srb,throttling = 0;
 	u_int16_t error;
 
 	/*
 	*****************************************************************************
 	**               areca cdb command done
 	*****************************************************************************
 	*/
 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 	do {
 		flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
 		if (flag_srb == 0xFFFFFFFF)
 			break;
 		/* check whether the command completed without error */
 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
 		arcmsr_drain_donequeue(acb, flag_srb, error);
 		throttling++;
 		if(throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
 			throttling = 0;
 		}
 	} while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR);
 }
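 /*
 **************************************************************************
 ** A sketch of the completion throttling used above: while draining the
 ** post queue, ding the inbound doorbell once every THROTTLE_LEVEL
 ** completions so the IOP keeps feeding the queue.  THROTTLE_LEVEL,
 ** pop() and kick() are illustrative stand-ins, not the real names.
 **************************************************************************
 */
 #if 0	/* illustration only */
 #include <stdint.h>
 
 #define THROTTLE_LEVEL	12	/* stand-in for ARCMSR_HBC_ISR_THROTTLING_LEVEL */
 
 static void
 drain_with_throttle(uint32_t (*pop)(void), void (*kick)(void))
 {
 	uint32_t token, throttling = 0;
 
 	while ((token = pop()) != 0xFFFFFFFF) {	/* 0xFFFFFFFF == queue empty */
 		/* ... complete the command identified by 'token' ... */
 		if (++throttling == THROTTLE_LEVEL) {
 			kick();		/* nudge the IOP, then restart the count */
 			throttling = 0;
 		}
 	}
 }
 #endif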
 /*
 **********************************************************************
 ** 
 **********************************************************************
 */
 static uint16_t arcmsr_get_doneq_index(struct HBD_MessageUnit0 *phbdmu)
 {
 	uint16_t doneq_index, index_stripped;
 
 	doneq_index = phbdmu->doneq_index;
 	if (doneq_index & 0x4000) {
 		index_stripped = doneq_index & 0xFF;
 		index_stripped += 1;
 		index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
 		phbdmu->doneq_index = index_stripped ?
 		    (index_stripped | 0x4000) : index_stripped;
 	} else {
 		index_stripped = doneq_index;
 		index_stripped += 1;
 		index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
 		phbdmu->doneq_index = index_stripped ?
 		    index_stripped : (index_stripped | 0x4000);
 	}
 	return (phbdmu->doneq_index);
 }
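 /*
 **********************************************************************
 ** The function above advances a ring index whose bit 0x4000 acts as
 ** a phase flag: the low bits walk the done queue and the flag flips
 ** each time they wrap to 0, so equal low bytes with different phases
 ** still tell "caught up" apart from "wrapped".  A compact equivalent,
 ** as a sketch assuming a 256-entry ring like ARCMSR_MAX_HBD_POSTQUEUE:
 **********************************************************************
 */
 #if 0	/* illustration only */
 #include <stdint.h>
 
 #define POSTQUEUE_DEPTH	256	/* stand-in for ARCMSR_MAX_HBD_POSTQUEUE */
 
 static uint16_t
 next_doneq_index(uint16_t doneq_index)
 {
 	uint16_t index = doneq_index & 0xFF;
 	uint16_t phase = doneq_index & 0x4000;
 
 	index = (index + 1) % POSTQUEUE_DEPTH;
 	if (index == 0)
 		phase ^= 0x4000;	/* flip the phase on every wrap */
 	return (index | phase);
 }
 #endif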
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb)
 {
 	struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
 	u_int32_t outbound_write_pointer;
 	u_int32_t addressLow;
 	uint16_t doneq_index;
 	u_int16_t error;
 	/*
 	*****************************************************************************
 	**               areca cdb command done
 	*****************************************************************************
 	*/
 	if((CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause) &
 		ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT) == 0)
 		return;
 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, 
 		BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 	outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow;
 	doneq_index = phbdmu->doneq_index;
 	while ((doneq_index & 0xFF) != (outbound_write_pointer & 0xFF)) {
 		doneq_index = arcmsr_get_doneq_index(phbdmu);
 		addressLow = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow;
 		error = (addressLow & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
 		arcmsr_drain_donequeue(acb, addressLow, error); /*Check if command done with no error */
 		CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index);
 		outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow;
 	}
 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_interrupt_cause, ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT_CLEAR);
 	CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause); /*Dummy ioread32 to force pci flush */
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb)
 {
 	u_int32_t outbound_intStatus;
 	/*
 	*********************************************
 	**   check outbound intstatus 
 	*********************************************
 	*/
 	outbound_intStatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
 	if(!outbound_intStatus) {
 		/* it must be a shared irq */
 		return;
 	}
 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intStatus); /*clear interrupt*/
 	/* MU doorbell interrupts*/
 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
 		arcmsr_hba_doorbell_isr(acb);
 	}
 	/* MU post queue interrupts*/
 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
 		arcmsr_hba_postqueue_isr(acb);
 	}
 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
 		arcmsr_hba_message_isr(acb);
 	}
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb)
 {
 	u_int32_t outbound_doorbell;
 	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 	/*
 	*********************************************
 	**   check outbound intstatus 
 	*********************************************
 	*/
 	outbound_doorbell = READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & acb->outbound_int_enable;
 	if(!outbound_doorbell) {
 		/* it must be a shared irq */
 		return;
 	}
 	WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */
 	READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell);
 	WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
 	/* MU ioctl transfer doorbell interrupts*/
 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
 		arcmsr_iop2drv_data_wrote_handle(acb);
 	}
 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
 		arcmsr_iop2drv_data_read_handle(acb);
 	}
 	/* MU post queue interrupts*/
 	if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
 		arcmsr_hbb_postqueue_isr(acb);
 	}
 	if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
 		arcmsr_hbb_message_isr(acb);
 	}
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb)
 {
 	u_int32_t host_interrupt_status;
 	/*
 	*********************************************
 	**   check outbound intstatus 
 	*********************************************
 	*/
 	host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) &
 		(ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
 		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
 	if(!host_interrupt_status) {
 		/* it must be a shared irq */
 		return;
 	}
 	do {
 		/* MU doorbell interrupts*/
 		if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
 			arcmsr_hbc_doorbell_isr(acb);
 		}
 		/* MU post queue interrupts*/
 		if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
 			arcmsr_hbc_postqueue_isr(acb);
 		}
 		host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
 	} while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_handle_hbd_isr( struct AdapterControlBlock *acb)
 {
 	u_int32_t host_interrupt_status;
 	u_int32_t intmask_org;
 	/*
 	*********************************************
 	**   check outbound intstatus 
 	*********************************************
 	*/
 	host_interrupt_status = CHIP_REG_READ32(HBD_MessageUnit, 0, host_int_status) & acb->outbound_int_enable;
 	if(!(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_INT)) {
 		/* it must be a shared irq */
 		return;
 	}
 	/* disable outbound interrupt */
 	intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable);	/* save the current mask */
 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE);
 	/* MU doorbell interrupts*/
 	if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_DOORBELL_INT) {
 		arcmsr_hbd_doorbell_isr(acb);
 	}
 	/* MU post queue interrupts*/
 	if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_POSTQUEUE_INT) {
 		arcmsr_hbd_postqueue_isr(acb);
 	}
 	/* enable all outbound interrupt */
 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | ARCMSR_HBDMU_ALL_INT_ENABLE);
 //	CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable);
 }
 /*
 ******************************************************************************
 ******************************************************************************
 */
 static void arcmsr_interrupt(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A:
 		arcmsr_handle_hba_isr(acb);
 		break;
 	case ACB_ADAPTER_TYPE_B:
 		arcmsr_handle_hbb_isr(acb);
 		break;
 	case ACB_ADAPTER_TYPE_C:
 		arcmsr_handle_hbc_isr(acb);
 		break;
 	case ACB_ADAPTER_TYPE_D:
 		arcmsr_handle_hbd_isr(acb);
 		break;
 	default:
 		printf("arcmsr%d: interrupt service,"
 		" unknown adapter type=%d\n", acb->pci_unit, acb->adapter_type);
 		break;
 	}
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_intr_handler(void *arg)
 {
 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
 
 	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
 	arcmsr_interrupt(acb);
 	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
 }
 /*
 ******************************************************************************
 ******************************************************************************
 */
 static void	arcmsr_polling_devmap(void *arg)
 {
 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A:
 		CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
 		break;
 
 	case ACB_ADAPTER_TYPE_B: {
 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 			WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
 		}
 		break;
 
 	case ACB_ADAPTER_TYPE_C:
 		CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
 		CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
 		break;
 
 	case ACB_ADAPTER_TYPE_D:
 		CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
 		break;
 	}
 
 	if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0) {
 		callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb);	/* poll every 5 seconds */
 	}
 }
 
 /*
 *******************************************************************************
 **
 *******************************************************************************
 */
 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
 {
 	u_int32_t intmask_org;
 
 	if(acb != NULL) {
 		/* stop adapter background rebuild */
 		if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
 			intmask_org = arcmsr_disable_allintr(acb);
 			arcmsr_stop_adapter_bgrb(acb);
 			arcmsr_flush_adapter_cache(acb);
 			arcmsr_enable_allintr(acb, intmask_org);
 		}
 	}
 }
 /*
 ***********************************************************************
 **
 ************************************************************************
 */
 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
 {
 	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
 	u_int32_t retvalue = EINVAL;
 
 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) arg;
 	if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
 		return retvalue;
 	}
 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
 	switch(ioctl_cmd) {
 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
 			u_int8_t *pQbuffer;
 			u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer;			
 			u_int32_t allxfer_len=0;
 
 			while((acb->rqbuf_firstindex != acb->rqbuf_lastindex) 
 				&& (allxfer_len < 1031)) {
 				/*copy READ QBUFFER to srb*/
 				pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
 				*ptmpQbuffer = *pQbuffer;
 				acb->rqbuf_firstindex++;
 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
 				/* wrap back to 0 past the last index */
 				ptmpQbuffer++;
 				allxfer_len++;
 			}
 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
 				struct QBUFFER *prbuffer;
 
 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
 				prbuffer = arcmsr_get_iop_rqbuffer(acb);
 				if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
 					acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
 			}
 			pcmdmessagefld->cmdmessage.Length = allxfer_len;
 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
 			retvalue = ARCMSR_MESSAGE_SUCCESS;
 		}
 		break;
 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
 			u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
 			u_int8_t *pQbuffer;
 			u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer;
 
 			user_len = pcmdmessagefld->cmdmessage.Length;
 			/* check whether this request's transfer length would overflow the qbuffer array */
 			wqbuf_lastindex = acb->wqbuf_lastindex;
 			wqbuf_firstindex = acb->wqbuf_firstindex;
 			if(wqbuf_lastindex != wqbuf_firstindex) {
 				arcmsr_Write_data_2iop_wqbuffer(acb);
 				pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR;
 			} else {
 				my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1) &
 					(ARCMSR_MAX_QBUFFER - 1);
 				if(my_empty_len >= user_len) {
 					while(user_len > 0) {
 						/*copy srb data to wqbuffer*/
 						pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
 						*pQbuffer = *ptmpuserbuffer;
 						acb->wqbuf_lastindex++;
 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
 						/* wrap back to 0 past the last index */
 						ptmpuserbuffer++;
 						user_len--;
 					}
 					/* post the first Qbuffer */
 					if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
 						acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
 						arcmsr_Write_data_2iop_wqbuffer(acb);
 					}
 					pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
 				} else {
 					pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR;
 				}
 			}
 			retvalue = ARCMSR_MESSAGE_SUCCESS;
 		}
 		break;
 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
 			u_int8_t *pQbuffer = acb->rqbuffer;
 
 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
 				arcmsr_iop_message_read(acb);
 				/* signal the IOP that the data has been read */
 			}
 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
 			acb->rqbuf_firstindex = 0;
 			acb->rqbuf_lastindex = 0;
 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
 			retvalue = ARCMSR_MESSAGE_SUCCESS;
 		}
 		break;
 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
 		{
 			u_int8_t *pQbuffer = acb->wqbuffer;
 
 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
 				arcmsr_iop_message_read(acb);
 				/* signal the IOP that the data has been read */
 			}
 			acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
 			acb->wqbuf_firstindex = 0;
 			acb->wqbuf_lastindex = 0;
 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
 			retvalue = ARCMSR_MESSAGE_SUCCESS;
 		}
 		break;
 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
 			u_int8_t *pQbuffer;
 
 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
 				arcmsr_iop_message_read(acb);
 				/* signal the IOP that the data has been read */
 			}
 			acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
 					|ACB_F_MESSAGE_RQBUFFER_CLEARED
 					|ACB_F_MESSAGE_WQBUFFER_READ);
 			acb->rqbuf_firstindex = 0;
 			acb->rqbuf_lastindex = 0;
 			acb->wqbuf_firstindex = 0;
 			acb->wqbuf_lastindex = 0;
 			pQbuffer = acb->rqbuffer;
 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
 			pQbuffer = acb->wqbuffer;
 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
 			retvalue = ARCMSR_MESSAGE_SUCCESS;
 		}
 		break;
 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
 			retvalue = ARCMSR_MESSAGE_SUCCESS;
 		}
 		break;
 	case ARCMSR_MESSAGE_SAY_HELLO: {
 			const char *hello_string = "Hello! I am ARCMSR";
 			u_int8_t *puserbuffer = (u_int8_t *)pcmdmessagefld->messagedatabuffer;
 
 			/* memcpy() returns its destination, so it cannot fail here */
 			memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string));
 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
 			retvalue = ARCMSR_MESSAGE_SUCCESS;
 		}
 		break;
 	case ARCMSR_MESSAGE_SAY_GOODBYE: {
 			arcmsr_iop_parking(acb);
 			retvalue = ARCMSR_MESSAGE_SUCCESS;
 		}
 		break;
 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
 			arcmsr_flush_adapter_cache(acb);
 			retvalue = ARCMSR_MESSAGE_SUCCESS;
 		}
 		break;
 	}
 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
 	return (retvalue);
 }
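 /*
 ***********************************************************************
 ** The WRITE_WQBUFFER case above sizes the copy with the classic
 ** power-of-two ring-buffer formula: with one slot deliberately kept
 ** empty so that "full" and "empty" differ, the writable space is
 ** (first - last - 1) masked by (size - 1).  A sketch, assuming the
 ** queue size (ARCMSR_MAX_QBUFFER) really is a power of two:
 ***********************************************************************
 */
 #if 0	/* illustration only */
 #include <stdint.h>
 
 #define QBUF_SIZE	4096	/* stand-in for ARCMSR_MAX_QBUFFER */
 
 static uint32_t
 qbuf_free_space(uint32_t firstindex, uint32_t lastindex)
 {
 	/* unsigned wraparound keeps this correct even when first < last */
 	return ((firstindex - lastindex - 1) & (QBUF_SIZE - 1));
 }
 #endif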
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_free_srb(struct CommandControlBlock *srb)
 {
 	struct AdapterControlBlock	*acb;
 
 	acb = srb->acb;
 	ARCMSR_LOCK_ACQUIRE(&acb->srb_lock);
 	srb->srb_state = ARCMSR_SRB_DONE;
 	srb->srb_flags = 0;
 	acb->srbworkingQ[acb->workingsrb_doneindex] = srb;
 	acb->workingsrb_doneindex++;
 	acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM;
 	ARCMSR_LOCK_RELEASE(&acb->srb_lock);
 }
 /*
 **************************************************************************
 **************************************************************************
 */
 struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb)
 {
 	struct CommandControlBlock *srb = NULL;
 	u_int32_t workingsrb_startindex, workingsrb_doneindex;
 
 	ARCMSR_LOCK_ACQUIRE(&acb->srb_lock);
 	workingsrb_doneindex = acb->workingsrb_doneindex;
 	workingsrb_startindex = acb->workingsrb_startindex;
 	srb = acb->srbworkingQ[workingsrb_startindex];
 	workingsrb_startindex++;
 	workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM;
 	if(workingsrb_doneindex != workingsrb_startindex) {
 		acb->workingsrb_startindex = workingsrb_startindex;
 	} else {
 		srb = NULL;
 	}
 	ARCMSR_LOCK_RELEASE(&acb->srb_lock);
 	return(srb);
 }
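 /*
 **************************************************************************
 ** arcmsr_free_srb() and arcmsr_get_freesrb() above implement a ring of
 ** SRB pointers: put advances the done index, get advances the start
 ** index, and the ring reads as empty when advancing start would land
 ** on done.  A single-threaded sketch of the same index dance (the real
 ** code holds srb_lock around both operations; POOL_SIZE is a stand-in
 ** for ARCMSR_MAX_FREESRB_NUM):
 **************************************************************************
 */
 #if 0	/* illustration only */
 #include <stddef.h>
 
 #define POOL_SIZE	384	/* stand-in value */
 
 struct srb_ring {
 	void	*slot[POOL_SIZE];
 	unsigned start;		/* next slot to hand out */
 	unsigned done;		/* next slot to refill */
 };
 
 static void *
 ring_get(struct srb_ring *r)
 {
 	void *srb = r->slot[r->start];
 	unsigned next = (r->start + 1) % POOL_SIZE;
 
 	if (next == r->done)	/* would collide: ring is empty */
 		return (NULL);
 	r->start = next;
 	return (srb);
 }
 
 static void
 ring_put(struct srb_ring *r, void *srb)
 {
 	r->slot[r->done] = srb;
 	r->done = (r->done + 1) % POOL_SIZE;
 }
 #endif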
 /*
 **************************************************************************
 **************************************************************************
 */
 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb)
 {
 	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
 	int retvalue = 0, transfer_len = 0;
 	char *buffer;
-	u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 |
-				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 |
-				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8  |
-				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8];
+	uint8_t *ptr = scsiio_cdb_ptr(&pccb->csio);
+	u_int32_t controlcode = (u_int32_t ) ptr[5] << 24 |
+				(u_int32_t ) ptr[6] << 16 |
+				(u_int32_t ) ptr[7] << 8  |
+				(u_int32_t ) ptr[8];
 					/* 4 bytes: Areca io control code */
 	if ((pccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
 		buffer = pccb->csio.data_ptr;
 		transfer_len = pccb->csio.dxfer_len;
 	} else {
 		retvalue = ARCMSR_MESSAGE_FAIL;
 		goto message_out;
 	}
 	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
 		retvalue = ARCMSR_MESSAGE_FAIL;
 		goto message_out;
 	}
 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
 	switch(controlcode) {
 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
 			u_int8_t *pQbuffer;
 			u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer;
 			int32_t allxfer_len = 0;
 
 			ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
 			while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
 				&& (allxfer_len < 1031)) {
 				pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
 				*ptmpQbuffer = *pQbuffer;
 				acb->rqbuf_firstindex++;
 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
 				ptmpQbuffer++;
 				allxfer_len++;
 			}
 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
 				struct QBUFFER  *prbuffer;
 
 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
 				prbuffer = arcmsr_get_iop_rqbuffer(acb);
 				if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
 					acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
 			}
 			pcmdmessagefld->cmdmessage.Length = allxfer_len;
 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
 			retvalue = ARCMSR_MESSAGE_SUCCESS;
 			ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
 		}
 		break;
 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
 			int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
 			u_int8_t *pQbuffer;
 			u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer;
 
 			user_len = pcmdmessagefld->cmdmessage.Length;
 			ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
 			wqbuf_lastindex = acb->wqbuf_lastindex;
 			wqbuf_firstindex = acb->wqbuf_firstindex;
 			if (wqbuf_lastindex != wqbuf_firstindex) {
 				arcmsr_Write_data_2iop_wqbuffer(acb);
 				/* on error, report sense data */
 				if(pccb->csio.sense_len) {
 					((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
 					/* Valid, ErrorCode */
 					((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
 					/* FileMark, EndOfMedia, IncorrectLength, Reserved, SenseKey */
 					((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
 					/* AdditionalSenseLength */
 					((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
 					/* AdditionalSenseCode */
 				}
 				retvalue = ARCMSR_MESSAGE_FAIL;
 			} else {
 				my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
 						&(ARCMSR_MAX_QBUFFER - 1);
 				if (my_empty_len >= user_len) {
 					while (user_len > 0) {
 						pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
 						*pQbuffer = *ptmpuserbuffer;
 						acb->wqbuf_lastindex++;
 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
 						ptmpuserbuffer++;
 						user_len--;
 					}
 					if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
 						acb->acb_flags &=
 						    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
 						arcmsr_Write_data_2iop_wqbuffer(acb);
 					}
 				} else {
 					/* on error, report sense data */
 					if(pccb->csio.sense_len) {
 						((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
 						/* Valid, ErrorCode */
 						((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
 						/* FileMark, EndOfMedia, IncorrectLength, Reserved, SenseKey */
 						((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
 						/* AdditionalSenseLength */
 						((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
 						/* AdditionalSenseCode */
 					}
 					retvalue = ARCMSR_MESSAGE_FAIL;
 				}
 			}
 			ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
 		}
 		break;
 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
 			u_int8_t *pQbuffer = acb->rqbuffer;
 
 			ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
 				arcmsr_iop_message_read(acb);
 			}
 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
 			acb->rqbuf_firstindex = 0;
 			acb->rqbuf_lastindex = 0;
 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
 			pcmdmessagefld->cmdmessage.ReturnCode =
 			    ARCMSR_MESSAGE_RETURNCODE_OK;
 			ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
 		}
 		break;
 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
 			u_int8_t *pQbuffer = acb->wqbuffer;
 
 			ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
 				arcmsr_iop_message_read(acb);
 			}
 			acb->acb_flags |=
 				(ACB_F_MESSAGE_WQBUFFER_CLEARED |
 					ACB_F_MESSAGE_WQBUFFER_READ);
 			acb->wqbuf_firstindex = 0;
 			acb->wqbuf_lastindex = 0;
 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
 			pcmdmessagefld->cmdmessage.ReturnCode =
 				ARCMSR_MESSAGE_RETURNCODE_OK;
 			ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
 		}
 		break;
 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
 			u_int8_t *pQbuffer;
 
 			ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
 				arcmsr_iop_message_read(acb);
 			}
 			acb->acb_flags |=
 				(ACB_F_MESSAGE_WQBUFFER_CLEARED
 				| ACB_F_MESSAGE_RQBUFFER_CLEARED
 				| ACB_F_MESSAGE_WQBUFFER_READ);
 			acb->rqbuf_firstindex = 0;
 			acb->rqbuf_lastindex = 0;
 			acb->wqbuf_firstindex = 0;
 			acb->wqbuf_lastindex = 0;
 			pQbuffer = acb->rqbuffer;
 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
 			pQbuffer = acb->wqbuffer;
 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
 			ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
 		}
 		break;
 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
 		}
 		break;
 	case ARCMSR_MESSAGE_SAY_HELLO: {
 			const char *hello_string = "Hello! I am ARCMSR";
 
 			memcpy(pcmdmessagefld->messagedatabuffer, hello_string,
 				(int16_t)strlen(hello_string));
 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
 		}
 		break;
 	case ARCMSR_MESSAGE_SAY_GOODBYE:
 		arcmsr_iop_parking(acb);
 		break;
 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
 		arcmsr_flush_adapter_cache(acb);
 		break;
 	default:
 		retvalue = ARCMSR_MESSAGE_FAIL;
 	}
 message_out:
 	return (retvalue);
 }
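 /*
 *********************************************************************
 ** The WRITE_WQBUFFER error paths above hand-roll a fixed-format SCSI
 ** sense block: response code 0x70 with the valid bit set, sense key
 ** 0x5 (ILLEGAL REQUEST), additional length 0x0A, and ASC 0x20
 ** (INVALID COMMAND OPERATION CODE).  The same bytes, written as a
 ** sketch with named offsets instead of magic indices:
 *********************************************************************
 */
 #if 0	/* illustration only */
 #include <stdint.h>
 #include <string.h>
 
 static void
 build_illegal_request_sense(uint8_t *sense, size_t len)
 {
 	if (len < 18)
 		return;			/* fixed format needs 18 bytes */
 	memset(sense, 0, 18);
 	sense[0] = 0x80 | 0x70;	/* VALID | current error, fixed format */
 	sense[2] = 0x05;	/* sense key: ILLEGAL REQUEST */
 	sense[7] = 0x0A;	/* additional sense length */
 	sense[12] = 0x20;	/* ASC: INVALID COMMAND OPERATION CODE */
 }
 #endif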
 /*
 *********************************************************************
 *********************************************************************
 */
 static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
 {
 	struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)srb->acb;
 	union ccb *pccb;
 	int target, lun; 
 
 	pccb = srb->pccb;
 	target = pccb->ccb_h.target_id;
 	lun = pccb->ccb_h.target_lun;
 	acb->pktRequestCount++;
 	if(error != 0) {
 		if(error != EFBIG) {
 			printf("arcmsr%d: unexpected error %x"
 				" returned from 'bus_dmamap_load' \n"
 				, acb->pci_unit, error);
 		}
 		if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
 			pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
 		}
 		arcmsr_srb_complete(srb, 0);
 		return;
 	}
 	if(nseg > ARCMSR_MAX_SG_ENTRIES) {
 		pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
 		arcmsr_srb_complete(srb, 0);
 		return;
 	}
 	if(acb->acb_flags & ACB_F_BUS_RESET) {
 		printf("arcmsr%d: bus reset and return busy \n", acb->pci_unit);
 		pccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
 		arcmsr_srb_complete(srb, 0);
 		return;
 	}
 	if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
 		u_int8_t block_cmd, cmd;
 
-		cmd = pccb->csio.cdb_io.cdb_bytes[0];
+		cmd = scsiio_cdb_ptr(&pccb->csio)[0];
 		block_cmd = cmd & 0x0f;
 		if(block_cmd == 0x08 || block_cmd == 0x0a) {
 			printf("arcmsr%d:block 'read/write' command "
 				"with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n"
 				, acb->pci_unit, cmd, target, lun);
 			pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
 			arcmsr_srb_complete(srb, 0);
 			return;
 		}
 	}
 	if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
 		if(nseg != 0) {
 			bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
 		}
 		arcmsr_srb_complete(srb, 0);
 		return;
 	}
 	if(acb->srboutstandingcount >= acb->maxOutstanding) {
 		if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) == 0)
 		{
 			xpt_freeze_simq(acb->psim, 1);
 			acb->acb_flags |= ACB_F_CAM_DEV_QFRZN;
 		}
 		pccb->ccb_h.status &= ~CAM_SIM_QUEUED;
 		pccb->ccb_h.status |= CAM_REQUEUE_REQ;
 		arcmsr_srb_complete(srb, 0);
 		return;
 	}
 	pccb->ccb_h.status |= CAM_SIM_QUEUED;
 	arcmsr_build_srb(srb, dm_segs, nseg);
 	arcmsr_post_srb(acb, srb);
 	if (pccb->ccb_h.timeout != CAM_TIME_INFINITY)
 	{
 		arcmsr_callout_init(&srb->ccb_callout);
 		callout_reset_sbt(&srb->ccb_callout, SBT_1MS *
 		    (pccb->ccb_h.timeout + (ARCMSR_TIMEOUT_DELAY * 1000)), 0,
 		    arcmsr_srb_timeout, srb, 0);
 		srb->srb_flags |= SRB_FLAG_TIMER_START;
 	}
 }
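 /*
 *****************************************************************************************
 ** The gone-volume check above classifies the CDB by its low nibble:
 ** across the SCSI opcode groups, READ(6/10/12/16) are 0x08/0x28/0xA8/0x88
 ** and WRITE(6/10/12/16) are 0x0A/0x2A/0xAA/0x8A, so `cmd & 0x0f` spots
 ** block I/O without enumerating every opcode (the test is deliberately
 ** loose and can match a few other opcodes).  A sketch:
 *****************************************************************************************
 */
 #if 0	/* illustration only */
 #include <stdint.h>
 
 static int
 is_block_read_write(uint8_t opcode)
 {
 	uint8_t low_nibble = opcode & 0x0f;
 
 	return (low_nibble == 0x08 || low_nibble == 0x0a);
 }
 #endif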
 /*
 *****************************************************************************************
 *****************************************************************************************
 */
 static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb)
 {
 	struct CommandControlBlock *srb;
 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
 	u_int32_t intmask_org;
 	int i = 0;
 
 	acb->num_aborts++;
 	/*
 	***************************************************************************
 	** The upper layer acquires the abort lock just prior to calling us.
 	** First determine whether we currently own this command.
 	** Start by searching the device queue.  If the command is not found
 	** there at all and the system only wanted the command aborted,
 	** return success.
 	***************************************************************************
 	*/
 	if(acb->srboutstandingcount != 0) {
 		/* disable all outbound interrupt */
 		intmask_org = arcmsr_disable_allintr(acb);
 		for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
 			srb = acb->psrb_pool[i];
 			if(srb->srb_state == ARCMSR_SRB_START) {
 				if(srb->pccb == abortccb) {
 					srb->srb_state = ARCMSR_SRB_ABORTED;
 					printf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'"
 						"outstanding command \n"
 						, acb->pci_unit, abortccb->ccb_h.target_id
 						, abortccb->ccb_h.target_lun, srb);
 					arcmsr_polling_srbdone(acb, srb);
 					/* enable outbound Post Queue, outbound doorbell Interrupt */
 					arcmsr_enable_allintr(acb, intmask_org);
 					return (TRUE);
 				}
 			}
 		}
 		/* enable outbound Post Queue, outbound doorbell Interrupt */
 		arcmsr_enable_allintr(acb, intmask_org);
 	}
 	return(FALSE);
 }
 /*
 ****************************************************************************
 ****************************************************************************
 */
 static void arcmsr_bus_reset(struct AdapterControlBlock *acb)
 {
 	int retry = 0;
 
 	acb->num_resets++;
 	acb->acb_flags |= ACB_F_BUS_RESET;
 	while(acb->srboutstandingcount != 0 && retry < 400) {
 		arcmsr_interrupt(acb);
 		UDELAY(25000);
 		retry++;
 	}
 	arcmsr_iop_reset(acb);
 	acb->acb_flags &= ~ACB_F_BUS_RESET;
 } 
 /*
 **************************************************************************
 **************************************************************************
 */
 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
 		union ccb *pccb)
 {
 	if (pccb->ccb_h.target_lun) {
 		pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
 		xpt_done(pccb);
 		return;
 	}
 	pccb->ccb_h.status |= CAM_REQ_CMP;
-	switch (pccb->csio.cdb_io.cdb_bytes[0]) {
+	switch (scsiio_cdb_ptr(&pccb->csio)[0]) {
 	case INQUIRY: {
 		unsigned char inqdata[36];
 		char *buffer = pccb->csio.data_ptr;
 	
 		inqdata[0] = T_PROCESSOR;	/* Periph Qualifier & Periph Dev Type */
 		inqdata[1] = 0;			/* rem media bit & Dev Type Modifier */
 		inqdata[2] = 0;			/* ISO, ECMA, & ANSI versions */
 		inqdata[3] = 0;
 		inqdata[4] = 31;		/* length of additional data */
 		inqdata[5] = 0;
 		inqdata[6] = 0;
 		inqdata[7] = 0;
 		strncpy(&inqdata[8], "Areca   ", 8);	/* Vendor Identification */
 		strncpy(&inqdata[16], "RAID controller ", 16);	/* Product Identification */
 		strncpy(&inqdata[32], "R001", 4); /* Product Revision */
 		memcpy(buffer, inqdata, sizeof(inqdata));
 		xpt_done(pccb);
 	}
 	break;
 	case WRITE_BUFFER:
 	case READ_BUFFER: {
 		if (arcmsr_iop_message_xfer(acb, pccb)) {
 			pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
 			pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
 		}
 		xpt_done(pccb);
 	}
 	break;
 	default:
 		xpt_done(pccb);
 	}
 }
 /*
 *********************************************************************
 *********************************************************************
 */
 static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
 {
 	struct AdapterControlBlock *acb;
 
 	acb = (struct AdapterControlBlock *) cam_sim_softc(psim);
 	if(acb == NULL) {
 		pccb->ccb_h.status |= CAM_REQ_INVALID;
 		xpt_done(pccb);
 		return;
 	}
 	switch (pccb->ccb_h.func_code) {
 	case XPT_SCSI_IO: {
 			struct CommandControlBlock *srb;
 			int target = pccb->ccb_h.target_id;
 			int error;
+
+			if (pccb->ccb_h.flags & CAM_CDB_PHYS) {
+				pccb->ccb_h.status = CAM_REQ_INVALID;
+				xpt_done(pccb);
+				return;
+			}
 
 			if(target == 16) {
 				/* virtual device for iop message transfer */
 				arcmsr_handle_virtual_command(acb, pccb);
 				return;
 			}
 			if((srb = arcmsr_get_freesrb(acb)) == NULL) {
 				pccb->ccb_h.status |= CAM_RESRC_UNAVAIL;
 				xpt_done(pccb);
 				return;
 			}
 			pccb->ccb_h.arcmsr_ccbsrb_ptr = srb;
 			pccb->ccb_h.arcmsr_ccbacb_ptr = acb;
 			srb->pccb = pccb;
 			error =	bus_dmamap_load_ccb(acb->dm_segs_dmat
 				, srb->dm_segs_dmamap
 				, pccb
 				, arcmsr_execute_srb, srb, /*flags*/0);
 			if(error == EINPROGRESS) {
 				xpt_freeze_simq(acb->psim, 1);
 				pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
 			}
 			break;
 		}
 	case XPT_TARGET_IO: {
 			/* target mode does not yet support vendor-specific commands. */
 			pccb->ccb_h.status |= CAM_REQ_CMP;
 			xpt_done(pccb);
 			break;
 		}
 	case XPT_PATH_INQ: {
 			struct ccb_pathinq *cpi = &pccb->cpi;
 
 			cpi->version_num = 1;
 			cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
 			cpi->target_sprt = 0;
 			cpi->hba_misc = 0;
 			cpi->hba_eng_cnt = 0;
 			cpi->max_target = ARCMSR_MAX_TARGETID;        /* 0-16 */
 			cpi->max_lun = ARCMSR_MAX_TARGETLUN;	    /* 0-7 */
 			cpi->initiator_id = ARCMSR_SCSI_INITIATOR_ID; /* 255 */
 			cpi->bus_id = cam_sim_bus(psim);
 			strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 			strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN);
 			strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
 			cpi->unit_number = cam_sim_unit(psim);
 		#ifdef	CAM_NEW_TRAN_CODE
 			if(acb->adapter_bus_speed == ACB_BUS_SPEED_12G)
 				cpi->base_transfer_speed = 1200000;
 			else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
 				cpi->base_transfer_speed = 600000;
 			else
 				cpi->base_transfer_speed = 300000;
 			if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
 			   (acb->vendor_device_id == PCIDevVenIDARC1680) ||
 			   (acb->vendor_device_id == PCIDevVenIDARC1214))
 			{
 				cpi->transport = XPORT_SAS;
 				cpi->transport_version = 0;
 				cpi->protocol_version = SCSI_REV_SPC2;
 			}
 			else
 			{
 				cpi->transport = XPORT_SPI;
 				cpi->transport_version = 2;
 				cpi->protocol_version = SCSI_REV_2;
 			}
 			cpi->protocol = PROTO_SCSI;
 		#endif
 			cpi->ccb_h.status |= CAM_REQ_CMP;
 			xpt_done(pccb);
 			break;
 		}
 	case XPT_ABORT: {
 			union ccb *pabort_ccb;
 	
 			pabort_ccb = pccb->cab.abort_ccb;
 			switch (pabort_ccb->ccb_h.func_code) {
 			case XPT_ACCEPT_TARGET_IO:
 			case XPT_IMMED_NOTIFY:
 			case XPT_CONT_TARGET_IO:
 				if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) {
 					pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED;
 					xpt_done(pabort_ccb);
 					pccb->ccb_h.status |= CAM_REQ_CMP;
 				} else {
 					xpt_print_path(pabort_ccb->ccb_h.path);
 					printf("Not found\n");
 					pccb->ccb_h.status |= CAM_PATH_INVALID;
 				}
 				break;
 			case XPT_SCSI_IO:
 				pccb->ccb_h.status |= CAM_UA_ABORT;
 				break;
 			default:
 				pccb->ccb_h.status |= CAM_REQ_INVALID;
 				break;
 			}
 			xpt_done(pccb);
 			break;
 		}
 	case XPT_RESET_BUS:
 	case XPT_RESET_DEV: {
 			u_int32_t	i;
 
 			arcmsr_bus_reset(acb);
 			for (i=0; i < 500; i++) {
 				DELAY(1000);	
 			}
 			pccb->ccb_h.status |= CAM_REQ_CMP;
 			xpt_done(pccb);
 			break;
 		}
 	case XPT_TERM_IO: {
 			pccb->ccb_h.status |= CAM_REQ_INVALID;
 			xpt_done(pccb);
 			break;
 		}
 	case XPT_GET_TRAN_SETTINGS: {
 			struct ccb_trans_settings *cts;
 
 			if(pccb->ccb_h.target_id == 16) {
 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
 				xpt_done(pccb);
 				break;
 			}
 			cts = &pccb->cts;
 		#ifdef	CAM_NEW_TRAN_CODE
 			{
 				struct ccb_trans_settings_scsi *scsi;
 				struct ccb_trans_settings_spi *spi;
 				struct ccb_trans_settings_sas *sas;	
 
 				scsi = &cts->proto_specific.scsi;
 				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
 				scsi->valid = CTS_SCSI_VALID_TQ;
 				cts->protocol = PROTO_SCSI;
 
 				if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
 				   (acb->vendor_device_id == PCIDevVenIDARC1680) ||
 				   (acb->vendor_device_id == PCIDevVenIDARC1214))
 				{
 					cts->protocol_version = SCSI_REV_SPC2;
 					cts->transport_version = 0;
 					cts->transport = XPORT_SAS;
 					sas = &cts->xport_specific.sas;
 					sas->valid = CTS_SAS_VALID_SPEED;
 					if (acb->adapter_bus_speed == ACB_BUS_SPEED_12G)
 						sas->bitrate = 1200000;
 					else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
 						sas->bitrate = 600000;
 					else if(acb->adapter_bus_speed == ACB_BUS_SPEED_3G)
 						sas->bitrate = 300000;
 				}
 				else
 				{
 					cts->protocol_version = SCSI_REV_2;
 					cts->transport_version = 2;
 					cts->transport = XPORT_SPI;
 					spi = &cts->xport_specific.spi;
 					spi->flags = CTS_SPI_FLAGS_DISC_ENB;
 					if (acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
 						spi->sync_period = 1;
 					else
 						spi->sync_period = 2;
 					spi->sync_offset = 32;
 					spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
 					spi->valid = CTS_SPI_VALID_DISC
 						| CTS_SPI_VALID_SYNC_RATE
 						| CTS_SPI_VALID_SYNC_OFFSET
 						| CTS_SPI_VALID_BUS_WIDTH;
 				}
 			}
 		#else
 			{
 				cts->flags = (CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
 				if (acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
 					cts->sync_period = 1;
 				else
 					cts->sync_period = 2;
 				cts->sync_offset = 32;
 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
 				cts->valid = CCB_TRANS_SYNC_RATE_VALID | 
 				CCB_TRANS_SYNC_OFFSET_VALID | 
 				CCB_TRANS_BUS_WIDTH_VALID | 
 				CCB_TRANS_DISC_VALID | 
 				CCB_TRANS_TQ_VALID;
 			}
 		#endif
 			pccb->ccb_h.status |= CAM_REQ_CMP;
 			xpt_done(pccb);
 			break;
 		}
 	case XPT_SET_TRAN_SETTINGS: {
 			pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
 			xpt_done(pccb);
 			break;
 		}
 	case XPT_CALC_GEOMETRY:
 			if(pccb->ccb_h.target_id == 16) {
 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
 				xpt_done(pccb);
 				break;
 			}
 #if __FreeBSD_version >= 500000
 			cam_calc_geometry(&pccb->ccg, 1);
 #else
 			{
 			struct ccb_calc_geometry *ccg;
 			u_int32_t size_mb;
 			u_int32_t secs_per_cylinder;
 
 			ccg = &pccb->ccg;
 			if (ccg->block_size == 0) {
 				pccb->ccb_h.status = CAM_REQ_INVALID;
 				xpt_done(pccb);
 				break;
 			}
 			if(((1024L * 1024L)/ccg->block_size) == 0) {
 				pccb->ccb_h.status = CAM_REQ_INVALID;
 				xpt_done(pccb);
 				break;
 			}
 			size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size);
 			if(size_mb > 1024 ) {
 				ccg->heads = 255;
 				ccg->secs_per_track = 63;
 			} else {
 				ccg->heads = 64;
 				ccg->secs_per_track = 32;
 			}
 			secs_per_cylinder = ccg->heads * ccg->secs_per_track;
 			ccg->cylinders = ccg->volume_size / secs_per_cylinder;
 			pccb->ccb_h.status |= CAM_REQ_CMP;
 			}
 #endif
 			xpt_done(pccb);
 			break;
 	default:
 		pccb->ccb_h.status |= CAM_REQ_INVALID;
 		xpt_done(pccb);
 		break;
 	}
 }
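 /*
 *********************************************************************
 ** The pre-5.0 XPT_CALC_GEOMETRY fallback above synthesizes a CHS
 ** geometry from the volume size: 255 heads / 63 sectors per track
 ** for volumes over 1 GB, 64 / 32 otherwise, with cylinders derived
 ** from the total block count.  A standalone sketch of the same
 ** arithmetic:
 *********************************************************************
 */
 #if 0	/* illustration only */
 #include <stdint.h>
 
 static void
 calc_legacy_geometry(uint32_t volume_size, uint32_t block_size,
 	uint32_t *heads, uint32_t *secs_per_track, uint32_t *cylinders)
 {
 	uint32_t blocks_per_mb, size_mb;
 
 	blocks_per_mb = (1024UL * 1024UL) / block_size;	/* caller rejects 0 */
 	size_mb = volume_size / blocks_per_mb;
 	if (size_mb > 1024) {
 		*heads = 255;
 		*secs_per_track = 63;
 	} else {
 		*heads = 64;
 		*secs_per_track = 32;
 	}
 	*cylinders = volume_size / (*heads * *secs_per_track);
 }
 #endif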
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
 {
 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
 		printf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
 	}
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
 {
 	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
 	WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_START_BGRB);
 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
 		printf( "arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
 	}
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb)
 {
 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
 		printf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
 	}
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_start_hbd_bgrb(struct AdapterControlBlock *acb)
 {
 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
 	if(!arcmsr_hbd_wait_msgint_ready(acb)) {
 		printf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
 	}
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A:
 		arcmsr_start_hba_bgrb(acb);
 		break;
 	case ACB_ADAPTER_TYPE_B:
 		arcmsr_start_hbb_bgrb(acb);
 		break;
 	case ACB_ADAPTER_TYPE_C:
 		arcmsr_start_hbc_bgrb(acb);
 		break;
 	case ACB_ADAPTER_TYPE_D:
 		arcmsr_start_hbd_bgrb(acb);
 		break;
 	}
 }
 /*
 **********************************************************************
 ** 
 **********************************************************************
 */
 static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
 {
 	struct CommandControlBlock *srb;
 	u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
 	u_int16_t	error;
 
 polling_ccb_retry:
 	poll_count++;
 	outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);	/*clear interrupt*/
 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 	while(1) {
 		if((flag_srb = CHIP_REG_READ32(HBA_MessageUnit, 
 			0, outbound_queueport)) == 0xFFFFFFFF) {
 			if(poll_srb_done) {
 				break;	/* no more completed CCBs in the chip FIFO */
 			} else {
 				UDELAY(25000);
 				if ((poll_count > 100) && (poll_srb != NULL)) {
 					break;
 				}
 				goto polling_ccb_retry;
 			}
 		}
 		/* check whether the command completed without error */
 		srb = (struct CommandControlBlock *)
 			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
 		poll_srb_done = (srb == poll_srb) ? 1:0;
 		if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
 			if(srb->srb_state == ARCMSR_SRB_ABORTED) {
 				printf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
 					"poll command abort successfully \n"
 					, acb->pci_unit
 					, srb->pccb->ccb_h.target_id
 					, srb->pccb->ccb_h.target_lun, srb);
 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
 				arcmsr_srb_complete(srb, 1);
 				continue;
 			}
 			printf("arcmsr%d: polling get an illegal srb command done srb='%p'"
 				"srboutstandingcount=%d \n"
 				, acb->pci_unit
 				, srb, acb->srboutstandingcount);
 			continue;
 		}
 		arcmsr_report_srb_state(acb, srb, error);
 	}	/*drain reply FIFO*/
 }
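 /*
 **********************************************************************
 ** The completion token the type A/B hardware posts above is the SRB
 ** frame's bus address shifted right by 5 (frames are 32-byte aligned),
 ** with status flags such as ARCMSR_SRBREPLY_FLAG_ERROR_MODE0 riding in
 ** the high bits.  Shifting left by 5 in 32-bit arithmetic both restores
 ** the address and discards the flag bits, and adding vir2phy_offset
 ** maps it back to the driver's virtual pointer.  A sketch:
 **********************************************************************
 */
 #if 0	/* illustration only */
 #include <stdint.h>
 
 struct CommandControlBlock;	/* opaque here */
 
 static struct CommandControlBlock *
 mode0_token_to_srb(uintptr_t vir2phy_offset, uint32_t flag_srb)
 {
 	uint32_t bus_addr = flag_srb << 5;	/* high flag bits fall off */
 
 	return ((struct CommandControlBlock *)(vir2phy_offset + bus_addr));
 }
 #endif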
 /*
 **********************************************************************
 **
 **********************************************************************
 */
 static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
 {
 	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 	struct CommandControlBlock *srb;
 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
 	int index;
 	u_int16_t	error;
 
 polling_ccb_retry:
 	poll_count++;
 	WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 	while(1) {
 		index = phbbmu->doneq_index;
 		if((flag_srb = phbbmu->done_qbuffer[index]) == 0) {
 			if(poll_srb_done) {
 				break;	/* no more completed CCBs in the chip FIFO */
 			} else {
 				UDELAY(25000);
 				if ((poll_count > 100) && (poll_srb != NULL)) {
 					break;
 				}
 				goto polling_ccb_retry;
 			}
 		}
 		phbbmu->done_qbuffer[index] = 0;
 		index++;
 		index %= ARCMSR_MAX_HBB_POSTQUEUE;	/* wrap back to 0 past the last index */
 		phbbmu->doneq_index = index;
 		/* check whether the command completed without error */
 		srb = (struct CommandControlBlock *)
 			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
 		poll_srb_done = (srb == poll_srb) ? 1:0;
 		if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
 			if(srb->srb_state == ARCMSR_SRB_ABORTED) {
 				printf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
 					"poll command abort successfully \n"
 					, acb->pci_unit
 					, srb->pccb->ccb_h.target_id
 					, srb->pccb->ccb_h.target_lun, srb);
 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
 				arcmsr_srb_complete(srb, 1);		
 				continue;
 			}
 			printf("arcmsr%d: polling get an illegal srb command done srb='%p'"
 				"srboutstandingcount=%d \n"
 				, acb->pci_unit
 				, srb, acb->srboutstandingcount);
 			continue;
 		}
 		arcmsr_report_srb_state(acb, srb, error);
 	}	/*drain reply FIFO*/
 }
 /*
 **********************************************************************
 ** 
 **********************************************************************
 */
 static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
 {
 	struct CommandControlBlock *srb;
 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
 	u_int16_t	error;
 
 polling_ccb_retry:
 	poll_count++;
 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 	while(1) {
 		if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
 			if(poll_srb_done) {
 				break;	/* no more completed CCBs in the chip FIFO */
 			} else {
 				UDELAY(25000);
 				if ((poll_count > 100) && (poll_srb != NULL)) {
 					break;
 				}
 				if (acb->srboutstandingcount == 0) {
 				    break;
 				}
 				goto polling_ccb_retry;
 			}
 		}
 		flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
 		/* check whether the command completed without error */
 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
 		if (poll_srb != NULL)
 			poll_srb_done = (srb == poll_srb) ? 1:0;
 		if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
 			if(srb->srb_state == ARCMSR_SRB_ABORTED) {
 				printf("arcmsr%d: scsi id=%d lun=%d srb='%p'poll command abort successfully \n"
 						, acb->pci_unit, srb->pccb->ccb_h.target_id, srb->pccb->ccb_h.target_lun, srb);
 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
 				arcmsr_srb_complete(srb, 1);
 				continue;
 			}
 			printf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n"
 					, acb->pci_unit, srb, acb->srboutstandingcount);
 			continue;
 		}
 		arcmsr_report_srb_state(acb, srb, error);
 	}	/*drain reply FIFO*/
 }
 /*
 **********************************************************************
 ** 
 **********************************************************************
 */
 static void arcmsr_polling_hbd_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
 {
 	struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
 	struct CommandControlBlock *srb;
 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
 	u_int32_t outbound_write_pointer;
 	u_int16_t	error, doneq_index;
 
 polling_ccb_retry:
 	poll_count++;
 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 	while(1) {
 		outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow;
 		doneq_index = phbdmu->doneq_index;
 		if ((outbound_write_pointer & 0xFF) == (doneq_index & 0xFF)) {
 			if(poll_srb_done) {
 				break;	/* no more completed CCBs in the chip FIFO */
 			} else {
 				UDELAY(25000);
 				if ((poll_count > 100) && (poll_srb != NULL)) {
 					break;
 				}
 				if (acb->srboutstandingcount == 0) {
 					break;
 				}
 				goto polling_ccb_retry;
 			}
 		}
 		doneq_index = arcmsr_get_doneq_index(phbdmu);
 		flag_srb = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow;
 		/* check whether the command completed without error */
 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
 		CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index);
 		if (poll_srb != NULL)
 			poll_srb_done = (srb == poll_srb) ? 1:0;
 		if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
 			if(srb->srb_state == ARCMSR_SRB_ABORTED) {
 				printf("arcmsr%d: scsi id=%d lun=%d srb='%p'poll command abort successfully \n"
 						, acb->pci_unit, srb->pccb->ccb_h.target_id, srb->pccb->ccb_h.target_lun, srb);
 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
 				arcmsr_srb_complete(srb, 1);
 				continue;
 			}
 			printf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n"
 					, acb->pci_unit, srb, acb->srboutstandingcount);
 			continue;
 		}
 		arcmsr_report_srb_state(acb, srb, error);
 	}	/*drain reply FIFO*/
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
 {
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			arcmsr_polling_hba_srbdone(acb, poll_srb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			arcmsr_polling_hbb_srbdone(acb, poll_srb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 			arcmsr_polling_hbc_srbdone(acb, poll_srb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			arcmsr_polling_hbd_srbdone(acb, poll_srb);
 		}
 		break;
 	}
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
 {
 	char *acb_firm_model = acb->firm_model;
 	char *acb_firm_version = acb->firm_version;
 	char *acb_device_map = acb->device_map;
 	size_t iop_firm_model = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
 	size_t iop_firm_version = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
 	size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
 	int i;
 
 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
 		printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
 	}
 	i = 0;
 	while(i < 8) {
 		*acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); 
 		/* 8 bytes firm_model, 15, 60-67*/
 		acb_firm_model++;
 		i++;
 	}
 	i=0;
 	while(i < 16) {
 		*acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);  
 		/* 16 bytes firm_version, 17, 68-83*/
 		acb_firm_version++;
 		i++;
 	}
 	i=0;
 	while(i < 16) {
 		*acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);  
 		acb_device_map++;
 		i++;
 	}
 	printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
 	acb->firm_request_len = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
 	acb->firm_numbers_queue = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
 	acb->firm_sdram_size = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
 	acb->firm_ide_channels = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
 	acb->firm_cfg_version = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25, 	  */
 	if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
 		acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1;
 	else
 		acb->maxOutstanding = acb->firm_numbers_queue - 1;
 }
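 /*
 **********************************************************************
 ** The msgcode_rwbuffer reads above imply the firmware's GET_CONFIG
 ** reply layout: dwords [1]..[4] carry the queue and memory limits,
 ** and fixed byte ranges hold the ASCII model (60-67) and version
 ** (68-83) strings plus the device map.  A sketch of the dword view
 ** (field names are illustrative; dword [0] is not consumed here):
 **********************************************************************
 */
 #if 0	/* illustration only */
 #include <stdint.h>
 
 struct arcmsr_get_config_reply {	/* msgcode_rwbuffer dword index */
 	uint32_t dword0;		/* [0] unused by this path      */
 	uint32_t request_len;		/* [1] bytes 04-07              */
 	uint32_t numbers_queue;		/* [2] bytes 08-11              */
 	uint32_t sdram_size;		/* [3] bytes 12-15              */
 	uint32_t ide_channels;		/* [4] bytes 16-19              */
 };
 #endif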
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
 {
 	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 	char *acb_firm_model = acb->firm_model;
 	char *acb_firm_version = acb->firm_version;
 	char *acb_device_map = acb->device_map;
 	size_t iop_firm_model = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
 	size_t iop_firm_version = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
 	size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
 	int i;
 
 	WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
 		printf( "arcmsr%d: wait" "'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
 	}
 	i = 0;
 	while(i < 8) {
 		*acb_firm_model = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i);
 		/* 8 bytes firm_model, 15, 60-67*/
 		acb_firm_model++;
 		i++;
 	}
 	i = 0;
 	while(i < 16) {
 		*acb_firm_version = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i);
 		/* 16 bytes firm_version, 17, 68-83*/
 		acb_firm_version++;
 		i++;
 	}
 	i = 0;
 	while(i < 16) {
 		*acb_device_map = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i);  
 		acb_device_map++;
 		i++;
 	}
 	printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
 	acb->firm_request_len = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
 	acb->firm_numbers_queue = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
 	acb->firm_sdram_size = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
 	acb->firm_ide_channels = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
 	acb->firm_cfg_version = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25, 	  */
 	if(acb->firm_numbers_queue > ARCMSR_MAX_HBB_POSTQUEUE)
 		acb->maxOutstanding = ARCMSR_MAX_HBB_POSTQUEUE - 1;
 	else
 		acb->maxOutstanding = acb->firm_numbers_queue - 1;
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb)
 {
 	char *acb_firm_model = acb->firm_model;
 	char *acb_firm_version = acb->firm_version;
 	char *acb_device_map = acb->device_map;
 	size_t iop_firm_model = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);   /*firm_model,15,60-67*/
 	size_t iop_firm_version = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
 	size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
 	int i;
 
 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
 		printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
 	}
 	i = 0;
 	while(i < 8) {
 		*acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); 
 		/* 8 bytes firm_model, 15, 60-67*/
 		acb_firm_model++;
 		i++;
 	}
 	i = 0;
 	while(i < 16) {
 		*acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);  
 		/* 16 bytes firm_version, 17, 68-83*/
 		acb_firm_version++;
 		i++;
 	}
 	i = 0;
 	while(i < 16) {
 		*acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);  
 		acb_device_map++;
 		i++;
 	}
 	printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
 	acb->firm_request_len	= CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]);	/*firm_request_len,   1, 04-07*/
 	acb->firm_numbers_queue	= CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]);	/*firm_numbers_queue, 2, 08-11*/
 	acb->firm_sdram_size	= CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]);	/*firm_sdram_size,    3, 12-15*/
 	acb->firm_ide_channels	= CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]);	/*firm_ide_channels,  4, 16-19*/
 	acb->firm_cfg_version	= CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25, 	  */
 	if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
 		acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1;
 	else
 		acb->maxOutstanding = acb->firm_numbers_queue - 1;
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_get_hbd_config(struct AdapterControlBlock *acb)
 {
 	char *acb_firm_model = acb->firm_model;
 	char *acb_firm_version = acb->firm_version;
 	char *acb_device_map = acb->device_map;
 	size_t iop_firm_model = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);   /*firm_model,15,60-67*/
 	size_t iop_firm_version = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
 	size_t iop_device_map = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
 	int i;
 
 	if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE)
 		CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);
 	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
 	if(!arcmsr_hbd_wait_msgint_ready(acb)) {
 		printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
 	}
 	i = 0;
 	while(i < 8) {
 		*acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); 
 		/* 8 bytes firm_model, 15, 60-67*/
 		acb_firm_model++;
 		i++;
 	}
 	i = 0;
 	while(i < 16) {
 		*acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);  
 		/* 16 bytes firm_version, 17, 68-83*/
 		acb_firm_version++;
 		i++;
 	}
 	i = 0;
 	while(i < 16) {
 		*acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);  
 		acb_device_map++;
 		i++;
 	}
 	printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version);
 	acb->firm_request_len	= CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[1]);	/*firm_request_len,   1, 04-07*/
 	acb->firm_numbers_queue	= CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[2]);	/*firm_numbers_queue, 2, 08-11*/
 	acb->firm_sdram_size	= CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[3]);	/*firm_sdram_size,    3, 12-15*/
 	acb->firm_ide_channels	= CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[4]);	/*firm_ide_channels,  4, 16-19*/
 	acb->firm_cfg_version	= CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25, 	  */
 	if(acb->firm_numbers_queue > ARCMSR_MAX_HBD_POSTQUEUE)
 		acb->maxOutstanding = ARCMSR_MAX_HBD_POSTQUEUE - 1;
 	else
 		acb->maxOutstanding = acb->firm_numbers_queue - 1;
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			arcmsr_get_hba_config(acb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			arcmsr_get_hbb_config(acb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 			arcmsr_get_hbc_config(acb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			arcmsr_get_hbd_config(acb);
 		}
 		break;
 	}
 }
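 /*
 ** All four arcmsr_get_hb?_config() variants above decode the same reply
 ** layout from msgcode_rwbuffer: word 1 is firm_request_len (bytes 04-07),
 ** word 2 firm_numbers_queue (08-11), word 3 firm_sdram_size (12-15) and
 ** word 4 firm_ide_channels (16-19), with the model, version and device
 ** map strings at the ARCMSR_FW_MODEL/VERS/DEVMAP word offsets.
 */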
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb)
 {
 	int	timeout=0;
 
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0)
 			{
 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
 				{
 					printf( "arcmsr%d:timed out waiting for firmware \n", acb->pci_unit);
 					return;
 				}
 				UDELAY(15000); /* wait 15 milli-seconds */
 			}
 		}
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 			while ((READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0)
 			{
 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
 				{
 					printf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
 					return;
 				}
 				UDELAY(15000); /* wait 15 milli-seconds */
 			}
 			WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 			while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0)
 			{
 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
 				{
 					printf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit);
 					return;
 				}
 				UDELAY(15000); /* wait 15 milli-seconds */
 			}
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			while ((CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBDMU_MESSAGE_FIRMWARE_OK) == 0)
 			{
 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
 				{
 					printf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit);
 					return;
 				}
 				UDELAY(15000); /* wait 15 milli-seconds */
 			}
 		}
 		break;
 	}
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb)
 {
 	u_int32_t outbound_doorbell;
 
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			/* empty doorbell Qbuffer if door bell ringed */
 			outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell);	/*clear doorbell interrupt */
 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
 			
 		}
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 			WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
 			WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
 			/* let IOP know data has been read */
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 			/* empty doorbell Qbuffer if door bell ringed */
 			outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell);	/*clear doorbell interrupt */
 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
 			CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell_clear); /* Dummy read to force pci flush */
 			CHIP_REG_READ32(HBC_MessageUnit, 0, inbound_doorbell); /* Dummy read to force pci flush */
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			/* empty doorbell Qbuffer if door bell ringed */
 			outbound_doorbell = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell);
 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, outbound_doorbell);	/*clear doorbell interrupt */
 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ);
 			
 		}
 		break;
 	}
 }
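 /*
 ** Doorbell housekeeping: the outbound doorbell registers are cleared by
 ** writing back the bits that were just read, and the inbound write tells
 ** the IOP that the driver has consumed the data.  The two dummy reads in
 ** the type C path flush the posted PCI writes so the acknowledgment
 ** reaches the adapter before the driver proceeds.
 */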
 /*
 ************************************************************************
 ************************************************************************
 */
 static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
 {
 	unsigned long srb_phyaddr;
 	u_int32_t srb_phyaddr_hi32;
 	u_int32_t srb_phyaddr_lo32;
 
 	/*
 	********************************************************************
 	** Tell the iop 331 the high 32 bits of the free SRB pool's
 	** physical address (freesrb.HighPart) if it is not zero.
 	********************************************************************
 	*/
 	srb_phyaddr = (unsigned long) acb->srb_phyaddr.phyaddr;
 	srb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
 	srb_phyaddr_lo32 = acb->srb_phyaddr.B.phyadd_low;
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			if(srb_phyaddr_hi32 != 0) {
 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
 				if(!arcmsr_hba_wait_msgint_ready(acb)) {
 					printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
 					return FALSE;
 				}
 			}
 		}
 		break;
 		/*
 		***********************************************************************
 		**    for adapter type B, set the window of the "post command Q"
 		***********************************************************************
 		*/
 	case ACB_ADAPTER_TYPE_B: {
 			u_int32_t post_queue_phyaddr;
 			struct HBB_MessageUnit *phbbmu;
 
 			phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 			phbbmu->postq_index = 0;
 			phbbmu->doneq_index = 0;
 			WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW);
 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
 				printf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit);
 				return FALSE;
 			}
 			post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE 
 								+ offsetof(struct HBB_MessageUnit, post_qbuffer);
 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normal should be zero */
 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */
 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */
 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */
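 			/*
 			** Each queue holds (256 + 8) four-byte entries, i.e.
 			** (256+8)*4 = 1056 bytes: the done Q therefore starts
 			** 1056 bytes after the post Q, and 1056 is also the
 			** queue size handed to the IOP above.
 			*/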
 			WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG);
 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
 				printf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit);
 				return FALSE;
 			}
 			WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE);
 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
 				printf( "arcmsr%d: 'start diver mode' timeout \n", acb->pci_unit);
 				return FALSE;
 			}
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 			if(srb_phyaddr_hi32 != 0) {
 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
 				if(!arcmsr_hbc_wait_msgint_ready(acb)) {
 					printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
 					return FALSE;
 				}
 			}
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			u_int32_t post_queue_phyaddr, done_queue_phyaddr;
 			struct HBD_MessageUnit0 *phbdmu;
 
 			phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
 			phbdmu->postq_index = 0;
 			phbdmu->doneq_index = 0x40FF;
 			post_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE 
 								+ offsetof(struct HBD_MessageUnit0, post_qbuffer);
 			done_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE 
 								+ offsetof(struct HBD_MessageUnit0, done_qbuffer);
 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ base */
 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[3], done_queue_phyaddr); /* doneQ base */
 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[4], 0x100);
 			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
 			if(!arcmsr_hbd_wait_msgint_ready(acb)) {
 				printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
 				return FALSE;
 			}
 		}
 		break;
 	}
 	return (TRUE);
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type)
 	{
 	case ACB_ADAPTER_TYPE_A:
 	case ACB_ADAPTER_TYPE_C:
 	case ACB_ADAPTER_TYPE_D:
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 			WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
 				printf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit);
 				return;
 			}
 		}
 		break;
 	}
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
 {
 	u_int32_t intmask_org;
 
 	/* disable all outbound interrupt */
 	intmask_org = arcmsr_disable_allintr(acb);
 	arcmsr_wait_firmware_ready(acb);
 	arcmsr_iop_confirm(acb);
 	arcmsr_get_firmware_spec(acb);
 	/*start background rebuild*/
 	arcmsr_start_adapter_bgrb(acb);
 	/* empty doorbell Qbuffer if door bell ringed */
 	arcmsr_clear_doorbell_queue_buffer(acb);
 	arcmsr_enable_eoi_mode(acb);
 	/* enable outbound Post Queue, outbound doorbell Interrupt */
 	arcmsr_enable_allintr(acb, intmask_org);
 	acb->acb_flags |= ACB_F_IOP_INITED;
 }
 /*
 **********************************************************************
 **********************************************************************
 */
 static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 {
 	struct AdapterControlBlock *acb = arg;
 	struct CommandControlBlock *srb_tmp;
 	u_int32_t i;
 	unsigned long srb_phyaddr = (unsigned long)segs->ds_addr;
 
 	acb->srb_phyaddr.phyaddr = srb_phyaddr; 
 	srb_tmp = (struct CommandControlBlock *)acb->uncacheptr;
 	for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
 		if(bus_dmamap_create(acb->dm_segs_dmat,
 			 /*flags*/0, &srb_tmp->dm_segs_dmamap) != 0) {
 			acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
 			printf("arcmsr%d:"
 			" srb dmamap bus_dmamap_create error\n", acb->pci_unit);
 			return;
 		}
 		if((acb->adapter_type == ACB_ADAPTER_TYPE_C) || (acb->adapter_type == ACB_ADAPTER_TYPE_D))
 		{
 			srb_tmp->cdb_phyaddr_low = srb_phyaddr;
 			srb_tmp->cdb_phyaddr_high = (u_int32_t)((srb_phyaddr >> 16) >> 16);
 		}
 		else
 			srb_tmp->cdb_phyaddr_low = srb_phyaddr >> 5;
 		srb_tmp->acb = acb;
 		acb->srbworkingQ[i] = acb->psrb_pool[i] = srb_tmp;
 		srb_phyaddr = srb_phyaddr + SRB_SIZE;
 		srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp + SRB_SIZE);
 	}
 	acb->vir2phy_offset = (unsigned long)srb_tmp - (unsigned long)srb_phyaddr;
 }
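 /*
 ** Notes on arcmsr_map_free_srb(): the high half of the SRB physical
 ** address is taken with two 16-bit shifts, (srb_phyaddr >> 16) >> 16,
 ** so the expression stays defined when "unsigned long" is only 32 bits
 ** wide (a single >> 32 on a 32-bit type is undefined behavior in C).
 ** vir2phy_offset caches the constant virtual-to-bus delta of the SRB
 ** pool, so the driver can convert between the two address spaces with a
 ** single addition or subtraction.
 */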
 /*
 ************************************************************************
 ************************************************************************
 */
 static void arcmsr_free_resource(struct AdapterControlBlock *acb)
 {
 	/* remove the control device */
 	if(acb->ioctl_dev != NULL) {
 		destroy_dev(acb->ioctl_dev);
 	}
 	bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
 	bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
 	bus_dma_tag_destroy(acb->srb_dmat);
 	bus_dma_tag_destroy(acb->dm_segs_dmat);
 	bus_dma_tag_destroy(acb->parent_dmat);
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static void arcmsr_mutex_init(struct AdapterControlBlock *acb)
 {
 	ARCMSR_LOCK_INIT(&acb->isr_lock, "arcmsr isr lock");
 	ARCMSR_LOCK_INIT(&acb->srb_lock, "arcmsr srb lock");
 	ARCMSR_LOCK_INIT(&acb->postDone_lock, "arcmsr postQ lock");
 	ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr RW buffer lock");
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static void arcmsr_mutex_destroy(struct AdapterControlBlock *acb)
 {
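 	/* Destroy the locks in the reverse of their initialization order. */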
 	ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
 	ARCMSR_LOCK_DESTROY(&acb->postDone_lock);
 	ARCMSR_LOCK_DESTROY(&acb->srb_lock);
 	ARCMSR_LOCK_DESTROY(&acb->isr_lock);
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static u_int32_t arcmsr_initialize(device_t dev)
 {
 	struct AdapterControlBlock *acb = device_get_softc(dev);
 	u_int16_t pci_command;
 	int i, j,max_coherent_size;
 	u_int32_t vendor_dev_id;
 
 	vendor_dev_id = pci_get_devid(dev);
 	acb->vendor_device_id = vendor_dev_id;
 	acb->sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
 	switch (vendor_dev_id) {
 	case PCIDevVenIDARC1880:
 	case PCIDevVenIDARC1882:
 	case PCIDevVenIDARC1213:
 	case PCIDevVenIDARC1223: {
 			acb->adapter_type = ACB_ADAPTER_TYPE_C;
 			if (acb->sub_device_id == ARECA_SUB_DEV_ID_1883)
 				acb->adapter_bus_speed = ACB_BUS_SPEED_12G;
 			else
 				acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
 			max_coherent_size = ARCMSR_SRBS_POOL_SIZE;
 		}
 		break;
 	case PCIDevVenIDARC1214: {
 			acb->adapter_type = ACB_ADAPTER_TYPE_D;
 			acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
 			max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBD_MessageUnit0));
 		}
 		break;
 	case PCIDevVenIDARC1200:
 	case PCIDevVenIDARC1201: {
 			acb->adapter_type = ACB_ADAPTER_TYPE_B;
 			acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
 			max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit));
 		}
 		break;
 	case PCIDevVenIDARC1203: {
 			acb->adapter_type = ACB_ADAPTER_TYPE_B;
 			acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
 			max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit));
 		}
 		break;
 	case PCIDevVenIDARC1110:
 	case PCIDevVenIDARC1120:
 	case PCIDevVenIDARC1130:
 	case PCIDevVenIDARC1160:
 	case PCIDevVenIDARC1170:
 	case PCIDevVenIDARC1210:
 	case PCIDevVenIDARC1220:
 	case PCIDevVenIDARC1230:
 	case PCIDevVenIDARC1231:
 	case PCIDevVenIDARC1260:
 	case PCIDevVenIDARC1261:
 	case PCIDevVenIDARC1270:
 	case PCIDevVenIDARC1280:
 	case PCIDevVenIDARC1212:
 	case PCIDevVenIDARC1222:
 	case PCIDevVenIDARC1380:
 	case PCIDevVenIDARC1381:
 	case PCIDevVenIDARC1680:
 	case PCIDevVenIDARC1681: {
 			acb->adapter_type = ACB_ADAPTER_TYPE_A;
 			acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
 			max_coherent_size = ARCMSR_SRBS_POOL_SIZE;
 		}
 		break;
 	default: {
 			printf("arcmsr%d:"
 			" unknown RAID adapter type \n", device_get_unit(dev));
 			return ENOMEM;
 		}
 	}
 #if __FreeBSD_version >= 700000
 	if(bus_dma_tag_create(  /*PCI parent*/		bus_get_dma_tag(dev),
 #else
 	if(bus_dma_tag_create(  /*PCI parent*/		NULL,
 #endif
 				/*alignment*/		1,
 				/*boundary*/		0,
 				/*lowaddr*/		BUS_SPACE_MAXADDR,
 				/*highaddr*/		BUS_SPACE_MAXADDR,
 				/*filter*/		NULL,
 				/*filterarg*/		NULL,
 				/*maxsize*/		BUS_SPACE_MAXSIZE_32BIT,
 				/*nsegments*/		BUS_SPACE_UNRESTRICTED,
 				/*maxsegsz*/		BUS_SPACE_MAXSIZE_32BIT,
 				/*flags*/		0,
 #if __FreeBSD_version >= 501102
 				/*lockfunc*/		NULL,
 				/*lockarg*/		NULL,
 #endif
 							&acb->parent_dmat) != 0)
 	{
 		printf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
 		return ENOMEM;
 	}
 
 	/* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
 	if(bus_dma_tag_create(  /*parent_dmat*/		acb->parent_dmat,
 				/*alignment*/		1,
 				/*boundary*/		0,
 #ifdef PAE
 				/*lowaddr*/		BUS_SPACE_MAXADDR_32BIT,
 #else
 				/*lowaddr*/		BUS_SPACE_MAXADDR,
 #endif
 				/*highaddr*/		BUS_SPACE_MAXADDR,
 				/*filter*/		NULL,
 				/*filterarg*/		NULL,
 				/*maxsize*/		ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM,
 				/*nsegments*/		ARCMSR_MAX_SG_ENTRIES,
 				/*maxsegsz*/		BUS_SPACE_MAXSIZE_32BIT,
 				/*flags*/		0,
 #if __FreeBSD_version >= 501102
 				/*lockfunc*/		busdma_lock_mutex,
 				/*lockarg*/		&acb->isr_lock,
 #endif
 							&acb->dm_segs_dmat) != 0)
 	{
 		bus_dma_tag_destroy(acb->parent_dmat);
 		printf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
 		return ENOMEM;
 	}
 
 	/* DMA tag for our srb structures.... Allocate the freesrb memory */
 	if(bus_dma_tag_create(  /*parent_dmat*/		acb->parent_dmat,
 				/*alignment*/		0x20,
 				/*boundary*/		0,
 				/*lowaddr*/		BUS_SPACE_MAXADDR_32BIT,
 				/*highaddr*/		BUS_SPACE_MAXADDR,
 				/*filter*/		NULL,
 				/*filterarg*/		NULL,
 				/*maxsize*/		max_coherent_size,
 				/*nsegments*/		1,
 				/*maxsegsz*/		BUS_SPACE_MAXSIZE_32BIT,
 				/*flags*/		0,
 #if __FreeBSD_version >= 501102
 				/*lockfunc*/		NULL,
 				/*lockarg*/		NULL,
 #endif
 							&acb->srb_dmat) != 0)
 	{
 		bus_dma_tag_destroy(acb->dm_segs_dmat);
 		bus_dma_tag_destroy(acb->parent_dmat);
 		printf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
 		return ENXIO;
 	}
 	/* Allocation for our srbs */
 	if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) {
 		bus_dma_tag_destroy(acb->srb_dmat);
 		bus_dma_tag_destroy(acb->dm_segs_dmat);
 		bus_dma_tag_destroy(acb->parent_dmat);
 		printf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev));
 		return ENXIO;
 	}
 	/* And permanently map them */
 	if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) {
 		bus_dma_tag_destroy(acb->srb_dmat);
 		bus_dma_tag_destroy(acb->dm_segs_dmat);
 		bus_dma_tag_destroy(acb->parent_dmat);
 		printf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev));
 		return ENXIO;
 	}
 	pci_command = pci_read_config(dev, PCIR_COMMAND, 2);
 	pci_command |= PCIM_CMD_BUSMASTEREN;
 	pci_command |= PCIM_CMD_PERRESPEN;
 	pci_command |= PCIM_CMD_MWRICEN;
 	/* Enable Busmaster */
 	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
 	switch(acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
 			u_int32_t rid0 = PCIR_BAR(0);
 			vm_offset_t	mem_base0;
 
 			acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE);
 			if(acb->sys_res_arcmsr[0] == NULL) {
 				arcmsr_free_resource(acb);
 				printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
 				return ENOMEM;
 			}
 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
 				arcmsr_free_resource(acb);
 				printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
 				return ENXIO;
 			}
 			mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
 			if(mem_base0 == 0) {
 				arcmsr_free_resource(acb);
 				printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
 				return ENXIO;
 			}
 			acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
 			acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
 			acb->pmu = (struct MessageUnit_UNION *)mem_base0;
 		}
 		break;
 	case ACB_ADAPTER_TYPE_B: {
 			struct HBB_MessageUnit *phbbmu;
 			struct CommandControlBlock *freesrb;
 			u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
 			vm_offset_t	mem_base[]={0,0};
 			u_long	size;
 			if (vendor_dev_id == PCIDevVenIDARC1203)
 				size = sizeof(struct HBB_DOORBELL_1203);
 			else
 				size = sizeof(struct HBB_DOORBELL);
 			for(i=0; i < 2; i++) {
 				if(i == 0) {
 					acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid[i],
 											RF_ACTIVE);
 				} else {
 					acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid[i],
 											RF_ACTIVE);
 				}
 				if(acb->sys_res_arcmsr[i] == NULL) {
 					arcmsr_free_resource(acb);
 					printf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i);
 					return ENOMEM;
 				}
 				if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) {
 					arcmsr_free_resource(acb);
 					printf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i);
 					return ENXIO;
 				}
 				mem_base[i] = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]);
 				if(mem_base[i] == 0) {
 					arcmsr_free_resource(acb);
 					printf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i);
 					return ENXIO;
 				}
 				acb->btag[i] = rman_get_bustag(acb->sys_res_arcmsr[i]);
 				acb->bhandle[i] = rman_get_bushandle(acb->sys_res_arcmsr[i]);
 			}
 			freesrb = (struct CommandControlBlock *)acb->uncacheptr;
 			acb->pmu = (struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE);
 			phbbmu = (struct HBB_MessageUnit *)acb->pmu;
 			phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)mem_base[0];
 			phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)mem_base[1];
 			if (vendor_dev_id == PCIDevVenIDARC1203) {
 				phbbmu->drv2iop_doorbell = offsetof(struct HBB_DOORBELL_1203, drv2iop_doorbell);
 				phbbmu->drv2iop_doorbell_mask = offsetof(struct HBB_DOORBELL_1203, drv2iop_doorbell_mask);
 				phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL_1203, iop2drv_doorbell);
 				phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL_1203, iop2drv_doorbell_mask);
 			} else {
 				phbbmu->drv2iop_doorbell = offsetof(struct HBB_DOORBELL, drv2iop_doorbell);
 				phbbmu->drv2iop_doorbell_mask = offsetof(struct HBB_DOORBELL, drv2iop_doorbell_mask);
 				phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL, iop2drv_doorbell);
 				phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL, iop2drv_doorbell_mask);
 			}
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 			u_int32_t rid0 = PCIR_BAR(1);
 			vm_offset_t	mem_base0;
 
 			acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE);
 			if(acb->sys_res_arcmsr[0] == NULL) {
 				arcmsr_free_resource(acb);
 				printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
 				return ENOMEM;
 			}
 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
 				arcmsr_free_resource(acb);
 				printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
 				return ENXIO;
 			}
 			mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
 			if(mem_base0 == 0) {
 				arcmsr_free_resource(acb);
 				printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
 				return ENXIO;
 			}
 			acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
 			acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
 			acb->pmu = (struct MessageUnit_UNION *)mem_base0;
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 			struct HBD_MessageUnit0 *phbdmu;
 			u_int32_t rid0 = PCIR_BAR(0);
 			vm_offset_t	mem_base0;
 
 			acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE);
 			if(acb->sys_res_arcmsr[0] == NULL) {
 				arcmsr_free_resource(acb);
 				printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
 				return ENOMEM;
 			}
 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
 				arcmsr_free_resource(acb);
 				printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
 				return ENXIO;
 			}
 			mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
 			if(mem_base0 == 0) {
 				arcmsr_free_resource(acb);
 				printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
 				return ENXIO;
 			}
 			acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
 			acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
 			acb->pmu = (struct MessageUnit_UNION *)((unsigned long)acb->uncacheptr+ARCMSR_SRBS_POOL_SIZE);
 			phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
 			phbdmu->phbdmu = (struct HBD_MessageUnit *)mem_base0;
 		}
 		break;
 	}
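 	/*
 	** For type B and D adapters the message unit lives in the same
 	** coherent DMA allocation as the SRB pool (max_coherent_size is
 	** ARCMSR_SRBS_POOL_SIZE plus the message unit size), which is why
 	** acb->pmu is computed as uncacheptr + ARCMSR_SRBS_POOL_SIZE there;
 	** types A and C instead point acb->pmu straight at the mapped BAR.
 	*/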
 	if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
 		arcmsr_free_resource(acb);
 		printf("arcmsr%d: map free srb failure!\n", device_get_unit(dev));
 		return ENXIO;
 	}
 	acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
 	/*
 	********************************************************************
 	** init raid volume state
 	********************************************************************
 	*/
 	for(i=0; i < ARCMSR_MAX_TARGETID; i++) {
 		for(j=0; j < ARCMSR_MAX_TARGETLUN; j++) {
 			acb->devstate[i][j] = ARECA_RAID_GONE;
 		}
 	}
 	arcmsr_iop_init(acb);
 	return(0);
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static int arcmsr_attach(device_t dev)
 {
 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
 	u_int32_t unit=device_get_unit(dev);
 	struct ccb_setasync csa;
 	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
 	struct resource	*irqres;
 	int	rid;
 
 	if(acb == NULL) {
 		printf("arcmsr%d: cannot allocate softc\n", unit);
 		return (ENOMEM);
 	}
 	arcmsr_mutex_init(acb);
 	acb->pci_dev = dev;
 	acb->pci_unit = unit;
 	if(arcmsr_initialize(dev)) {
 		printf("arcmsr%d: initialize failure!\n", unit);
 		arcmsr_mutex_destroy(acb);
 		return ENXIO;
 	}
 	/* After setting up the adapter, map our interrupt */
 	rid = 0;
 	irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE);
 	if(irqres == NULL || 
 #if __FreeBSD_version >= 700025
 		bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, NULL, arcmsr_intr_handler, acb, &acb->ih)) {
 #else
 		bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih)) {
 #endif
 		arcmsr_free_resource(acb);
 		arcmsr_mutex_destroy(acb);
 		printf("arcmsr%d: unable to register interrupt handler!\n", unit);
 		return ENXIO;
 	}
 	acb->irqres = irqres;
 	/*
 	 * Now let the CAM generic SCSI layer find the SCSI devices on the
 	 * bus and start the queue running.  Create the device queue for our
 	 * SIM(s); its depth (acb->maxOutstanding) bounds the number of
 	 * simultaneous transactions.
 	 */
 	devq = cam_simq_alloc(acb->maxOutstanding);
 	if(devq == NULL) {
 		arcmsr_free_resource(acb);
 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
 		arcmsr_mutex_destroy(acb);
 		printf("arcmsr%d: cam_simq_alloc failure!\n", unit);
 		return ENXIO;
 	}
 #if __FreeBSD_version >= 700025
 	acb->psim = cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->isr_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
 #else
 	acb->psim = cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
 #endif
 	if(acb->psim == NULL) {
 		arcmsr_free_resource(acb);
 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
 		cam_simq_free(devq);
 		arcmsr_mutex_destroy(acb);
 		printf("arcmsr%d: cam_sim_alloc failure!\n", unit);
 		return ENXIO;
 	}
 	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
 #if __FreeBSD_version >= 700044
 	if(xpt_bus_register(acb->psim, dev, 0) != CAM_SUCCESS) {
 #else
 	if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
 #endif
 		arcmsr_free_resource(acb);
 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
 		cam_sim_free(acb->psim, /*free_devq*/TRUE);
 		arcmsr_mutex_destroy(acb);
 		printf("arcmsr%d: xpt_bus_register failure!\n", unit);
 		return ENXIO;
 	}
 	if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 		arcmsr_free_resource(acb);
 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
 		xpt_bus_deregister(cam_sim_path(acb->psim));
 		cam_sim_free(acb->psim, /* free_devq */ TRUE);
 		arcmsr_mutex_destroy(acb);
 		printf("arcmsr%d: xpt_create_path failure!\n", unit);
 		return ENXIO;
 	}
 	/*
 	****************************************************
 	*/
 	xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5);
 	csa.ccb_h.func_code = XPT_SASYNC_CB;
 	csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
 	csa.callback = arcmsr_async;
 	csa.callback_arg = acb->psim;
 	xpt_action((union ccb *)&csa);
 	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
 	/* Create the control device.  */
 	acb->ioctl_dev = make_dev(&arcmsr_cdevsw, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit);
 		
 #if __FreeBSD_version < 503000
 	acb->ioctl_dev->si_drv1 = acb;
 #endif
 #if __FreeBSD_version > 500005
 	(void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
 #endif
 	arcmsr_callout_init(&acb->devmap_callout);
 	callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb);
 	return (0);
 }
 
 /*
 ************************************************************************
 ************************************************************************
 */
 static int arcmsr_probe(device_t dev)
 {
 	u_int32_t id;
 	u_int16_t sub_device_id;
 	static char buf[256];
 	char x_type[]={"unknown"};
 	char *type;
 	int raid6 = 1;
 
 	if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
 		return (ENXIO);
 	}
 	sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
 	switch(id = pci_get_devid(dev)) {
 	case PCIDevVenIDARC1110:
 	case PCIDevVenIDARC1200:
 	case PCIDevVenIDARC1201:
 	case PCIDevVenIDARC1210:
 		raid6 = 0;
 		/*FALLTHRU*/
 	case PCIDevVenIDARC1120:
 	case PCIDevVenIDARC1130:
 	case PCIDevVenIDARC1160:
 	case PCIDevVenIDARC1170:
 	case PCIDevVenIDARC1220:
 	case PCIDevVenIDARC1230:
 	case PCIDevVenIDARC1231:
 	case PCIDevVenIDARC1260:
 	case PCIDevVenIDARC1261:
 	case PCIDevVenIDARC1270:
 	case PCIDevVenIDARC1280:
 		type = "SATA 3G";
 		break;
 	case PCIDevVenIDARC1212:
 	case PCIDevVenIDARC1222:
 	case PCIDevVenIDARC1380:
 	case PCIDevVenIDARC1381:
 	case PCIDevVenIDARC1680:
 	case PCIDevVenIDARC1681:
 		type = "SAS 3G";
 		break;
 	case PCIDevVenIDARC1880:
 	case PCIDevVenIDARC1882:
 	case PCIDevVenIDARC1213:
 	case PCIDevVenIDARC1223:
 		if (sub_device_id == ARECA_SUB_DEV_ID_1883)
 			type = "SAS 12G";
 		else
 			type = "SAS 6G";
 		break;
 	case PCIDevVenIDARC1214:
 	case PCIDevVenIDARC1203:
 		type = "SATA 6G";
 		break;
 	default:
 		type = x_type;
 		raid6 = 0;
 		break;
 	}
 	if(type == x_type)
 		return(ENXIO);
 	sprintf(buf, "Areca %s Host Adapter RAID Controller %s\n%s\n",
 		type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
 	device_set_desc_copy(dev, buf);
 	return (BUS_PROBE_DEFAULT);
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static int arcmsr_shutdown(device_t dev)
 {
 	u_int32_t  i;
 	u_int32_t intmask_org;
 	struct CommandControlBlock *srb;
 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
 
 	/* stop adapter background rebuild */
 	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
 	/* disable all outbound interrupt */
 	intmask_org = arcmsr_disable_allintr(acb);
 	arcmsr_stop_adapter_bgrb(acb);
 	arcmsr_flush_adapter_cache(acb);
 	/* abort all outstanding command */
 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
 	acb->acb_flags &= ~ACB_F_IOP_INITED;
 	if(acb->srboutstandingcount != 0) {
 		/*clear and abort all outbound posted Q*/
 		arcmsr_done4abort_postqueue(acb);
 		/* talk to iop 331 outstanding command aborted*/
 		arcmsr_abort_allcmd(acb);
 		for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
 			srb = acb->psrb_pool[i];
 			if(srb->srb_state == ARCMSR_SRB_START) {
 				srb->srb_state = ARCMSR_SRB_ABORTED;
 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
 				arcmsr_srb_complete(srb, 1);
 			}
 		}
 	}
 	acb->srboutstandingcount = 0;
 	acb->workingsrb_doneindex = 0;
 	acb->workingsrb_startindex = 0;
 	acb->pktRequestCount = 0;
 	acb->pktReturnCount = 0;
 	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
 	return (0);
 }
 /*
 ************************************************************************
 ************************************************************************
 */
 static int arcmsr_detach(device_t dev)
 {
 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
 	int i;
 
 	callout_stop(&acb->devmap_callout);
 	bus_teardown_intr(dev, acb->irqres, acb->ih);
 	arcmsr_shutdown(dev);
 	arcmsr_free_resource(acb);
 	for(i=0; (i < 2) && (acb->sys_res_arcmsr[i] != NULL); i++) {
 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]);
 	}
 	bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
 	ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
 	xpt_async(AC_LOST_DEVICE, acb->ppath, NULL);
 	xpt_free_path(acb->ppath);
 	xpt_bus_deregister(cam_sim_path(acb->psim));
 	cam_sim_free(acb->psim, TRUE);
 	ARCMSR_LOCK_RELEASE(&acb->isr_lock);
 	arcmsr_mutex_destroy(acb);
 	return (0);
 }
 
 #ifdef ARCMSR_DEBUG1
 static void arcmsr_dump_data(struct AdapterControlBlock *acb)
 {
 	if((acb->pktRequestCount - acb->pktReturnCount) == 0)
 		return;
 	printf("Command Request Count   =0x%x\n",acb->pktRequestCount);
 	printf("Command Return Count    =0x%x\n",acb->pktReturnCount);
 	printf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount));
 	printf("Queued Command Count    =0x%x\n",acb->srboutstandingcount);
 }
 #endif
 
Index: stable/10/sys/dev/iir/iir.c
===================================================================
--- stable/10/sys/dev/iir/iir.c	(revision 312849)
+++ stable/10/sys/dev/iir/iir.c	(revision 312850)
@@ -1,1912 +1,1912 @@
 /*-
  *       Copyright (c) 2000-04 ICP vortex GmbH
  *       Copyright (c) 2002-04 Intel Corporation
  *       Copyright (c) 2003-04 Adaptec Inc.
  *       All Rights Reserved
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions, and the following disclaimer,
  *    without modification, immediately at the beginning of the file.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. The name of the author may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 /*
  * iir.c: SCSI dependent code for the Intel Integrated RAID Controller driver
  *
  * Written by: Achim Leubner <achim_leubner@adaptec.com>
  * Fixes/Additions: Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com>
  *
  * credits:     Niklas Hallqvist;       OpenBSD driver for the ICP Controllers.
  *              Mike Smith;             Some driver source code.
  *              FreeBSD.ORG;            Great O/S to work on and for.
  *
  * $Id: iir.c 1.5 2004/03/30 10:17:53 achim Exp $
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #define _IIR_C_
 
 /* #include "opt_iir.h" */
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/endian.h>
 #include <sys/eventhandler.h>
 #include <sys/malloc.h>
 #include <sys/kernel.h>
 #include <sys/bus.h>
 
 #include <machine/bus.h>
 #include <machine/stdarg.h>
 
 #include <cam/cam.h>
 #include <cam/cam_ccb.h>
 #include <cam/cam_sim.h>
 #include <cam/cam_xpt_sim.h>
 #include <cam/cam_debug.h>
 #include <cam/scsi/scsi_all.h>
 #include <cam/scsi/scsi_message.h>
 
 #include <dev/iir/iir.h>
 
 static MALLOC_DEFINE(M_GDTBUF, "iirbuf", "iir driver buffer");
 
 #ifdef GDT_DEBUG
 int     gdt_debug = GDT_DEBUG;
 #ifdef __SERIAL__
 #define MAX_SERBUF 160
 static void ser_init(void);
 static void ser_puts(char *str);
 static void ser_putc(int c);
 static char strbuf[MAX_SERBUF+1];
 #ifdef __COM2__
 #define COM_BASE 0x2f8
 #else
 #define COM_BASE 0x3f8
 #endif
 static void ser_init()
 {
     unsigned port=COM_BASE;
 
     outb(port+3, 0x80);
     outb(port+1, 0);
     /* 19200 Baud, if 9600: outb(12,port) */
     outb(port, 6);
     outb(port+3, 3);
     outb(port+1, 0);
 }
 
 static void ser_puts(char *str)
 {
     char *ptr;
 
     ser_init();
     for (ptr=str;*ptr;++ptr)
         ser_putc((int)(*ptr));
 }
 
 static void ser_putc(int c)
 {
     unsigned port=COM_BASE;
 
     while ((inb(port+5) & 0x20)==0);
     outb(port, c);
     if (c==0x0a)
     {
         while ((inb(port+5) & 0x20)==0);
         outb(port, 0x0d);
     }
 }
 
 int ser_printf(const char *fmt, ...)
 {
     va_list args;
     int i;
 
     va_start(args,fmt);
     i = vsprintf(strbuf,fmt,args);
     ser_puts(strbuf);
     va_end(args);
     return i;
 }
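 
 /*
  * Note: ser_printf() formats through vsprintf() into the fixed
  * strbuf[MAX_SERBUF + 1] with no bounds checking, so debug output
  * longer than MAX_SERBUF (160) characters would overrun the buffer;
  * keep GDT_DEBUG messages short.
  */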
 #endif
 #endif
 
 /* controller cnt. */
 int gdt_cnt = 0;
 /* event buffer */
 static gdt_evt_str ebuffer[GDT_MAX_EVENTS];
 static int elastidx, eoldidx;
 static struct mtx elock;
 MTX_SYSINIT(iir_elock, &elock, "iir events", MTX_DEF);
 /* statistics */
 gdt_statist_t gdt_stat;
 
 /* Definitions for our use of the SIM private CCB area */
 #define ccb_sim_ptr     spriv_ptr0
 #define ccb_priority    spriv_field1
 
 static void     iir_action(struct cam_sim *sim, union ccb *ccb);
 static int	iir_intr_locked(struct gdt_softc *gdt);
 static void     iir_poll(struct cam_sim *sim);
 static void     iir_shutdown(void *arg, int howto);
 static void     iir_timeout(void *arg);
 
 static void     gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, 
                                  int *secs);
 static int      gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb, 
                                  u_int8_t service, u_int16_t opcode, 
                                  u_int32_t arg1, u_int32_t arg2, u_int32_t arg3);
 static int      gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *ccb, 
                          int timeout);
 
 static struct gdt_ccb *gdt_get_ccb(struct gdt_softc *gdt);
 
 static int      gdt_sync_event(struct gdt_softc *gdt, int service, 
                                u_int8_t index, struct gdt_ccb *gccb);
 static int      gdt_async_event(struct gdt_softc *gdt, int service);
 static struct gdt_ccb *gdt_raw_cmd(struct gdt_softc *gdt, 
                                    union ccb *ccb);
 static struct gdt_ccb *gdt_cache_cmd(struct gdt_softc *gdt, 
                                      union ccb *ccb);
 static struct gdt_ccb *gdt_ioctl_cmd(struct gdt_softc *gdt, 
                                      gdt_ucmd_t *ucmd);
 static void     gdt_internal_cache_cmd(struct gdt_softc *gdt, union ccb *ccb);
 
 static void     gdtmapmem(void *arg, bus_dma_segment_t *dm_segs,
                           int nseg, int error);
 static void     gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
                               int nseg, int error);
 
 int
 iir_init(struct gdt_softc *gdt)
 {
     u_int16_t cdev_cnt;
     int i, id, drv_cyls, drv_hds, drv_secs;
     struct gdt_ccb *gccb;
 
     GDT_DPRINTF(GDT_D_DEBUG, ("iir_init()\n"));
 
     gdt->sc_state = GDT_POLLING;
     gdt_clear_events(); 
     bzero(&gdt_stat, sizeof(gdt_statist_t));
 
     SLIST_INIT(&gdt->sc_free_gccb);
     SLIST_INIT(&gdt->sc_pending_gccb);
     TAILQ_INIT(&gdt->sc_ccb_queue);
     TAILQ_INIT(&gdt->sc_ucmd_queue);
 
     /* DMA tag for mapping buffers into device visible space. */
     if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0,
                            /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
                            /*highaddr*/BUS_SPACE_MAXADDR,
                            /*filter*/NULL, /*filterarg*/NULL,
 			   /*maxsize*/DFLTPHYS,
 			   /*nsegments*/GDT_MAXSG,
                            /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
                            /*flags*/BUS_DMA_ALLOCNOW,
 			   /*lockfunc*/busdma_lock_mutex,
 			   /*lockarg*/&gdt->sc_lock,
                            &gdt->sc_buffer_dmat) != 0) {
 	device_printf(gdt->sc_devnode,
 	    "bus_dma_tag_create(..., gdt->sc_buffer_dmat) failed\n");
         return (1);
     }
     gdt->sc_init_level++;
 
     /* DMA tag for our ccb structures */
     if (bus_dma_tag_create(gdt->sc_parent_dmat,
 			   /*alignment*/1,
 			   /*boundary*/0,
                            /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
                            /*highaddr*/BUS_SPACE_MAXADDR,
                            /*filter*/NULL,
 			   /*filterarg*/NULL,
                            GDT_MAXCMDS * GDT_SCRATCH_SZ, /* maxsize */
                            /*nsegments*/1,
                            /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
 			   /*flags*/0, /*lockfunc*/busdma_lock_mutex,
 			   /*lockarg*/&gdt->sc_lock,
 			   &gdt->sc_gcscratch_dmat) != 0) {
         device_printf(gdt->sc_devnode,
 	    "bus_dma_tag_create(...,gdt->sc_gcscratch_dmat) failed\n");
         return (1);
     }
     gdt->sc_init_level++;
 
     /* Allocation for our ccb scratch area */
     if (bus_dmamem_alloc(gdt->sc_gcscratch_dmat, (void **)&gdt->sc_gcscratch,
                          BUS_DMA_NOWAIT, &gdt->sc_gcscratch_dmamap) != 0) {
         device_printf(gdt->sc_devnode,
 	    "bus_dmamem_alloc(...,&gdt->sc_gccbs,...) failed\n");
         return (1);
     }
     gdt->sc_init_level++;
 
     /* And permanently map them */
     bus_dmamap_load(gdt->sc_gcscratch_dmat, gdt->sc_gcscratch_dmamap,
                     gdt->sc_gcscratch, GDT_MAXCMDS * GDT_SCRATCH_SZ,
                     gdtmapmem, &gdt->sc_gcscratch_busbase, /*flags*/0);
     gdt->sc_init_level++;
 
     /* Clear them out. */
     bzero(gdt->sc_gcscratch, GDT_MAXCMDS * GDT_SCRATCH_SZ);
 
     /* Initialize the ccbs */
     gdt->sc_gccbs = malloc(sizeof(struct gdt_ccb) * GDT_MAXCMDS, M_GDTBUF,
         M_NOWAIT | M_ZERO);
     if (gdt->sc_gccbs == NULL) {
         device_printf(gdt->sc_devnode, "no memory for gccbs.\n");
         return (1);
     }
     for (i = GDT_MAXCMDS-1; i >= 0; i--) {
         gccb = &gdt->sc_gccbs[i];
         gccb->gc_cmd_index = i + 2;
         gccb->gc_flags = GDT_GCF_UNUSED;
         gccb->gc_map_flag = FALSE;
         if (bus_dmamap_create(gdt->sc_buffer_dmat, /*flags*/0,
                               &gccb->gc_dmamap) != 0)
             return(1);
         gccb->gc_map_flag = TRUE;
         gccb->gc_scratch = &gdt->sc_gcscratch[GDT_SCRATCH_SZ * i];
         gccb->gc_scratch_busbase = gdt->sc_gcscratch_busbase + GDT_SCRATCH_SZ * i;
 	callout_init_mtx(&gccb->gc_timeout, &gdt->sc_lock, 0);
         SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle);
     }
     gdt->sc_init_level++;
 
     /* create the control device */
     gdt->sc_dev = gdt_make_dev(gdt);
 
     /* allocate ccb for gdt_internal_cmd() */
     mtx_lock(&gdt->sc_lock);
     gccb = gdt_get_ccb(gdt);
     if (gccb == NULL) {
 	mtx_unlock(&gdt->sc_lock);
         device_printf(gdt->sc_devnode, "No free command index found\n");
         return (1);
     }
     bzero(gccb->gc_cmd, GDT_CMD_SZ);
 
     if (!gdt_internal_cmd(gdt, gccb, GDT_SCREENSERVICE, GDT_INIT, 
                           0, 0, 0)) {
         device_printf(gdt->sc_devnode,
 	    "Screen service initialization error %d\n", gdt->sc_status);
         gdt_free_ccb(gdt, gccb);
 	mtx_unlock(&gdt->sc_lock);
         return (1);
     }
 
     gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_UNFREEZE_IO,
                      0, 0, 0);
 
     if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT, 
                           GDT_LINUX_OS, 0, 0)) {
         device_printf(gdt->sc_devnode, "Cache service initialization error %d\n",
                gdt->sc_status);
         gdt_free_ccb(gdt, gccb);
 	mtx_unlock(&gdt->sc_lock);
         return (1);
     }
     cdev_cnt = (u_int16_t)gdt->sc_info;
     gdt->sc_fw_vers = gdt->sc_service;
 
     /* Detect number of buses */
     gdt_enc32(gccb->gc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
     gccb->gc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
     gccb->gc_scratch[GDT_IOC_FIRST_CHAN] = 0;
     gccb->gc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
     gdt_enc32(gccb->gc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
     if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
                          GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
                          GDT_IOC_HDR_SZ + GDT_MAXBUS * GDT_RAWIOC_SZ)) {
         gdt->sc_bus_cnt = gccb->gc_scratch[GDT_IOC_CHAN_COUNT];
         for (i = 0; i < gdt->sc_bus_cnt; i++) {
             id = gccb->gc_scratch[GDT_IOC_HDR_SZ +
                                  i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
             gdt->sc_bus_id[i] = id < GDT_MAXID_FC ? id : 0xff;
         }
     } else {
         /* New method failed, use fallback. */
         for (i = 0; i < GDT_MAXBUS; i++) {
             gdt_enc32(gccb->gc_scratch + GDT_GETCH_CHANNEL_NO, i);
             if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
                                   GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
                                   GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
                                   GDT_GETCH_SZ)) {
                 if (i == 0) {
                     device_printf(gdt->sc_devnode, "Cannot get channel count, "
                            "error %d\n", gdt->sc_status);
                     gdt_free_ccb(gdt, gccb);
 		    mtx_unlock(&gdt->sc_lock);
                     return (1);
                 }
                 break;
             }
             gdt->sc_bus_id[i] =
                 (gccb->gc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID_FC) ?
                 gccb->gc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
         }
         gdt->sc_bus_cnt = i;
     }
     /* add one "virtual" channel for the host drives */
     gdt->sc_virt_bus = gdt->sc_bus_cnt;
     gdt->sc_bus_cnt++;
 
     if (!gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_INIT, 
                           0, 0, 0)) {
             device_printf(gdt->sc_devnode,
 		"Raw service initialization error %d\n", gdt->sc_status);
             gdt_free_ccb(gdt, gccb);
 	    mtx_unlock(&gdt->sc_lock);
             return (1);
     }
 
     /* Set/get features raw service (scatter/gather) */
     gdt->sc_raw_feat = 0;
     if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
                          GDT_SCATTER_GATHER, 0, 0)) {
         if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_GET_FEAT, 
                              0, 0, 0)) {
             gdt->sc_raw_feat = gdt->sc_info;
             if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
                 panic("%s: Scatter/Gather Raw Service "
 		    "required but not supported!\n",
 		    device_get_nameunit(gdt->sc_devnode));
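                 /* NOTREACHED -- panic() does not return, so the cleanup below is dead code. */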
                 gdt_free_ccb(gdt, gccb);
 		mtx_unlock(&gdt->sc_lock);
                 return (1);
             }
         }
     }
 
     /* Set/get features cache service (scatter/gather) */
     gdt->sc_cache_feat = 0;
     if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_SET_FEAT, 
                          0, GDT_SCATTER_GATHER, 0)) {
         if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_GET_FEAT, 
                              0, 0, 0)) {
             gdt->sc_cache_feat = gdt->sc_info;
             if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
                 panic("%s: Scatter/Gather Cache Service "
 		    "required but not supported!\n",
 		    device_get_nameunit(gdt->sc_devnode));
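                 /* NOTREACHED -- panic() does not return, so the cleanup below is dead code. */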
                 gdt_free_ccb(gdt, gccb);
 		mtx_unlock(&gdt->sc_lock);
                 return (1);
             }
         }
     }
 
     /* OEM */
     gdt_enc32(gccb->gc_scratch + GDT_OEM_VERSION, 0x01);
     gdt_enc32(gccb->gc_scratch + GDT_OEM_BUFSIZE, sizeof(gdt_oem_record_t));
     if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
                          GDT_OEM_STR_RECORD, GDT_INVALID_CHANNEL,
                          sizeof(gdt_oem_str_record_t))) {
         strncpy(gdt->oem_name, ((gdt_oem_str_record_t *)
             gccb->gc_scratch)->text.scsi_host_drive_inquiry_vendor_id, 7);
         gdt->oem_name[7] = '\0';
     } else {
         /* Old method, based on PCI ID */
         if (gdt->sc_vendor == INTEL_VENDOR_ID_IIR)
             strcpy(gdt->oem_name, "Intel  ");
         else
             strcpy(gdt->oem_name, "ICP    ");
     }
 
     /* Scan for cache devices */
     for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++) {
         if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INFO, 
                              i, 0, 0)) {
             gdt->sc_hdr[i].hd_present = 1;
             gdt->sc_hdr[i].hd_size = gdt->sc_info;
             
             /*
              * Evaluate mapping (sectors per head, heads per cyl)
              */
             gdt->sc_hdr[i].hd_size &= ~GDT_SECS32;
             if (gdt->sc_info2 == 0)
                 gdt_eval_mapping(gdt->sc_hdr[i].hd_size,
                                  &drv_cyls, &drv_hds, &drv_secs);
             else {
                 drv_hds = gdt->sc_info2 & 0xff;
                 drv_secs = (gdt->sc_info2 >> 8) & 0xff;
                 drv_cyls = gdt->sc_hdr[i].hd_size / drv_hds /
                     drv_secs;
             }
             gdt->sc_hdr[i].hd_heads = drv_hds;
             gdt->sc_hdr[i].hd_secs = drv_secs;
             /* Round the size */
             gdt->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;
             
             if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE,
                                  GDT_DEVTYPE, i, 0, 0))
                 gdt->sc_hdr[i].hd_devtype = gdt->sc_info;
         }
     }
     
     GDT_DPRINTF(GDT_D_INIT, ("dpmem %x %d-bus %d cache device%s\n", 
                              gdt->sc_dpmembase,
                              gdt->sc_bus_cnt, cdev_cnt, 
                              cdev_cnt == 1 ? "" : "s"));
     gdt_free_ccb(gdt, gccb);
     mtx_unlock(&gdt->sc_lock);
 
     atomic_add_int(&gdt_cnt, 1);
     return (0);
 }
 
 void
 iir_free(struct gdt_softc *gdt)
 {
     int i;
 
     GDT_DPRINTF(GDT_D_INIT, ("iir_free()\n"));
 
     switch (gdt->sc_init_level) {
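      /*
       * Staged teardown: each case intentionally falls through to the
       * cases below it, so everything set up before the recorded init
       * level is released in reverse order.
       */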
       default:
         gdt_destroy_dev(gdt->sc_dev);
       case 5:
        for (i = GDT_MAXCMDS - 1; i >= 0; i--) {
            if (gdt->sc_gccbs[i].gc_map_flag) {
                callout_drain(&gdt->sc_gccbs[i].gc_timeout);
                bus_dmamap_destroy(gdt->sc_buffer_dmat,
                                   gdt->sc_gccbs[i].gc_dmamap);
            }
        }
         bus_dmamap_unload(gdt->sc_gcscratch_dmat, gdt->sc_gcscratch_dmamap);
         free(gdt->sc_gccbs, M_GDTBUF);
       case 4:
        bus_dmamem_free(gdt->sc_gcscratch_dmat, gdt->sc_gcscratch,
                        gdt->sc_gcscratch_dmamap);
       case 3:
         bus_dma_tag_destroy(gdt->sc_gcscratch_dmat);
       case 2:
         bus_dma_tag_destroy(gdt->sc_buffer_dmat);
       case 1:
         bus_dma_tag_destroy(gdt->sc_parent_dmat);
       case 0:
         break;
     }
 }
 
 void
 iir_attach(struct gdt_softc *gdt)
 {
     struct cam_devq *devq;
     int i;
 
     GDT_DPRINTF(GDT_D_INIT, ("iir_attach()\n"));
 
     /*
      * Create the device queue for our SIM.
      * XXX Throttle this down since the card has problems under load.
      */
     devq = cam_simq_alloc(32);
     if (devq == NULL)
         return;
 
     for (i = 0; i < gdt->sc_bus_cnt; i++) {
         /*
          * Construct our SIM entry
          */
         gdt->sims[i] = cam_sim_alloc(iir_action, iir_poll, "iir",
 	    gdt, device_get_unit(gdt->sc_devnode), &gdt->sc_lock,
 	    /*untagged*/1, /*tagged*/GDT_MAXCMDS, devq);
 	mtx_lock(&gdt->sc_lock);
         if (xpt_bus_register(gdt->sims[i], gdt->sc_devnode, i) != CAM_SUCCESS) {
             cam_sim_free(gdt->sims[i], /*free_devq*/i == 0);
 	    mtx_unlock(&gdt->sc_lock);
             break;
         }
 
         if (xpt_create_path(&gdt->paths[i], /*periph*/NULL,
                             cam_sim_path(gdt->sims[i]),
                             CAM_TARGET_WILDCARD,
                             CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
             xpt_bus_deregister(cam_sim_path(gdt->sims[i]));
             cam_sim_free(gdt->sims[i], /*free_devq*/i == 0);
 	    mtx_unlock(&gdt->sc_lock);
             break;
         }
 	mtx_unlock(&gdt->sc_lock);
     }
     if (i > 0)
         EVENTHANDLER_REGISTER(shutdown_final, iir_shutdown,
                               gdt, SHUTDOWN_PRI_DEFAULT);
     /* iir_watchdog(gdt); */
     gdt->sc_state = GDT_NORMAL;
 }
 
 static void
 gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs)
 {
     *cyls = size / GDT_HEADS / GDT_SECS;
     if (*cyls < GDT_MAXCYLS) {
         *heads = GDT_HEADS;
         *secs = GDT_SECS;
     } else {
         /* Too high for 64 * 32 */
         *cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
         if (*cyls < GDT_MAXCYLS) {
             *heads = GDT_MEDHEADS;
             *secs = GDT_MEDSECS;
         } else {
             /* Too high for 127 * 63 */
             *cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
             *heads = GDT_BIGHEADS;
             *secs = GDT_BIGSECS;
         }
     }
 }
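
/*
 * Worked example (a sketch; the exact constants live in the header, but
 * the comments above imply 64*32, 127*63 and 255*63 sectors per cylinder
 * with a GDT_MAXCYLS of 1024): with 512-byte sectors, a 512 MiB host
 * drive (1048576 sectors) gives 1048576/64/32 = 512 cylinders and stays
 * in the first tier, a 2 GiB drive needs the 127*63 layout, and an
 * 8 GiB drive falls through to 255*63.
 */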
 
 static int
 gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *gccb, 
          int timeout)
 {
     int rv = 0;
 
     GDT_DPRINTF(GDT_D_INIT,
                 ("gdt_wait(%p, %p, %d)\n", gdt, gccb, timeout));
 
     gdt->sc_state |= GDT_POLL_WAIT;
     do {
         if (iir_intr_locked(gdt) == gccb->gc_cmd_index) {
             rv = 1;
             break;
         }
         DELAY(1);
     } while (--timeout);
     gdt->sc_state &= ~GDT_POLL_WAIT;
     
     while (gdt->sc_test_busy(gdt))
         DELAY(1);               /* XXX correct? */
 
     return (rv);
 }
 
 static int
 gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
                  u_int8_t service, u_int16_t opcode, 
                  u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
 {
     int retries;
     
     GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d)\n",
                             gdt, service, opcode, arg1, arg2, arg3));
 
     bzero(gccb->gc_cmd, GDT_CMD_SZ);
 
     for (retries = GDT_RETRIES; ; ) {
         gccb->gc_service = service;
         gccb->gc_flags = GDT_GCF_INTERNAL;
         
         gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
                   gccb->gc_cmd_index);
         gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, opcode);
 
         switch (service) {
           case GDT_CACHESERVICE:
             if (opcode == GDT_IOCTL) {
                 gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION +
                           GDT_IOCTL_SUBFUNC, arg1);
                 gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION +
                           GDT_IOCTL_CHANNEL, arg2);
                 gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION +
                           GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
                 gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
                           gccb->gc_scratch_busbase);
             } else {
                 gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION +
                           GDT_CACHE_DEVICENO, (u_int16_t)arg1);
                 gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION +
                           GDT_CACHE_BLOCKNO, arg2);
             }
             break;
 
           case GDT_SCSIRAWSERVICE:
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION +
                       GDT_RAW_DIRECTION, arg1);
             gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
                 (u_int8_t)arg2;
             gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
                 (u_int8_t)arg3;
             gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
                 (u_int8_t)(arg3 >> 8);
         }
 
         gdt->sc_set_sema0(gdt);
         gccb->gc_cmd_len = GDT_CMD_SZ;
         gdt->sc_cmd_off = 0;
         gdt->sc_cmd_cnt = 0;
         gdt->sc_copy_cmd(gdt, gccb);
         gdt->sc_release_event(gdt);
         DELAY(20);
         if (!gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT))
             return (0);
         if (gdt->sc_status != GDT_S_BSY || --retries == 0)
             break;
         DELAY(1);
     }
     return (gdt->sc_status == GDT_S_OK);
 }
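
/*
 * Usage sketch (polled, init-time only): the feature probes during
 * controller init above are typical callers, e.g.
 *
 *     if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
 *                          GDT_SCATTER_GATHER, 0, 0) &&
 *         gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_GET_FEAT,
 *                          0, 0, 0))
 *         feat = gdt->sc_info;
 *
 * On return the controller's status and info words have been latched
 * into gdt->sc_status, gdt->sc_info and gdt->sc_info2 by the poll path.
 */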
 
 static struct gdt_ccb *
 gdt_get_ccb(struct gdt_softc *gdt)
 {
     struct gdt_ccb *gccb;
     
     GDT_DPRINTF(GDT_D_QUEUE, ("gdt_get_ccb(%p)\n", gdt));
 
     mtx_assert(&gdt->sc_lock, MA_OWNED);
     gccb = SLIST_FIRST(&gdt->sc_free_gccb);
     if (gccb != NULL) {
         SLIST_REMOVE_HEAD(&gdt->sc_free_gccb, sle);
         SLIST_INSERT_HEAD(&gdt->sc_pending_gccb, gccb, sle);
         ++gdt_stat.cmd_index_act;
         if (gdt_stat.cmd_index_act > gdt_stat.cmd_index_max)
             gdt_stat.cmd_index_max = gdt_stat.cmd_index_act;
     }
     return (gccb);
 }
 
 void
 gdt_free_ccb(struct gdt_softc *gdt, struct gdt_ccb *gccb)
 {
 
     GDT_DPRINTF(GDT_D_QUEUE, ("gdt_free_ccb(%p, %p)\n", gdt, gccb));
 
     mtx_assert(&gdt->sc_lock, MA_OWNED);
     gccb->gc_flags = GDT_GCF_UNUSED;
     SLIST_REMOVE(&gdt->sc_pending_gccb, gccb, gdt_ccb, sle);
     SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle);
     --gdt_stat.cmd_index_act;
     if (gdt->sc_state & GDT_SHUTDOWN)
         wakeup(gccb);
 }
 
 void    
 gdt_next(struct gdt_softc *gdt)
 {
     union ccb *ccb;
     gdt_ucmd_t *ucmd;
     struct cam_sim *sim;
     int bus, target, lun;
     int next_cmd;
 
     struct ccb_scsiio *csio;
     struct ccb_hdr *ccbh;
     struct gdt_ccb *gccb = NULL;
     u_int8_t cmd;
 
     GDT_DPRINTF(GDT_D_QUEUE, ("gdt_next(%p)\n", gdt));
 
     mtx_assert(&gdt->sc_lock, MA_OWNED);
     if (gdt->sc_test_busy(gdt)) {
         if (!(gdt->sc_state & GDT_POLLING)) {
             return;
         }
         while (gdt->sc_test_busy(gdt))
             DELAY(1);
     }
 
     gdt->sc_cmd_cnt = gdt->sc_cmd_off = 0;
     next_cmd = TRUE;
     for (;;) {
         /* I/Os in queue? controller ready? */
         if (!TAILQ_FIRST(&gdt->sc_ucmd_queue) &&
             !TAILQ_FIRST(&gdt->sc_ccb_queue))
             break;
 
         /* 1.: I/Os without ccb (IOCTLs) */
         ucmd = TAILQ_FIRST(&gdt->sc_ucmd_queue);
         if (ucmd != NULL) {
             TAILQ_REMOVE(&gdt->sc_ucmd_queue, ucmd, links);
             if ((gccb = gdt_ioctl_cmd(gdt, ucmd)) == NULL) {
                 TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
                 break;
             }
             break;      
            /* if multiple commands were allowed: if (!gdt_polling) continue; */
         }
 
         /* 2.: I/Os with ccb */
         ccb = (union ccb *)TAILQ_FIRST(&gdt->sc_ccb_queue); 
        /* always != NULL here, since tested above */
         sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
         bus = cam_sim_bus(sim);
         target = ccb->ccb_h.target_id;
         lun = ccb->ccb_h.target_lun;
     
         TAILQ_REMOVE(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
         --gdt_stat.req_queue_act;
         /* ccb->ccb_h.func_code is XPT_SCSI_IO */
         GDT_DPRINTF(GDT_D_QUEUE, ("XPT_SCSI_IO flags 0x%x)\n", 
                                   ccb->ccb_h.flags));
         csio = &ccb->csio;
         ccbh = &ccb->ccb_h;
-        cmd  = csio->cdb_io.cdb_bytes[0];
-        /* Max CDB length is 12 bytes */
-        if (csio->cdb_len > 12) { 
+        cmd  = scsiio_cdb_ptr(csio)[0];
+        /* Max CDB length is 12 bytes, can't be phys addr */
+        if (csio->cdb_len > 12 || (ccbh->flags & CAM_CDB_PHYS)) { 
             ccbh->status = CAM_REQ_INVALID;
             --gdt_stat.io_count_act;
             xpt_done(ccb);
         } else if (bus != gdt->sc_virt_bus) {
             /* raw service command */
             if ((gccb = gdt_raw_cmd(gdt, ccb)) == NULL) {
                 TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, 
                                   sim_links.tqe);
                 ++gdt_stat.req_queue_act;
                 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
                     gdt_stat.req_queue_max = gdt_stat.req_queue_act;
                 next_cmd = FALSE;
             }
         } else if (target >= GDT_MAX_HDRIVES || 
                    !gdt->sc_hdr[target].hd_present || lun != 0) {
             ccbh->status = CAM_DEV_NOT_THERE;
             --gdt_stat.io_count_act;
             xpt_done(ccb);
         } else {
             /* cache service command */
             if (cmd == READ_6  || cmd == WRITE_6 ||
                 cmd == READ_10 || cmd == WRITE_10) {
                 if ((gccb = gdt_cache_cmd(gdt, ccb)) == NULL) {
                     TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, 
                                       sim_links.tqe);
                     ++gdt_stat.req_queue_act;
                     if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
                         gdt_stat.req_queue_max = gdt_stat.req_queue_act;
                     next_cmd = FALSE;
                 }
             } else {
                 gdt_internal_cache_cmd(gdt, ccb);
             }
         }           
         if ((gdt->sc_state & GDT_POLLING) || !next_cmd)
             break;
     }
     if (gdt->sc_cmd_cnt > 0)
         gdt->sc_release_event(gdt);
 
     if ((gdt->sc_state & GDT_POLLING) && gdt->sc_cmd_cnt > 0) {
         gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT);
     }
 }
 
 static struct gdt_ccb *
 gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb)
 {
     struct gdt_ccb *gccb;
     struct cam_sim *sim;
     int error;
 
     GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_cmd(%p, %p)\n", gdt, ccb));
 
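    /*
     * Reject the command if its (rounded) size would overrun the DPMEM
     * command window behind what is already queued this round; gdt_next()
     * then requeues the CCB and retries later.
     */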
     if (roundup(GDT_CMD_UNION + GDT_RAW_SZ, sizeof(u_int32_t)) +
         gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
         gdt->sc_ic_all_size) {
         GDT_DPRINTF(GDT_D_INVALID, ("%s: gdt_raw_cmd(): DPMEM overflow\n", 
 		device_get_nameunit(gdt->sc_devnode)));
         return (NULL);
     }
 
     gccb = gdt_get_ccb(gdt);
     if (gccb == NULL) {
         GDT_DPRINTF(GDT_D_INVALID, ("%s: No free command index found\n",
 		device_get_nameunit(gdt->sc_devnode)));
         return (gccb);
     }
     bzero(gccb->gc_cmd, GDT_CMD_SZ);
     sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
     gccb->gc_ccb = ccb;
     gccb->gc_service = GDT_SCSIRAWSERVICE;
     gccb->gc_flags = GDT_GCF_SCSI;
         
     if (gdt->sc_cmd_cnt == 0)
         gdt->sc_set_sema0(gdt);
     gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
               gccb->gc_cmd_index);
     gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
 
     gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
               (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
               GDT_DATA_IN : GDT_DATA_OUT);
     gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
               ccb->csio.dxfer_len);
     gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
               ccb->csio.cdb_len);
    bcopy(scsiio_cdb_ptr(&ccb->csio),
          gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CMD, ccb->csio.cdb_len);
     gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] = 
         ccb->ccb_h.target_id;
     gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] = 
         ccb->ccb_h.target_lun;
     gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] = 
         cam_sim_bus(sim);
     gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
               sizeof(struct scsi_sense_data));
     gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
               gccb->gc_scratch_busbase);
  
     error = bus_dmamap_load_ccb(gdt->sc_buffer_dmat,
 			        gccb->gc_dmamap,
 			        ccb,
 			        gdtexecuteccb,
 			        gccb, /*flags*/0);
     if (error == EINPROGRESS) {
         xpt_freeze_simq(sim, 1);
         gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
     }
 
     return (gccb);
 }
 
 static struct gdt_ccb *
 gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb)
 {
     struct gdt_ccb *gccb;
     struct cam_sim *sim;
     u_int8_t *cmdp;
     u_int16_t opcode;
     u_int32_t blockno, blockcnt;
     int error;
 
     GDT_DPRINTF(GDT_D_CMD, ("gdt_cache_cmd(%p, %p)\n", gdt, ccb));
 
     if (roundup(GDT_CMD_UNION + GDT_CACHE_SZ, sizeof(u_int32_t)) +
         gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
         gdt->sc_ic_all_size) {
         GDT_DPRINTF(GDT_D_INVALID, ("%s: gdt_cache_cmd(): DPMEM overflow\n", 
 		device_get_nameunit(gdt->sc_devnode)));
         return (NULL);
     }
 
     gccb = gdt_get_ccb(gdt);
     if (gccb == NULL) {
         GDT_DPRINTF(GDT_D_DEBUG, ("%s: No free command index found\n",
 		device_get_nameunit(gdt->sc_devnode)));
         return (gccb);
     }
     bzero(gccb->gc_cmd, GDT_CMD_SZ);
     sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
     gccb->gc_ccb = ccb;
     gccb->gc_service = GDT_CACHESERVICE;
     gccb->gc_flags = GDT_GCF_SCSI;
         
     if (gdt->sc_cmd_cnt == 0)
         gdt->sc_set_sema0(gdt);
     gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
               gccb->gc_cmd_index);
    cmdp = scsiio_cdb_ptr(&ccb->csio);
     opcode = (*cmdp == WRITE_6 || *cmdp == WRITE_10) ? GDT_WRITE : GDT_READ;
     if ((gdt->sc_state & GDT_SHUTDOWN) && opcode == GDT_WRITE)
         opcode = GDT_WRITE_THR;
     gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, opcode);
  
     gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
               ccb->ccb_h.target_id);
     if (ccb->csio.cdb_len == 6) {
         struct scsi_rw_6 *rw = (struct scsi_rw_6 *)cmdp;
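        /*
         * 6-byte READ/WRITE CDB: 21-bit LBA (the 5 high bits sit in the
         * opcode byte, hence the SRW_TOPADDR mask) and a transfer length
         * of 0 means 256 blocks.
         */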
         blockno = scsi_3btoul(rw->addr) & ((SRW_TOPADDR<<16) | 0xffff);
         blockcnt = rw->length ? rw->length : 0x100;
     } else {
         struct scsi_rw_10 *rw = (struct scsi_rw_10 *)cmdp;
         blockno = scsi_4btoul(rw->addr);
         blockcnt = scsi_2btoul(rw->length);
     }
     gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
               blockno);
     gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
               blockcnt);
 
     error = bus_dmamap_load_ccb(gdt->sc_buffer_dmat,
                                 gccb->gc_dmamap,
                                 ccb,
                                 gdtexecuteccb,
                                 gccb, /*flags*/0);
     if (error == EINPROGRESS) {
         xpt_freeze_simq(sim, 1);
         gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
     }
     return (gccb);
 }
 
 static struct gdt_ccb *
 gdt_ioctl_cmd(struct gdt_softc *gdt, gdt_ucmd_t *ucmd)
 {
     struct gdt_ccb *gccb;
     u_int32_t cnt;
 
     GDT_DPRINTF(GDT_D_DEBUG, ("gdt_ioctl_cmd(%p, %p)\n", gdt, ucmd));
 
     gccb = gdt_get_ccb(gdt);
     if (gccb == NULL) {
         GDT_DPRINTF(GDT_D_DEBUG, ("%s: No free command index found\n",
 		device_get_nameunit(gdt->sc_devnode)));
         return (gccb);
     }
     bzero(gccb->gc_cmd, GDT_CMD_SZ);
     gccb->gc_ucmd = ucmd;
     gccb->gc_service = ucmd->service;
     gccb->gc_flags = GDT_GCF_IOCTL;
         
     /* check DPMEM space, copy data buffer from user space */
     if (ucmd->service == GDT_CACHESERVICE) {
         if (ucmd->OpCode == GDT_IOCTL) {
             gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_IOCTL_SZ,
                                       sizeof(u_int32_t));
             cnt = ucmd->u.ioctl.param_size;
             if (cnt > GDT_SCRATCH_SZ) {
                 device_printf(gdt->sc_devnode,
 		    "Scratch buffer too small (%d/%d)\n", GDT_SCRATCH_SZ, cnt);
                 gdt_free_ccb(gdt, gccb);
                 return (NULL);
             }
         } else {
             gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
                                       GDT_SG_SZ, sizeof(u_int32_t));
             cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
             if (cnt > GDT_SCRATCH_SZ) {
                 device_printf(gdt->sc_devnode,
 		    "Scratch buffer too small (%d/%d)\n", GDT_SCRATCH_SZ, cnt);
                 gdt_free_ccb(gdt, gccb);
                 return (NULL);
             }
         }
     } else {
         gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
                                   GDT_SG_SZ, sizeof(u_int32_t));
         cnt = ucmd->u.raw.sdlen;
         if (cnt + ucmd->u.raw.sense_len > GDT_SCRATCH_SZ) {
             device_printf(gdt->sc_devnode, "Scratch buffer too small (%d/%d)\n", 
 		GDT_SCRATCH_SZ, cnt + ucmd->u.raw.sense_len);
             gdt_free_ccb(gdt, gccb);
             return (NULL);
         }
     }
     if (cnt != 0) 
         bcopy(ucmd->data, gccb->gc_scratch, cnt);
 
     if (gdt->sc_cmd_off + gccb->gc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
         gdt->sc_ic_all_size) {
         GDT_DPRINTF(GDT_D_INVALID, ("%s: gdt_ioctl_cmd(): DPMEM overflow\n", 
 		device_get_nameunit(gdt->sc_devnode)));
         gdt_free_ccb(gdt, gccb);
         return (NULL);
     }
 
     if (gdt->sc_cmd_cnt == 0)
         gdt->sc_set_sema0(gdt);
 
     /* fill cmd structure */
     gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
               gccb->gc_cmd_index);
     gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, 
               ucmd->OpCode);
 
     if (ucmd->service == GDT_CACHESERVICE) {
         if (ucmd->OpCode == GDT_IOCTL) {
             /* IOCTL */
             gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_PARAM_SIZE,
                       ucmd->u.ioctl.param_size);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_SUBFUNC,
                       ucmd->u.ioctl.subfunc);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_CHANNEL,
                       ucmd->u.ioctl.channel);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
                       gccb->gc_scratch_busbase);
         } else {
             /* cache service command */
             gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
                       ucmd->u.cache.DeviceNo);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
                       ucmd->u.cache.BlockNo);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
                       ucmd->u.cache.BlockCnt);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
                       0xffffffffUL);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
                       1);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST + 
                       GDT_SG_PTR, gccb->gc_scratch_busbase);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
                       GDT_SG_LEN, ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE);
         }
     } else {
         /* raw service command */
         gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
                   ucmd->u.raw.direction);
         gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
                   0xffffffffUL);
         gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
                   ucmd->u.raw.sdlen);
         gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
                   ucmd->u.raw.clen);
         bcopy(ucmd->u.raw.cmd, gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
               12);
         gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] = 
             ucmd->u.raw.target;
         gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] = 
             ucmd->u.raw.lun;
         gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] = 
             ucmd->u.raw.bus;
         gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
                   ucmd->u.raw.sense_len);
         gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
                   gccb->gc_scratch_busbase + ucmd->u.raw.sdlen);
         gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
                   1);
         gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST + 
                   GDT_SG_PTR, gccb->gc_scratch_busbase);
         gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
                   GDT_SG_LEN, ucmd->u.raw.sdlen);
     }
 
     gdt_stat.sg_count_act = 1;
     gdt->sc_copy_cmd(gdt, gccb);
     return (gccb);
 }
 
 static void 
gdt_internal_cache_cmd(struct gdt_softc *gdt, union ccb *ccb)
 {
     int t;
 
     t = ccb->ccb_h.target_id;
     GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd(%p, %p, 0x%x, %d)\n", 
         gdt, ccb, ccb->csio.cdb_io.cdb_bytes[0], t));
 
     switch (ccb->csio.cdb_io.cdb_bytes[0]) {
       case TEST_UNIT_READY:
       case START_STOP:
         break;
       case REQUEST_SENSE:
         GDT_DPRINTF(GDT_D_MISC, ("REQUEST_SENSE\n"));
         break;
       case INQUIRY:
         {
             struct scsi_inquiry_data inq;
             size_t copylen = MIN(sizeof(inq), ccb->csio.dxfer_len);
 
             bzero(&inq, sizeof(inq));
             inq.device = (gdt->sc_hdr[t].hd_devtype & 4) ?
                 T_CDROM : T_DIRECT;
             inq.dev_qual2 = (gdt->sc_hdr[t].hd_devtype & 1) ? 0x80 : 0;
             inq.version = SCSI_REV_2;
             inq.response_format = 2; 
             inq.additional_length = 32; 
             inq.flags = SID_CmdQue | SID_Sync; 
             strncpy(inq.vendor, gdt->oem_name, sizeof(inq.vendor));
             snprintf(inq.product, sizeof(inq.product),
                      "Host Drive   #%02d", t);
             strncpy(inq.revision, "   ", sizeof(inq.revision));
            bcopy(&inq, ccb->csio.data_ptr, copylen);
            if (ccb->csio.dxfer_len > copylen)
                bzero(ccb->csio.data_ptr + copylen,
                      ccb->csio.dxfer_len - copylen);
             break;
         }
       case MODE_SENSE_6:
         {
             struct mpd_data {
                 struct scsi_mode_hdr_6 hd;
                 struct scsi_mode_block_descr bd;
                 struct scsi_control_page cp;
             } mpd;
             size_t copylen = MIN(sizeof(mpd), ccb->csio.dxfer_len);
             u_int8_t page;
 
             /*mpd = (struct mpd_data *)ccb->csio.data_ptr;*/
             bzero(&mpd, sizeof(mpd));
             mpd.hd.datalen = sizeof(struct scsi_mode_hdr_6) +
                 sizeof(struct scsi_mode_block_descr);
             mpd.hd.dev_specific = (gdt->sc_hdr[t].hd_devtype & 2) ? 0x80 : 0;
             mpd.hd.block_descr_len = sizeof(struct scsi_mode_block_descr);
             mpd.bd.block_len[0] = (GDT_SECTOR_SIZE & 0x00ff0000) >> 16;
             mpd.bd.block_len[1] = (GDT_SECTOR_SIZE & 0x0000ff00) >> 8;
             mpd.bd.block_len[2] = (GDT_SECTOR_SIZE & 0x000000ff);
 
            bcopy(&mpd, ccb->csio.data_ptr, copylen);
            if (ccb->csio.dxfer_len > copylen)
                bzero(ccb->csio.data_ptr + copylen,
                      ccb->csio.dxfer_len - copylen);
            page = ((struct scsi_mode_sense_6 *)
                scsiio_cdb_ptr(&ccb->csio))->page;
             switch (page) {
               default:
                 GDT_DPRINTF(GDT_D_MISC, ("MODE_SENSE_6: page 0x%x\n", page));
                 break;
             }
             break;
         }
       case READ_CAPACITY:
         {
             struct scsi_read_capacity_data rcd;
             size_t copylen = MIN(sizeof(rcd), ccb->csio.dxfer_len);
               
             /*rcd = (struct scsi_read_capacity_data *)ccb->csio.data_ptr;*/
             bzero(&rcd, sizeof(rcd));
             scsi_ulto4b(gdt->sc_hdr[t].hd_size - 1, rcd.addr);
             scsi_ulto4b(GDT_SECTOR_SIZE, rcd.length);
            bcopy(&rcd, ccb->csio.data_ptr, copylen);
            if (ccb->csio.dxfer_len > copylen)
                bzero(ccb->csio.data_ptr + copylen,
                      ccb->csio.dxfer_len - copylen);
             break;
         }
       default:
         GDT_DPRINTF(GDT_D_MISC, ("gdt_internal_cache_cmd(%d) unknown\n", 
                                     ccb->csio.cdb_io.cdb_bytes[0]));
         break;
     }
     ccb->ccb_h.status |= CAM_REQ_CMP;
     --gdt_stat.io_count_act;
     xpt_done(ccb);
 }
 
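/*
 * Trivial busdma load callback: the region mapped here is physically
 * contiguous, so only the first segment's bus address needs to be
 * recorded (presumably when loading the per-controller scratch area).
 */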
 static void     
 gdtmapmem(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
 {
     bus_addr_t *busaddrp;
     
     busaddrp = (bus_addr_t *)arg;
     *busaddrp = dm_segs->ds_addr;
 }
 
 static void     
 gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
 {
     struct gdt_ccb *gccb;
     union ccb *ccb;
     struct gdt_softc *gdt;
     int i;
 
     gccb = (struct gdt_ccb *)arg;
     ccb = gccb->gc_ccb;
     gdt = cam_sim_softc((struct cam_sim *)ccb->ccb_h.ccb_sim_ptr);
     mtx_assert(&gdt->sc_lock, MA_OWNED);
 
     GDT_DPRINTF(GDT_D_CMD, ("gdtexecuteccb(%p, %p, %p, %d, %d)\n", 
                             gdt, gccb, dm_segs, nseg, error));
     gdt_stat.sg_count_act = nseg;
     if (nseg > gdt_stat.sg_count_max)
         gdt_stat.sg_count_max = nseg;
 
     /* Copy the segments into our SG list */
     if (gccb->gc_service == GDT_CACHESERVICE) {
         for (i = 0; i < nseg; ++i) {
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
                       i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
                       i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
             dm_segs++;
         }
         gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,      
                   nseg);
         gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR, 
                   0xffffffffUL);
 
         gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
                                   nseg * GDT_SG_SZ, sizeof(u_int32_t));
     } else {
         for (i = 0; i < nseg; ++i) {
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
                       i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
                       i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
             dm_segs++;
         }
         gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,        
                   nseg);
         gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA, 
                   0xffffffffUL);
 
         gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
                                   nseg * GDT_SG_SZ, sizeof(u_int32_t));
     }
 
     if (nseg != 0) {
         bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap, 
             (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
             BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
     }
     
     /* We must NOT abort the command here if CAM_REQ_INPROG is not set,
      * because command semaphore is already set!
      */
     
     ccb->ccb_h.status |= CAM_SIM_QUEUED;
     /* timeout handling */
     callout_reset_sbt(&gccb->gc_timeout, SBT_1MS * ccb->ccb_h.timeout, 0,
       iir_timeout, (caddr_t)gccb, 0);
 
     gdt->sc_copy_cmd(gdt, gccb);
 }
 
 
 static void     
iir_action(struct cam_sim *sim, union ccb *ccb)
 {
     struct gdt_softc *gdt;
     int bus, target, lun;
 
    gdt = (struct gdt_softc *)cam_sim_softc(sim);
     mtx_assert(&gdt->sc_lock, MA_OWNED);
     ccb->ccb_h.ccb_sim_ptr = sim;
     bus = cam_sim_bus(sim);
     target = ccb->ccb_h.target_id;
     lun = ccb->ccb_h.target_lun;
     GDT_DPRINTF(GDT_D_CMD, 
                 ("iir_action(%p) func 0x%x cmd 0x%x bus %d target %d lun %d\n", 
                  gdt, ccb->ccb_h.func_code, ccb->csio.cdb_io.cdb_bytes[0], 
                  bus, target, lun)); 
     ++gdt_stat.io_count_act;
     if (gdt_stat.io_count_act > gdt_stat.io_count_max)
         gdt_stat.io_count_max = gdt_stat.io_count_act;
 
     switch (ccb->ccb_h.func_code) {
       case XPT_SCSI_IO:
         TAILQ_INSERT_TAIL(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
         ++gdt_stat.req_queue_act;
         if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
             gdt_stat.req_queue_max = gdt_stat.req_queue_act;
         gdt_next(gdt);
         break;
       case XPT_RESET_DEV:   /* Bus Device Reset the specified SCSI device */
       case XPT_ABORT:                       /* Abort the specified CCB */
         /* XXX Implement */
         ccb->ccb_h.status = CAM_REQ_INVALID;
         --gdt_stat.io_count_act;
         xpt_done(ccb);
         break;
       case XPT_SET_TRAN_SETTINGS:
         ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
         --gdt_stat.io_count_act;
         xpt_done(ccb);  
         break;
       case XPT_GET_TRAN_SETTINGS:
         /* Get default/user set transfer settings for the target */
           {
               struct        ccb_trans_settings *cts = &ccb->cts;
               struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
               struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
 
               cts->protocol = PROTO_SCSI;
               cts->protocol_version = SCSI_REV_2;
               cts->transport = XPORT_SPI;
               cts->transport_version = 2;
 
               if (cts->type == CTS_TYPE_USER_SETTINGS) {
                    spi->flags = CTS_SPI_FLAGS_DISC_ENB;
                   scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
                   spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
                   spi->sync_period = 25; /* 10MHz */
                   if (spi->sync_period != 0)
                       spi->sync_offset = 15;
                   
                   spi->valid = CTS_SPI_VALID_SYNC_RATE
                       | CTS_SPI_VALID_SYNC_OFFSET
                       | CTS_SPI_VALID_BUS_WIDTH
                       | CTS_SPI_VALID_DISC;
                   scsi->valid = CTS_SCSI_VALID_TQ;
                   ccb->ccb_h.status = CAM_REQ_CMP;
               } else {
                   ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
               }
               --gdt_stat.io_count_act;
               xpt_done(ccb);
               break;
           }
       case XPT_CALC_GEOMETRY:
           {
               struct ccb_calc_geometry *ccg;
               u_int32_t secs_per_cylinder;
 
               ccg = &ccb->ccg;
               ccg->heads = gdt->sc_hdr[target].hd_heads;
               ccg->secs_per_track = gdt->sc_hdr[target].hd_secs;
               secs_per_cylinder = ccg->heads * ccg->secs_per_track;
               ccg->cylinders = ccg->volume_size / secs_per_cylinder;
               ccb->ccb_h.status = CAM_REQ_CMP;
               --gdt_stat.io_count_act;
               xpt_done(ccb);
               break;
           }
       case XPT_RESET_BUS:           /* Reset the specified SCSI bus */
           {
               /* XXX Implement */
               ccb->ccb_h.status = CAM_REQ_CMP;
               --gdt_stat.io_count_act;
               xpt_done(ccb);
               break;
           }
       case XPT_TERM_IO:             /* Terminate the I/O process */
         /* XXX Implement */
         ccb->ccb_h.status = CAM_REQ_INVALID;
         --gdt_stat.io_count_act;
         xpt_done(ccb);
         break;
       case XPT_PATH_INQ:            /* Path routing inquiry */
           {
               struct ccb_pathinq *cpi = &ccb->cpi;
               
               cpi->version_num = 1;
               cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
               cpi->hba_inquiry |= PI_WIDE_16;
               cpi->target_sprt = 1;
               cpi->hba_misc = 0;
               cpi->hba_eng_cnt = 0;
               if (bus == gdt->sc_virt_bus)
                   cpi->max_target = GDT_MAX_HDRIVES - 1;
               else if (gdt->sc_class & GDT_FC)
                   cpi->max_target = GDT_MAXID_FC - 1;
               else
                   cpi->max_target = GDT_MAXID - 1;
               cpi->max_lun = 7;
               cpi->unit_number = cam_sim_unit(sim);
               cpi->bus_id = bus;
               cpi->initiator_id = 
                   (bus == gdt->sc_virt_bus ? 127 : gdt->sc_bus_id[bus]);
               cpi->base_transfer_speed = 3300;
               strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
               if (gdt->sc_vendor == INTEL_VENDOR_ID_IIR)
                   strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
               else
                   strncpy(cpi->hba_vid, "ICP vortex ", HBA_IDLEN);
               strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
               cpi->transport = XPORT_SPI;
               cpi->transport_version = 2;
               cpi->protocol = PROTO_SCSI;
               cpi->protocol_version = SCSI_REV_2;
               cpi->ccb_h.status = CAM_REQ_CMP;
               --gdt_stat.io_count_act;
               xpt_done(ccb);
               break;
           }
       default:
         GDT_DPRINTF(GDT_D_INVALID, ("gdt_next(%p) cmd 0x%x invalid\n", 
                                     gdt, ccb->ccb_h.func_code));
         ccb->ccb_h.status = CAM_REQ_INVALID;
         --gdt_stat.io_count_act;
         xpt_done(ccb);
         break;
     }
 }
 
 static void     
iir_poll(struct cam_sim *sim)
{
    struct gdt_softc *gdt;

    gdt = (struct gdt_softc *)cam_sim_softc(sim);
     GDT_DPRINTF(GDT_D_CMD, ("iir_poll sim %p gdt %p\n", sim, gdt));
     iir_intr_locked(gdt);
 }
 
 static void     
 iir_timeout(void *arg)
 {
    struct gdt_ccb *gccb = (struct gdt_ccb *)arg;

    GDT_DPRINTF(GDT_D_TIMEOUT, ("iir_timeout(%p)\n", gccb));
 }
 
 static void     
iir_shutdown(void *arg, int howto)
 {
     struct gdt_softc *gdt;
     struct gdt_ccb *gccb;
     gdt_ucmd_t *ucmd;
     int i;
 
     gdt = (struct gdt_softc *)arg;
     GDT_DPRINTF(GDT_D_CMD, ("iir_shutdown(%p, %d)\n", gdt, howto));
 
     device_printf(gdt->sc_devnode,
 	"Flushing all Host Drives. Please wait ...  ");
 
     /* allocate ucmd buffer */
     ucmd = malloc(sizeof(gdt_ucmd_t), M_GDTBUF, M_NOWAIT);
     if (ucmd == NULL) {
 	printf("\n");
         device_printf(gdt->sc_devnode,
 	    "iir_shutdown(): Cannot allocate resource\n");
         return;
     }
     bzero(ucmd, sizeof(gdt_ucmd_t));
 
     /* wait for pending IOs */
     mtx_lock(&gdt->sc_lock);
     gdt->sc_state = GDT_SHUTDOWN;
     if ((gccb = SLIST_FIRST(&gdt->sc_pending_gccb)) != NULL)
         mtx_sleep(gccb, &gdt->sc_lock, PCATCH | PRIBIO, "iirshw", 100 * hz);
 
     /* flush */
     for (i = 0; i < GDT_MAX_HDRIVES; ++i) {
         if (gdt->sc_hdr[i].hd_present) {
             ucmd->service = GDT_CACHESERVICE;
             ucmd->OpCode = GDT_FLUSH;
             ucmd->u.cache.DeviceNo = i;
             TAILQ_INSERT_TAIL(&gdt->sc_ucmd_queue, ucmd, links);
             ucmd->complete_flag = FALSE;
             gdt_next(gdt);
             if (!ucmd->complete_flag)
                 mtx_sleep(ucmd, &gdt->sc_lock, PCATCH | PRIBIO, "iirshw",
 		    10 * hz);
         }
     }
     mtx_unlock(&gdt->sc_lock);
 
    free(ucmd, M_GDTBUF);
     printf("Done.\n");
 }
 
 void
 iir_intr(void *arg)
 {
     struct gdt_softc *gdt = arg;
 
     mtx_lock(&gdt->sc_lock);
     iir_intr_locked(gdt);
     mtx_unlock(&gdt->sc_lock);
 }
 
 int
 iir_intr_locked(struct gdt_softc *gdt)
 {
     struct gdt_intr_ctx ctx;
     struct gdt_ccb *gccb;
     gdt_ucmd_t *ucmd;
     u_int32_t cnt;
 
     GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p)\n", gdt));
 
     mtx_assert(&gdt->sc_lock, MA_OWNED);
 
     /* If polling and we were not called from gdt_wait, just return */
     if ((gdt->sc_state & GDT_POLLING) &&
         !(gdt->sc_state & GDT_POLL_WAIT))
         return (0);
 
     ctx.istatus = gdt->sc_get_status(gdt);
     if (ctx.istatus == 0x00) {
         gdt->sc_status = GDT_S_NO_STATUS;
         return (ctx.istatus);
     }
 
     gdt->sc_intr(gdt, &ctx);
 
     gdt->sc_status = ctx.cmd_status;
     gdt->sc_service = ctx.service;
     gdt->sc_info = ctx.info;
     gdt->sc_info2 = ctx.info2;
 
     if (ctx.istatus == GDT_ASYNCINDEX) {
         gdt_async_event(gdt, ctx.service);
         return (ctx.istatus);
     }
     if (ctx.istatus == GDT_SPEZINDEX) {
         GDT_DPRINTF(GDT_D_INVALID, 
                     ("%s: Service unknown or not initialized!\n", 
 		     device_get_nameunit(gdt->sc_devnode)));   
         gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
         gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
         gdt_store_event(GDT_ES_DRIVER, 4, &gdt->sc_dvr);
         return (ctx.istatus);
     }
 
     gccb = &gdt->sc_gccbs[ctx.istatus - 2];
     ctx.service = gccb->gc_service;
 
     switch (gccb->gc_flags) {
       case GDT_GCF_UNUSED:
         GDT_DPRINTF(GDT_D_INVALID, ("%s: Index (%d) to unused command!\n",
 		    device_get_nameunit(gdt->sc_devnode), ctx.istatus));
         gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
         gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
         gdt->sc_dvr.eu.driver.index = ctx.istatus;
         gdt_store_event(GDT_ES_DRIVER, 1, &gdt->sc_dvr);
         gdt_free_ccb(gdt, gccb);
 	break;
 
       case GDT_GCF_INTERNAL:
         break;
 
       case GDT_GCF_IOCTL:
         ucmd = gccb->gc_ucmd; 
         if (gdt->sc_status == GDT_S_BSY) {
             GDT_DPRINTF(GDT_D_DEBUG, ("iir_intr(%p) ioctl: gccb %p busy\n", 
                                       gdt, gccb));
             TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
         } else {
             ucmd->status = gdt->sc_status;
             ucmd->info = gdt->sc_info;
             ucmd->complete_flag = TRUE;
             if (ucmd->service == GDT_CACHESERVICE) {
                 if (ucmd->OpCode == GDT_IOCTL) {
                     cnt = ucmd->u.ioctl.param_size;
                     if (cnt != 0)
                         bcopy(gccb->gc_scratch, ucmd->data, cnt);
                 } else {
                     cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
                     if (cnt != 0)
                         bcopy(gccb->gc_scratch, ucmd->data, cnt);
                 }
             } else {
                 cnt = ucmd->u.raw.sdlen;
                 if (cnt != 0)
                     bcopy(gccb->gc_scratch, ucmd->data, cnt);
                /*
                 * Sense data was DMAed right behind the transfer data in
                 * the scratch area (see gdt_ioctl_cmd()), so copy it from
                 * that offset; the destination is assumed to mirror the
                 * scratch layout.
                 */
                if (ucmd->u.raw.sense_len != 0)
                    bcopy(gccb->gc_scratch + cnt, ucmd->data + cnt,
                          ucmd->u.raw.sense_len);
             }
             gdt_free_ccb(gdt, gccb);
             /* wakeup */
             wakeup(ucmd);
         }
         gdt_next(gdt); 
         break;
 
       default:
         gdt_free_ccb(gdt, gccb);
         gdt_sync_event(gdt, ctx.service, ctx.istatus, gccb);
         gdt_next(gdt); 
         break;
     }
 
     return (ctx.istatus);
 }
 
 int
 gdt_async_event(struct gdt_softc *gdt, int service)
 {
     struct gdt_ccb *gccb;
 
     GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d)\n", gdt, service));
 
     if (service == GDT_SCREENSERVICE) {
         if (gdt->sc_status == GDT_MSG_REQUEST) {
             while (gdt->sc_test_busy(gdt))
                 DELAY(1);
             gccb = gdt_get_ccb(gdt);
             if (gccb == NULL) {
                 device_printf(gdt->sc_devnode, "No free command index found\n");
                 return (1);
             }
             bzero(gccb->gc_cmd, GDT_CMD_SZ);
             gccb->gc_service = service;
             gccb->gc_flags = GDT_GCF_SCREEN;
             gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
                       gccb->gc_cmd_index);
             gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_READ);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
                       GDT_MSG_INV_HANDLE);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
                       gccb->gc_scratch_busbase);
             gdt->sc_set_sema0(gdt);
             gdt->sc_cmd_off = 0;
             gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ, 
                                       sizeof(u_int32_t));
             gdt->sc_cmd_cnt = 0;
             gdt->sc_copy_cmd(gdt, gccb);
             device_printf(gdt->sc_devnode, "[PCI %d/%d] ", gdt->sc_bus,
 		gdt->sc_slot);
             gdt->sc_release_event(gdt);
         }
 
     } else {
         if ((gdt->sc_fw_vers & 0xff) >= 0x1a) {
             gdt->sc_dvr.size = 0;
             gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
             gdt->sc_dvr.eu.async.status  = gdt->sc_status;
             /* severity and event_string already set! */
         } else {        
             gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.async);
             gdt->sc_dvr.eu.async.ionode   = gdt->sc_hanum;
             gdt->sc_dvr.eu.async.service = service;
             gdt->sc_dvr.eu.async.status  = gdt->sc_status;
             gdt->sc_dvr.eu.async.info    = gdt->sc_info;
             *(u_int32_t *)gdt->sc_dvr.eu.async.scsi_coord  = gdt->sc_info2;
         }
         gdt_store_event(GDT_ES_ASYNC, service, &gdt->sc_dvr);
         device_printf(gdt->sc_devnode, "%s\n", gdt->sc_dvr.event_string);
     }
     
     return (0);
 }
 
 int
 gdt_sync_event(struct gdt_softc *gdt, int service, 
                u_int8_t index, struct gdt_ccb *gccb)
 {
     union ccb *ccb;
 
     GDT_DPRINTF(GDT_D_INTR,
                 ("gdt_sync_event(%p, %d, %d, %p)\n", gdt,service,index,gccb));
 
     ccb = gccb->gc_ccb;
 
     if (service == GDT_SCREENSERVICE) {
         u_int32_t msg_len;
 
         msg_len = gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_LEN);
         if (msg_len)
             if (!(gccb->gc_scratch[GDT_SCR_MSG_ANSWER] && 
                   gccb->gc_scratch[GDT_SCR_MSG_EXT])) {
                 gccb->gc_scratch[GDT_SCR_MSG_TEXT + msg_len] = '\0';
                 printf("%s",&gccb->gc_scratch[GDT_SCR_MSG_TEXT]);
             }
 
         if (gccb->gc_scratch[GDT_SCR_MSG_EXT] && 
             !gccb->gc_scratch[GDT_SCR_MSG_ANSWER]) {
             while (gdt->sc_test_busy(gdt))
                 DELAY(1);
            gccb = gdt_get_ccb(gdt);
            if (gccb == NULL) {
                device_printf(gdt->sc_devnode, "No free command index found\n");
                return (1);
            }
            bzero(gccb->gc_cmd, GDT_CMD_SZ);
             gccb->gc_service = service;
             gccb->gc_flags = GDT_GCF_SCREEN;
             gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
                       gccb->gc_cmd_index);
             gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_READ);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
                       gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
                       gccb->gc_scratch_busbase);
             gdt->sc_set_sema0(gdt);
             gdt->sc_cmd_off = 0;
             gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ, 
                                       sizeof(u_int32_t));
             gdt->sc_cmd_cnt = 0;
             gdt->sc_copy_cmd(gdt, gccb);
             gdt->sc_release_event(gdt);
             return (0);
         }
 
         if (gccb->gc_scratch[GDT_SCR_MSG_ANSWER] && 
             gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN)) {
             /* default answers (getchar() not possible) */
             if (gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) == 1) {
                 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN, 0);
                 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 1);
                 gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 0;
             } else {
                 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN, 
                           gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) - 2);
                 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 2);
                 gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 1;
                 gccb->gc_scratch[GDT_SCR_MSG_TEXT + 1] = 0;
             }
             gccb->gc_scratch[GDT_SCR_MSG_EXT] = 0;
             gccb->gc_scratch[GDT_SCR_MSG_ANSWER] = 0;
             while (gdt->sc_test_busy(gdt))
                 DELAY(1);
            gccb = gdt_get_ccb(gdt);
            if (gccb == NULL) {
                device_printf(gdt->sc_devnode, "No free command index found\n");
                return (1);
            }
            bzero(gccb->gc_cmd, GDT_CMD_SZ);
             gccb->gc_service = service;
             gccb->gc_flags = GDT_GCF_SCREEN;
             gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
                       gccb->gc_cmd_index);
             gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
                       gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
             gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
                       gccb->gc_scratch_busbase);
             gdt->sc_set_sema0(gdt);
             gdt->sc_cmd_off = 0;
             gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ, 
                                       sizeof(u_int32_t));
             gdt->sc_cmd_cnt = 0;
             gdt->sc_copy_cmd(gdt, gccb);
             gdt->sc_release_event(gdt);
             return (0);
         }
         printf("\n");
         return (0);
     } else {
 	callout_stop(&gccb->gc_timeout);
         if (gdt->sc_status == GDT_S_BSY) {
             GDT_DPRINTF(GDT_D_DEBUG, ("gdt_sync_event(%p) gccb %p busy\n", 
                                       gdt, gccb));
             TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
             ++gdt_stat.req_queue_act;
             if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
                 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
             return (2);
         }
 
         bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap, 
             (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
             BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
         bus_dmamap_unload(gdt->sc_buffer_dmat, gccb->gc_dmamap);
 
         ccb->csio.resid = 0;
         if (gdt->sc_status == GDT_S_OK) {
             ccb->ccb_h.status |= CAM_REQ_CMP;
             ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
         } else {
             /* error */
             if (gccb->gc_service == GDT_CACHESERVICE) {
                 struct scsi_sense_data *sense;
 
                 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
                 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
                 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                 bzero(&ccb->csio.sense_data, ccb->csio.sense_len);
                 sense = &ccb->csio.sense_data;
                 scsi_set_sense_data(sense,
                                     /*sense_format*/ SSD_TYPE_NONE,
                                     /*current_error*/ 1,
                                     /*sense_key*/ SSD_KEY_NOT_READY,
                                     /*asc*/ 0x4,
                                     /*ascq*/ 0x01,
                                     SSD_ELEM_NONE);
 
                 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.sync);
                 gdt->sc_dvr.eu.sync.ionode  = gdt->sc_hanum;
                 gdt->sc_dvr.eu.sync.service = service;
                 gdt->sc_dvr.eu.sync.status  = gdt->sc_status;
                 gdt->sc_dvr.eu.sync.info    = gdt->sc_info;
                 gdt->sc_dvr.eu.sync.hostdrive = ccb->ccb_h.target_id;
                 if (gdt->sc_status >= 0x8000)
                     gdt_store_event(GDT_ES_SYNC, 0, &gdt->sc_dvr);
                 else
                     gdt_store_event(GDT_ES_SYNC, service, &gdt->sc_dvr);
             } else {
                 /* raw service */
                 if (gdt->sc_status != GDT_S_RAW_SCSI || gdt->sc_info >= 0x100) {
                     ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                 } else {
                     ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR|CAM_AUTOSNS_VALID;
                     ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
                     ccb->csio.scsi_status = gdt->sc_info;
                     bcopy(gccb->gc_scratch, &ccb->csio.sense_data,
                           ccb->csio.sense_len);
                 }
             }
         }
         --gdt_stat.io_count_act;
         xpt_done(ccb);
     }
     return (0);
 }
 
 /* Controller event handling functions */
 void gdt_store_event(u_int16_t source, u_int16_t idx,
                              gdt_evt_data *evt)
 {
     gdt_evt_str *e;
     struct timeval tv;
 
     GDT_DPRINTF(GDT_D_MISC, ("gdt_store_event(%d, %d)\n", source, idx));
     if (source == 0)                        /* no source -> no event */
         return;
 
     mtx_lock(&elock);
     if (ebuffer[elastidx].event_source == source &&
         ebuffer[elastidx].event_idx == idx &&
         ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
           !memcmp((char *)&ebuffer[elastidx].event_data.eu,
                   (char *)&evt->eu, evt->size)) ||
          (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
           !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
                   (char *)&evt->event_string)))) { 
         e = &ebuffer[elastidx];
         getmicrotime(&tv);
         e->last_stamp = tv.tv_sec;
         ++e->same_count;
     } else {
         if (ebuffer[elastidx].event_source != 0) {  /* entry not free ? */
             ++elastidx;
             if (elastidx == GDT_MAX_EVENTS)
                 elastidx = 0;
             if (elastidx == eoldidx) {              /* reached mark ? */
                 ++eoldidx;
                 if (eoldidx == GDT_MAX_EVENTS)
                     eoldidx = 0;
             }
         }
         e = &ebuffer[elastidx];
         e->event_source = source;
         e->event_idx = idx;
         getmicrotime(&tv);
         e->first_stamp = e->last_stamp = tv.tv_sec;
         e->same_count = 1;
         e->event_data = *evt;
         e->application = 0;
     }
     mtx_unlock(&elock);
 }
 
 int gdt_read_event(int handle, gdt_evt_str *estr)
 {
     gdt_evt_str *e;
     int eindex;
     
     GDT_DPRINTF(GDT_D_MISC, ("gdt_read_event(%d)\n", handle));
     mtx_lock(&elock);
     if (handle == -1)
         eindex = eoldidx;
     else
         eindex = handle;
     estr->event_source = 0;
 
     if (eindex >= GDT_MAX_EVENTS) {
 	mtx_unlock(&elock);
         return eindex;
     }
     e = &ebuffer[eindex];
     if (e->event_source != 0) {
         if (eindex != elastidx) {
             if (++eindex == GDT_MAX_EVENTS)
                 eindex = 0;
         } else {
             eindex = -1;
         }
         memcpy(estr, e, sizeof(gdt_evt_str));
     }
     mtx_unlock(&elock);
     return eindex;
 }
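
/*
 * Consumer sketch: pass -1 to start at the oldest event and feed the
 * returned index back in; the return value goes to -1 once the newest
 * entry has been read, and estr->event_source stays 0 on an empty buffer:
 *
 *     int h = -1;
 *     gdt_evt_str es;
 *     do {
 *         h = gdt_read_event(h, &es);
 *         if (es.event_source == 0)
 *             break;
 *         handle_event(&es);        (hypothetical consumer)
 *     } while (h != -1);
 */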
 
 void gdt_readapp_event(u_int8_t application, gdt_evt_str *estr)
 {
     gdt_evt_str *e;
     int found = FALSE;
     int eindex;
     
     GDT_DPRINTF(GDT_D_MISC, ("gdt_readapp_event(%d)\n", application));
     mtx_lock(&elock);
     eindex = eoldidx;
     for (;;) {
         e = &ebuffer[eindex];
         if (e->event_source == 0)
             break;
         if ((e->application & application) == 0) {
             e->application |= application;
             found = TRUE;
             break;
         }
         if (eindex == elastidx)
             break;
         if (++eindex == GDT_MAX_EVENTS)
             eindex = 0;
     }
     if (found)
         memcpy(estr, e, sizeof(gdt_evt_str));
     else
         estr->event_source = 0;
     mtx_unlock(&elock);
 }
 
void gdt_clear_events(void)
 {
     GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events\n"));
 
     mtx_lock(&elock);
     eoldidx = elastidx = 0;
     ebuffer[0].event_source = 0;
     mtx_unlock(&elock);
 }
Index: stable/10/sys/dev/isci/isci_controller.c
===================================================================
--- stable/10/sys/dev/isci/isci_controller.c	(revision 312849)
+++ stable/10/sys/dev/isci/isci_controller.c	(revision 312850)
@@ -1,832 +1,838 @@
 /*-
  * BSD LICENSE
  *
  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  *   * Redistributions of source code must retain the above copyright
  *     notice, this list of conditions and the following disclaimer.
  *   * Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in
  *     the documentation and/or other materials provided with the
  *     distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include <dev/isci/isci.h>
 
 #include <sys/conf.h>
 #include <sys/malloc.h>
 
 #include <cam/cam_periph.h>
 #include <cam/cam_xpt_periph.h>
 
 #include <dev/isci/scil/sci_memory_descriptor_list.h>
 #include <dev/isci/scil/sci_memory_descriptor_list_decorator.h>
 
 #include <dev/isci/scil/scif_controller.h>
 #include <dev/isci/scil/scif_library.h>
 #include <dev/isci/scil/scif_io_request.h>
 #include <dev/isci/scil/scif_task_request.h>
 #include <dev/isci/scil/scif_remote_device.h>
 #include <dev/isci/scil/scif_domain.h>
 #include <dev/isci/scil/scif_user_callback.h>
 #include <dev/isci/scil/scic_sgpio.h>
 
 #include <dev/led/led.h>
 
 void isci_action(struct cam_sim *sim, union ccb *ccb);
 void isci_poll(struct cam_sim *sim);
 
 #define ccb_sim_ptr sim_priv.entries[0].ptr
 
 /**
  * @brief This user callback will inform the user that the controller has
 *        had a serious unexpected error.  The user should note the error,
  *        disable interrupts, and wait for current ongoing processing to
  *        complete.  Subsequently, the user should reset the controller.
  *
  * @param[in]  controller This parameter specifies the controller that had
  *                        an error.
  *
  * @return none
  */
 void scif_cb_controller_error(SCI_CONTROLLER_HANDLE_T controller,
     SCI_CONTROLLER_ERROR error)
 {
 
 	isci_log_message(0, "ISCI", "scif_cb_controller_error: 0x%x\n",
 	    error);
 }
 
 /**
  * @brief This user callback will inform the user that the controller has
  *        finished the start process.
  *
  * @param[in]  controller This parameter specifies the controller that was
  *             started.
  * @param[in]  completion_status This parameter specifies the results of
  *             the start operation.  SCI_SUCCESS indicates successful
  *             completion.
  *
  * @return none
  */
 void scif_cb_controller_start_complete(SCI_CONTROLLER_HANDLE_T controller,
     SCI_STATUS completion_status)
 {
 	uint32_t index;
 	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
 	    sci_object_get_association(controller);
 
 	isci_controller->is_started = TRUE;
 
 	/* Set bits for all domains.  We will clear them one-by-one once
 	 *  each domain completes discovery, or immediately if
 	 *  scif_domain_discover returns an error.  Once all bits are clear,
 	 *  we will register the controller with CAM.
 	 */
 	isci_controller->initial_discovery_mask = (1 << SCI_MAX_DOMAINS) - 1;
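 	/*
 	 * Illustrative sketch (SCI_MAX_DOMAINS == 4 is assumed here only
 	 *  for the example; the real value comes from the SCIL headers):
 	 *    mask = (1 << 4) - 1 = 0xF      all four domains pending
 	 *    domain 2 finishes discovery -> mask &= ~(1 << 2), now 0xB
 	 *    ...
 	 *    mask == 0                   -> register the controller with CAM
 	 */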
 
 	for (index = 0; index < SCI_MAX_DOMAINS; index++) {
 		SCI_STATUS status;
 		SCI_DOMAIN_HANDLE_T domain =
 		    isci_controller->domain[index].sci_object;
 
 		status = scif_domain_discover(
 			domain,
 			scif_domain_get_suggested_discover_timeout(domain),
 			DEVICE_TIMEOUT
 		);
 
 		if (status != SCI_SUCCESS) {
 			isci_controller_domain_discovery_complete(
 			    isci_controller, &isci_controller->domain[index]);
 		}
 	}
 }
 
 /**
  * @brief This user callback will inform the user that the controller has
  *        finished the stop process.  Note: after the user calls
  *        scif_controller_stop() and before this stop-complete callback is
  *        received, the user should not expect any callbacks from the
  *        framework, such as scif_cb_domain_change_notification().
  *
  * @param[in]  controller This parameter specifies the controller that was
  *             stopped.
  * @param[in]  completion_status This parameter specifies the results of
  *             the stop operation.  SCI_SUCCESS indicates successful
  *             completion.
  *
  * @return none
  */
 void scif_cb_controller_stop_complete(SCI_CONTROLLER_HANDLE_T controller,
     SCI_STATUS completion_status)
 {
 	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
 	    sci_object_get_association(controller);
 
 	isci_controller->is_started = FALSE;
 }
 
 static void
 isci_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
 {
 	SCI_PHYSICAL_ADDRESS *phys_addr = arg;
 
 	*phys_addr = seg[0].ds_addr;
 }
 
 /**
  * @brief This method will be invoked to allocate memory dynamically.
  *
  * @param[in]  controller This parameter represents the controller
  *             object for which to allocate memory.
  * @param[out] mde This parameter represents the memory descriptor to
  *             be filled in by the user that will reference the newly
  *             allocated memory.
  *
  * @return none
  */
 void scif_cb_controller_allocate_memory(SCI_CONTROLLER_HANDLE_T controller,
     SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde)
 {
 	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
 	    sci_object_get_association(controller);
 
 	/*
 	 * Note this routine is only used for buffers needed to translate
 	 * SCSI UNMAP commands to ATA DSM commands for SATA disks.
 	 *
 	 * We first try to pull a buffer from the controller's pool, and only
 	 * call contigmalloc if one isn't there.
 	 */
 	if (!sci_pool_empty(isci_controller->unmap_buffer_pool)) {
 		sci_pool_get(isci_controller->unmap_buffer_pool,
 		    mde->virtual_address);
 	} else
 		mde->virtual_address = contigmalloc(PAGE_SIZE,
 		    M_ISCI, M_NOWAIT, 0, BUS_SPACE_MAXADDR,
 		    mde->constant_memory_alignment, 0);
 
 	if (mde->virtual_address != NULL)
 		bus_dmamap_load(isci_controller->buffer_dma_tag,
 		    NULL, mde->virtual_address, PAGE_SIZE,
 		    isci_single_map, &mde->physical_address,
 		    BUS_DMA_NOWAIT);
 }
 
 /**
  * @brief This method will be invoked to free memory that was dynamically
  *        allocated via scif_cb_controller_allocate_memory().
  *
  * @param[in]  controller This parameter represents the controller
  *             object for which memory was allocated.
  * @param[in]  mde This parameter represents the memory descriptor
  *             referencing the memory to be freed.
  *
  * @return none
  */
 void scif_cb_controller_free_memory(SCI_CONTROLLER_HANDLE_T controller,
     SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde)
 {
 	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
 	    sci_object_get_association(controller);
 
 	/*
 	 * Put the buffer back into the controller's buffer pool, rather
 	 * than invoking contigfree().  This reduces the chance that no
 	 * buffers will be available when the system is under memory
 	 * pressure.
 	 */
 	sci_pool_put(isci_controller->unmap_buffer_pool,
 	    mde->virtual_address);
 }
 
 void isci_controller_construct(struct ISCI_CONTROLLER *controller,
     struct isci_softc *isci)
 {
 	SCI_CONTROLLER_HANDLE_T scif_controller_handle;
 
 	scif_library_allocate_controller(isci->sci_library_handle,
 	    &scif_controller_handle);
 
 	scif_controller_construct(isci->sci_library_handle,
 	    scif_controller_handle, NULL);
 
 	controller->isci = isci;
 	controller->scif_controller_handle = scif_controller_handle;
 
 	/* This allows us to later use
 	 *  sci_object_get_association(scif_controller_handle)
 	 * inside of a callback routine to get our struct ISCI_CONTROLLER object
 	 */
 	sci_object_set_association(scif_controller_handle, (void *)controller);
 
 	controller->is_started = FALSE;
 	controller->is_frozen = FALSE;
 	controller->release_queued_ccbs = FALSE;
 	controller->sim = NULL;
 	controller->initial_discovery_mask = 0;
 
 	sci_fast_list_init(&controller->pending_device_reset_list);
 
 	mtx_init(&controller->lock, "isci", NULL, MTX_DEF);
 
 	uint32_t domain_index;
 
 	for (domain_index = 0; domain_index < SCI_MAX_DOMAINS; domain_index++) {
 		isci_domain_construct( &controller->domain[domain_index],
 		    domain_index, controller);
 	}
 
 	controller->timer_memory = malloc(
 	    sizeof(struct ISCI_TIMER) * SCI_MAX_TIMERS, M_ISCI,
 	    M_NOWAIT | M_ZERO);
 
 	sci_pool_initialize(controller->timer_pool);
 
 	struct ISCI_TIMER *timer = (struct ISCI_TIMER *)
 	    controller->timer_memory;
 
 	for (int i = 0; i < SCI_MAX_TIMERS; i++) {
 		sci_pool_put(controller->timer_pool, timer++);
 	}
 
 	sci_pool_initialize(controller->unmap_buffer_pool);
 }
 
 static void isci_led_fault_func(void *priv, int onoff)
 {
 	struct ISCI_PHY *phy = priv;
 
 	/* map onoff to the fault LED */
 	phy->led_fault = onoff;
 	scic_sgpio_update_led_state(phy->handle, 1 << phy->index,
 		phy->led_fault, phy->led_locate, 0);
 }
 
 static void isci_led_locate_func(void *priv, int onoff)
 {
 	struct ISCI_PHY *phy = priv;
 
 	/* map onoff to the locate LED */
 	phy->led_locate = onoff;
 	scic_sgpio_update_led_state(phy->handle, 1 << phy->index,
 		phy->led_fault, phy->led_locate, 0);
 }
 
 SCI_STATUS isci_controller_initialize(struct ISCI_CONTROLLER *controller)
 {
 	SCIC_USER_PARAMETERS_T scic_user_parameters;
 	SCI_CONTROLLER_HANDLE_T scic_controller_handle;
 	char led_name[64];
 	unsigned long tunable;
 	uint32_t io_shortage;
 	uint32_t fail_on_timeout;
 	int i;
 
 	scic_controller_handle =
 	    scif_controller_get_scic_handle(controller->scif_controller_handle);
 
 	if (controller->isci->oem_parameters_found == TRUE) {
 		scic_oem_parameters_set(
 		    scic_controller_handle,
 		    &controller->oem_parameters,
 		    (uint8_t)(controller->oem_parameters_version));
 	}
 
 	scic_user_parameters_get(scic_controller_handle, &scic_user_parameters);
 
 	if (TUNABLE_ULONG_FETCH("hw.isci.no_outbound_task_timeout", &tunable))
 		scic_user_parameters.sds1.no_outbound_task_timeout =
 		    (uint8_t)tunable;
 
 	if (TUNABLE_ULONG_FETCH("hw.isci.ssp_max_occupancy_timeout", &tunable))
 		scic_user_parameters.sds1.ssp_max_occupancy_timeout =
 		    (uint16_t)tunable;
 
 	if (TUNABLE_ULONG_FETCH("hw.isci.stp_max_occupancy_timeout", &tunable))
 		scic_user_parameters.sds1.stp_max_occupancy_timeout =
 		    (uint16_t)tunable;
 
 	if (TUNABLE_ULONG_FETCH("hw.isci.ssp_inactivity_timeout", &tunable))
 		scic_user_parameters.sds1.ssp_inactivity_timeout =
 		    (uint16_t)tunable;
 
 	if (TUNABLE_ULONG_FETCH("hw.isci.stp_inactivity_timeout", &tunable))
 		scic_user_parameters.sds1.stp_inactivity_timeout =
 		    (uint16_t)tunable;
 
 	if (TUNABLE_ULONG_FETCH("hw.isci.max_speed_generation", &tunable))
 		for (i = 0; i < SCI_MAX_PHYS; i++)
 			scic_user_parameters.sds1.phys[i].max_speed_generation =
 			    (uint8_t)tunable;
 
 	scic_user_parameters_set(scic_controller_handle, &scic_user_parameters);
 
 	/* A scheduler bug in the SCU requires SCIL to reserve some task
 	 *  contexts as a workaround - one per domain.
 	 */
 	controller->queue_depth = SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS;
 
 	if (TUNABLE_INT_FETCH("hw.isci.controller_queue_depth",
 	    &controller->queue_depth)) {
 		controller->queue_depth = max(1, min(controller->queue_depth,
 		    SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS));
 	}
 
 	/* Reserve one request so that we can ensure we have one available TC
 	 *  to do internal device resets.
 	 */
 	controller->sim_queue_depth = controller->queue_depth - 1;
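 	/*
 	 * Worked example with assumed constants (SCI_MAX_IO_REQUESTS == 256,
 	 *  SCI_MAX_DOMAINS == 4; the real values come from the SCIL headers):
 	 *  queue_depth defaults to 256 - 4 = 252 task contexts, and
 	 *  sim_queue_depth, which is what CAM is told, becomes 251, keeping
 	 *  one TC in reserve for internal device resets.
 	 */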
 
 	/* Although we save one TC to do internal device resets, it is possible
 	 *  we could end up using several TCs for simultaneous device resets
 	 *  while at the same time having CAM fill our controller queue.  To
 	 *  simulate this condition, and how our driver handles it, we can set
 	 *  this io_shortage parameter, which will tell CAM that we have a
 	 *  larger queue depth than we really do.
 	 */
 	io_shortage = 0;
 	TUNABLE_INT_FETCH("hw.isci.io_shortage", &io_shortage);
 	controller->sim_queue_depth += io_shortage;
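 	/*
 	 * Example (the value is arbitrary): setting
 	 *
 	 *    hw.isci.io_shortage="8"
 	 *
 	 *  in loader.conf advertises eight more openings to CAM than we can
 	 *  actually start, exercising the requeue path described above.
 	 */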
 
 	fail_on_timeout = 1;
 	TUNABLE_INT_FETCH("hw.isci.fail_on_task_timeout", &fail_on_timeout);
 	controller->fail_on_task_timeout = fail_on_timeout;
 
 	/* Attach to CAM using xpt_bus_register now, then immediately freeze
 	 *  the simq.  It will get released later when initial domain discovery
 	 *  is complete.
 	 */
 	controller->has_been_scanned = FALSE;
 	mtx_lock(&controller->lock);
 	isci_controller_attach_to_cam(controller);
 	xpt_freeze_simq(controller->sim, 1);
 	mtx_unlock(&controller->lock);
 
 	for (i = 0; i < SCI_MAX_PHYS; i++) {
 		controller->phys[i].handle = scic_controller_handle;
 		controller->phys[i].index = i;
 
 		/* fault */
 		controller->phys[i].led_fault = 0;
 		snprintf(led_name, sizeof(led_name),
 		    "isci.bus%d.port%d.fault", controller->index, i);
 		controller->phys[i].cdev_fault = led_create(isci_led_fault_func,
 		    &controller->phys[i], led_name);
 
 		/* locate */
 		controller->phys[i].led_locate = 0;
 		snprintf(led_name, sizeof(led_name),
 		    "isci.bus%d.port%d.locate", controller->index, i);
 		controller->phys[i].cdev_locate = led_create(isci_led_locate_func,
 		    &controller->phys[i], led_name);
 	}
 
 	return (scif_controller_initialize(controller->scif_controller_handle));
 }
 
 int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller)
 {
 	int error;
 	device_t device = controller->isci->device;
 	uint32_t max_segment_size = isci_io_request_get_max_io_size();
 	uint32_t status = 0;
 	struct ISCI_MEMORY *uncached_controller_memory =
 	    &controller->uncached_controller_memory;
 	struct ISCI_MEMORY *cached_controller_memory =
 	    &controller->cached_controller_memory;
 	struct ISCI_MEMORY *request_memory =
 	    &controller->request_memory;
 	POINTER_UINT virtual_address;
 	bus_addr_t physical_address;
 
 	controller->mdl = sci_controller_get_memory_descriptor_list_handle(
 	    controller->scif_controller_handle);
 
 	uncached_controller_memory->size = sci_mdl_decorator_get_memory_size(
 	    controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS);
 
 	error = isci_allocate_dma_buffer(device, uncached_controller_memory);
 
 	if (error != 0)
 		return (error);
 
 	sci_mdl_decorator_assign_memory( controller->mdl,
 	    SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
 	    uncached_controller_memory->virtual_address,
 	    uncached_controller_memory->physical_address);
 
 	cached_controller_memory->size = sci_mdl_decorator_get_memory_size(
 	    controller->mdl,
 	    SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
 	);
 
 	error = isci_allocate_dma_buffer(device, cached_controller_memory);
 
 	if (error != 0)
 		return (error);
 
 	sci_mdl_decorator_assign_memory(controller->mdl,
 	    SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
 	    cached_controller_memory->virtual_address,
 	    cached_controller_memory->physical_address);
 
 	request_memory->size =
 	    controller->queue_depth * isci_io_request_get_object_size();
 
 	error = isci_allocate_dma_buffer(device, request_memory);
 
 	if (error != 0)
 		return (error);
 
 	/* For STP PIO testing, we want to ensure we can force multiple SGLs
 	 *  since this has been a problem area in SCIL.  This tunable parameter
 	 *  will allow us to force DMA segments to a smaller size, ensuring
 	 *  that even if a physically contiguous buffer is attached to this
 	 *  I/O, the DMA subsystem will pass us multiple segments in our DMA
 	 *  load callback.
 	 */
 	TUNABLE_INT_FETCH("hw.isci.max_segment_size", &max_segment_size);
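 	/*
 	 * Example (the value is arbitrary): hw.isci.max_segment_size="4096"
 	 *  in loader.conf caps each DMA segment at 4KB, so a physically
 	 *  contiguous 64KB buffer reaches our load callback as 16 segments
 	 *  rather than 1.
 	 */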
 
 	/* Create a DMA tag for our I/O requests.  Then we can create DMA maps
 	 *  based on this tag and store them in each of our ISCI_IO_REQUEST
 	 *  objects.  This gives better performance than creating the DMA
 	 *  maps every time we get an I/O.
 	 */
 	status = bus_dma_tag_create(bus_get_dma_tag(device), 0x1, 0x0,
 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 	    isci_io_request_get_max_io_size(),
 	    SCI_MAX_SCATTER_GATHER_ELEMENTS, max_segment_size, 0, NULL, NULL,
 	    &controller->buffer_dma_tag);
 
 	if (status != 0)
 		return (status);
 
 	sci_pool_initialize(controller->request_pool);
 
 	virtual_address = request_memory->virtual_address;
 	physical_address = request_memory->physical_address;
 
 	for (int i = 0; i < controller->queue_depth; i++) {
 		struct ISCI_REQUEST *request =
 		    (struct ISCI_REQUEST *)virtual_address;
 
 		isci_request_construct(request,
 		    controller->scif_controller_handle,
 		    controller->buffer_dma_tag, physical_address);
 
 		sci_pool_put(controller->request_pool, request);
 
 		virtual_address += isci_request_get_object_size();
 		physical_address += isci_request_get_object_size();
 	}
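 
 	/*
 	 * The loop above carves request_memory into queue_depth equal-sized
 	 *  slots, advancing virtual_address and physical_address in lockstep;
 	 *  that lockstep is what lets scic_cb_io_request_get_physical_address()
 	 *  later convert an offset within a request back to a physical address.
 	 */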
 
 	uint32_t remote_device_size = sizeof(struct ISCI_REMOTE_DEVICE) +
 	    scif_remote_device_get_object_size();
 
 	controller->remote_device_memory = (uint8_t *) malloc(
 	    remote_device_size * SCI_MAX_REMOTE_DEVICES, M_ISCI,
 	    M_NOWAIT | M_ZERO);
 
 	if (controller->remote_device_memory == NULL)
 		return (ENOMEM);
 
 	sci_pool_initialize(controller->remote_device_pool);
 
 	uint8_t *remote_device_memory_ptr = controller->remote_device_memory;
 
 	for (int i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
 		struct ISCI_REMOTE_DEVICE *remote_device =
 		    (struct ISCI_REMOTE_DEVICE *)remote_device_memory_ptr;
 
 		controller->remote_device[i] = NULL;
 		remote_device->index = i;
 		remote_device->is_resetting = FALSE;
 		remote_device->frozen_lun_mask = 0;
 		sci_fast_list_element_init(remote_device,
 		    &remote_device->pending_device_reset_element);
 		TAILQ_INIT(&remote_device->queued_ccbs);
 		remote_device->release_queued_ccb = FALSE;
 		remote_device->queued_ccb_in_progress = NULL;
 
 		/*
 		 * For the first SCI_MAX_DOMAINS device objects, do not put
 		 *  them in the pool, rather assign them to each domain.  This
 		 *  ensures that any device attached directly to port "i" will
 		 *  always get CAM target id "i".
 		 */
 		if (i < SCI_MAX_DOMAINS)
 			controller->domain[i].da_remote_device = remote_device;
 		else
 			sci_pool_put(controller->remote_device_pool,
 			    remote_device);
 		remote_device_memory_ptr += remote_device_size;
 	}
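 
 	/*
 	 * Example of the mapping above: a disk direct-attached to port 2
 	 *  is assigned domain[2].da_remote_device and therefore always
 	 *  appears as CAM target id 2; expander-attached devices draw from
 	 *  the pool and receive the remaining indices.
 	 */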
 
 	return (0);
 }
 
 void isci_controller_start(void *controller_handle)
 {
 	struct ISCI_CONTROLLER *controller =
 	    (struct ISCI_CONTROLLER *)controller_handle;
 	SCI_CONTROLLER_HANDLE_T scif_controller_handle =
 	    controller->scif_controller_handle;
 
 	scif_controller_start(scif_controller_handle,
 	    scif_controller_get_suggested_start_timeout(scif_controller_handle));
 
 	scic_controller_enable_interrupts(
 	    scif_controller_get_scic_handle(controller->scif_controller_handle));
 }
 
 void isci_controller_domain_discovery_complete(
     struct ISCI_CONTROLLER *isci_controller, struct ISCI_DOMAIN *isci_domain)
 {
 	if (!isci_controller->has_been_scanned)
 	{
 		/* Controller has not been scanned yet.  We'll clear
 		 *  the discovery bit for this domain, then check if all bits
 		 *  are now clear.  That would indicate that all domains are
 		 *  done with discovery and we can then proceed with initial
 		 *  scan.
 		 */
 
 		isci_controller->initial_discovery_mask &=
 		    ~(1 << isci_domain->index);
 
 		if (isci_controller->initial_discovery_mask == 0) {
 			struct isci_softc *driver = isci_controller->isci;
 			uint8_t next_index = isci_controller->index + 1;
 
 			isci_controller->has_been_scanned = TRUE;
 
 			/* Unfreeze simq to allow initial scan to proceed. */
 			xpt_release_simq(isci_controller->sim, TRUE);
 
 #if __FreeBSD_version < 800000
 			/* When driver is loaded after boot, we need to
 			 *  explicitly rescan here for versions <8.0, because
 			 *  CAM only automatically scans new buses at boot
 			 *  time.
 			 */
 			union ccb *ccb = xpt_alloc_ccb_nowait();
 
 			xpt_create_path(&ccb->ccb_h.path, NULL,
 			    cam_sim_path(isci_controller->sim),
 			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 
 			xpt_rescan(ccb);
 #endif
 
 			if (next_index < driver->controller_count) {
 				/*  There are more controllers that need to
 				 *   start.  So start the next one.
 				 */
 				isci_controller_start(
 				    &driver->controllers[next_index]);
 			} else {
 				/* All controllers have been started and
 				 *  completed discovery.  Disestablish the
 				 *  config hook, which will signal to the
 				 *  kernel during boot that it is safe to try
 				 *  to find and mount the root partition.
 				 */
 				config_intrhook_disestablish(
 				    &driver->config_hook);
 			}
 		}
 	}
 }
 
 int isci_controller_attach_to_cam(struct ISCI_CONTROLLER *controller)
 {
 	struct isci_softc *isci = controller->isci;
 	device_t parent = device_get_parent(isci->device);
 	int unit = device_get_unit(isci->device);
 	struct cam_devq *isci_devq = cam_simq_alloc(controller->sim_queue_depth);
 
 	if (isci_devq == NULL) {
 		isci_log_message(0, "ISCI", "isci_devq is NULL\n");
 		return (-1);
 	}
 
 	controller->sim = cam_sim_alloc(isci_action, isci_poll, "isci",
 	    controller, unit, &controller->lock, controller->sim_queue_depth,
 	    controller->sim_queue_depth, isci_devq);
 
 	if (controller->sim == NULL) {
 		isci_log_message(0, "ISCI", "cam_sim_alloc failed\n");
 		cam_simq_free(isci_devq);
 		return (-1);
 	}
 
 	if (xpt_bus_register(controller->sim, parent, controller->index)
 	    != CAM_SUCCESS) {
 		isci_log_message(0, "ISCI", "xpt_bus_register failed\n");
 		cam_sim_free(controller->sim, TRUE);
 		return (-1);
 	}
 
 	if (xpt_create_path(&controller->path, NULL,
 	    cam_sim_path(controller->sim), CAM_TARGET_WILDCARD,
 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 		isci_log_message(0, "ISCI", "xpt_create_path failed\n");
 		xpt_bus_deregister(cam_sim_path(controller->sim));
 		cam_sim_free(controller->sim, TRUE);
 		return (-1);
 	}
 
 	return (0);
 }
 
 void isci_poll(struct cam_sim *sim)
 {
 	struct ISCI_CONTROLLER *controller =
 	    (struct ISCI_CONTROLLER *)cam_sim_softc(sim);
 
 	isci_interrupt_poll_handler(controller);
 }
 
 void isci_action(struct cam_sim *sim, union ccb *ccb)
 {
 	struct ISCI_CONTROLLER *controller =
 	    (struct ISCI_CONTROLLER *)cam_sim_softc(sim);
 
 	switch (ccb->ccb_h.func_code) {
 	case XPT_PATH_INQ:
 		{
 			struct ccb_pathinq *cpi = &ccb->cpi;
 			int bus = cam_sim_bus(sim);
 			ccb->ccb_h.ccb_sim_ptr = sim;
 			cpi->version_num = 1;
 			cpi->hba_inquiry = PI_TAG_ABLE;
 			cpi->target_sprt = 0;
 			cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN |
 			    PIM_UNMAPPED;
 			cpi->hba_eng_cnt = 0;
 			cpi->max_target = SCI_MAX_REMOTE_DEVICES - 1;
 			cpi->max_lun = ISCI_MAX_LUN;
 #if __FreeBSD_version >= 800102
 			cpi->maxio = isci_io_request_get_max_io_size();
 #endif
 			cpi->unit_number = cam_sim_unit(sim);
 			cpi->bus_id = bus;
 			cpi->initiator_id = SCI_MAX_REMOTE_DEVICES;
 			cpi->base_transfer_speed = 300000;
 			strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 			strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
 			strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
 			cpi->transport = XPORT_SAS;
 			cpi->transport_version = 0;
 			cpi->protocol = PROTO_SCSI;
 			cpi->protocol_version = SCSI_REV_SPC2;
 			cpi->ccb_h.status = CAM_REQ_CMP;
 			xpt_done(ccb);
 		}
 		break;
 	case XPT_GET_TRAN_SETTINGS:
 		{
 			struct ccb_trans_settings *general_settings = &ccb->cts;
 			struct ccb_trans_settings_sas *sas_settings =
 			    &general_settings->xport_specific.sas;
 			struct ccb_trans_settings_scsi *scsi_settings =
 			    &general_settings->proto_specific.scsi;
 			struct ISCI_REMOTE_DEVICE *remote_device;
 
 			remote_device = controller->remote_device[ccb->ccb_h.target_id];
 
 			if (remote_device == NULL) {
 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
 				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
 				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
 				xpt_done(ccb);
 				break;
 			}
 
 			general_settings->protocol = PROTO_SCSI;
 			general_settings->transport = XPORT_SAS;
 			general_settings->protocol_version = SCSI_REV_SPC2;
 			general_settings->transport_version = 0;
 			scsi_settings->valid = CTS_SCSI_VALID_TQ;
 			scsi_settings->flags = CTS_SCSI_FLAGS_TAG_ENB;
 			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
 			ccb->ccb_h.status |= CAM_REQ_CMP;
 
 			sas_settings->bitrate =
 			    isci_remote_device_get_bitrate(remote_device);
 
 			if (sas_settings->bitrate != 0)
 				sas_settings->valid = CTS_SAS_VALID_SPEED;
 
 			xpt_done(ccb);
 		}
 		break;
 	case XPT_SCSI_IO:
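+		/*
+		 * The CDB is consumed through its kernel virtual address
+		 *  (scif_cb_io_request_get_cdb_address() hands it to SCIL),
+		 *  so a CDB supplied by physical address cannot be handled;
+		 *  fail such requests up front.
+		 */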
+		if (ccb->ccb_h.flags & CAM_CDB_PHYS) {
+			ccb->ccb_h.status = CAM_REQ_INVALID;
+			xpt_done(ccb);
+			break;
+		}
 		isci_io_request_execute_scsi_io(ccb, controller);
 		break;
 #if __FreeBSD_version >= 900026
 	case XPT_SMP_IO:
 		isci_io_request_execute_smp_io(ccb, controller);
 		break;
 #endif
 	case XPT_SET_TRAN_SETTINGS:
 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
 		ccb->ccb_h.status |= CAM_REQ_CMP;
 		xpt_done(ccb);
 		break;
 	case XPT_CALC_GEOMETRY:
 		cam_calc_geometry(&ccb->ccg, /*extended*/1);
 		xpt_done(ccb);
 		break;
 	case XPT_RESET_DEV:
 		{
 			struct ISCI_REMOTE_DEVICE *remote_device =
 			    controller->remote_device[ccb->ccb_h.target_id];
 
 			if (remote_device != NULL)
 				isci_remote_device_reset(remote_device, ccb);
 			else {
 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
 				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
 				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
 				xpt_done(ccb);
 			}
 		}
 		break;
 	case XPT_RESET_BUS:
 		ccb->ccb_h.status = CAM_REQ_CMP;
 		xpt_done(ccb);
 		break;
 	default:
 		isci_log_message(0, "ISCI", "Unhandled func_code 0x%x\n",
 		    ccb->ccb_h.func_code);
 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
 		ccb->ccb_h.status |= CAM_REQ_INVALID;
 		xpt_done(ccb);
 		break;
 	}
 }
 
 /*
  * Unfortunately, SCIL doesn't cleanly handle retry conditions.
  *  CAM_REQUEUE_REQ works only when no one is using the pass(4) interface.  So
  *  when SCIL denotes an I/O needs to be retried (typically because of mixing
  *  tagged/non-tagged ATA commands, or running out of NCQ slots), we queue
  *  these I/Os internally.  Once SCIL completes an I/O to this device, or we
  *  get a ready notification, we will retry the first I/O on the queue.
  *  Unfortunately, SCIL also doesn't cleanly handle starting the new I/O
  *  within the context of the completion handler, so we need to retry these
  *  I/Os after the completion handler is done executing.
 */
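 /*
  * Lifecycle sketch (all names are this driver's own): when SCIL returns a
  *  retryable status, isci_io_request_complete() marks the CCB CAM_SIM_QUEUED
  *  and appends it to the device's queued_ccbs list; a later completion or
  *  ready notification sets release_queued_ccb, after which this routine
  *  re-issues the head of that list, one CCB in progress at a time.
  */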
 void
 isci_controller_release_queued_ccbs(struct ISCI_CONTROLLER *controller)
 {
 	struct ISCI_REMOTE_DEVICE *dev;
 	struct ccb_hdr *ccb_h;
+	uint8_t *ptr;
 	int dev_idx;
 
 	KASSERT(mtx_owned(&controller->lock), ("controller lock not owned"));
 
 	controller->release_queued_ccbs = FALSE;
 	for (dev_idx = 0;
 	     dev_idx < SCI_MAX_REMOTE_DEVICES;
 	     dev_idx++) {
 
 		dev = controller->remote_device[dev_idx];
 		if (dev != NULL &&
 		    dev->release_queued_ccb == TRUE &&
 		    dev->queued_ccb_in_progress == NULL) {
 			dev->release_queued_ccb = FALSE;
 			ccb_h = TAILQ_FIRST(&dev->queued_ccbs);
 
 			if (ccb_h == NULL)
 				continue;
 
-			isci_log_message(1, "ISCI", "release %p %x\n", ccb_h,
-			    ((union ccb *)ccb_h)->csio.cdb_io.cdb_bytes[0]);
+			ptr = scsiio_cdb_ptr(&((union ccb *)ccb_h)->csio);
+			isci_log_message(1, "ISCI", "release %p %x\n", ccb_h, *ptr);
 
 			dev->queued_ccb_in_progress = (union ccb *)ccb_h;
 			isci_io_request_execute_scsi_io(
 			    (union ccb *)ccb_h, controller);
 		}
 	}
 }
Index: stable/10/sys/dev/isci/isci_io_request.c
===================================================================
--- stable/10/sys/dev/isci/isci_io_request.c	(revision 312849)
+++ stable/10/sys/dev/isci/isci_io_request.c	(revision 312850)
@@ -1,991 +1,991 @@
 /*-
  * BSD LICENSE
  *
  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  *   * Redistributions of source code must retain the above copyright
  *     notice, this list of conditions and the following disclaimer.
  *   * Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in
  *     the documentation and/or other materials provided with the
  *     distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include <dev/isci/isci.h>
 
 #include <cam/scsi/scsi_all.h>
 #include <cam/scsi/scsi_message.h>
 
 #include <dev/isci/scil/intel_sas.h>
 
 #include <dev/isci/scil/sci_util.h>
 
 #include <dev/isci/scil/scif_io_request.h>
 #include <dev/isci/scil/scif_controller.h>
 #include <dev/isci/scil/scif_remote_device.h>
 #include <dev/isci/scil/scif_user_callback.h>
 
 #include <dev/isci/scil/scic_io_request.h>
 #include <dev/isci/scil/scic_user_callback.h>
 
 /**
  * @brief This user callback will inform the user that an IO request has
  *        completed.
  *
  * @param[in]  controller This parameter specifies the controller on
  *             which the IO request is completing.
  * @param[in]  remote_device This parameter specifies the remote device on
  *             which this request is completing.
  * @param[in]  io_request This parameter specifies the IO request that has
  *             completed.
  * @param[in]  completion_status This parameter specifies the results of
  *             the IO request operation.  SCI_IO_SUCCESS indicates
  *             successful completion.
  *
  * @return none
  */
 void
 scif_cb_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
     SCI_REMOTE_DEVICE_HANDLE_T remote_device,
     SCI_IO_REQUEST_HANDLE_T io_request, SCI_IO_STATUS completion_status)
 {
 	struct ISCI_IO_REQUEST *isci_request =
 	    (struct ISCI_IO_REQUEST *)sci_object_get_association(io_request);
 
 	scif_controller_complete_io(scif_controller, remote_device, io_request);
 	isci_io_request_complete(scif_controller, remote_device, isci_request,
 	    completion_status);
 }
 
 void
 isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
     SCI_REMOTE_DEVICE_HANDLE_T remote_device,
     struct ISCI_IO_REQUEST *isci_request, SCI_IO_STATUS completion_status)
 {
 	struct ISCI_CONTROLLER *isci_controller;
 	struct ISCI_REMOTE_DEVICE *isci_remote_device;
 	union ccb *ccb;
 	BOOL complete_ccb;
+	struct ccb_scsiio *csio;
 
 	complete_ccb = TRUE;
 	isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(scif_controller);
 	isci_remote_device =
 		(struct ISCI_REMOTE_DEVICE *) sci_object_get_association(remote_device);
 
 	ccb = isci_request->ccb;
-
+	csio = &ccb->csio;
 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
 
 	switch (completion_status) {
 	case SCI_IO_SUCCESS:
 	case SCI_IO_SUCCESS_COMPLETE_BEFORE_START:
 #if __FreeBSD_version >= 900026
 		if (ccb->ccb_h.func_code == XPT_SMP_IO) {
 			void *smp_response =
 			    scif_io_request_get_response_iu_address(
 			        isci_request->sci_object);
 
 			memcpy(ccb->smpio.smp_response, smp_response,
 			    ccb->smpio.smp_response_len);
 		}
 #endif
 		ccb->ccb_h.status |= CAM_REQ_CMP;
 		break;
 
 	case SCI_IO_SUCCESS_IO_DONE_EARLY:
 		ccb->ccb_h.status |= CAM_REQ_CMP;
 		ccb->csio.resid = ccb->csio.dxfer_len -
 		    scif_io_request_get_number_of_bytes_transferred(
 		        isci_request->sci_object);
 		break;
 
 	case SCI_IO_FAILURE_RESPONSE_VALID:
 	{
 		SCI_SSP_RESPONSE_IU_T * response_buffer;
 		uint32_t sense_length;
 		int error_code, sense_key, asc, ascq;
-		struct ccb_scsiio *csio = &ccb->csio;
 
 		response_buffer = (SCI_SSP_RESPONSE_IU_T *)
 		    scif_io_request_get_response_iu_address(
 		        isci_request->sci_object);
 
 		sense_length = sci_ssp_get_sense_data_length(
 		    response_buffer->sense_data_length);
 
 		sense_length = MIN(csio->sense_len, sense_length);
 
 		memcpy(&csio->sense_data, response_buffer->data, sense_length);
 
 		csio->sense_resid = csio->sense_len - sense_length;
 		csio->scsi_status = response_buffer->status;
 		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
 		scsi_extract_sense( &csio->sense_data, &error_code, &sense_key,
 		    &asc, &ascq );
 		isci_log_message(1, "ISCI",
 		    "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n",
 		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
-		    ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0],
+		    ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio)[0],
 		    csio->scsi_status, sense_key, asc, ascq);
 		break;
 	}
 
 	case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
 		isci_remote_device_reset(isci_remote_device, NULL);
 		ccb->ccb_h.status |= CAM_REQ_TERMIO;
 		isci_log_message(0, "ISCI",
 		    "isci: bus=%x target=%x lun=%x cdb[0]=%x remote device reset required\n",
 		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
-		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]);
+		    ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio)[0]);
 		break;
 
 	case SCI_IO_FAILURE_TERMINATED:
 		ccb->ccb_h.status |= CAM_REQ_TERMIO;
 		isci_log_message(0, "ISCI",
 		    "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n",
 		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
-		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]);
+		    ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio)[0]);
 		break;
 
 	case SCI_IO_FAILURE_INVALID_STATE:
 	case SCI_IO_FAILURE_INSUFFICIENT_RESOURCES:
 		complete_ccb = FALSE;
 		break;
 
 	case SCI_IO_FAILURE_INVALID_REMOTE_DEVICE:
 		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
 		break;
 
 	case SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE:
 		{
 			struct ccb_relsim ccb_relsim;
 			struct cam_path *path;
 
 			xpt_create_path(&path, NULL,
 			    cam_sim_path(isci_controller->sim),
 			    isci_remote_device->index, 0);
 
 			xpt_setup_ccb(&ccb_relsim.ccb_h, path, 5);
 			ccb_relsim.ccb_h.func_code = XPT_REL_SIMQ;
 			ccb_relsim.ccb_h.flags = CAM_DEV_QFREEZE;
 			ccb_relsim.release_flags = RELSIM_ADJUST_OPENINGS;
 			ccb_relsim.openings =
 			    scif_remote_device_get_max_queue_depth(remote_device);
 			xpt_action((union ccb *)&ccb_relsim);
 			xpt_free_path(path);
 			complete_ccb = FALSE;
 		}
 		break;
 
 	case SCI_IO_FAILURE:
 	case SCI_IO_FAILURE_REQUIRES_SCSI_ABORT:
 	case SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL:
 	case SCI_IO_FAILURE_PROTOCOL_VIOLATION:
 	case SCI_IO_FAILURE_INVALID_PARAMETER_VALUE:
 	case SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR:
 	default:
 		isci_log_message(1, "ISCI",
 		    "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n",
 		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
-		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0],
+		    ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio)[0],
 		    completion_status);
 		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
 		break;
 	}
 
 	callout_stop(&isci_request->parent.timer);
 	bus_dmamap_sync(isci_request->parent.dma_tag,
 	    isci_request->parent.dma_map,
 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 
 	bus_dmamap_unload(isci_request->parent.dma_tag,
 	    isci_request->parent.dma_map);
 
 	isci_request->ccb = NULL;
 
 	sci_pool_put(isci_controller->request_pool,
 	    (struct ISCI_REQUEST *)isci_request);
 
 	if (complete_ccb) {
 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 			/* ccb will be completed with some type of non-success
 			 *  status.  So temporarily freeze the queue until the
 			 *  upper layers can act on the status.  The
 			 *  CAM_DEV_QFRZN flag will then release the queue
 			 *  after the status is acted upon.
 			 */
 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
 			xpt_freeze_devq(ccb->ccb_h.path, 1);
 		}
 
 		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {
 
 			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
 			    ("multiple internally queued ccbs in flight"));
 
 			TAILQ_REMOVE(&isci_remote_device->queued_ccbs,
 			    &ccb->ccb_h, sim_links.tqe);
 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
 
 			/*
 			 * This CCB that was in the queue was completed, so
 			 *  set the in_progress pointer to NULL denoting that
 			 *  we can retry another CCB from the queue.  We only
 			 *  allow one CCB at a time from the queue to be
 			 *  in progress so that we can effectively maintain
 			 *  ordering.
 			 */
 			isci_remote_device->queued_ccb_in_progress = NULL;
 		}
 
 		if (isci_remote_device->frozen_lun_mask != 0) {
 			isci_remote_device_release_device_queue(isci_remote_device);
 		}
 
 		xpt_done(ccb);
 
 		if (isci_controller->is_frozen == TRUE) {
 			isci_controller->is_frozen = FALSE;
 			xpt_release_simq(isci_controller->sim, TRUE);
 		}
 	} else {
 		isci_remote_device_freeze_lun_queue(isci_remote_device,
 		    ccb->ccb_h.target_lun);
 
 		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {
 
 			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
 			    ("multiple internally queued ccbs in flight"));
 
 			/*
 			 *  Do nothing, CCB is already on the device's queue.
 			 *   We leave it on the queue, to be retried again
 			 *   next time a CCB on this device completes, or we
 			 *   get a ready notification for this device.
 			 */
 			isci_log_message(1, "ISCI", "already queued %p %x\n",
-			    ccb, ccb->csio.cdb_io.cdb_bytes[0]);
+			    ccb, scsiio_cdb_ptr(csio)[0]);
 
 			isci_remote_device->queued_ccb_in_progress = NULL;
 
 		} else {
 			isci_log_message(1, "ISCI", "queue %p %x\n", ccb,
-			    ccb->csio.cdb_io.cdb_bytes[0]);
+			    scsiio_cdb_ptr(csio)[0]);
 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
 
 			TAILQ_INSERT_TAIL(&isci_remote_device->queued_ccbs,
 			    &ccb->ccb_h, sim_links.tqe);
 		}
 	}
 }
 
 /**
  * @brief This callback method asks the user to provide the physical
  *        address for the supplied virtual address when building an
  *        io request object.
  *
  * @param[in] controller This parameter is the core controller object
  *            handle.
  * @param[in] io_request This parameter is the io request object handle
  *            for which the physical address is being requested.
  * @param[in] virtual_address This parameter is the virtual address which
  *            is to be returned as a physical address.
  * @param[out] physical_address The physical address for the supplied virtual
  *             address.
  *
  * @return None.
  */
 void
 scic_cb_io_request_get_physical_address(SCI_CONTROLLER_HANDLE_T	controller,
     SCI_IO_REQUEST_HANDLE_T io_request, void *virtual_address,
     SCI_PHYSICAL_ADDRESS *physical_address)
 {
 	SCI_IO_REQUEST_HANDLE_T scif_request =
 	    sci_object_get_association(io_request);
 	struct ISCI_REQUEST *isci_request =
 	    sci_object_get_association(scif_request);
 
 	if (isci_request != NULL) {
 		/* isci_request is not NULL, meaning this is a request initiated
 		 *  by CAM or the isci layer (i.e. device reset for I/O
 		 *  timeout).  Therefore we can calculate the physical address
 		 *  based on the address we stored in the struct ISCI_REQUEST
 		 *  object.
 		 */
 		*physical_address = isci_request->physical_address +
 		    (uintptr_t)virtual_address -
 		    (uintptr_t)isci_request;
 	} else {
 		/* isci_request is NULL, meaning this is a request generated
 		 *  internally by SCIL (i.e. for SMP requests or NCQ error
 		 *  recovery).  Therefore we calculate the physical address
 		 *  based on the controller's uncached controller memory buffer,
 		 *  since we know that this is what SCIL uses for internal
 		 *  framework requests.
 		 */
 		SCI_CONTROLLER_HANDLE_T scif_controller =
 		    (SCI_CONTROLLER_HANDLE_T) sci_object_get_association(controller);
 		struct ISCI_CONTROLLER *isci_controller =
 		    (struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller);
 		U64 virt_addr_offset = (uintptr_t)virtual_address -
 		    (U64)isci_controller->uncached_controller_memory.virtual_address;
 
 		*physical_address =
 		    isci_controller->uncached_controller_memory.physical_address
 		    + virt_addr_offset;
 	}
 }
 
 /**
  * @brief This callback method asks the user to provide the address for
  *        the command descriptor block (CDB) associated with this IO request.
  *
  * @param[in] scif_user_io_request This parameter points to the user's
  *            IO request object.  It is a cookie that allows the user to
  *            provide the necessary information for this callback.
  *
  * @return This method returns the virtual address of the CDB.
  */
 void *
 scif_cb_io_request_get_cdb_address(void * scif_user_io_request)
 {
 	struct ISCI_IO_REQUEST *isci_request =
 	    (struct ISCI_IO_REQUEST *)scif_user_io_request;
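+	/*
+	 * Depending on CAM_CDB_POINTER, the CDB lives either inline in the
+	 *  CCB (cdb_io.cdb_bytes) or behind a pointer (cdb_io.cdb_ptr);
+	 *  scsiio_cdb_ptr() returns whichever applies, where the old code
+	 *  assumed the inline form.
+	 */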
 
-	return (isci_request->ccb->csio.cdb_io.cdb_bytes);
+	return (scsiio_cdb_ptr(&isci_request->ccb->csio));
 }
 
 /**
  * @brief This callback method asks the user to provide the length of
  *        the command descriptor block (CDB) associated with this IO request.
  *
  * @param[in] scif_user_io_request This parameter points to the user's
  *            IO request object.  It is a cookie that allows the user to
  *            provide the necessary information for this callback.
  *
  * @return This method returns the length of the CDB.
  */
 uint32_t
 scif_cb_io_request_get_cdb_length(void * scif_user_io_request)
 {
 	struct ISCI_IO_REQUEST *isci_request =
 	    (struct ISCI_IO_REQUEST *)scif_user_io_request;
 
 	return (isci_request->ccb->csio.cdb_len);
 }
 
 /**
  * @brief This callback method asks the user to provide the Logical Unit (LUN)
  *        associated with this IO request.
  *
  * @note The contents of the value returned from this callback are defined
  *       by the protocol standard (e.g. T10 SAS specification).  Please
  *       refer to the transport command information unit description
  *       in the associated standard.
  *
  * @param[in] scif_user_io_request This parameter points to the user's
  *            IO request object.  It is a cookie that allows the user to
  *            provide the necessary information for this callback.
  *
  * @return This method returns the LUN associated with this request.
  */
 uint32_t
 scif_cb_io_request_get_lun(void * scif_user_io_request)
 {
 	struct ISCI_IO_REQUEST *isci_request =
 	    (struct ISCI_IO_REQUEST *)scif_user_io_request;
 
 	return (isci_request->ccb->ccb_h.target_lun);
 }
 
 /**
  * @brief This callback method asks the user to provide the task attribute
  *        associated with this IO request.
  *
  * @note The contents of the value returned from this callback are defined
  *       by the protocol standard (e.g. T10 SAS specification).  Please
  *       refer to the transport command information unit description
  *       in the associated standard.
  *
  * @param[in] scif_user_io_request This parameter points to the user's
  *            IO request object.  It is a cookie that allows the user to
  *            provide the necessary information for this callback.
  *
  * @return This method returns the task attribute associated with this
  *         IO request.
  */
 uint32_t
 scif_cb_io_request_get_task_attribute(void * scif_user_io_request)
 {
 	struct ISCI_IO_REQUEST *isci_request =
 	    (struct ISCI_IO_REQUEST *)scif_user_io_request;
 	uint32_t task_attribute;
 
 	if ((isci_request->ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0)
 		switch (isci_request->ccb->csio.tag_action) {
 		case MSG_HEAD_OF_Q_TAG:
 			task_attribute = SCI_SAS_HEAD_OF_QUEUE_ATTRIBUTE;
 			break;
 
 		case MSG_ORDERED_Q_TAG:
 			task_attribute = SCI_SAS_ORDERED_ATTRIBUTE;
 			break;
 
 		case MSG_ACA_TASK:
 			task_attribute = SCI_SAS_ACA_ATTRIBUTE;
 			break;
 
 		default:
 			task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE;
 			break;
 		}
 	else
 		task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE;
 
 	return (task_attribute);
 }
 
 /**
  * @brief This callback method asks the user to provide the command priority
  *        associated with this IO request.
  *
  * @note The contents of the value returned from this callback are defined
  *       by the protocol standard (e.g. T10 SAS specification).  Please
  *       refer to the transport command information unit description
  *       in the associated standard.
  *
  * @param[in] scif_user_io_request This parameter points to the user's
  *            IO request object.  It is a cookie that allows the user to
  *            provide the necessary information for this callback.
  *
  * @return This method returns the command priority associated with this
  *         IO request.
  */
 uint32_t
 scif_cb_io_request_get_command_priority(void * scif_user_io_request)
 {
 	return (0);
 }
 
 /**
  * @brief This method simply returns the virtual address associated
  *        with the scsi_io and byte_offset supplied parameters.
  *
  * @note This callback is not utilized in the fast path.  The expectation
  *       is that this method is utilized for items such as SCSI to ATA
  *       translation for commands like INQUIRY, READ CAPACITY, etc.
  *
  * @param[in] scif_user_io_request This parameter points to the user's
  *            IO request object.  It is a cookie that allows the user to
  *            provide the necessary information for this callback.
  * @param[in] byte_offset This parameter specifies the offset into the data
  *            buffers pointed to by the SGL.  The byte offset starts at 0
  *            and continues until the last byte pointed to by the last SGL
  *            element.
  *
  * @return A virtual address pointer to the location specified by the
  *         parameters.
  */
 uint8_t *
 scif_cb_io_request_get_virtual_address_from_sgl(void * scif_user_io_request,
     uint32_t byte_offset)
 {
 	struct ISCI_IO_REQUEST	*isci_request;
 	union ccb		*ccb;
 
 	isci_request = scif_user_io_request;
 	ccb = isci_request->ccb;
 
 	/*
 	 * This callback is only invoked for SCSI/ATA translation of
 	 *  PIO commands such as INQUIRY and READ_CAPACITY, to allow
 	 *  the driver to write the translated data directly into the
 	 *  data buffer.  It is never invoked for READ/WRITE commands.
 	 *  The driver currently assumes only READ/WRITE commands will
 	 *  be unmapped.
 	 *
 	 * As a safeguard against future changes to unmapped commands,
 	 *  add an explicit panic here should the DATA_MASK != VADDR.
 	 *  Otherwise, we would return some garbage pointer back to the
 	 *  caller which would result in a panic or more subtle data
 	 *  corruption later on.
 	 */
 	if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
 		panic("%s: requesting pointer into unmapped ccb", __func__);
 
 	return (ccb->csio.data_ptr + byte_offset);
 }
 
 /**
  * @brief This callback method asks the user to provide the number of
  *        bytes to be transferred as part of this request.
  *
  * @param[in] scif_user_io_request This parameter points to the user's
  *            IO request object.  It is a cookie that allows the user to
  *            provide the necessary information for this callback.
  *
  * @return This method returns the number of payload data bytes to be
  *         transferred for this IO request.
  */
 uint32_t
 scif_cb_io_request_get_transfer_length(void * scif_user_io_request)
 {
 	struct ISCI_IO_REQUEST *isci_request =
 	    (struct ISCI_IO_REQUEST *)scif_user_io_request;
 
 	return (isci_request->ccb->csio.dxfer_len);
 }
 
 /**
  * @brief This callback method asks the user to provide the data direction
  *        for this request.
  *
  * @param[in] scif_user_io_request This parameter points to the user's
  *            IO request object.  It is a cookie that allows the user to
  *            provide the necessary information for this callback.
  *
  * @return This method returns the value of SCI_IO_REQUEST_DATA_OUT,
  *         SCI_IO_REQUEST_DATA_IN, or SCI_IO_REQUEST_NO_DATA.
  */
 SCI_IO_REQUEST_DATA_DIRECTION
 scif_cb_io_request_get_data_direction(void * scif_user_io_request)
 {
 	struct ISCI_IO_REQUEST *isci_request =
 	    (struct ISCI_IO_REQUEST *)scif_user_io_request;
 
 	switch (isci_request->ccb->ccb_h.flags & CAM_DIR_MASK) {
 	case CAM_DIR_IN:
 		return (SCI_IO_REQUEST_DATA_IN);
 	case CAM_DIR_OUT:
 		return (SCI_IO_REQUEST_DATA_OUT);
 	default:
 		return (SCI_IO_REQUEST_NO_DATA);
 	}
 }
 
 /**
  * @brief This callback method asks the user to provide the address
  *        to where the next Scatter-Gather Element is located.
  *
  * Details regarding usage:
  *   - Regarding the first SGE: the user should initialize an index,
  *     or a pointer, prior to construction of the request that will
  *     reference the very first scatter-gather element.  This is
  *     important since this method is called for every scatter-gather
  *     element, including the first element.
  *   - Regarding the last SGE: the user should return NULL from this
  *     method when this method is called and the SGL has exhausted
  *     all elements.
  *
  * @param[in] scif_user_io_request This parameter points to the user's
  *            IO request object.  It is a cookie that allows the user to
  *            provide the necessary information for this callback.
  * @param[in] current_sge_address This parameter specifies the address for
  *            the current SGE (i.e. the one that has just been processed).
  * @param[out] next_sge An address specifying the location for the next scatter
  *             gather element to be processed.
  *
  * @return None.
  */
 void
 scif_cb_io_request_get_next_sge(void * scif_user_io_request,
     void * current_sge_address, void ** next_sge)
 {
 	struct ISCI_IO_REQUEST *isci_request =
 	    (struct ISCI_IO_REQUEST *)scif_user_io_request;
 
 	if (isci_request->current_sge_index == isci_request->num_segments)
 		*next_sge = NULL;
 	else {
 		bus_dma_segment_t *sge =
 		    &isci_request->sge[isci_request->current_sge_index];
 
 		isci_request->current_sge_index++;
 		*next_sge = sge;
 	}
 }
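 
 /*
  * For example, with a three-element SGL built by the DMA load callback,
  *  successive calls here yield &sge[0], &sge[1], &sge[2], and finally NULL,
  *  which tells SCIL that the list is exhausted.
  */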
 
 /**
  * @brief This callback method asks the user to provide the contents of the
  *        "address" field in the Scatter-Gather Element.
  *
  * @param[in] scif_user_io_request This parameter points to the user's
  *            IO request object.  It is a cookie that allows the user to
  *            provide the necessary information for this callback.
  * @param[in] sge_address This parameter specifies the address for the
  *            SGE from which to retrieve the address field.
  *
  * @return A physical address specifying the contents of the SGE's address
  *         field.
  */
 SCI_PHYSICAL_ADDRESS
 scif_cb_sge_get_address_field(void *scif_user_io_request, void *sge_address)
 {
 	bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address;
 
 	return ((SCI_PHYSICAL_ADDRESS)sge->ds_addr);
 }
 
 /**
  * @brief This callback method asks the user to provide the contents of the
  *        "length" field in the Scatter-Gather Element.
  *
  * @param[in] scif_user_io_request This parameter points to the user's
  *            IO request object.  It is a cookie that allows the user to
  *            provide the necessary information for this callback.
  * @param[in] sge_address This parameter specifies the address for the
  *            SGE from which to retrieve the length field.
  *
  * @return This method returns the length field specified inside the SGE
  *         referenced by the sge_address parameter.
  */
 uint32_t
 scif_cb_sge_get_length_field(void *scif_user_io_request, void *sge_address)
 {
 	bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address;
 
 	return ((uint32_t)sge->ds_len);
 }
 
 void
 isci_request_construct(struct ISCI_REQUEST *request,
     SCI_CONTROLLER_HANDLE_T scif_controller_handle,
     bus_dma_tag_t io_buffer_dma_tag, bus_addr_t physical_address)
 {
 
 	request->controller_handle = scif_controller_handle;
 	request->dma_tag = io_buffer_dma_tag;
 	request->physical_address = physical_address;
 	bus_dmamap_create(request->dma_tag, 0, &request->dma_map);
 	callout_init(&request->timer, CALLOUT_MPSAFE);
 }
 
 static void
 isci_io_request_construct(void *arg, bus_dma_segment_t *seg, int nseg,
     int error)
 {
 	union ccb *ccb;
 	struct ISCI_IO_REQUEST *io_request = (struct ISCI_IO_REQUEST *)arg;
 	SCI_REMOTE_DEVICE_HANDLE_T *device = io_request->parent.remote_device_handle;
 	SCI_STATUS status;
 
 	io_request->num_segments = nseg;
 	io_request->sge = seg;
 	ccb = io_request->ccb;
 
 	if (error != 0) {
 		ccb->ccb_h.status = CAM_REQ_INVALID;
 		xpt_done(ccb);
 		return;
 	}
 
 	status = scif_io_request_construct(
 	    io_request->parent.controller_handle,
 	    io_request->parent.remote_device_handle,
 	    SCI_CONTROLLER_INVALID_IO_TAG, (void *)io_request,
 	    (void *)((char*)io_request + sizeof(struct ISCI_IO_REQUEST)),
 	    &io_request->sci_object);
 
 	if (status != SCI_SUCCESS) {
 		isci_io_request_complete(io_request->parent.controller_handle,
 		    device, io_request, (SCI_IO_STATUS)status);
 		return;
 	}
 
 	sci_object_set_association(io_request->sci_object, io_request);
 
 	bus_dmamap_sync(io_request->parent.dma_tag, io_request->parent.dma_map,
 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 
 	status = (SCI_STATUS)scif_controller_start_io(
 	    io_request->parent.controller_handle, device,
 	    io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);
 
 	if (status != SCI_SUCCESS) {
 		isci_io_request_complete(io_request->parent.controller_handle,
 		    device, io_request, (SCI_IO_STATUS)status);
 		return;
 	}
 
 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY)
 		callout_reset_sbt(&io_request->parent.timer,
 		    SBT_1MS * ccb->ccb_h.timeout, 0, isci_io_request_timeout,
 		    io_request, 0);
 }
 
 void
 isci_io_request_execute_scsi_io(union ccb *ccb,
     struct ISCI_CONTROLLER *controller)
 {
 	target_id_t target_id = ccb->ccb_h.target_id;
 	struct ISCI_REQUEST *request;
 	struct ISCI_IO_REQUEST *io_request;
 	struct ISCI_REMOTE_DEVICE *device =
 	    controller->remote_device[target_id];
 	int error;
 
 	if (device == NULL) {
 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
 		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
 		xpt_done(ccb);
 		return;
 	}
 
 	if (sci_pool_empty(controller->request_pool)) {
 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
 		xpt_freeze_simq(controller->sim, 1);
 		controller->is_frozen = TRUE;
 		xpt_done(ccb);
 		return;
 	}
 
 	ASSERT(device->is_resetting == FALSE);
 
 	sci_pool_get(controller->request_pool, request);
 	io_request = (struct ISCI_IO_REQUEST *)request;
 
 	io_request->ccb = ccb;
 	io_request->current_sge_index = 0;
 	io_request->parent.remote_device_handle = device->sci_object;
 
 	error = bus_dmamap_load_ccb(io_request->parent.dma_tag,
 	    io_request->parent.dma_map, ccb,
 	    isci_io_request_construct, io_request, 0x0);
 	/* A resource shortage from BUSDMA will be automatically
 	 * continued at a later point, pushing the CCB processing
 	 * forward, which will in turn unfreeze the simq.
 	 */
 	if (error == EINPROGRESS) {
 		xpt_freeze_simq(controller->sim, 1);
 		ccb->ccb_h.flags |= CAM_RELEASE_SIMQ;
 	}
 }
 
 void
 isci_io_request_timeout(void *arg)
 {
 	struct ISCI_IO_REQUEST *request = (struct ISCI_IO_REQUEST *)arg;
 	struct ISCI_REMOTE_DEVICE *remote_device = (struct ISCI_REMOTE_DEVICE *)
 		sci_object_get_association(request->parent.remote_device_handle);
 	struct ISCI_CONTROLLER *controller = remote_device->domain->controller;
 
 	mtx_lock(&controller->lock);
 	isci_remote_device_reset(remote_device, NULL);
 	mtx_unlock(&controller->lock);
 }
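 
 /*
  * Note that a timed-out I/O escalates to a full remote device reset rather
  *  than an individual abort; how the outstanding requests are subsequently
  *  completed is influenced by controller->fail_on_task_timeout, fetched from
  *  the hw.isci.fail_on_task_timeout tunable in isci_controller_initialize().
  */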
 
 #if __FreeBSD_version >= 900026
 /**
  * @brief This callback method gets the size of and pointer to the buffer
  *         (if any) containing the request buffer for an SMP request.
  *
  * @param[in]  core_request This parameter specifies the SCI core's request
  *             object associated with the SMP request.
  * @param[out] smp_request_buffer This parameter returns a pointer to the
  *             payload portion of the SMP request - i.e. everything after
  *             the SMP request header.
  *
  * @return Size of the request buffer in bytes.  This does *not* include
  *          the size of the SMP request header.
  */
 static uint32_t
 smp_io_request_cb_get_request_buffer(SCI_IO_REQUEST_HANDLE_T core_request,
     uint8_t ** smp_request_buffer)
 {
 	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
 	    sci_object_get_association(sci_object_get_association(core_request));
 
 	*smp_request_buffer = isci_request->ccb->smpio.smp_request +
 	    sizeof(SMP_REQUEST_HEADER_T);
 
 	return (isci_request->ccb->smpio.smp_request_len -
 	    sizeof(SMP_REQUEST_HEADER_T));
 }
 
 /**
  * @brief This callback method gets the SMP function for an SMP request.
  *
  * @param[in]  core_request This parameter specifies the SCI core's request
  *             object associated with the SMP request.
  *
  * @return SMP function for the SMP request.
  */
 static uint8_t
 smp_io_request_cb_get_function(SCI_IO_REQUEST_HANDLE_T core_request)
 {
 	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
 	    sci_object_get_association(sci_object_get_association(core_request));
 	SMP_REQUEST_HEADER_T *header =
 	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;
 
 	return (header->function);
 }
 
 /**
  * @brief This callback method gets the SMP frame type for an SMP request.
  *
  * @param[in]  core_request This parameter specifies the SCI core's request
  *             object associated with the SMP request.
  *
  * @return SMP frame type for the SMP request.
  */
 static uint8_t
 smp_io_request_cb_get_frame_type(SCI_IO_REQUEST_HANDLE_T core_request)
 {
 	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
 	    sci_object_get_association(sci_object_get_association(core_request));
 	SMP_REQUEST_HEADER_T *header =
 	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;
 
 	return (header->smp_frame_type);
 }
 
 /**
  * @brief This callback method gets the allocated response length for an SMP request.
  *
  * @param[in]  core_request This parameter specifies the SCI core's request
  *             object associated with the SMP request.
  *
  * @return Allocated response length for the SMP request.
  */
 static uint8_t
 smp_io_request_cb_get_allocated_response_length(
     SCI_IO_REQUEST_HANDLE_T core_request)
 {
 	struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
 	    sci_object_get_association(sci_object_get_association(core_request));
 	SMP_REQUEST_HEADER_T *header =
 	    (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;
 
 	return (header->allocated_response_length);
 }
 
 static SCI_STATUS
 isci_smp_request_construct(struct ISCI_IO_REQUEST *request)
 {
 	SCI_STATUS status;
 	SCIC_SMP_PASSTHRU_REQUEST_CALLBACKS_T callbacks;
 
 	status = scif_request_construct(request->parent.controller_handle,
 	    request->parent.remote_device_handle, SCI_CONTROLLER_INVALID_IO_TAG,
 	    (void *)request,
 	    (void *)((char*)request + sizeof(struct ISCI_IO_REQUEST)),
 	    &request->sci_object);
 
 	if (status == SCI_SUCCESS) {
 		callbacks.scic_cb_smp_passthru_get_request =
 		    &smp_io_request_cb_get_request_buffer;
 		callbacks.scic_cb_smp_passthru_get_function =
 		    &smp_io_request_cb_get_function;
 		callbacks.scic_cb_smp_passthru_get_frame_type =
 		    &smp_io_request_cb_get_frame_type;
 		callbacks.scic_cb_smp_passthru_get_allocated_response_length =
 		    &smp_io_request_cb_get_allocated_response_length;
 
 		/* create the smp passthrough part of the io request */
 		status = scic_io_request_construct_smp_pass_through(
 		    scif_io_request_get_scic_handle(request->sci_object),
 		    &callbacks);
 	}
 
 	return (status);
 }
 
 void
 isci_io_request_execute_smp_io(union ccb *ccb,
     struct ISCI_CONTROLLER *controller)
 {
 	SCI_STATUS status;
 	target_id_t target_id = ccb->ccb_h.target_id;
 	struct ISCI_REQUEST *request;
 	struct ISCI_IO_REQUEST *io_request;
 	SCI_REMOTE_DEVICE_HANDLE_T smp_device_handle;
 	struct ISCI_REMOTE_DEVICE *end_device = controller->remote_device[target_id];
 
 	/* SMP commands are sent to an end device, because SMP devices are not
 	 *  exposed to the kernel.  It is our responsibility to use this method
 	 *  to get the SMP device that contains the specified end device.  If
 	 *  the device is direct-attached, the handle will come back NULL, and
 	 *  we'll just fail the SMP_IO with DEV_NOT_THERE.
 	 */
 	scif_remote_device_get_containing_device(end_device->sci_object,
 	    &smp_device_handle);
 
 	if (smp_device_handle == NULL) {
 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
 		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
 		xpt_done(ccb);
 		return;
 	}
 
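 	/* If the request pool is exhausted, freeze the SIM queue so CAM
 	 *  stops issuing new commands, and complete this CCB with
 	 *  CAM_REQUEUE_REQ so it is retried once an outstanding request
 	 *  completes and the queue is released.
 	 */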
 	if (sci_pool_empty(controller->request_pool)) {
 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
 		xpt_freeze_simq(controller->sim, 1);
 		controller->is_frozen = TRUE;
 		xpt_done(ccb);
 		return;
 	}
 
 	ASSERT(end_device->is_resetting == FALSE);
 
 	sci_pool_get(controller->request_pool, request);
 	io_request = (struct ISCI_IO_REQUEST *)request;
 
 	io_request->ccb = ccb;
 	io_request->parent.remote_device_handle = smp_device_handle;
 
 	status = isci_smp_request_construct(io_request);
 
 	if (status != SCI_SUCCESS) {
 		isci_io_request_complete(controller->scif_controller_handle,
 		    smp_device_handle, io_request, (SCI_IO_STATUS)status);
 		return;
 	}
 
 	sci_object_set_association(io_request->sci_object, io_request);
 
 	status = (SCI_STATUS) scif_controller_start_io(
 	    controller->scif_controller_handle, smp_device_handle,
 	    io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);
 
 	if (status != SCI_SUCCESS) {
 		isci_io_request_complete(controller->scif_controller_handle,
 		    smp_device_handle, io_request, (SCI_IO_STATUS)status);
 		return;
 	}
 
 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY)
 		callout_reset_sbt(&io_request->parent.timer,
 		    SBT_1MS * ccb->ccb_h.timeout, 0, isci_io_request_timeout,
 		    request, 0);
 }
 #endif
Index: stable/10/sys/dev/ppbus/vpo.c
===================================================================
--- stable/10/sys/dev/ppbus/vpo.c	(revision 312849)
+++ stable/10/sys/dev/ppbus/vpo.c	(revision 312850)
@@ -1,432 +1,438 @@
 /*-
  * Copyright (c) 1997, 1998, 1999 Nicolas Souchu
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/module.h>
 #include <sys/bus.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/malloc.h>
 
 #include <cam/cam.h>
 #include <cam/cam_ccb.h>
 #include <cam/cam_sim.h>
 #include <cam/cam_xpt_sim.h>
 #include <cam/cam_debug.h>
 #include <cam/cam_periph.h>
 
 #include <cam/scsi/scsi_all.h>
 #include <cam/scsi/scsi_message.h>
 #include <cam/scsi/scsi_da.h>
 
 #include <sys/kernel.h>
 
 #include "opt_vpo.h"
 
 #include <dev/ppbus/ppbconf.h>
 #include <dev/ppbus/vpoio.h>
 
 #include "ppbus_if.h"
 
 struct vpo_sense {
 	struct scsi_sense cmd;
 	unsigned int stat;
 	unsigned int count;
 };
 
 struct vpo_data {
 	device_t vpo_dev;
 	int vpo_stat;
 	int vpo_count;
 	int vpo_error;
 
 	int vpo_isplus;
 
 	struct cam_sim  *sim;
 
 	struct vpo_sense vpo_sense;
 
 	struct vpoio_data vpo_io;	/* interface to low level functions */
 };
 
 #define DEVTOSOFTC(dev) \
 	((struct vpo_data *)device_get_softc(dev))
 
 /* cam related functions */
 static void	vpo_action(struct cam_sim *sim, union ccb *ccb);
 static void	vpo_poll(struct cam_sim *sim);
 
 static void
 vpo_identify(driver_t *driver, device_t parent)
 {
 
 	device_t dev;
 
 	dev = device_find_child(parent, "vpo", -1);
 	if (!dev)
 		BUS_ADD_CHILD(parent, 0, "vpo", -1);
 }
 
 /*
  * vpo_probe()
  */
 static int
 vpo_probe(device_t dev)
 {
 	device_t ppbus = device_get_parent(dev);
 	struct vpo_data *vpo;
 	int error;
 
 	vpo = DEVTOSOFTC(dev);
 	vpo->vpo_dev = dev;
 
 	/* check ZIP before ZIP+ or imm_probe() will send controls to
 	 * the printer or whatever else is connected to the port */
 	ppb_lock(ppbus);
 	if ((error = vpoio_probe(dev, &vpo->vpo_io)) == 0) {
 		vpo->vpo_isplus = 0;
 		device_set_desc(dev,
 				"Iomega VPI0 Parallel to SCSI interface");
 	} else if ((error = imm_probe(dev, &vpo->vpo_io)) == 0) {
 		vpo->vpo_isplus = 1;
 		device_set_desc(dev,
 				"Iomega Matchmaker Parallel to SCSI interface");
 	} else {
 		ppb_unlock(ppbus);
 		return (error);
 	}
 	ppb_unlock(ppbus);
 
 	return (0);
 }
 
 /*
  * vpo_attach()
  */
 static int
 vpo_attach(device_t dev)
 {
 	struct vpo_data *vpo = DEVTOSOFTC(dev);
 	device_t ppbus = device_get_parent(dev);
 	struct ppb_data *ppb = device_get_softc(ppbus);	/* XXX: layering */
 	struct cam_devq *devq;
 	int error;
 
 	/* low level attachment */
 	if (vpo->vpo_isplus) {
 		if ((error = imm_attach(&vpo->vpo_io)))
 			return (error);
 	} else {
 		if ((error = vpoio_attach(&vpo->vpo_io)))
 			return (error);
 	}
 
 	/*
 	**	Now tell the generic SCSI layer
 	**	about our bus.
 	*/
 	devq = cam_simq_alloc(/*maxopenings*/1);
 	/* XXX What about low-level detach on error? */
 	if (devq == NULL)
 		return (ENXIO);
 
 	vpo->sim = cam_sim_alloc(vpo_action, vpo_poll, "vpo", vpo,
 				 device_get_unit(dev), ppb->ppc_lock,
 				 /*untagged*/1, /*tagged*/0, devq);
 	if (vpo->sim == NULL) {
 		cam_simq_free(devq);
 		return (ENXIO);
 	}
 
 	ppb_lock(ppbus);
 	if (xpt_bus_register(vpo->sim, dev, /*bus*/0) != CAM_SUCCESS) {
 		cam_sim_free(vpo->sim, /*free_devq*/TRUE);
 		ppb_unlock(ppbus);
 		return (ENXIO);
 	}
 	ppb_unlock(ppbus);
 
 	return (0);
 }
 
 /*
  * vpo_intr()
  */
 static void
 vpo_intr(struct vpo_data *vpo, struct ccb_scsiio *csio)
 {
 	int errno;	/* error in errno.h */
 #ifdef VP0_DEBUG
 	int i;
 #endif
+	uint8_t *ptr;
 
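+	/*
+	 * scsiio_cdb_ptr() is the CDB accessor this change adds to
+	 * cam_ccb.h; a sketch of the assumed behavior:
+	 *
+	 *	return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
+	 *	    ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes);
+	 *
+	 * so pointer-style CDBs are honored instead of always reading the
+	 * inline cdb_bytes array.
+	 */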
+	ptr = scsiio_cdb_ptr(csio);
 	if (vpo->vpo_isplus) {
 		errno = imm_do_scsi(&vpo->vpo_io, VP0_INITIATOR,
 			csio->ccb_h.target_id,
-			(char *)&csio->cdb_io.cdb_bytes, csio->cdb_len,
+			ptr, csio->cdb_len,
 			(char *)csio->data_ptr, csio->dxfer_len,
 			&vpo->vpo_stat, &vpo->vpo_count, &vpo->vpo_error);
 	} else {
 		errno = vpoio_do_scsi(&vpo->vpo_io, VP0_INITIATOR,
 			csio->ccb_h.target_id,
-			(char *)&csio->cdb_io.cdb_bytes, csio->cdb_len,
+			ptr, csio->cdb_len,
 			(char *)csio->data_ptr, csio->dxfer_len,
 			&vpo->vpo_stat, &vpo->vpo_count, &vpo->vpo_error);
 	}
 
 #ifdef VP0_DEBUG
 	printf("vpo_do_scsi = %d, status = 0x%x, count = %d, vpo_error = %d\n",
 		 errno, vpo->vpo_stat, vpo->vpo_count, vpo->vpo_error);
 
 	/* dump of command */
 	for (i=0; i<csio->cdb_len; i++)
-		printf("%x ", ((char *)&csio->cdb_io.cdb_bytes)[i]);
+		printf("%x ", ((char *)ptr)[i]);
 
 	printf("\n");
 #endif
 
 	if (errno) {
 		/* connection to ppbus interrupted */
 		csio->ccb_h.status = CAM_CMD_TIMEOUT;
 		return;
 	}
 
 	/* if a timeout occurred, no sense data is available */
 	if (vpo->vpo_error) {
 		if (vpo->vpo_error != VP0_ESELECT_TIMEOUT)
 			device_printf(vpo->vpo_dev, "VP0 error/timeout (%d)\n",
 				vpo->vpo_error);
 
 		csio->ccb_h.status = CAM_CMD_TIMEOUT;
 		return;
 	}
 
 	/* check scsi status */
 	if (vpo->vpo_stat != SCSI_STATUS_OK) {
 	   csio->scsi_status = vpo->vpo_stat;
 
 	   /* check if we have to sense the drive */
 	   if ((vpo->vpo_stat & SCSI_STATUS_CHECK_COND) != 0) {
 
 		vpo->vpo_sense.cmd.opcode = REQUEST_SENSE;
 		vpo->vpo_sense.cmd.length = csio->sense_len;
 		vpo->vpo_sense.cmd.control = 0;
 
 		if (vpo->vpo_isplus) {
 			errno = imm_do_scsi(&vpo->vpo_io, VP0_INITIATOR,
 				csio->ccb_h.target_id,
 				(char *)&vpo->vpo_sense.cmd,
 				sizeof(vpo->vpo_sense.cmd),
 				(char *)&csio->sense_data, csio->sense_len,
 				&vpo->vpo_sense.stat, &vpo->vpo_sense.count,
 				&vpo->vpo_error);
 		} else {
 			errno = vpoio_do_scsi(&vpo->vpo_io, VP0_INITIATOR,
 				csio->ccb_h.target_id,
 				(char *)&vpo->vpo_sense.cmd,
 				sizeof(vpo->vpo_sense.cmd),
 				(char *)&csio->sense_data, csio->sense_len,
 				&vpo->vpo_sense.stat, &vpo->vpo_sense.count,
 				&vpo->vpo_error);
 		}
 
 
 #ifdef VP0_DEBUG
 		printf("(sense) vpo_do_scsi = %d, status = 0x%x, count = %d, vpo_error = %d\n",
 			errno, vpo->vpo_sense.stat, vpo->vpo_sense.count, vpo->vpo_error);
 #endif
 
 		/* check sense return status */
 		if (errno == 0 && vpo->vpo_sense.stat == SCSI_STATUS_OK) {
 		   /* sense ok */
 		   csio->ccb_h.status = CAM_AUTOSNS_VALID | CAM_SCSI_STATUS_ERROR;
 		   csio->sense_resid = csio->sense_len - vpo->vpo_sense.count;
 
 #ifdef VP0_DEBUG
 		   /* dump of sense info */
 		   printf("(sense) ");
 		   for (i=0; i<vpo->vpo_sense.count; i++)
 			printf("%x ", ((char *)&csio->sense_data)[i]);
 		   printf("\n");
 #endif
 
 		} else {
 		   /* sense failed */
 		   csio->ccb_h.status = CAM_AUTOSENSE_FAIL;
 		}
 	   } else {
 		/* no sense */
 		csio->ccb_h.status = CAM_SCSI_STATUS_ERROR;
 	   }
 
 	   return;
 	}
 
 	csio->resid = csio->dxfer_len - vpo->vpo_count;
 	csio->ccb_h.status = CAM_REQ_CMP;
 }
 
 static void
 vpo_action(struct cam_sim *sim, union ccb *ccb)
 {
 	struct vpo_data *vpo = (struct vpo_data *)sim->softc;
 
 	ppb_assert_locked(device_get_parent(vpo->vpo_dev));
 	switch (ccb->ccb_h.func_code) {
 	case XPT_SCSI_IO:
 	{
 		struct ccb_scsiio *csio;
 
 		csio = &ccb->csio;
 
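+		/*
+		 * A CDB passed by physical address (CAM_CDB_PHYS) cannot
+		 * be dereferenced here, so reject the request rather than
+		 * hand a physical address to scsiio_cdb_ptr() in vpo_intr().
+		 */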
+		if (ccb->ccb_h.flags & CAM_CDB_PHYS) {
+			ccb->ccb_h.status = CAM_REQ_INVALID;
+			xpt_done(ccb);
+			break;
+		}
 #ifdef VP0_DEBUG
 		device_printf(vpo->vpo_dev, "XPT_SCSI_IO (0x%x) request\n",
-			csio->cdb_io.cdb_bytes[0]);
+		    scsiio_cdb_ptr(csio)[0]);
 #endif
-
 		vpo_intr(vpo, csio);
 
 		xpt_done(ccb);
 
 		break;
 	}
 	case XPT_CALC_GEOMETRY:
 	{
 		struct	  ccb_calc_geometry *ccg;
 
 		ccg = &ccb->ccg;
 
 #ifdef VP0_DEBUG
 		device_printf(vpo->vpo_dev, "XPT_CALC_GEOMETRY (bs=%d,vs=%jd,c=%d,h=%d,spt=%d) request\n",
 			ccg->block_size,
 			(intmax_t)ccg->volume_size,
 			ccg->cylinders,
 			ccg->heads,
 			ccg->secs_per_track);
 #endif
 
 		ccg->heads = 64;
 		ccg->secs_per_track = 32;
 		ccg->cylinders = ccg->volume_size /
 				 (ccg->heads * ccg->secs_per_track);
 
 		ccb->ccb_h.status = CAM_REQ_CMP;
 		xpt_done(ccb);
 		break;
 	}
 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
 	{
 
 #ifdef VP0_DEBUG
 		device_printf(vpo->vpo_dev, "XPT_RESET_BUS request\n");
 #endif
 
 		if (vpo->vpo_isplus) {
 			if (imm_reset_bus(&vpo->vpo_io)) {
 				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 				xpt_done(ccb);
 				return;
 			}
 		} else {
 			if (vpoio_reset_bus(&vpo->vpo_io)) {
 				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 				xpt_done(ccb);
 				return;
 			}
 		}
 
 		ccb->ccb_h.status = CAM_REQ_CMP;
 		xpt_done(ccb);
 		break;
 	}
 	case XPT_PATH_INQ:		/* Path routing inquiry */
 	{
 		struct ccb_pathinq *cpi = &ccb->cpi;
 
 #ifdef VP0_DEBUG
 		device_printf(vpo->vpo_dev, "XPT_PATH_INQ request\n");
 #endif
 		cpi->version_num = 1; /* XXX??? */
 		cpi->hba_inquiry = 0;
 		cpi->target_sprt = 0;
 		cpi->hba_misc = 0;
 		cpi->hba_eng_cnt = 0;
 		cpi->max_target = 7;
 		cpi->max_lun = 0;
 		cpi->initiator_id = VP0_INITIATOR;
 		cpi->bus_id = sim->bus_id;
 		cpi->base_transfer_speed = 93;
 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 		strncpy(cpi->hba_vid, "Iomega", HBA_IDLEN);
 		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
 		cpi->unit_number = sim->unit_number;
 		cpi->transport = XPORT_PPB;
 		cpi->transport_version = 0;
 
 		cpi->ccb_h.status = CAM_REQ_CMP;
 		xpt_done(ccb);
 		break;
 	}
 	default:
 		ccb->ccb_h.status = CAM_REQ_INVALID;
 		xpt_done(ccb);
 		break;
 	}
 
 	return;
 }
 
 static void
 vpo_poll(struct cam_sim *sim)
 {
 
 	/* The ZIP is actually always polled through vpo_action(). */
 }
 
 static devclass_t vpo_devclass;
 
 static device_method_t vpo_methods[] = {
 	/* device interface */
 	DEVMETHOD(device_identify,	vpo_identify),
 	DEVMETHOD(device_probe,		vpo_probe),
 	DEVMETHOD(device_attach,	vpo_attach),
 
 	{ 0, 0 }
 };
 
 static driver_t vpo_driver = {
 	"vpo",
 	vpo_methods,
 	sizeof(struct vpo_data),
 };
 DRIVER_MODULE(vpo, ppbus, vpo_driver, vpo_devclass, 0, 0);
 MODULE_DEPEND(vpo, ppbus, 1, 1, 1);
 MODULE_DEPEND(vpo, cam, 1, 1, 1);
Index: stable/10
===================================================================
--- stable/10	(revision 312849)
+++ stable/10	(revision 312850)

Property changes on: stable/10
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r296891