Index: head/sys/cam/scsi/scsi_da.c
===================================================================
--- head/sys/cam/scsi/scsi_da.c	(revision 319116)
+++ head/sys/cam/scsi/scsi_da.c	(revision 319117)
@@ -1,6009 +1,6016 @@
/*-
 * Implementation of SCSI Direct Access Peripheral driver for CAM.
 *
 * Copyright (c) 1997 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */

#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#endif /* _KERNEL */

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_iosched.h>

#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_da.h>

#ifdef _KERNEL
/*
 * Note that there are probe ordering dependencies here.  The order isn't
 * controlled by this enumeration, but by explicit state transitions in
 * dastart() and dadone().  Here are some of the dependencies:
 *
 * 1. RC should come first, before RC16, unless there is evidence that RC16
 *    is supported.
 * 2. BDC needs to come before any of the ATA probes, or the ZONE probe.
 * 3. The ATA probes should go in this order:
 *    ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
 */
typedef enum {
	DA_STATE_PROBE_RC,
	DA_STATE_PROBE_RC16,
	DA_STATE_PROBE_LBP,
	DA_STATE_PROBE_BLK_LIMITS,
	DA_STATE_PROBE_BDC,
	DA_STATE_PROBE_ATA,
	DA_STATE_PROBE_ATA_LOGDIR,
	DA_STATE_PROBE_ATA_IDDIR,
	DA_STATE_PROBE_ATA_SUP,
	DA_STATE_PROBE_ATA_ZONE,
	DA_STATE_PROBE_ZONE,
	DA_STATE_NORMAL
} da_state;
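/*
 * Editorial sketch, not part of this change: rule 1 above, restated in
 * isolation.  daregister() (below) starts the state machine at
 * DA_STATE_PROBE_RC16 instead of DA_STATE_PROBE_RC only when the INQUIRY
 * data claims SPC-3 or newer and the DA_Q_NO_RC16 quirk is absent.  The
 * helper name da_initial_probe_state() is hypothetical; da_quirks,
 * DA_Q_NO_RC16 and SCSI_REV_SPC3 are defined later in this file or in the
 * SCSI headers.
 */
#if 0
static da_state
da_initial_probe_state(da_quirks quirks, int ansi_rev)
{
	/* Evidence that READ CAPACITY(16) is supported: SPC-3+, no quirk. */
	if (ansi_rev >= SCSI_REV_SPC3 && (quirks & DA_Q_NO_RC16) == 0)
		return (DA_STATE_PROBE_RC16);
	return (DA_STATE_PROBE_RC);	/* Otherwise probe RC first. */
}
#endif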
typedef enum {
	DA_FLAG_PACK_INVALID	= 0x000001,
	DA_FLAG_NEW_PACK	= 0x000002,
	DA_FLAG_PACK_LOCKED	= 0x000004,
	DA_FLAG_PACK_REMOVABLE	= 0x000008,
	DA_FLAG_NEED_OTAG	= 0x000020,
	DA_FLAG_WAS_OTAG	= 0x000040,
	DA_FLAG_RETRY_UA	= 0x000080,
	DA_FLAG_OPEN		= 0x000100,
	DA_FLAG_SCTX_INIT	= 0x000200,
	DA_FLAG_CAN_RC16	= 0x000400,
	DA_FLAG_PROBED		= 0x000800,
	DA_FLAG_DIRTY		= 0x001000,
	DA_FLAG_ANNOUNCED	= 0x002000,
	DA_FLAG_CAN_ATA_DMA	= 0x004000,
	DA_FLAG_CAN_ATA_LOG	= 0x008000,
	DA_FLAG_CAN_ATA_IDLOG	= 0x010000,
	DA_FLAG_CAN_ATA_SUPCAP	= 0x020000,
	DA_FLAG_CAN_ATA_ZONE	= 0x040000
} da_flags;

typedef enum {
	DA_Q_NONE		= 0x00,
	DA_Q_NO_SYNC_CACHE	= 0x01,
	DA_Q_NO_6_BYTE		= 0x02,
	DA_Q_NO_PREVENT		= 0x04,
	DA_Q_4K			= 0x08,
	DA_Q_NO_RC16		= 0x10,
	DA_Q_NO_UNMAP		= 0x20,
	DA_Q_RETRY_BUSY		= 0x40,
	DA_Q_SMR_DM		= 0x80,
	DA_Q_STRICT_UNMAP	= 0x100
} da_quirks;

#define DA_Q_BIT_STRING		\
	"\020"			\
	"\001NO_SYNC_CACHE"	\
	"\002NO_6_BYTE"		\
	"\003NO_PREVENT"	\
	"\0044K"		\
	"\005NO_RC16"		\
	"\006NO_UNMAP"		\
	"\007RETRY_BUSY"	\
	"\010SMR_DM"		\
	"\011STRICT_UNMAP"

typedef enum {
	DA_CCB_PROBE_RC		= 0x01,
	DA_CCB_PROBE_RC16	= 0x02,
	DA_CCB_PROBE_LBP	= 0x03,
	DA_CCB_PROBE_BLK_LIMITS	= 0x04,
	DA_CCB_PROBE_BDC	= 0x05,
	DA_CCB_PROBE_ATA	= 0x06,
	DA_CCB_BUFFER_IO	= 0x07,
	DA_CCB_DUMP		= 0x0A,
	DA_CCB_DELETE		= 0x0B,
	DA_CCB_TUR		= 0x0C,
	DA_CCB_PROBE_ZONE	= 0x0D,
	DA_CCB_PROBE_ATA_LOGDIR	= 0x0E,
	DA_CCB_PROBE_ATA_IDDIR	= 0x0F,
	DA_CCB_PROBE_ATA_SUP	= 0x10,
	DA_CCB_PROBE_ATA_ZONE	= 0x11,
	DA_CCB_TYPE_MASK	= 0x1F,
	DA_CCB_RETRY_UA		= 0x20
} da_ccb_state;

/*
 * Order here is important for method choice.
 *
 * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to
 * an LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted in 20% quicker
 * deletes using ATA_TRIM than the corresponding UNMAP results, for a real
 * world mysql import taking 5 mins.
 */
typedef enum {
	DA_DELETE_NONE,
	DA_DELETE_DISABLE,
	DA_DELETE_ATA_TRIM,
	DA_DELETE_UNMAP,
	DA_DELETE_WS16,
	DA_DELETE_WS10,
	DA_DELETE_ZERO,
	DA_DELETE_MIN = DA_DELETE_ATA_TRIM,
	DA_DELETE_MAX = DA_DELETE_ZERO
} da_delete_methods;
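/*
 * Editorial sketch, not part of this change: the enum order above doubles
 * as the preference order.  dadeletemethodchoose() (below) scans from
 * DA_DELETE_MIN to DA_DELETE_MAX, skips DA_DELETE_ZERO in the automatic
 * scan, and picks the first available method, so ATA_TRIM beats UNMAP,
 * which beats WS16, which beats WS10.  The helper name
 * da_pick_delete_method() is hypothetical.
 */
#if 0
static da_delete_methods
da_pick_delete_method(int delete_available)
{
	int i;

	for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
		if (i == DA_DELETE_ZERO)
			continue;	/* Only chosen by explicit request. */
		if (delete_available & (1 << i))
			return ((da_delete_methods)i);
	}
	return (DA_DELETE_NONE);	/* Nothing advertised; no delete. */
}
#endif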
/*
 * For SCSI, host managed drives show up as a separate device type.  For
 * ATA, host managed drives also have a different device signature.
 * XXX KDM figure out the ATA host managed signature.
 */
typedef enum {
	DA_ZONE_NONE		= 0x00,
	DA_ZONE_DRIVE_MANAGED	= 0x01,
	DA_ZONE_HOST_AWARE	= 0x02,
	DA_ZONE_HOST_MANAGED	= 0x03
} da_zone_mode;

/*
 * We distinguish between these interface cases in addition to the drive type:
 * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
 * o ATA drive behind a SCSI translation layer that does not know about
 *   ZBC/ZAC, and so needs to be managed via ATA passthrough.  In this
 *   case, we would need to share the ATA code with the ada(4) driver.
 * o SCSI drive.
 */
typedef enum {
	DA_ZONE_IF_SCSI,
	DA_ZONE_IF_ATA_PASS,
	DA_ZONE_IF_ATA_SAT,
} da_zone_interface;

typedef enum {
	DA_ZONE_FLAG_RZ_SUP		= 0x0001,
	DA_ZONE_FLAG_OPEN_SUP		= 0x0002,
	DA_ZONE_FLAG_CLOSE_SUP		= 0x0004,
	DA_ZONE_FLAG_FINISH_SUP		= 0x0008,
	DA_ZONE_FLAG_RWP_SUP		= 0x0010,
	DA_ZONE_FLAG_SUP_MASK		= (DA_ZONE_FLAG_RZ_SUP |
					   DA_ZONE_FLAG_OPEN_SUP |
					   DA_ZONE_FLAG_CLOSE_SUP |
					   DA_ZONE_FLAG_FINISH_SUP |
					   DA_ZONE_FLAG_RWP_SUP),
	DA_ZONE_FLAG_URSWRZ		= 0x0020,
	DA_ZONE_FLAG_OPT_SEQ_SET	= 0x0040,
	DA_ZONE_FLAG_OPT_NONSEQ_SET	= 0x0080,
	DA_ZONE_FLAG_MAX_SEQ_SET	= 0x0100,
	DA_ZONE_FLAG_SET_MASK		= (DA_ZONE_FLAG_OPT_SEQ_SET |
					   DA_ZONE_FLAG_OPT_NONSEQ_SET |
					   DA_ZONE_FLAG_MAX_SEQ_SET)
} da_zone_flags;

static struct da_zone_desc {
	da_zone_flags value;
	const char *desc;
} da_zone_desc_table[] = {
	{DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
	{DA_ZONE_FLAG_OPEN_SUP, "Open" },
	{DA_ZONE_FLAG_CLOSE_SUP, "Close" },
	{DA_ZONE_FLAG_FINISH_SUP, "Finish" },
	{DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
};

typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
			      struct bio *bp);
static da_delete_func_t da_delete_trim;
static da_delete_func_t da_delete_unmap;
static da_delete_func_t da_delete_ws;

static const void * da_delete_functions[] = {
	NULL,
	NULL,
	da_delete_trim,
	da_delete_unmap,
	da_delete_ws,
	da_delete_ws,
	da_delete_ws
};

static const char *da_delete_method_names[] =
    { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
static const char *da_delete_method_desc[] =
    { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP",
      "WRITE SAME(10) with UNMAP", "ZERO" };

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

struct disk_params {
	u_int8_t  heads;
	u_int32_t cylinders;
	u_int8_t  secs_per_track;
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int64_t sectors;	/* total number sectors */
	u_int     stripesize;
	u_int     stripeoffset;
};

#define UNMAP_RANGE_MAX		0xffffffff
#define UNMAP_HEAD_SIZE		8
#define UNMAP_RANGE_SIZE	16
#define UNMAP_MAX_RANGES	2048 /* Protocol Max is 4095 */
#define UNMAP_BUF_SIZE		((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \
				 UNMAP_HEAD_SIZE)

#define WS10_MAX_BLKS		0xffff
#define WS16_MAX_BLKS		0xffffffff
#define ATA_TRIM_MAX_RANGES	((UNMAP_BUF_SIZE / \
	(ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE)

#define DA_WORK_TUR		(1 << 16)

struct da_softc {
	struct	 cam_iosched_softc *cam_iosched;
	struct	 bio_queue_head delete_run_queue;
	LIST_HEAD(, ccb_hdr) pending_ccbs;
	int	 refcount;		/* Active xpt_action() calls */
	da_state state;
	da_flags flags;
	da_quirks quirks;
	int	 minimum_cmd_size;
	int	 error_inject;
	int	 trim_max_ranges;
	int	 delete_available;	/* Delete methods possibly available */
	da_zone_mode			zone_mode;
	da_zone_interface		zone_interface;
	da_zone_flags			zone_flags;
	struct ata_gp_log_dir		ata_logdir;
	int				valid_logdir_len;
	struct ata_identify_log_pages	ata_iddir;
	int				valid_iddir_len;
	uint64_t			optimal_seq_zones;
	uint64_t			optimal_nonseq_zones;
	uint64_t			max_seq_zones;
	u_int	 maxio;
	uint32_t		unmap_max_ranges;
	uint32_t		unmap_max_lba; /* Max LBAs in UNMAP req */
	uint32_t		unmap_gran;
	uint32_t		unmap_gran_align;
	uint64_t		ws_max_blks;
	da_delete_methods	delete_method_pref;
	da_delete_methods	delete_method;
	da_delete_func_t	*delete_func;
	int	 unmappedio;
	int	 rotating;
	struct	 disk_params params;
	struct	 disk *disk;
	union	 ccb saved_ccb;
	struct task		sysctl_task;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	struct callout		sendordered_c;
	uint64_t wwpn;
	uint8_t	 unmap_buf[UNMAP_BUF_SIZE];
	struct
scsi_read_capacity_data_long rcaplong; struct callout mediapoll_c; #ifdef CAM_IO_STATS struct sysctl_ctx_list sysctl_stats_ctx; struct sysctl_oid *sysctl_stats_tree; u_int errors; u_int timeouts; u_int invalidations; #endif #define DA_ANNOUNCETMP_SZ 80 char announce_temp[DA_ANNOUNCETMP_SZ]; #define DA_ANNOUNCE_SZ 400 char announcebuf[DA_ANNOUNCE_SZ]; }; #define dadeleteflag(softc, delete_method, enable) \ if (enable) { \ softc->delete_available |= (1 << delete_method); \ } else { \ softc->delete_available &= ~(1 << delete_method); \ } struct da_quirk_entry { struct scsi_inquiry_pattern inq_pat; da_quirks quirks; }; static const char quantum[] = "QUANTUM"; static const char microp[] = "MICROP"; static struct da_quirk_entry da_quirk_table[] = { /* SPI, FC devices */ { /* * Fujitsu M2513A MO drives. * Tested devices: M2513A2 firmware versions 1200 & 1300. * (dip switch selects whether T_DIRECT or T_OPTICAL device) * Reported by: W.Scholten */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* See above. */ {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This particular Fujitsu drive doesn't like the * synchronize cache command. * Reported by: Tom Jackson */ {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This drive doesn't like the synchronize cache command * either. Reported by: Matthew Jacob * in NetBSD PR kern/6027, August 24, 1998. */ {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This drive doesn't like the synchronize cache command * either. Reported by: Hellmuth Michaelis (hm@kts.org) * (PR 8882). */ {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: Blaz Zupan */ {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: Blaz Zupan */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: walter@pelissero.de */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't work correctly with 6 byte reads/writes. * Returns illegal request, and points to byte 9 of the * 6-byte CDB. * Reported by: Adam McDougall */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE }, { /* See above. */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE }, { /* * Doesn't like the synchronize cache command. * Reported by: walter@pelissero.de */ {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * The CISS RAID controllers do not support SYNC_CACHE */ {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * The STEC SSDs sometimes hang on UNMAP. */ {T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"}, /*quirks*/ DA_Q_NO_UNMAP }, { /* * VMware returns BUSY status when storage has transient * connectivity problems, so better wait. * Also VMware returns odd errors on misaligned UNMAPs. 
*/ {T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"}, /*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP }, /* USB mass storage devices supported by umass(4) */ { /* * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player * PR: kern/51675 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Power Quotient Int. (PQI) USB flash key * PR: kern/53067 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Creative Nomad MUVO mp3 player (USB) * PR: kern/53094 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * Jungsoft NEXDISK USB flash key * PR: kern/54737 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * FreeDik USB Mini Data Drive * PR: kern/54786 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Sigmatel USB Flash MP3 Player * PR: kern/57046 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * Neuros USB Digital Audio Computer * PR: kern/63645 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SEAGRAND NP-900 MP3 Player * PR: kern/64563 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * iRiver iFP MP3 player (with UMS Firmware) * PR: kern/54881, i386/63941, kern/66124 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01 * PR: kern/70158 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * ZICPlay USB MP3 Player with FM * PR: kern/75057 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * TEAC USB floppy mechanisms */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Kingston DataTraveler II+ USB Pen-Drive. * Reported by: Pawel Jakub Dawidek */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * USB DISK Pro PMAP * Reported by: jhs * PR: usb/96381 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Motorola E398 Mobile Phone (TransFlash memory card). * Reported by: Wojciech A. Koszek * PR: usb/89889 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Qware BeatZkey! Pro * PR: usb/79164 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Time DPA20B 1GB MP3 Player * PR: usb/81846 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Samsung USB key 128Mb * PR: usb/90081 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Kingston DataTraveler 2.0 USB Flash memory. 
	 * PR: usb/89196
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Creative MUVO Slim mp3 player (USB)
		 * PR: usb/86131
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
	},
	{
		/*
		 * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3)
		 * PR: usb/80487
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * SanDisk Micro Cruzer 128MB
		 * PR: usb/75970
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk", "Micro Cruzer",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * TOSHIBA TransMemory USB sticks
		 * PR: kern/94660
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * PNY USB 3.0 Flash Drives
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16
	},
	{
		/*
		 * PNY USB Flash keys
		 * PR: usb/75578, usb/72344, usb/65436
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "*", "USB DISK*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
+		 * Genesys GL3224
+		 */
+		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
+		"120?"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16
+	},
+	{
+		/*
		 * Genesys 6-in-1 Card Reader
		 * PR: usb/94647
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Rekam Digital CAMERA
		 * PR: usb/98713
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * iRiver H10 MP3 player
		 * PR: usb/102547
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * iRiver U10 MP3 player
		 * PR: usb/92306
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * X-Micro Flash Disk
		 * PR: usb/96901
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * EasyMP3 EM732X USB 2.0 Flash MP3 Player
		 * PR: usb/96546
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*",
		 "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Denver MP3 player
		 * PR: usb/107101
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Philips USB Key Audio KEY013
		 * PR: usb/68412
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
	},
	{
		/*
		 * JNC MP3 Player
		 * PR: usb/94439
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*", "MP3 Player*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * SAMSUNG MP0402H
		 * PR: usb/108427
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * I/O Magic USB flash - Giga Bank
		 * PR: usb/108810
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * JoyFly 128mb USB Flash Drive
		 * PR: 96133
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * ChipsBnk usb stick
		 * PR: 103702
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A
		 * PR: 129858
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Samsung YP-U3 mp3-player
		 * PR: 125398
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*",
		 "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Sony Cyber-Shot DSC
cameras * PR: usb/137035 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT }, { {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3", "1.00"}, /*quirks*/ DA_Q_NO_PREVENT }, { /* At least several Transcent USB sticks lie on RC16. */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*", "*"}, /*quirks*/ DA_Q_NO_RC16 }, { /* * I-O Data USB Flash Disk * PR: usb/211716 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_RC16 }, /* ATA/SATA devices over SAS/USB/... */ { /* Hitachi Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" }, /*quirks*/DA_Q_4K }, { /* Micron Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, 
"ATA", "ST9750422AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* * Olympus FE-210 camera */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * LG UP3S MP3 player */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Laser MP3-2GA13 MP3 player */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * LaCie external 250GB Hard drive des by Porsche * Submitted by: Ben Stuyts * PR: 121474 */ {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, /* SATA SSDs */ { /* * Corsair Force 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, 
"ATA", "Corsair CSSD-F*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Force 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Neutron GTX SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Force GT & GS SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" }, /*quirks*/DA_Q_4K }, { /* * Crucial M4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" }, /*quirks*/DA_Q_4K }, { /* * Crucial RealSSD C300 SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 320 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 330 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 510 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 520 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel S3610 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel X25-M Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" }, /*quirks*/DA_Q_4K }, { /* * Kingston E100 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Kingston HyperX 3k SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Marvell SSDs (entry taken from OpenSolaris) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Agility 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Agility 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Deneva R Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 2 SSDs (inc pro series) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" }, 
/*quirks*/DA_Q_4K }, { /* * Samsung 830 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 840 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 850 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 843T Series SSDs (MZ7WD*) * Samsung PM851 Series SSDs (MZ7TE*) * Samsung PM853T Series SSDs (MZ7GE*) * Samsung SM863 Series SSDs (MZ7KM*) * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" }, /*quirks*/DA_Q_4K }, { /* * SuperTalent TeraDrive CT SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" }, /*quirks*/DA_Q_4K }, { /* * XceedIOPS SATA SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" }, /*quirks*/DA_Q_4K }, { /* * Hama Innostor USB-Stick */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" }, /*quirks*/DA_Q_NO_RC16 }, { /* * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR) * Drive Managed SATA hard drive. This drive doesn't report * in firmware that it is a drive managed SMR drive. */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS0002*", "*" }, /*quirks*/DA_Q_SMR_DM }, { /* * MX-ES USB Drive by Mach Xtreme */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"}, /*quirks*/DA_Q_NO_RC16 }, }; static disk_strategy_t dastrategy; static dumper_t dadump; static periph_init_t dainit; static void daasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void dasysctlinit(void *context, int pending); static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS); static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS); static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS); static int dazonemodesysctl(SYSCTL_HANDLER_ARGS); static int dazonesupsysctl(SYSCTL_HANDLER_ARGS); static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS); static void dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method); static off_t dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method); static void dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method); static void daprobedone(struct cam_periph *periph, union ccb *ccb); static periph_ctor_t daregister; static periph_dtor_t dacleanup; static periph_start_t dastart; static periph_oninv_t daoninvalidate; static void dazonedone(struct cam_periph *periph, union ccb *ccb); static void dadone(struct cam_periph *periph, union ccb *done_ccb); static int daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static void daprevent(struct cam_periph *periph, int action); static void dareprobe(struct cam_periph *periph); static void dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, struct scsi_read_capacity_data_long *rcaplong, size_t rcap_size); static timeout_t dasendorderedtag; static void dashutdown(void *arg, int howto); static timeout_t damediapoll; #ifndef DA_DEFAULT_POLL_PERIOD #define DA_DEFAULT_POLL_PERIOD 3 #endif #ifndef DA_DEFAULT_TIMEOUT #define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */ #endif #ifndef DA_DEFAULT_SOFTTIMEOUT #define DA_DEFAULT_SOFTTIMEOUT 0 #endif #ifndef DA_DEFAULT_RETRY #define DA_DEFAULT_RETRY 4 #endif #ifndef DA_DEFAULT_SEND_ORDERED #define 
DA_DEFAULT_SEND_ORDERED 1 #endif static int da_poll_period = DA_DEFAULT_POLL_PERIOD; static int da_retry_count = DA_DEFAULT_RETRY; static int da_default_timeout = DA_DEFAULT_TIMEOUT; static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT; static int da_send_ordered = DA_DEFAULT_SEND_ORDERED; static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0, "CAM Direct Access Disk driver"); SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN, &da_poll_period, 0, "Media polling period in seconds"); SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN, &da_retry_count, 0, "Normal I/O retry count"); SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN, &da_default_timeout, 0, "Normal I/O timeout (in seconds)"); SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN, &da_send_ordered, 0, "Send Ordered Tags"); SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout, CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I", "Soft I/O timeout (ms)"); TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout); /* * DA_ORDEREDTAG_INTERVAL determines how often, relative * to the default timeout, we check to see whether an ordered * tagged transaction is appropriate to prevent simple tag * starvation. Since we'd like to ensure that there is at least * 1/2 of the timeout length left for a starved transaction to * complete after we've sent an ordered tag, we must poll at least * four times in every timeout period. This takes care of the worst * case where a starved transaction starts during an interval that * meets the requirement "don't send an ordered tag" test so it takes * us two intervals to determine that a tag must be sent. */ #ifndef DA_ORDEREDTAG_INTERVAL #define DA_ORDEREDTAG_INTERVAL 4 #endif static struct periph_driver dadriver = { dainit, "da", TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(da, dadriver); static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers"); static int daopen(struct disk *dp) { struct cam_periph *periph; struct da_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) { return (ENXIO); } cam_periph_lock(periph); if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("daopen\n")); softc = (struct da_softc *)periph->softc; dareprobe(periph); /* Wait for the disk size update. */ error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO, "dareprobe", 0); if (error != 0) xpt_print(periph->path, "unable to retrieve capacity data\n"); if (periph->flags & CAM_PERIPH_INVALID) error = ENXIO; if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 && (softc->quirks & DA_Q_NO_PREVENT) == 0) daprevent(periph, PR_PREVENT); if (error == 0) { softc->flags &= ~DA_FLAG_PACK_INVALID; softc->flags |= DA_FLAG_OPEN; } cam_periph_unhold(periph); cam_periph_unlock(periph); if (error != 0) cam_periph_release(periph); return (error); } static int daclose(struct disk *dp) { struct cam_periph *periph; struct da_softc *softc; union ccb *ccb; int error; periph = (struct cam_periph *)dp->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("daclose\n")); if (cam_periph_hold(periph, PRIBIO) == 0) { /* Flush disk cache. 
*/ if ((softc->flags & DA_FLAG_DIRTY) != 0 && (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_synchronize_cache(&ccb->csio, /*retries*/1, /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG, /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE, 5 * 60 * 1000); error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR, softc->disk->d_devstat); softc->flags &= ~DA_FLAG_DIRTY; xpt_release_ccb(ccb); } /* Allow medium removal. */ if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 && (softc->quirks & DA_Q_NO_PREVENT) == 0) daprevent(periph, PR_ALLOW); cam_periph_unhold(periph); } /* * If we've got removeable media, mark the blocksize as * unavailable, since it could change when new media is * inserted. */ if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE; softc->flags &= ~DA_FLAG_OPEN; while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1); cam_periph_unlock(periph); cam_periph_release(periph); return (0); } static void daschedule(struct cam_periph *periph) { struct da_softc *softc = (struct da_softc *)periph->softc; if (softc->state != DA_STATE_NORMAL) return; cam_iosched_schedule(softc->cam_iosched, periph); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void dastrategy(struct bio *bp) { struct cam_periph *periph; struct da_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); /* * If the device has been made invalid, error out */ if ((softc->flags & DA_FLAG_PACK_INVALID)) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp)); /* * Zone commands must be ordered, because they can depend on the * effects of previously issued commands, and they may affect * commands after them. */ if (bp->bio_cmd == BIO_ZONE) bp->bio_flags |= BIO_ORDERED; /* * Place it in the queue of disk activities for this disk */ cam_iosched_queue_work(softc->cam_iosched, bp); /* * Schedule ourselves for performing the work. 
*/ daschedule(periph); cam_periph_unlock(periph); return; } static int dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct cam_periph *periph; struct da_softc *softc; u_int secsize; struct ccb_scsiio csio; struct disk *dp; int error = 0; dp = arg; periph = dp->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); secsize = softc->params.secsize; if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) { cam_periph_unlock(periph); return (ENXIO); } if (length > 0) { xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); csio.ccb_h.ccb_state = DA_CCB_DUMP; scsi_read_write(&csio, /*retries*/0, dadone, MSG_ORDERED_Q_TAG, /*read*/SCSI_RW_WRITE, /*byte2*/0, /*minimum_cmd_size*/ softc->minimum_cmd_size, offset / secsize, length / secsize, /*data_ptr*/(u_int8_t *) virtual, /*dxfer_len*/length, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); xpt_polled_action((union ccb *)&csio); error = cam_periph_error((union ccb *)&csio, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); if (error != 0) printf("Aborting dump due to I/O error.\n"); cam_periph_unlock(periph); return (error); } /* * Sync the disk cache contents to the physical media. */ if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); csio.ccb_h.ccb_state = DA_CCB_DUMP; scsi_synchronize_cache(&csio, /*retries*/0, /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG, /*begin_lba*/0,/* Cover the whole disk */ /*lb_count*/0, SSD_FULL_SIZE, 5 * 1000); xpt_polled_action((union ccb *)&csio); error = cam_periph_error((union ccb *)&csio, 0, SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, NULL); if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); } cam_periph_unlock(periph); return (error); } static int dagetattr(struct bio *bp) { int ret; struct cam_periph *periph; periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, periph->path); cam_periph_unlock(periph); if (ret == 0) bp->bio_completed = bp->bio_length; return ret; } static void dainit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("da: Failed to attach master async callback " "due to status 0x%x!\n", status); } else if (da_send_ordered) { /* Register our shutdown event handler */ if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown, NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) printf("dainit: shutdown event registration failed!\n"); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void dadiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; cam_periph_release(periph); } static void daoninvalidate(struct cam_periph *periph) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, daasync, periph, periph->path); softc->flags |= DA_FLAG_PACK_INVALID; #ifdef CAM_IO_STATS softc->invalidations++; #endif /* * Return all queued I/O with ENXIO. 
* XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ cam_iosched_flush(softc->cam_iosched, NULL, ENXIO); /* * Tell GEOM that we've gone away, we'll get a callback when it is * done cleaning up its resources. */ disk_gone(softc->disk); } static void dacleanup(struct cam_periph *periph) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; cam_periph_unlock(periph); cam_iosched_fini(softc->cam_iosched); /* * If we can't free the sysctl tree, oh well... */ if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) { #ifdef CAM_IO_STATS if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0) xpt_print(periph->path, "can't remove sysctl stats context\n"); #endif if (sysctl_ctx_free(&softc->sysctl_ctx) != 0) xpt_print(periph->path, "can't remove sysctl context\n"); } callout_drain(&softc->mediapoll_c); disk_destroy(softc->disk); callout_drain(&softc->sendordered_c); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void daasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; struct da_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data) != T_DIRECT && SID_TYPE(&cgd->inq_data) != T_RBC && SID_TYPE(&cgd->inq_data) != T_OPTICAL && SID_TYPE(&cgd->inq_data) != T_ZBC_HM) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(daregister, daoninvalidate, dacleanup, dastart, "da", CAM_PERIPH_BIO, path, daasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("daasync: Unable to attach to new device " "due to status 0x%x\n", status); return; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct da_softc *softc; softc = periph->softc; disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); } break; } case AC_UNIT_ATTENTION: { union ccb *ccb; int error_code, sense_key, asc, ascq; softc = (struct da_softc *)periph->softc; ccb = (union ccb *)arg; /* * Handle all UNIT ATTENTIONs except our own, * as they will be handled by daerror(). */ if (xpt_path_periph(ccb->ccb_h.path) != periph && scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (asc == 0x2A && ascq == 0x09) { xpt_print(ccb->ccb_h.path, "Capacity data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); } else if (asc == 0x28 && ascq == 0x00) { softc->flags &= ~DA_FLAG_PROBED; disk_media_changed(softc->disk, M_NOWAIT); } else if (asc == 0x3F && ascq == 0x03) { xpt_print(ccb->ccb_h.path, "INQUIRY data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); } } cam_periph_async(periph, code, path, arg); break; } case AC_SCSI_AEN: softc = (struct da_softc *)periph->softc; if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { if (cam_periph_acquire(periph) == CAM_REQ_CMP) { cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); daschedule(periph); } } /* FALLTHROUGH */ case AC_SENT_BDR: case AC_BUS_RESET: { struct ccb_hdr *ccbh; softc = (struct da_softc *)periph->softc; /* * Don't fail on the expected unit attention * that will occur. 
*/ softc->flags |= DA_FLAG_RETRY_UA; LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) ccbh->ccb_state |= DA_CCB_RETRY_UA; break; } case AC_INQ_CHANGED: softc = (struct da_softc *)periph->softc; softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); break; default: break; } cam_periph_async(periph, code, path, arg); } static void dasysctlinit(void *context, int pending) { struct cam_periph *periph; struct da_softc *softc; char tmpstr[80], tmpstr2[80]; struct ccb_trans_settings cts; periph = (struct cam_periph *)context; /* * periph was held for us when this task was enqueued */ if (periph->flags & CAM_PERIPH_INVALID) { cam_periph_release(periph); return; } softc = (struct da_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= DA_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) { printf("dasysctlinit: unable to allocate sysctl tree\n"); cam_periph_release(periph); return; } /* * Now register the sysctl handler, so the user can change the value on * the fly. */ SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN, softc, 0, dadeletemethodsysctl, "A", "BIO_DELETE execution method"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW, softc, 0, dadeletemaxsysctl, "Q", "Maximum BIO_DELETE size"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW, &softc->minimum_cmd_size, 0, dacmdsizesysctl, "I", "Minimum CDB size"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD, softc, 0, dazonemodesysctl, "A", "Zone Mode"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD, softc, 0, dazonesupsysctl, "A", "Zone Support"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones, "Optimal Number of Open Sequential Write Preferred Zones"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "optimal_nonseq_zones", CTLFLAG_RD, &softc->optimal_nonseq_zones, "Optimal Number of Non-Sequentially Written Sequential Write " "Preferred Zones"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones, "Maximum Number of Open Sequential Write Required Zones"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "error_inject", CTLFLAG_RW, &softc->error_inject, 0, "error_inject leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "unmapped_io", CTLFLAG_RD, &softc->unmappedio, 0, "Unmapped I/O leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "rotating", CTLFLAG_RD, &softc->rotating, 0, "Rotating media"); /* * Add some addressing info. 
*/ memset(&cts, 0, sizeof (cts)); xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cam_periph_lock(periph); xpt_action((union ccb *)&cts); cam_periph_unlock(periph); if (cts.ccb_h.status != CAM_REQ_CMP) { cam_periph_release(periph); return; } if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWPN) { softc->wwpn = fc->wwpn; SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "wwpn", CTLFLAG_RD, &softc->wwpn, "World Wide Port Name"); } } #ifdef CAM_IO_STATS /* * Now add some useful stats. * XXX These should live in cam_periph and be common to all periphs */ softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats", CTLFLAG_RD, 0, "Statistics"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "errors", CTLFLAG_RD, &softc->errors, 0, "Transport errors reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "timeouts", CTLFLAG_RD, &softc->timeouts, 0, "Device timeouts reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "pack_invalidations", CTLFLAG_RD, &softc->invalidations, 0, "Device pack invalidations"); #endif cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx, softc->sysctl_tree); cam_periph_release(periph); } static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS) { int error; uint64_t value; struct da_softc *softc; softc = (struct da_softc *)arg1; value = softc->disk->d_delmaxsize; error = sysctl_handle_64(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* only accept values smaller than the calculated value */ if (value > dadeletemaxsize(softc, softc->delete_method)) { return (EINVAL); } softc->disk->d_delmaxsize = value; return (0); } static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS) { int error, value; value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* * Acceptable values here are 6, 10, 12 or 16. 
*/ if (value < 6) value = 6; else if ((value > 6) && (value <= 10)) value = 10; else if ((value > 10) && (value <= 12)) value = 12; else if (value > 12) value = 16; *(int *)arg1 = value; return (0); } static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS) { sbintime_t value; int error; value = da_default_softtimeout / SBT_1MS; error = sysctl_handle_int(oidp, (int *)&value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* XXX Should clip this to a reasonable level */ if (value > da_default_timeout * 1000) return (EINVAL); da_default_softtimeout = value * SBT_1MS; return (0); } static void dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method) { softc->delete_method = delete_method; softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method); softc->delete_func = da_delete_functions[delete_method]; if (softc->delete_method > DA_DELETE_DISABLE) softc->disk->d_flags |= DISKFLAG_CANDELETE; else softc->disk->d_flags &= ~DISKFLAG_CANDELETE; } static off_t dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method) { off_t sectors; switch(delete_method) { case DA_DELETE_UNMAP: sectors = (off_t)softc->unmap_max_lba; break; case DA_DELETE_ATA_TRIM: sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges; break; case DA_DELETE_WS16: sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS); break; case DA_DELETE_ZERO: case DA_DELETE_WS10: sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS); break; default: return 0; } return (off_t)softc->params.secsize * omin(sectors, softc->params.sectors); } static void daprobedone(struct cam_periph *periph, union ccb *ccb) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; dadeletemethodchoose(softc, DA_DELETE_NONE); if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) { char buf[80]; int i, sep; snprintf(buf, sizeof(buf), "Delete methods: <"); sep = 0; for (i = 0; i <= DA_DELETE_MAX; i++) { if ((softc->delete_available & (1 << i)) == 0 && i != softc->delete_method) continue; if (sep) strlcat(buf, ",", sizeof(buf)); strlcat(buf, da_delete_method_names[i], sizeof(buf)); if (i == softc->delete_method) strlcat(buf, "(*)", sizeof(buf)); sep = 1; } strlcat(buf, ">", sizeof(buf)); printf("%s%d: %s\n", periph->periph_name, periph->unit_number, buf); } /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the probe lock on the peripheral. * The peripheral will only go away once the last lock * is removed, and we need it around for the CCB release * operation. */ xpt_release_ccb(ccb); softc->state = DA_STATE_NORMAL; softc->flags |= DA_FLAG_PROBED; daschedule(periph); wakeup(&softc->disk->d_mediasize); if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) { softc->flags |= DA_FLAG_ANNOUNCED; cam_periph_unhold(periph); } else cam_periph_release_locked(periph); } static void dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method) { int i, methods; /* If available, prefer the method requested by user. */ i = softc->delete_method_pref; methods = softc->delete_available | (1 << DA_DELETE_DISABLE); if (methods & (1 << i)) { dadeletemethodset(softc, i); return; } /* Use the pre-defined order to choose the best performing delete. */ for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) { if (i == DA_DELETE_ZERO) continue; if (softc->delete_available & (1 << i)) { dadeletemethodset(softc, i); return; } } /* Fallback to default. 
*/ dadeletemethodset(softc, default_method); } static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS) { char buf[16]; const char *p; struct da_softc *softc; int i, error, methods, value; softc = (struct da_softc *)arg1; value = softc->delete_method; if (value < 0 || value > DA_DELETE_MAX) p = "UNKNOWN"; else p = da_delete_method_names[value]; strncpy(buf, p, sizeof(buf)); error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); methods = softc->delete_available | (1 << DA_DELETE_DISABLE); for (i = 0; i <= DA_DELETE_MAX; i++) { if (strcmp(buf, da_delete_method_names[i]) == 0) break; } if (i > DA_DELETE_MAX) return (EINVAL); softc->delete_method_pref = i; dadeletemethodchoose(softc, DA_DELETE_NONE); return (0); } static int dazonemodesysctl(SYSCTL_HANDLER_ARGS) { char tmpbuf[40]; struct da_softc *softc; int error; softc = (struct da_softc *)arg1; switch (softc->zone_mode) { case DA_ZONE_DRIVE_MANAGED: snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed"); break; case DA_ZONE_HOST_AWARE: snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware"); break; case DA_ZONE_HOST_MANAGED: snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed"); break; case DA_ZONE_NONE: default: snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned"); break; } error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req); return (error); } static int dazonesupsysctl(SYSCTL_HANDLER_ARGS) { char tmpbuf[180]; struct da_softc *softc; struct sbuf sb; int error, first; unsigned int i; softc = (struct da_softc *)arg1; error = 0; first = 1; sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0); for (i = 0; i < sizeof(da_zone_desc_table) / sizeof(da_zone_desc_table[0]); i++) { if (softc->zone_flags & da_zone_desc_table[i].value) { if (first == 0) sbuf_printf(&sb, ", "); else first = 0; sbuf_cat(&sb, da_zone_desc_table[i].desc); } } if (first == 1) sbuf_printf(&sb, "None"); sbuf_finish(&sb); error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); return (error); } static cam_status daregister(struct cam_periph *periph, void *arg) { struct da_softc *softc; struct ccb_pathinq cpi; struct ccb_getdev *cgd; char tmpstr[80]; caddr_t match; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("daregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc == NULL) { printf("daregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } if (cam_iosched_init(&softc->cam_iosched, periph) != 0) { printf("daregister: Unable to probe new device. " "Unable to allocate iosched memory\n"); free(softc, M_DEVBUF); return(CAM_REQ_CMP_ERR); } LIST_INIT(&softc->pending_ccbs); softc->state = DA_STATE_PROBE_RC; bioq_init(&softc->delete_run_queue); if (SID_IS_REMOVABLE(&cgd->inq_data)) softc->flags |= DA_FLAG_PACK_REMOVABLE; softc->unmap_max_ranges = UNMAP_MAX_RANGES; softc->unmap_max_lba = UNMAP_RANGE_MAX; softc->unmap_gran = 0; softc->unmap_gran_align = 0; softc->ws_max_blks = WS16_MAX_BLKS; softc->trim_max_ranges = ATA_TRIM_MAX_RANGES; softc->rotating = 1; periph->softc = softc; /* * See if this device has any quirks. 
*/ match = cam_quirkmatch((caddr_t)&cgd->inq_data, (caddr_t)da_quirk_table, nitems(da_quirk_table), sizeof(*da_quirk_table), scsi_inquiry_match); if (match != NULL) softc->quirks = ((struct da_quirk_entry *)match)->quirks; else softc->quirks = DA_Q_NONE; /* Check if the SIM does not want 6 byte commands */ bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE)) softc->quirks |= DA_Q_NO_6_BYTE; if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM) softc->zone_mode = DA_ZONE_HOST_MANAGED; else if (softc->quirks & DA_Q_SMR_DM) softc->zone_mode = DA_ZONE_DRIVE_MANAGED; else softc->zone_mode = DA_ZONE_NONE; if (softc->zone_mode != DA_ZONE_NONE) { if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) softc->zone_interface = DA_ZONE_IF_ATA_SAT; else softc->zone_interface = DA_ZONE_IF_ATA_PASS; } else softc->zone_interface = DA_ZONE_IF_SCSI; } TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph); /* * Take an exclusive refcount on the periph while dastart is called * to finish the probe. The reference will be dropped in dadone at * the end of probe. */ (void)cam_periph_hold(periph, PRIBIO); /* * Schedule a periodic event to occasionally send an * ordered tag to a device. */ callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0); callout_reset(&softc->sendordered_c, (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, dasendorderedtag, softc); cam_periph_unlock(periph); /* * RBC devices don't have to support READ(6), only READ(10). */ if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC) softc->minimum_cmd_size = 10; else softc->minimum_cmd_size = 6; /* * Load the user's default, if any. */ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size", periph->unit_number); TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size); /* * 6, 10, 12 and 16 are the currently permissible values. */ if (softc->minimum_cmd_size > 12) softc->minimum_cmd_size = 16; else if (softc->minimum_cmd_size > 10) softc->minimum_cmd_size = 12; else if (softc->minimum_cmd_size > 6) softc->minimum_cmd_size = 10; else softc->minimum_cmd_size = 6; /* Predict whether device may support READ CAPACITY(16). */ if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 && (softc->quirks & DA_Q_NO_RC16) == 0) { softc->flags |= DA_FLAG_CAN_RC16; softc->state = DA_STATE_PROBE_RC16; } /* * Register this media as a disk. 
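*/

/*
 * Editor's worked example (illustrative): the clamp above only permits
 * CDB sizes of 6, 10, 12 and 16.  A kern.cam.da.N.minimum_cmd_size
 * tunable of 8 therefore becomes 10, 11 becomes 12, and anything above
 * 12 becomes 16.
 */

/* The registration itself: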
*/ softc->disk = disk_alloc(); softc->disk->d_devstat = devstat_new_entry(periph->periph_name, periph->unit_number, 0, DEVSTAT_BS_UNAVAILABLE, SID_TYPE(&cgd->inq_data) | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_DISK); softc->disk->d_open = daopen; softc->disk->d_close = daclose; softc->disk->d_strategy = dastrategy; softc->disk->d_dump = dadump; softc->disk->d_getattr = dagetattr; softc->disk->d_gone = dadiskgonecb; softc->disk->d_name = "da"; softc->disk->d_drv1 = periph; if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ else if (cpi.maxio > MAXPHYS) softc->maxio = MAXPHYS; /* for safety */ else softc->maxio = cpi.maxio; softc->disk->d_maxsize = softc->maxio; softc->disk->d_unit = periph->unit_number; softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE; if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE; if ((cpi.hba_misc & PIM_UNMAPPED) != 0) { softc->unmappedio = 1; softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO; } cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor, sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr)); strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr)); cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)], cgd->inq_data.product, sizeof(cgd->inq_data.product), sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr)); softc->disk->d_hba_vendor = cpi.hba_vendor; softc->disk->d_hba_device = cpi.hba_device; softc->disk->d_hba_subvendor = cpi.hba_subvendor; softc->disk->d_hba_subdevice = cpi.hba_subdevice; /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * dadiskgonecb()) telling us that our provider has been freed. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); /* * Add async callbacks for events of interest. * I don't bother checking if this fails as, * in most cases, the system will function just * fine without them and the only alternative * would be to not attach the device on failure. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION | AC_INQ_CHANGED, daasync, periph, periph->path); /* * Emit an attribute changed notification just in case * physical path information arrived before our async * event handler was registered, but after anyone attaching * to our disk device polled it. */ disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); /* * Schedule a periodic media polling events. 
*/ callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); if ((softc->flags & DA_FLAG_PACK_REMOVABLE) && (cgd->inq_flags & SID_AEN) == 0 && da_poll_period != 0) callout_reset(&softc->mediapoll_c, da_poll_period * hz, damediapoll, periph); xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static int da_zone_bio_to_scsi(int disk_zone_cmd) { switch (disk_zone_cmd) { case DISK_ZONE_OPEN: return ZBC_OUT_SA_OPEN; case DISK_ZONE_CLOSE: return ZBC_OUT_SA_CLOSE; case DISK_ZONE_FINISH: return ZBC_OUT_SA_FINISH; case DISK_ZONE_RWP: return ZBC_OUT_SA_RWP; } return -1; } static int da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp, int *queue_ccb) { struct da_softc *softc; int error; error = 0; if (bp->bio_cmd != BIO_ZONE) { error = EINVAL; goto bailout; } softc = periph->softc; switch (bp->bio_zone.zone_cmd) { case DISK_ZONE_OPEN: case DISK_ZONE_CLOSE: case DISK_ZONE_FINISH: case DISK_ZONE_RWP: { int zone_flags; int zone_sa; uint64_t lba; zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd); if (zone_sa == -1) { xpt_print(periph->path, "Cannot translate zone " "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd); error = EINVAL; goto bailout; } zone_flags = 0; lba = bp->bio_zone.zone_params.rwp.id; if (bp->bio_zone.zone_params.rwp.flags & DISK_ZONE_RWP_FLAG_ALL) zone_flags |= ZBC_OUT_ALL; if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { scsi_zbc_out(&ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*service_action*/ zone_sa, /*zone_id*/ lba, /*zone_flags*/ zone_flags, /*data_ptr*/ NULL, /*dxfer_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); } else { /* * Note that in this case, even though we can * technically use NCQ, we don't bother for several * reasons: * 1. It hasn't been tested on a SAT layer that * supports it. This is new as of SAT-4. * 2. Even when there is a SAT layer that supports * it, that SAT layer will also probably support * ZBC -> ZAC translation, since they are both * in the SAT-4 spec. * 3. Translation will likely be preferable to ATA * passthrough. LSI / Avago at least single * steps ATA passthrough commands in the HBA, * regardless of protocol, so unless that * changes, there is a performance penalty for * doing ATA passthrough no matter whether * you're using NCQ/FPDMA, DMA or PIO. * 4. It requires a 32-byte CDB, which at least at * this point in CAM requires a CDB pointer, which * would require us to allocate an additional bit * of storage separate from the CCB. 
*/ error = scsi_ata_zac_mgmt_out(&ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*use_ncq*/ 0, /*zm_action*/ zone_sa, /*zone_id*/ lba, /*zone_flags*/ zone_flags, /*data_ptr*/ NULL, /*dxfer_len*/ 0, /*cdb_storage*/ NULL, /*cdb_storage_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (error != 0) { error = EINVAL; xpt_print(periph->path, "scsi_ata_zac_mgmt_out() returned an " "error!"); goto bailout; } } *queue_ccb = 1; break; } case DISK_ZONE_REPORT_ZONES: { uint8_t *rz_ptr; uint32_t num_entries, alloc_size; struct disk_zone_report *rep; rep = &bp->bio_zone.zone_params.report; num_entries = rep->entries_allocated; if (num_entries == 0) { xpt_print(periph->path, "No entries allocated for " "Report Zones request\n"); error = EINVAL; goto bailout; } alloc_size = sizeof(struct scsi_report_zones_hdr) + (sizeof(struct scsi_report_zones_desc) * num_entries); alloc_size = min(alloc_size, softc->disk->d_maxsize); rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO); if (rz_ptr == NULL) { xpt_print(periph->path, "Unable to allocate memory " "for Report Zones request\n"); error = ENOMEM; goto bailout; } if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { scsi_zbc_in(&ccb->csio, /*retries*/ da_retry_count, /*cbcfnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*service_action*/ ZBC_IN_SA_REPORT_ZONES, /*zone_start_lba*/ rep->starting_id, /*zone_options*/ rep->rep_options, /*data_ptr*/ rz_ptr, /*dxfer_len*/ alloc_size, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); } else { /* * Note that in this case, even though we can * technically use NCQ, we don't bother for several * reasons: * 1. It hasn't been tested on a SAT layer that * supports it. This is new as of SAT-4. * 2. Even when there is a SAT layer that supports * it, that SAT layer will also probably support * ZBC -> ZAC translation, since they are both * in the SAT-4 spec. * 3. Translation will likely be preferable to ATA * passthrough. LSI / Avago at least single * steps ATA passthrough commands in the HBA, * regardless of protocol, so unless that * changes, there is a performance penalty for * doing ATA passthrough no matter whether * you're using NCQ/FPDMA, DMA or PIO. * 4. It requires a 32-byte CDB, which at least at * this point in CAM requires a CDB pointer, which * would require us to allocate an additional bit * of storage separate from the CCB. */ error = scsi_ata_zac_mgmt_in(&ccb->csio, /*retries*/ da_retry_count, /*cbcfnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*use_ncq*/ 0, /*zm_action*/ ATA_ZM_REPORT_ZONES, /*zone_id*/ rep->starting_id, /*zone_flags*/ rep->rep_options, /*data_ptr*/ rz_ptr, /*dxfer_len*/ alloc_size, /*cdb_storage*/ NULL, /*cdb_storage_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (error != 0) { error = EINVAL; xpt_print(periph->path, "scsi_ata_zac_mgmt_in() returned an " "error!"); goto bailout; } } /* * For BIO_ZONE, this isn't normally needed. However, it * is used by devstat_end_transaction_bio() to determine * how much data was transferred. */ /* * XXX KDM we have a problem. But I'm not sure how to fix * it. devstat uses bio_bcount - bio_resid to calculate * the amount of data transferred. The GEOM disk code * uses bio_length - bio_resid to calculate the amount of * data in bio_completed. We have different structure * sizes above and below the ada(4) driver. So, if we * use the sizes above, the amount transferred won't be * quite accurate for devstat. 
If we use different sizes * for bio_bcount and bio_length (above and below * respectively), then the residual needs to match one or * the other. Everything is calculated after the bio * leaves the driver, so changing the values around isn't * really an option. For now, just set the count to the * passed in length. This means that the calculations * above (e.g. bio_completed) will be correct, but the * amount of data reported to devstat will be slightly * under or overstated. */ bp->bio_bcount = bp->bio_length; *queue_ccb = 1; break; } case DISK_ZONE_GET_PARAMS: { struct disk_zone_disk_params *params; params = &bp->bio_zone.zone_params.disk_params; bzero(params, sizeof(*params)); switch (softc->zone_mode) { case DA_ZONE_DRIVE_MANAGED: params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED; break; case DA_ZONE_HOST_AWARE: params->zone_mode = DISK_ZONE_MODE_HOST_AWARE; break; case DA_ZONE_HOST_MANAGED: params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED; break; default: case DA_ZONE_NONE: params->zone_mode = DISK_ZONE_MODE_NONE; break; } if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ) params->flags |= DISK_ZONE_DISK_URSWRZ; if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) { params->optimal_seq_zones = softc->optimal_seq_zones; params->flags |= DISK_ZONE_OPT_SEQ_SET; } if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) { params->optimal_nonseq_zones = softc->optimal_nonseq_zones; params->flags |= DISK_ZONE_OPT_NONSEQ_SET; } if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) { params->max_seq_zones = softc->max_seq_zones; params->flags |= DISK_ZONE_MAX_SEQ_SET; } if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP) params->flags |= DISK_ZONE_RZ_SUP; if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP) params->flags |= DISK_ZONE_OPEN_SUP; if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP) params->flags |= DISK_ZONE_CLOSE_SUP; if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP) params->flags |= DISK_ZONE_FINISH_SUP; if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP) params->flags |= DISK_ZONE_RWP_SUP; break; } default: break; } bailout: return (error); } static void dastart(struct cam_periph *periph, union ccb *start_ccb) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n")); skipstate: switch (softc->state) { case DA_STATE_NORMAL: { struct bio *bp; uint8_t tag_code; more: bp = cam_iosched_next_bio(softc->cam_iosched); if (bp == NULL) { if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); scsi_test_unit_ready(&start_ccb->csio, /*retries*/ da_retry_count, dadone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_TUR; xpt_action(start_ccb); } else xpt_release_ccb(start_ccb); break; } if (bp->bio_cmd == BIO_DELETE) { if (softc->delete_func != NULL) { softc->delete_func(periph, start_ccb, bp); goto out; } else { /* Not sure this is possible, but failsafe by lying and saying "sure, done." */ biofinish(bp, NULL, 0); goto more; } } if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); cam_periph_release_locked(periph); /* XXX is this still valid? 
I think so but unverified */ } if ((bp->bio_flags & BIO_ORDERED) != 0 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) { softc->flags &= ~DA_FLAG_NEED_OTAG; softc->flags |= DA_FLAG_WAS_OTAG; tag_code = MSG_ORDERED_Q_TAG; } else { tag_code = MSG_SIMPLE_Q_TAG; } switch (bp->bio_cmd) { case BIO_WRITE: case BIO_READ: { void *data_ptr; int rw_op; biotrack(bp, __func__); if (bp->bio_cmd == BIO_WRITE) { softc->flags |= DA_FLAG_DIRTY; rw_op = SCSI_RW_WRITE; } else { rw_op = SCSI_RW_READ; } data_ptr = bp->bio_data; if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) { rw_op |= SCSI_RW_BIO; data_ptr = bp; } scsi_read_write(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/tag_code, rw_op, /*byte2*/0, softc->minimum_cmd_size, /*lba*/bp->bio_pblkno, /*block_count*/bp->bio_bcount / softc->params.secsize, data_ptr, /*dxfer_len*/ bp->bio_bcount, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) start_ccb->csio.bio = bp; #endif break; } case BIO_FLUSH: /* * BIO_FLUSH doesn't currently communicate * range data, so we synchronize the cache * over the whole disk. We also force * ordered tag semantics the flush applies * to all previously queued I/O. */ scsi_synchronize_cache(&start_ccb->csio, /*retries*/1, /*cbfcnp*/dadone, MSG_ORDERED_Q_TAG, /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE, da_default_timeout*1000); break; case BIO_ZONE: { int error, queue_ccb; queue_ccb = 0; error = da_zone_cmd(periph, start_ccb, bp,&queue_ccb); if ((error != 0) || (queue_ccb == 0)) { biofinish(bp, NULL, error); xpt_release_ccb(start_ccb); return; } break; } } start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO; start_ccb->ccb_h.flags |= CAM_UNLOCKED; start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout); out: LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h, periph_links.le); /* We expect a unit attention from this device */ if ((softc->flags & DA_FLAG_RETRY_UA) != 0) { start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA; softc->flags &= ~DA_FLAG_RETRY_UA; } start_ccb->ccb_h.ccb_bp = bp; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); softc->refcount--; /* May have more work to do, so ensure we stay scheduled */ daschedule(periph); break; } case DA_STATE_PROBE_RC: { struct scsi_read_capacity_data *rcap; rcap = (struct scsi_read_capacity_data *) malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO); if (rcap == NULL) { printf("dastart: Couldn't malloc read_capacity data\n"); /* da_free_periph??? */ break; } scsi_read_capacity(&start_ccb->csio, /*retries*/da_retry_count, dadone, MSG_SIMPLE_Q_TAG, rcap, SSD_FULL_SIZE, /*timeout*/5000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC; xpt_action(start_ccb); break; } case DA_STATE_PROBE_RC16: { struct scsi_read_capacity_data_long *rcaplong; rcaplong = (struct scsi_read_capacity_data_long *) malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO); if (rcaplong == NULL) { printf("dastart: Couldn't malloc read_capacity data\n"); /* da_free_periph??? 
*/ break; } scsi_read_capacity_16(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*lba*/ 0, /*reladr*/ 0, /*pmi*/ 0, /*rcap_buf*/ (uint8_t *)rcaplong, /*rcap_buf_len*/ sizeof(*rcaplong), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16; xpt_action(start_ccb); break; } case DA_STATE_PROBE_LBP: { struct scsi_vpd_logical_block_prov *lbp; if (!scsi_vpd_supported_page(periph, SVPD_LBP)) { /* * If we get here we don't support any SBC-3 delete * methods with UNMAP as the Logical Block Provisioning * VPD page support is required for devices which * support it according to T10/1799-D Revision 31 * however older revisions of the spec don't mandate * this so we currently don't remove these methods * from the available set. */ softc->state = DA_STATE_PROBE_BLK_LIMITS; goto skipstate; } lbp = (struct scsi_vpd_logical_block_prov *) malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO); if (lbp == NULL) { printf("dastart: Couldn't malloc lbp data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)lbp, /*inq_len*/sizeof(*lbp), /*evpd*/TRUE, /*page_code*/SVPD_LBP, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP; xpt_action(start_ccb); break; } case DA_STATE_PROBE_BLK_LIMITS: { struct scsi_vpd_block_limits *block_limits; if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) { /* Not supported skip to next probe */ softc->state = DA_STATE_PROBE_BDC; goto skipstate; } block_limits = (struct scsi_vpd_block_limits *) malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO); if (block_limits == NULL) { printf("dastart: Couldn't malloc block_limits data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)block_limits, /*inq_len*/sizeof(*block_limits), /*evpd*/TRUE, /*page_code*/SVPD_BLOCK_LIMITS, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS; xpt_action(start_ccb); break; } case DA_STATE_PROBE_BDC: { struct scsi_vpd_block_characteristics *bdc; if (!scsi_vpd_supported_page(periph, SVPD_BDC)) { softc->state = DA_STATE_PROBE_ATA; goto skipstate; } bdc = (struct scsi_vpd_block_characteristics *) malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); if (bdc == NULL) { printf("dastart: Couldn't malloc bdc data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)bdc, /*inq_len*/sizeof(*bdc), /*evpd*/TRUE, /*page_code*/SVPD_BDC, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA: { struct ata_params *ata_params; if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { if ((softc->zone_mode == DA_ZONE_HOST_AWARE) || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { /* * Note that if the ATA VPD page isn't * supported, we aren't talking to an ATA * device anyway. Support for that VPD * page is mandatory for SCSI to ATA (SAT) * translation layers. 
*/ softc->state = DA_STATE_PROBE_ZONE; goto skipstate; } daprobedone(periph, start_ccb); break; } ata_params = (struct ata_params*) malloc(sizeof(*ata_params), M_SCSIDA,M_NOWAIT|M_ZERO); if (ata_params == NULL) { xpt_print(periph->path, "Couldn't malloc ata_params " "data\n"); /* da_free_periph??? */ break; } scsi_ata_identify(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*data_ptr*/(u_int8_t *)ata_params, /*dxfer_len*/sizeof(*ata_params), /*sense_len*/SSD_FULL_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_LOGDIR: { struct ata_gp_log_dir *log_dir; int retval; retval = 0; if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) { /* * If we don't have log support, not much point in * trying to probe zone support. */ daprobedone(periph, start_ccb); break; } /* * If we have an ATA device (the SCSI ATA Information VPD * page should be present and the ATA identify should have * succeeded) and it supports logs, ask for the log directory. */ log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO); if (log_dir == NULL) { xpt_print(periph->path, "Couldn't malloc log_dir " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_LOG_DIRECTORY, /*page_number*/ 0, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)log_dir, /*dxfer_len*/ sizeof(*log_dir), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(log_dir, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_IDDIR: { struct ata_identify_log_pages *id_dir; int retval; retval = 0; /* * Check here to see whether the Identify Device log is * supported in the directory of logs. If so, continue * with requesting the log of identify device pages. */ if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) { daprobedone(periph, start_ccb); break; } id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO); if (id_dir == NULL) { xpt_print(periph->path, "Couldn't malloc id_dir " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_PAGE_LIST, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)id_dir, /*dxfer_len*/ sizeof(*id_dir), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(id_dir, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_SUP: { struct ata_identify_log_sup_cap *sup_cap; int retval; retval = 0; /* * Check here to see whether the Supported Capabilities log * is in the list of Identify Device logs. 
*/ if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) { daprobedone(periph, start_ccb); break; } sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO); if (sup_cap == NULL) { xpt_print(periph->path, "Couldn't malloc sup_cap " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_SUP_CAP, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)sup_cap, /*dxfer_len*/ sizeof(*sup_cap), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(sup_cap, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_ZONE: { struct ata_zoned_info_log *ata_zone; int retval; retval = 0; /* * Check here to see whether the zoned device information * page is supported. If so, continue on to request it. * If not, skip to DA_STATE_PROBE_LOG or done. */ if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) { daprobedone(periph, start_ccb); break; } ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA, M_NOWAIT|M_ZERO); if (ata_zone == NULL) { xpt_print(periph->path, "Couldn't malloc ata_zone " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_ZDI, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)ata_zone, /*dxfer_len*/ sizeof(*ata_zone), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(ata_zone, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ZONE: { struct scsi_vpd_zoned_bdc *bdc; /* * Note that this page will be supported for SCSI protocol * devices that support ZBC (SMR devices), as well as ATA * protocol devices that are behind a SAT (SCSI to ATA * Translation) layer that supports converting ZBC commands * to their ZAC equivalents. */ if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) { daprobedone(periph, start_ccb); break; } bdc = (struct scsi_vpd_zoned_bdc *) malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); if (bdc == NULL) { xpt_release_ccb(start_ccb); xpt_print(periph->path, "Couldn't malloc zone VPD " "data\n"); break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)bdc, /*inq_len*/sizeof(*bdc), /*evpd*/TRUE, /*page_code*/SVPD_ZONED_BDC, /*sense_len*/SSD_FULL_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE; xpt_action(start_ccb); break; } } } /* * In each of the methods below, while its the caller's * responsibility to ensure the request will fit into a * single device request, we might have changed the delete * method due to the device incorrectly advertising either * its supported methods or limits. 
 *
 * To prevent this causing further issues, we validate the request
 * against the method's limits and warn when a delete must be
 * truncated, which would otherwise be unnecessary.
 */
static void
da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
{
	struct da_softc *softc = (struct da_softc *)periph->softc;
	struct bio *bp1;
	uint8_t *buf = softc->unmap_buf;
	struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE];
	uint64_t lba, lastlba = (uint64_t)-1;
	uint64_t totalcount = 0;
	uint64_t count;
	uint32_t c, lastcount = 0, ranges = 0;

	/*
	 * Currently this doesn't take the UNMAP Granularity and Granularity
	 * Alignment fields into account.
	 *
	 * This could result in both suboptimal unmap requests as well as
	 * UNMAP calls unmapping fewer LBAs than requested.
	 */
	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
	bp1 = bp;
	do {
		/*
		 * Note: ada and da are different in how they store the
		 * pending bp's in a trim. ada stores all of them in the
		 * trim_req.bps. da stores all but the first one in the
		 * delete_run_queue. ada then completes all the bps in
		 * its adadone() loop. da completes all the bps in the
		 * delete_run_queue in dadone, and relies on the biodone
		 * after to complete. This should be reconciled since there's
		 * no real reason to do it differently. XXX
		 */
		if (bp1 != bp)
			bioq_insert_tail(&softc->delete_run_queue, bp1);
		lba = bp1->bio_pblkno;
		count = bp1->bio_bcount / softc->params.secsize;

		/* Try to extend the previous range. */
		if (lba == lastlba) {
			c = omin(count, UNMAP_RANGE_MAX - lastcount);
			lastlba += c;
			lastcount += c;
			scsi_ulto4b(lastcount, d[ranges - 1].length);
			count -= c;
			lba += c;
			totalcount += c;
		} else if ((softc->quirks & DA_Q_STRICT_UNMAP) &&
		    softc->unmap_gran != 0) {
			/* Align length of the previous range. */
			if ((c = lastcount % softc->unmap_gran) != 0) {
				if (lastcount <= c) {
					totalcount -= lastcount;
					lastlba = (uint64_t)-1;
					lastcount = 0;
					ranges--;
				} else {
					totalcount -= c;
					lastlba -= c;
					lastcount -= c;
					scsi_ulto4b(lastcount,
					    d[ranges - 1].length);
				}
			}
			/* Align beginning of the new range. */
			c = (lba - softc->unmap_gran_align) %
			    softc->unmap_gran;
			if (c != 0) {
				c = softc->unmap_gran - c;
				if (count <= c) {
					count = 0;
				} else {
					lba += c;
					count -= c;
				}
			}
		}

		while (count > 0) {
			c = omin(count, UNMAP_RANGE_MAX);
			if (totalcount + c > softc->unmap_max_lba ||
			    ranges >= softc->unmap_max_ranges) {
				xpt_print(periph->path,
				    "%s issuing short delete %ld > %ld"
				    " || %d >= %d\n",
				    da_delete_method_desc[softc->delete_method],
				    totalcount + c, softc->unmap_max_lba,
				    ranges, softc->unmap_max_ranges);
				break;
			}
			scsi_u64to8b(lba, d[ranges].lba);
			scsi_ulto4b(c, d[ranges].length);
			lba += c;
			totalcount += c;
			ranges++;
			count -= c;
			lastlba = lba;
			lastcount = c;
		}
		bp1 = cam_iosched_next_trim(softc->cam_iosched);
		if (bp1 == NULL)
			break;
		if (ranges >= softc->unmap_max_ranges ||
		    totalcount + bp1->bio_bcount / softc->params.secsize >
		    softc->unmap_max_lba) {
			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
			break;
		}
	} while (1);

	/* Align length of the last range.
*/ if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 && (c = lastcount % softc->unmap_gran) != 0) { if (lastcount <= c) ranges--; else scsi_ulto4b(lastcount - c, d[ranges - 1].length); } scsi_ulto2b(ranges * 16 + 6, &buf[0]); scsi_ulto2b(ranges * 16, &buf[2]); scsi_unmap(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*byte2*/0, /*data_ptr*/ buf, /*dxfer_len*/ ranges * 16 + 8, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } static void da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc = (struct da_softc *)periph->softc; struct bio *bp1; uint8_t *buf = softc->unmap_buf; uint64_t lastlba = (uint64_t)-1; uint64_t count; uint64_t lba; uint32_t lastcount = 0, c, requestcount; int ranges = 0, off, block_count; bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); bp1 = bp; do { if (bp1 != bp)//XXX imp XXX bioq_insert_tail(&softc->delete_run_queue, bp1); lba = bp1->bio_pblkno; count = bp1->bio_bcount / softc->params.secsize; requestcount = count; /* Try to extend the previous range. */ if (lba == lastlba) { c = omin(count, ATA_DSM_RANGE_MAX - lastcount); lastcount += c; off = (ranges - 1) * 8; buf[off + 6] = lastcount & 0xff; buf[off + 7] = (lastcount >> 8) & 0xff; count -= c; lba += c; } while (count > 0) { c = omin(count, ATA_DSM_RANGE_MAX); off = ranges * 8; buf[off + 0] = lba & 0xff; buf[off + 1] = (lba >> 8) & 0xff; buf[off + 2] = (lba >> 16) & 0xff; buf[off + 3] = (lba >> 24) & 0xff; buf[off + 4] = (lba >> 32) & 0xff; buf[off + 5] = (lba >> 40) & 0xff; buf[off + 6] = c & 0xff; buf[off + 7] = (c >> 8) & 0xff; lba += c; ranges++; count -= c; lastcount = c; if (count != 0 && ranges == softc->trim_max_ranges) { xpt_print(periph->path, "%s issuing short delete %ld > %ld\n", da_delete_method_desc[softc->delete_method], requestcount, (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX); break; } } lastlba = lba; bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (bp1->bio_bcount / softc->params.secsize > (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); block_count = howmany(ranges, ATA_DSM_BLK_RANGES); scsi_ata_trim(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, block_count, /*data_ptr*/buf, /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } /* * We calculate ws_max_blks here based off d_delmaxsize instead * of using softc->ws_max_blks as it is absolute max for the * device not the protocol max which may well be lower. 
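*/

/*
 * Editor's illustrative sketch (not driver code): the byte-stuffing loop
 * in da_delete_trim() above builds ATA DATA SET MANAGEMENT (TRIM) range
 * entries.  Each entry is 8 bytes: a 48-bit starting LBA followed by a
 * 16-bit block count, both little endian.  The helper name is made up
 * for illustration.
 */
static void
dsm_pack_range(uint8_t *dst, uint64_t lba, uint16_t nblks)
{
	dst[0] = lba & 0xff;			/* LBA bits 7:0 */
	dst[1] = (lba >> 8) & 0xff;
	dst[2] = (lba >> 16) & 0xff;
	dst[3] = (lba >> 24) & 0xff;
	dst[4] = (lba >> 32) & 0xff;
	dst[5] = (lba >> 40) & 0xff;		/* LBA bits 47:40 */
	dst[6] = nblks & 0xff;			/* count bits 7:0 */
	dst[7] = (nblks >> 8) & 0xff;		/* count bits 15:8 */
}

/* WRITE SAME based deletion (WS16, WS10 and ZERO):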
*/ static void da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc; struct bio *bp1; uint64_t ws_max_blks; uint64_t lba; uint64_t count; /* forward compat with WS32 */ softc = (struct da_softc *)periph->softc; ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize; lba = bp->bio_pblkno; count = 0; bp1 = bp; do { if (bp1 != bp)//XXX imp XXX bioq_insert_tail(&softc->delete_run_queue, bp1); count += bp1->bio_bcount / softc->params.secsize; if (count > ws_max_blks) { xpt_print(periph->path, "%s issuing short delete %ld > %ld\n", da_delete_method_desc[softc->delete_method], count, ws_max_blks); count = omin(count, ws_max_blks); break; } bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (lba + count != bp1->bio_pblkno || count + bp1->bio_bcount / softc->params.secsize > ws_max_blks) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); scsi_write_same(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*byte2*/softc->delete_method == DA_DELETE_ZERO ? 0 : SWS_UNMAP, softc->delete_method == DA_DELETE_WS16 ? 16 : 10, /*lba*/lba, /*block_count*/count, /*data_ptr*/ __DECONST(void *, zero_region), /*dxfer_len*/ softc->params.secsize, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } static int cmd6workaround(union ccb *ccb) { struct scsi_rw_6 cmd6; struct scsi_rw_10 *cmd10; struct da_softc *softc; u_int8_t *cdb; struct bio *bp; int frozen; cdb = ccb->csio.cdb_io.cdb_bytes; softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc; if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) { da_delete_methods old_method = softc->delete_method; /* * Typically there are two reasons for failure here * 1. Delete method was detected as supported but isn't * 2. Delete failed due to invalid params e.g. too big * * While we will attempt to choose an alternative delete method * this may result in short deletes if the existing delete * requests from geom are big for the new method chosen. * * This method assumes that the error which triggered this * will not retry the io otherwise a panic will occur */ dadeleteflag(softc, old_method, 0); dadeletemethodchoose(softc, DA_DELETE_DISABLE); if (softc->delete_method == DA_DELETE_DISABLE) xpt_print(ccb->ccb_h.path, "%s failed, disabling BIO_DELETE\n", da_delete_method_desc[old_method]); else xpt_print(ccb->ccb_h.path, "%s failed, switching to %s BIO_DELETE\n", da_delete_method_desc[old_method], da_delete_method_desc[softc->delete_method]); while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL) cam_iosched_queue_work(softc->cam_iosched, bp); cam_iosched_queue_work(softc->cam_iosched, (struct bio *)ccb->ccb_h.ccb_bp); ccb->ccb_h.ccb_bp = NULL; return (0); } /* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && (*cdb == PREVENT_ALLOW) && (softc->quirks & DA_Q_NO_PREVENT) == 0) { if (bootverbose) xpt_print(ccb->ccb_h.path, "PREVENT ALLOW MEDIUM REMOVAL not supported.\n"); softc->quirks |= DA_Q_NO_PREVENT; return (0); } /* Detect unsupported SYNCHRONIZE CACHE(10). 
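*/

/*
 * Editor's note (illustrative): the READ(6)/WRITE(6) rewrite at the end
 * of this function moves the fields of the 6-byte CDB into the 10-byte
 * layout as follows:
 *
 *	rw_6.opcode (0x08/0x0a)  ->  rw_10.opcode (0x28/0x2a)
 *	rw_6.addr   (3 bytes)    ->  rw_10.addr   (4 bytes, zero extended)
 *	rw_6.length (1 byte)     ->  rw_10.length (2 bytes)
 *	rw_6.control             ->  rw_10.control
 *
 * The one SBC subtlety is that a 6-byte transfer length of zero means
 * 256 blocks while a 10-byte length of zero means none; the rewrite
 * copies the field verbatim.
 */

/* Detect unsupported SYNCHRONIZE CACHE(10).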
*/ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && (*cdb == SYNCHRONIZE_CACHE) && (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { if (bootverbose) xpt_print(ccb->ccb_h.path, "SYNCHRONIZE CACHE(10) not supported.\n"); softc->quirks |= DA_Q_NO_SYNC_CACHE; softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE; return (0); } /* Translation only possible if CDB is an array and cmd is R/W6 */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 || (*cdb != READ_6 && *cdb != WRITE_6)) return 0; xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, " "increasing minimum_cmd_size to 10.\n"); softc->minimum_cmd_size = 10; bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6)); cmd10 = (struct scsi_rw_10 *)cdb; cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10; cmd10->byte2 = 0; scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr); cmd10->reserved = 0; scsi_ulto2b(cmd6.length, cmd10->length); cmd10->control = cmd6.control; ccb->csio.cdb_len = sizeof(*cmd10); /* Requeue request, unfreezing queue if necessary */ frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_action(ccb); if (frozen) { cam_release_devq(ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } return (ERESTART); } static void dazonedone(struct cam_periph *periph, union ccb *ccb) { struct da_softc *softc; struct bio *bp; softc = periph->softc; bp = (struct bio *)ccb->ccb_h.ccb_bp; switch (bp->bio_zone.zone_cmd) { case DISK_ZONE_OPEN: case DISK_ZONE_CLOSE: case DISK_ZONE_FINISH: case DISK_ZONE_RWP: break; case DISK_ZONE_REPORT_ZONES: { uint32_t avail_len; struct disk_zone_report *rep; struct scsi_report_zones_hdr *hdr; struct scsi_report_zones_desc *desc; struct disk_zone_rep_entry *entry; uint32_t num_alloced, hdr_len, num_avail; uint32_t num_to_fill, i; int ata; rep = &bp->bio_zone.zone_params.report; avail_len = ccb->csio.dxfer_len - ccb->csio.resid; /* * Note that bio_resid isn't normally used for zone * commands, but it is used by devstat_end_transaction_bio() * to determine how much data was transferred. Because * the size of the SCSI/ATA data structures is different * than the size of the BIO interface structures, the * amount of data actually transferred from the drive will * be different than the amount of data transferred to * the user. */ bp->bio_resid = ccb->csio.resid; num_alloced = rep->entries_allocated; hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr; if (avail_len < sizeof(*hdr)) { /* * Is there a better error than EIO here? We asked * for at least the header, and we got less than * that. */ bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; break; } if (softc->zone_interface == DA_ZONE_IF_ATA_PASS) ata = 1; else ata = 0; hdr_len = ata ? le32dec(hdr->length) : scsi_4btoul(hdr->length); if (hdr_len > 0) rep->entries_available = hdr_len / sizeof(*desc); else rep->entries_available = 0; /* * NOTE: using the same values for the BIO version of the * same field as the SCSI/ATA values. This means we could * get some additional values that aren't defined in bio.h * if more values of the same field are defined later. */ rep->header.same = hdr->byte4 & SRZ_SAME_MASK; rep->header.maximum_lba = ata ? le64dec(hdr->maximum_lba) : scsi_8btou64(hdr->maximum_lba); /* * If the drive reports no entries that match the query, * we're done. */ if (hdr_len == 0) { rep->entries_filled = 0; break; } num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc), hdr_len / sizeof(*desc)); /* * If the drive didn't return any data, then we're done. 
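*/

/*
 * Editor's worked example (illustrative figures): with the ZBC report
 * header and each zone descriptor both 64 bytes, a 128 KiB transfer
 * that came back full yields
 *
 *	num_avail = min((131072 - 64) / 64, hdr_len / 64)
 *	          = min(2047, entries the drive reports)
 *
 * so at most 2047 descriptors can be consumed from a single request of
 * that size.
 */

/* No descriptors at all?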
*/ if (num_avail == 0) { rep->entries_filled = 0; break; } num_to_fill = min(num_avail, rep->entries_allocated); /* * If the user didn't allocate any entries for us to fill, * we're done. */ if (num_to_fill == 0) { rep->entries_filled = 0; break; } for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0]; i < num_to_fill; i++, desc++, entry++) { /* * NOTE: we're mapping the values here directly * from the SCSI/ATA bit definitions to the bio.h * definitons. There is also a warning in * disk_zone.h, but the impact is that if * additional values are added in the SCSI/ATA * specs these will be visible to consumers of * this interface. */ entry->zone_type = desc->zone_type & SRZ_TYPE_MASK; entry->zone_condition = (desc->zone_flags & SRZ_ZONE_COND_MASK) >> SRZ_ZONE_COND_SHIFT; entry->zone_flags |= desc->zone_flags & (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET); entry->zone_length = ata ? le64dec(desc->zone_length) : scsi_8btou64(desc->zone_length); entry->zone_start_lba = ata ? le64dec(desc->zone_start_lba) : scsi_8btou64(desc->zone_start_lba); entry->write_pointer_lba = ata ? le64dec(desc->write_pointer_lba) : scsi_8btou64(desc->write_pointer_lba); } rep->entries_filled = num_to_fill; break; } case DISK_ZONE_GET_PARAMS: default: /* * In theory we should not get a GET_PARAMS bio, since it * should be handled without queueing the command to the * drive. */ panic("%s: Invalid zone command %d", __func__, bp->bio_zone.zone_cmd); break; } if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) free(ccb->csio.data_ptr, M_SCSIDA); } static void dadone(struct cam_periph *periph, union ccb *done_ccb) { struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; da_ccb_state state; softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n")); csio = &done_ccb->csio; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (csio->bio != NULL) biotrack(csio->bio, __func__); #endif state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK; switch (state) { case DA_CCB_BUFFER_IO: case DA_CCB_DELETE: { struct bio *bp, *bp1; cam_periph_lock(periph); bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { int error; int sf; if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0) sf = SF_RETRY_UA; else sf = 0; error = daerror(done_ccb, CAM_RETRY_SELTO, sf); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ cam_periph_unlock(periph); return; } bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if (error != 0) { int queued_error; /* * return all queued I/O with EIO, so that * the client can retry these I/Os in the * proper order should it attempt to recover. */ queued_error = EIO; if (error == ENXIO && (softc->flags & DA_FLAG_PACK_INVALID)== 0) { /* * Catastrophic error. Mark our pack as * invalid. */ /* * XXX See if this is really a media * XXX change first? 
*/ xpt_print(periph->path, "Invalidating pack\n"); softc->flags |= DA_FLAG_PACK_INVALID; #ifdef CAM_IO_STATS softc->invalidations++; #endif queued_error = ENXIO; } cam_iosched_flush(softc->cam_iosched, NULL, queued_error); if (bp != NULL) { bp->bio_error = error; bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } } else if (bp != NULL) { if (state == DA_CCB_DELETE) bp->bio_resid = 0; else bp->bio_resid = csio->resid; bp->bio_error = 0; if (bp->bio_resid != 0) bp->bio_flags |= BIO_ERROR; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } else if (bp != NULL) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) panic("REQ_CMP with QFRZN"); if (bp->bio_cmd == BIO_ZONE) dazonedone(periph, done_ccb); else if (state == DA_CCB_DELETE) bp->bio_resid = 0; else bp->bio_resid = csio->resid; if ((csio->resid > 0) && (bp->bio_cmd != BIO_ZONE)) bp->bio_flags |= BIO_ERROR; if (softc->error_inject != 0) { bp->bio_error = softc->error_inject; bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; softc->error_inject = 0; } } if (bp != NULL) biotrack(bp, __func__); LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); if (LIST_EMPTY(&softc->pending_ccbs)) softc->flags |= DA_FLAG_WAS_OTAG; cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb); xpt_release_ccb(done_ccb); if (state == DA_CCB_DELETE) { TAILQ_HEAD(, bio) queue; TAILQ_INIT(&queue); TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue); softc->delete_run_queue.insert_point = NULL; /* * Normally, the xpt_release_ccb() above would make sure * that when we have more work to do, that work would * get kicked off. However, we specifically keep * delete_running set to 0 before the call above to * allow other I/O to progress when many BIO_DELETE * requests are pushed down. We set delete_running to 0 * and call daschedule again so that we don't stall if * there are no other I/Os pending apart from BIO_DELETEs. */ cam_iosched_trim_done(softc->cam_iosched); daschedule(periph); cam_periph_unlock(periph); while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, bp1, bio_queue); bp1->bio_error = bp->bio_error; if (bp->bio_flags & BIO_ERROR) { bp1->bio_flags |= BIO_ERROR; bp1->bio_resid = bp1->bio_bcount; } else bp1->bio_resid = 0; biodone(bp1); } } else { daschedule(periph); cam_periph_unlock(periph); } if (bp != NULL) biodone(bp); return; } case DA_CCB_PROBE_RC: case DA_CCB_PROBE_RC16: { struct scsi_read_capacity_data *rdcap; struct scsi_read_capacity_data_long *rcaplong; char *announce_buf; int lbp; lbp = 0; rdcap = NULL; rcaplong = NULL; /* XXX TODO: can this be a malloc? */ announce_buf = softc->announce_temp; bzero(announce_buf, DA_ANNOUNCETMP_SZ); if (state == DA_CCB_PROBE_RC) rdcap =(struct scsi_read_capacity_data *)csio->data_ptr; else rcaplong = (struct scsi_read_capacity_data_long *) csio->data_ptr; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { struct disk_params *dp; uint32_t block_size; uint64_t maxsector; u_int lalba; /* Lowest aligned LBA. */ if (state == DA_CCB_PROBE_RC) { block_size = scsi_4btoul(rdcap->length); maxsector = scsi_4btoul(rdcap->addr); lalba = 0; /* * According to SBC-2, if the standard 10 * byte READ CAPACITY command returns 2^32, * we should issue the 16 byte version of * the command, since the device in question * has more sectors than can be represented * with the short version of the command. 
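*/

/*
 * Editor's note (illustrative): READ CAPACITY(10) reports the last LBA
 * in 32 bits, so with 512-byte sectors the largest capacity it can
 * describe is 2^32 * 512 bytes = 2 TiB.  The all-ones value 0xffffffff
 * is the standard's escape hatch telling the initiator to reissue the
 * query as READ CAPACITY(16).
 */

/* A saturated 32-bit result means we must ask again with RC(16).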
*/ if (maxsector == 0xffffffff) { free(rdcap, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_RC16; xpt_schedule(periph, priority); return; } } else { block_size = scsi_4btoul(rcaplong->length); maxsector = scsi_8btou64(rcaplong->addr); lalba = scsi_2btoul(rcaplong->lalba_lbp); } /* * Because GEOM code just will panic us if we * give them an 'illegal' value we'll avoid that * here. */ if (block_size == 0) { block_size = 512; if (maxsector == 0) maxsector = -1; } if (block_size >= MAXPHYS) { xpt_print(periph->path, "unsupportable block size %ju\n", (uintmax_t) block_size); announce_buf = NULL; cam_periph_invalidate(periph); } else { /* * We pass rcaplong into dasetgeom(), * because it will only use it if it is * non-NULL. */ dasetgeom(periph, block_size, maxsector, rcaplong, sizeof(*rcaplong)); lbp = (lalba & SRC16_LBPME_A); dp = &softc->params; snprintf(announce_buf, DA_ANNOUNCETMP_SZ, "%juMB (%ju %u byte sectors)", ((uintmax_t)dp->secsize * dp->sectors) / (1024 * 1024), (uintmax_t)dp->sectors, dp->secsize); } } else { int error; /* * Retry any UNIT ATTENTION type errors. They * are expected at boot. */ error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) { /* * A retry was scheuled, so * just return. */ return; } else if (error != 0) { int asc, ascq; int sense_key, error_code; int have_sense; cam_status status; struct ccb_getdev cgd; /* Don't wedge this device's queue */ status = done_ccb->ccb_h.status; if ((status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if (scsi_extract_sense_ccb(done_ccb, &error_code, &sense_key, &asc, &ascq)) have_sense = TRUE; else have_sense = FALSE; /* * If we tried READ CAPACITY(16) and failed, * fallback to READ CAPACITY(10). */ if ((state == DA_CCB_PROBE_RC16) && (softc->flags & DA_FLAG_CAN_RC16) && (((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) || ((have_sense) && (error_code == SSD_CURRENT_ERROR) && (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) { softc->flags &= ~DA_FLAG_CAN_RC16; free(rdcap, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_RC; xpt_schedule(periph, priority); return; } /* * Attach to anything that claims to be a * direct access or optical disk device, * as long as it doesn't return a "Logical * unit not supported" (0x25) error. * "Internal Target Failure" (0x44) is also * special and typically means that the * device is a SATA drive behind a SATL * translation that's fallen into a * terminally fatal state. */ if ((have_sense) && (asc != 0x25) && (asc != 0x44) && (error_code == SSD_CURRENT_ERROR)) { const char *sense_key_desc; const char *asc_desc; dasetgeom(periph, 512, -1, NULL, 0); scsi_sense_desc(sense_key, asc, ascq, &cgd.inq_data, &sense_key_desc, &asc_desc); snprintf(announce_buf, DA_ANNOUNCETMP_SZ, "Attempt to query device " "size failed: %s, %s", sense_key_desc, asc_desc); } else { if (have_sense) scsi_sense_print( &done_ccb->csio); else { xpt_print(periph->path, "got CAM status %#x\n", done_ccb->ccb_h.status); } xpt_print(periph->path, "fatal error, " "failed to attach to device\n"); announce_buf = NULL; /* * Free up resources. 
*/ cam_periph_invalidate(periph); } } } free(csio->data_ptr, M_SCSIDA); if (announce_buf != NULL && ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) { struct sbuf sb; sbuf_new(&sb, softc->announcebuf, DA_ANNOUNCE_SZ, SBUF_FIXEDLEN); xpt_announce_periph_sbuf(periph, &sb, announce_buf); xpt_announce_quirks_sbuf(periph, &sb, softc->quirks, DA_Q_BIT_STRING); sbuf_finish(&sb); sbuf_putbuf(&sb); /* * Create our sysctl variables, now that we know * we have successfully attached. */ /* increase the refcount */ if (cam_periph_acquire(periph) == CAM_REQ_CMP) { taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); } else { /* XXX This message is useless! */ xpt_print(periph->path, "fatal error, " "could not acquire reference count\n"); } } /* We already probed the device. */ if (softc->flags & DA_FLAG_PROBED) { daprobedone(periph, done_ccb); return; } /* Ensure re-probe doesn't see old delete. */ softc->delete_available = 0; dadeleteflag(softc, DA_DELETE_ZERO, 1); if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) { /* * Based on older SBC-3 spec revisions * any of the UNMAP methods "may" be * available via LBP given this flag so * we flag all of them as available and * then remove those which further * probes confirm aren't available * later. * * We could also check readcap(16) p_type * flag to exclude one or more invalid * write same (X) types here */ dadeleteflag(softc, DA_DELETE_WS16, 1); dadeleteflag(softc, DA_DELETE_WS10, 1); dadeleteflag(softc, DA_DELETE_UNMAP, 1); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_LBP; xpt_schedule(periph, priority); return; } xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_BDC; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_LBP: { struct scsi_vpd_logical_block_prov *lbp; lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { /* * T10/1799-D Revision 31 states at least one of these * must be supported but we don't currently enforce this. 
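*/

/*
 * Editor's note (illustrative; bit values assumed from SPC-4): byte 5 of
 * the Logical Block Provisioning VPD page advertises the delete
 * primitives, and the flags tested below correspond to
 *
 *	LBPU    (0x80)  ->  DA_DELETE_UNMAP
 *	LBPWS   (0x40)  ->  DA_DELETE_WS16
 *	LBPWS10 (0x20)  ->  DA_DELETE_WS10
 *
 * The driver itself relies only on the SVPD_LBP_* macros, not on these
 * literal values.
 */

/* Latch whichever methods the device advertised.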
*/ dadeleteflag(softc, DA_DELETE_WS16, (lbp->flags & SVPD_LBP_WS16)); dadeleteflag(softc, DA_DELETE_WS10, (lbp->flags & SVPD_LBP_WS10)); dadeleteflag(softc, DA_DELETE_UNMAP, (lbp->flags & SVPD_LBP_UNMAP)); } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } /* * Failure indicates we don't support any SBC-3 * delete methods with UNMAP */ } } free(lbp, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_BLK_LIMITS; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_BLK_LIMITS: { struct scsi_vpd_block_limits *block_limits; block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t max_txfer_len = scsi_4btoul( block_limits->max_txfer_len); uint32_t max_unmap_lba_cnt = scsi_4btoul( block_limits->max_unmap_lba_cnt); uint32_t max_unmap_blk_cnt = scsi_4btoul( block_limits->max_unmap_blk_cnt); uint32_t unmap_gran = scsi_4btoul( block_limits->opt_unmap_grain); uint32_t unmap_gran_align = scsi_4btoul( block_limits->unmap_grain_align); uint64_t ws_max_blks = scsi_8btou64( block_limits->max_write_same_length); if (max_txfer_len != 0) { softc->disk->d_maxsize = MIN(softc->maxio, (off_t)max_txfer_len * softc->params.secsize); } /* * We should already support UNMAP but we check lba * and block count to be sure */ if (max_unmap_lba_cnt != 0x00L && max_unmap_blk_cnt != 0x00L) { softc->unmap_max_lba = max_unmap_lba_cnt; softc->unmap_max_ranges = min(max_unmap_blk_cnt, UNMAP_MAX_RANGES); if (unmap_gran > 1) { softc->unmap_gran = unmap_gran; if (unmap_gran_align & 0x80000000) { softc->unmap_gran_align = unmap_gran_align & 0x7fffffff; } } } else { /* * Unexpected UNMAP limits which means the * device doesn't actually support UNMAP */ dadeleteflag(softc, DA_DELETE_UNMAP, 0); } if (ws_max_blks != 0x00L) softc->ws_max_blks = ws_max_blks; } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } /* * Failure here doesn't mean UNMAP is not * supported as this is an optional page. */ softc->unmap_max_lba = 1; softc->unmap_max_ranges = 1; } } free(block_limits, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_BDC; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_BDC: { struct scsi_vpd_block_device_characteristics *bdc; bdc = (struct scsi_vpd_block_device_characteristics *) csio->data_ptr; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; /* * Disable queue sorting for non-rotational media * by default. 
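*/

/*
 * Editor's note (illustrative): MEDIUM ROTATION RATE in the BDC VPD page
 * is 0x0000 when unreported, 0x0001 for non-rotating (solid state)
 * media, and otherwise the nominal spindle speed in RPM, e.g. 0x1c20 for
 * a 7200 RPM drive, per SBC-3.
 */

/* Remember the old rate so we can emit a change notification.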
*/ u_int16_t old_rate = softc->disk->d_rotation_rate; valid_len = csio->dxfer_len - csio->resid; if (SBDC_IS_PRESENT(bdc, valid_len, medium_rotation_rate)) { softc->disk->d_rotation_rate = scsi_2btoul(bdc->medium_rotation_rate); if (softc->disk->d_rotation_rate == SVPD_BDC_RATE_NON_ROTATING) { cam_iosched_set_sort_queue( softc->cam_iosched, 0); softc->rotating = 0; } if (softc->disk->d_rotation_rate != old_rate) { disk_attr_changed(softc->disk, "GEOM::rotation_rate", M_NOWAIT); } } if ((SBDC_IS_PRESENT(bdc, valid_len, flags)) && (softc->zone_mode == DA_ZONE_NONE)) { int ata_proto; if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) ata_proto = 1; else ata_proto = 0; /* * The Zoned field will only be set for * Drive Managed and Host Aware drives. If * they are Host Managed, the device type * in the standard INQUIRY data should be * set to T_ZBC_HM (0x14). */ if ((bdc->flags & SVPD_ZBC_MASK) == SVPD_HAW_ZBC) { softc->zone_mode = DA_ZONE_HOST_AWARE; softc->zone_interface = (ata_proto) ? DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI; } else if ((bdc->flags & SVPD_ZBC_MASK) == SVPD_DM_ZBC) { softc->zone_mode =DA_ZONE_DRIVE_MANAGED; softc->zone_interface = (ata_proto) ? DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI; } else if ((bdc->flags & SVPD_ZBC_MASK) != SVPD_ZBC_NR) { xpt_print(periph->path, "Unknown zoned " "type %#x", bdc->flags & SVPD_ZBC_MASK); } } } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(bdc, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_ATA; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_ATA: { int i; struct ata_params *ata_params; int continue_probe; int error; int16_t *ptr; ata_params = (struct ata_params *)csio->data_ptr; ptr = (uint16_t *)ata_params; continue_probe = 0; error = 0; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint16_t old_rate; for (i = 0; i < sizeof(*ata_params) / 2; i++) ptr[i] = le16toh(ptr[i]); if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM && (softc->quirks & DA_Q_NO_UNMAP) == 0) { dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1); if (ata_params->max_dsm_blocks != 0) softc->trim_max_ranges = min( softc->trim_max_ranges, ata_params->max_dsm_blocks * ATA_DSM_BLK_RANGES); } /* * Disable queue sorting for non-rotational media * by default. */ old_rate = softc->disk->d_rotation_rate; softc->disk->d_rotation_rate = ata_params->media_rotation_rate; if (softc->disk->d_rotation_rate == ATA_RATE_NON_ROTATING) { cam_iosched_set_sort_queue(softc->cam_iosched, 0); softc->rotating = 0; } if (softc->disk->d_rotation_rate != old_rate) { disk_attr_changed(softc->disk, "GEOM::rotation_rate", M_NOWAIT); } if (ata_params->capabilities1 & ATA_SUPPORT_DMA) softc->flags |= DA_FLAG_CAN_ATA_DMA; if (ata_params->support.extension & ATA_SUPPORT_GENLOG) softc->flags |= DA_FLAG_CAN_ATA_LOG; /* * At this point, if we have a SATA host aware drive, * we communicate via ATA passthrough unless the * SAT layer supports ZBC -> ZAC translation. In * that case, */ /* * XXX KDM figure out how to detect a host managed * SATA drive. */ if (softc->zone_mode == DA_ZONE_NONE) { /* * Note that we don't override the zone * mode or interface if it has already been * set. 
This is because it has either been * set as a quirk, or because the zoned field * was set when we probed the SCSI Block * Device Characteristics page. The latter * means that the SAT layer supports ZBC to * ZAC translation, and we would prefer to * use that if it is available. */ if ((ata_params->support3 & ATA_SUPPORT_ZONE_MASK) == ATA_SUPPORT_ZONE_HOST_AWARE) { softc->zone_mode = DA_ZONE_HOST_AWARE; softc->zone_interface = DA_ZONE_IF_ATA_PASS; } else if ((ata_params->support3 & ATA_SUPPORT_ZONE_MASK) == ATA_SUPPORT_ZONE_DEV_MANAGED) { softc->zone_mode = DA_ZONE_DRIVE_MANAGED; softc->zone_interface = DA_ZONE_IF_ATA_PASS; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(ata_params, M_SCSIDA); if ((softc->zone_mode == DA_ZONE_HOST_AWARE) || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { /* * If the ATA IDENTIFY failed, we could be talking * to a SCSI drive, although that seems unlikely, * since the drive did report that it supported the * ATA Information VPD page. If the ATA IDENTIFY * succeeded, and the SAT layer doesn't support * ZBC -> ZAC translation, continue on to get the * directory of ATA logs, and complete the rest of * the ZAC probe. If the SAT layer does support * ZBC -> ZAC translation, we want to use that, * and we'll probe the SCSI Zoned Block Device * Characteristics VPD page next. */ if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_LOG) && (softc->zone_interface == DA_ZONE_IF_ATA_PASS)) softc->state = DA_STATE_PROBE_ATA_LOGDIR; else softc->state = DA_STATE_PROBE_ZONE; continue_probe = 1; } if (continue_probe != 0) { xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } else daprobedone(periph, done_ccb); return; } case DA_CCB_PROBE_ATA_LOGDIR: { int error; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { error = 0; softc->valid_logdir_len = 0; bzero(&softc->ata_logdir, sizeof(softc->ata_logdir)); softc->valid_logdir_len = csio->dxfer_len - csio->resid; if (softc->valid_logdir_len > 0) bcopy(csio->data_ptr, &softc->ata_logdir, min(softc->valid_logdir_len, sizeof(softc->ata_logdir))); /* * Figure out whether the Identify Device log is * supported. The General Purpose log directory * has a header, and lists the number of pages * available for each GP log identified by the * offset into the list. */ if ((softc->valid_logdir_len >= ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t))) && (le16dec(softc->ata_logdir.header) == ATA_GP_LOG_DIR_VERSION) && (le16dec(&softc->ata_logdir.num_pages[ (ATA_IDENTIFY_DATA_LOG * sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)) { softc->flags |= DA_FLAG_CAN_ATA_IDLOG; } else { softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG; } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA log directory, * then ATA logs are effectively not * supported even if the bit is set in the * identify data.
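 *
 * (For reference, the index arithmetic in the success path above
 * follows the GP log directory layout: a 2-byte version header,
 * then one 16-bit page count per log address, so the count for
 * log N sits at byte offset N * 2 - 2 within num_pages[]; roughly:
 *
 *	npages = le16dec(&logdir.num_pages[(N * 2) - 2]);
 * )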
*/ softc->flags &= ~(DA_FLAG_CAN_ATA_LOG | DA_FLAG_CAN_ATA_IDLOG); if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) { softc->state = DA_STATE_PROBE_ATA_IDDIR; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } daprobedone(periph, done_ccb); return; } case DA_CCB_PROBE_ATA_IDDIR: { int error; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { off_t entries_offset, max_entries; error = 0; softc->valid_iddir_len = 0; bzero(&softc->ata_iddir, sizeof(softc->ata_iddir)); softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP | DA_FLAG_CAN_ATA_ZONE); softc->valid_iddir_len = csio->dxfer_len - csio->resid; if (softc->valid_iddir_len > 0) bcopy(csio->data_ptr, &softc->ata_iddir, min(softc->valid_iddir_len, sizeof(softc->ata_iddir))); entries_offset = __offsetof(struct ata_identify_log_pages, entries); max_entries = softc->valid_iddir_len - entries_offset; if ((softc->valid_iddir_len > (entries_offset + 1)) && (le64dec(softc->ata_iddir.header) == ATA_IDLOG_REVISION) && (softc->ata_iddir.entry_count > 0)) { int num_entries, i; num_entries = softc->ata_iddir.entry_count; num_entries = min(num_entries, softc->valid_iddir_len - entries_offset); for (i = 0; i < num_entries && i < max_entries; i++) { if (softc->ata_iddir.entries[i] == ATA_IDL_SUP_CAP) softc->flags |= DA_FLAG_CAN_ATA_SUPCAP; else if (softc->ata_iddir.entries[i] == ATA_IDL_ZDI) softc->flags |= DA_FLAG_CAN_ATA_ZONE; if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) break; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA Identify Data log * directory, then it effectively isn't * supported even if the ATA Log directory * has a non-zero number of pages present for * this log. */ softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) { softc->state = DA_STATE_PROBE_ATA_SUP; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } daprobedone(periph, done_ccb); return; } case DA_CCB_PROBE_ATA_SUP: { int error; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; size_t needed_size; struct ata_identify_log_sup_cap *sup_cap; error = 0; sup_cap = (struct ata_identify_log_sup_cap *) csio->data_ptr; valid_len = csio->dxfer_len - csio->resid; needed_size = __offsetof(struct ata_identify_log_sup_cap, sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap); if (valid_len >= needed_size) { uint64_t zoned, zac_cap; zoned = le64dec(sup_cap->zoned_cap); if (zoned & ATA_ZONED_VALID) { /* * This should have already been * set, because this is also in the * ATA identify data.
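 *
 * (The DA_CCB_PROBE_ATA case above derives the same answer from
 * the ZONED bits in the identify data, e.g.:
 *
 *	if ((ata_params->support3 & ATA_SUPPORT_ZONE_MASK) ==
 *	    ATA_SUPPORT_ZONE_HOST_AWARE)
 *		softc->zone_mode = DA_ZONE_HOST_AWARE;
 *
 * so a disagreement here would presumably point at inconsistent
 * drive firmware.)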
*/ if ((zoned & ATA_ZONED_MASK) == ATA_SUPPORT_ZONE_HOST_AWARE) softc->zone_mode = DA_ZONE_HOST_AWARE; else if ((zoned & ATA_ZONED_MASK) == ATA_SUPPORT_ZONE_DEV_MANAGED) softc->zone_mode = DA_ZONE_DRIVE_MANAGED; } zac_cap = le64dec(sup_cap->sup_zac_cap); if (zac_cap & ATA_SUP_ZAC_CAP_VALID) { if (zac_cap & ATA_REPORT_ZONES_SUP) softc->zone_flags |= DA_ZONE_FLAG_RZ_SUP; if (zac_cap & ATA_ND_OPEN_ZONE_SUP) softc->zone_flags |= DA_ZONE_FLAG_OPEN_SUP; if (zac_cap & ATA_ND_CLOSE_ZONE_SUP) softc->zone_flags |= DA_ZONE_FLAG_CLOSE_SUP; if (zac_cap & ATA_ND_FINISH_ZONE_SUP) softc->zone_flags |= DA_ZONE_FLAG_FINISH_SUP; if (zac_cap & ATA_ND_RWP_SUP) softc->zone_flags |= DA_ZONE_FLAG_RWP_SUP; } else { /* * This field was introduced in * ACS-4, r08 on April 28th, 2015. * If the drive firmware was written * to an earlier spec, it won't have * the field. So, assume all * commands are supported. */ softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA Identify Data * Supported Capabilities page, clear the * flag... */ softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP; /* * And clear zone capabilities. */ softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) { softc->state = DA_STATE_PROBE_ATA_ZONE; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } daprobedone(periph, done_ccb); return; } case DA_CCB_PROBE_ATA_ZONE: { int error; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { struct ata_zoned_info_log *zi_log; uint32_t valid_len; size_t needed_size; zi_log = (struct ata_zoned_info_log *)csio->data_ptr; valid_len = csio->dxfer_len - csio->resid; needed_size = __offsetof(struct ata_zoned_info_log, version_info) + 1 + sizeof(zi_log->version_info); if (valid_len >= needed_size) { uint64_t tmpvar; tmpvar = le64dec(zi_log->zoned_cap); if (tmpvar & ATA_ZDI_CAP_VALID) { if (tmpvar & ATA_ZDI_CAP_URSWRZ) softc->zone_flags |= DA_ZONE_FLAG_URSWRZ; else softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ; } tmpvar = le64dec(zi_log->optimal_seq_zones); if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) { softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_seq_zones = (tmpvar & ATA_ZDI_OPT_SEQ_MASK); } else { softc->zone_flags &= ~DA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_seq_zones = 0; } tmpvar = le64dec(zi_log->optimal_nonseq_zones); if (tmpvar & ATA_ZDI_OPT_NS_VALID) { softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET; softc->optimal_nonseq_zones = (tmpvar & ATA_ZDI_OPT_NS_MASK); } else { softc->zone_flags &= ~DA_ZONE_FLAG_OPT_NONSEQ_SET; softc->optimal_nonseq_zones = 0; } tmpvar = le64dec(zi_log->max_seq_req_zones); if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) { softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; softc->max_seq_zones = (tmpvar & ATA_ZDI_MAX_SEQ_MASK); } else { softc->zone_flags &= ~DA_ZONE_FLAG_MAX_SEQ_SET; softc->max_seq_zones = 0; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { softc->flags &= ~DA_FLAG_CAN_ATA_ZONE; softc->zone_flags &= ~DA_ZONE_FLAG_SET_MASK; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path,
/*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); daprobedone(periph, done_ccb); return; } case DA_CCB_PROBE_ZONE: { int error; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; size_t needed_len; struct scsi_vpd_zoned_bdc *zoned_bdc; error = 0; zoned_bdc = (struct scsi_vpd_zoned_bdc *) csio->data_ptr; valid_len = csio->dxfer_len - csio->resid; needed_len = __offsetof(struct scsi_vpd_zoned_bdc, max_seq_req_zones) + 1 + sizeof(zoned_bdc->max_seq_req_zones); if ((valid_len >= needed_len) && (scsi_2btoul(zoned_bdc->page_length) >= SVPD_ZBDC_PL)) { if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ) softc->zone_flags |= DA_ZONE_FLAG_URSWRZ; else softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ; softc->optimal_seq_zones = scsi_4btoul(zoned_bdc->optimal_seq_zones); softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_nonseq_zones = scsi_4btoul( zoned_bdc->optimal_nonseq_zones); softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET; softc->max_seq_zones = scsi_4btoul(zoned_bdc->max_seq_req_zones); softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; } /* * All of the zone commands are mandatory for SCSI * devices. * * XXX KDM this is valid as of September 2015. * Re-check this assumption once the SAT spec is * updated to support SCSI ZBC to ATA ZAC mapping. * Since ATA allows zone commands to be reported * as supported or not, this may not necessarily * be true for an ATA device behind a SAT (SCSI to * ATA Translation) layer. */ softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK; } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } daprobedone(periph, done_ccb); return; } case DA_CCB_DUMP: /* No-op. We're polling */ return; case DA_CCB_TUR: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART) return; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } xpt_release_ccb(done_ccb); cam_periph_release_locked(periph); return; } default: break; } xpt_release_ccb(done_ccb); } static void dareprobe(struct cam_periph *periph) { struct da_softc *softc; cam_status status; softc = (struct da_softc *)periph->softc; /* Probe in progress; don't interfere. */ if (softc->state != DA_STATE_NORMAL) return; status = cam_periph_acquire(periph); KASSERT(status == CAM_REQ_CMP, ("dareprobe: cam_periph_acquire failed")); if (softc->flags & DA_FLAG_CAN_RC16) softc->state = DA_STATE_PROBE_RC16; else softc->state = DA_STATE_PROBE_RC; xpt_schedule(periph, CAM_PRIORITY_DEV); } static int daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct da_softc *softc; struct cam_periph *periph; int error, error_code, sense_key, asc, ascq; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (ccb->csio.bio != NULL) biotrack(ccb->csio.bio, __func__); #endif periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct da_softc *)periph->softc; /* * Automatically detect devices that do not support * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs. 
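 *
 * (A sketch of what the upgrade in cmd6workaround() amounts to,
 * not the implementation itself: READ(6) carries a 21-bit LBA and
 * an 8-bit length, so the CDB is rebuilt in the 10 byte form,
 * which has room for a 32-bit LBA and a 16-bit length:
 *
 *	cdb10->opcode = READ_10;
 *	scsi_ulto4b(lba, cdb10->addr);
 *	scsi_ulto2b(count, cdb10->length);
 * )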
*/ error = 0; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) { error = cmd6workaround(ccb); } else if (scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (sense_key == SSD_KEY_ILLEGAL_REQUEST) error = cmd6workaround(ccb); /* * If the target replied with CAPACITY DATA HAS CHANGED UA, * query the capacity and notify upper layers. */ else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x2A && ascq == 0x09) { xpt_print(periph->path, "Capacity data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); sense_flags |= SF_NO_PRINT; } else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x28 && ascq == 0x00) { softc->flags &= ~DA_FLAG_PROBED; disk_media_changed(softc->disk, M_NOWAIT); } else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x3F && ascq == 0x03) { xpt_print(periph->path, "INQUIRY data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); sense_flags |= SF_NO_PRINT; } else if (sense_key == SSD_KEY_NOT_READY && asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { softc->flags |= DA_FLAG_PACK_INVALID; disk_media_gone(softc->disk, M_NOWAIT); } } if (error == ERESTART) return (ERESTART); #ifdef CAM_IO_STATS switch (ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_CMD_TIMEOUT: softc->timeouts++; break; case CAM_REQ_ABORTED: case CAM_REQ_CMP_ERR: case CAM_REQ_TERMIO: case CAM_UNREC_HBA_ERROR: case CAM_DATA_RUN_ERR: softc->errors++; break; default: break; } #endif /* * XXX * Until we have a better way of doing pack validation, * don't treat UAs as errors. */ sense_flags |= SF_RETRY_UA; if (softc->quirks & DA_Q_RETRY_BUSY) sense_flags |= SF_RETRY_BUSY; return(cam_periph_error(ccb, cam_flags, sense_flags, &softc->saved_ccb)); } static void damediapoll(void *arg) { struct cam_periph *periph = arg; struct da_softc *softc = periph->softc; if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) && LIST_EMPTY(&softc->pending_ccbs)) { if (cam_periph_acquire(periph) == CAM_REQ_CMP) { cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); daschedule(periph); } } /* Queue us up again */ if (da_poll_period != 0) callout_schedule(&softc->mediapoll_c, da_poll_period * hz); } static void daprevent(struct cam_periph *periph, int action) { struct da_softc *softc; union ccb *ccb; int error; softc = (struct da_softc *)periph->softc; if (((action == PR_ALLOW) && (softc->flags & DA_FLAG_PACK_LOCKED) == 0) || ((action == PR_PREVENT) && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) { return; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_prevent(&ccb->csio, /*retries*/1, /*cbcfp*/dadone, MSG_SIMPLE_Q_TAG, action, SSD_FULL_SIZE, 5000); error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat); if (error == 0) { if (action == PR_ALLOW) softc->flags &= ~DA_FLAG_PACK_LOCKED; else softc->flags |= DA_FLAG_PACK_LOCKED; } xpt_release_ccb(ccb); } static void dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len) { struct ccb_calc_geometry ccg; struct da_softc *softc; struct disk_params *dp; u_int lbppbe, lalba; int error; softc = (struct da_softc *)periph->softc; dp = &softc->params; dp->secsize = block_len; dp->sectors = maxsector + 1; if (rcaplong != NULL) { lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE; lalba = scsi_2btoul(rcaplong->lalba_lbp); lalba &= SRC16_LALBA_A; } else { lbppbe = 0; lalba = 0; } if (lbppbe > 0) { dp->stripesize = block_len << lbppbe; dp->stripeoffset = 
(dp->stripesize - block_len * lalba) % dp->stripesize; } else if (softc->quirks & DA_Q_4K) { dp->stripesize = 4096; dp->stripeoffset = 0; } else if (softc->unmap_gran != 0) { dp->stripesize = block_len * softc->unmap_gran; dp->stripeoffset = (dp->stripesize - block_len * softc->unmap_gran_align) % dp->stripesize; } else { dp->stripesize = 0; dp->stripeoffset = 0; } /* * Have the controller provide us with a geometry * for this disk. The only time the geometry * matters is when we boot and the controller * is the only one knowledgeable enough to come * up with something that will make this a bootable * device. */ xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL); ccg.ccb_h.func_code = XPT_CALC_GEOMETRY; ccg.block_size = dp->secsize; ccg.volume_size = dp->sectors; ccg.heads = 0; ccg.secs_per_track = 0; ccg.cylinders = 0; xpt_action((union ccb*)&ccg); if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { /* * We don't know what went wrong here- but just pick * a geometry so we don't have nasty things like divide * by zero. */ dp->heads = 255; dp->secs_per_track = 255; dp->cylinders = dp->sectors / (255 * 255); if (dp->cylinders == 0) { dp->cylinders = 1; } } else { dp->heads = ccg.heads; dp->secs_per_track = ccg.secs_per_track; dp->cylinders = ccg.cylinders; } /* * If the user supplied a read capacity buffer, and if it is * different than the previous buffer, update the data in the EDT. * If it's the same, we don't bother. This avoids sending an * update every time someone opens this device. */ if ((rcaplong != NULL) && (bcmp(rcaplong, &softc->rcaplong, min(sizeof(softc->rcaplong), rcap_len)) != 0)) { struct ccb_dev_advinfo cdai; xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.buftype = CDAI_TYPE_RCAPLONG; cdai.flags = CDAI_FLAG_STORE; cdai.bufsiz = rcap_len; cdai.buf = (uint8_t *)rcaplong; xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if (cdai.ccb_h.status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: failed to set read " "capacity advinfo\n", __func__); /* Use cam_error_print() to decode the status */ cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS, CAM_EPF_ALL); } else { bcopy(rcaplong, &softc->rcaplong, min(sizeof(softc->rcaplong), rcap_len)); } } softc->disk->d_sectorsize = softc->params.secsize; softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors; softc->disk->d_stripesize = softc->params.stripesize; softc->disk->d_stripeoffset = softc->params.stripeoffset; /* XXX: these are not actually "firmware" values, so they may be wrong */ softc->disk->d_fwsectors = softc->params.secs_per_track; softc->disk->d_fwheads = softc->params.heads; softc->disk->d_devstat->block_size = softc->params.secsize; softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE; error = disk_resize(softc->disk, M_NOWAIT); if (error != 0) xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error); } static void dasendorderedtag(void *arg) { struct da_softc *softc = arg; if (da_send_ordered) { if (!LIST_EMPTY(&softc->pending_ccbs)) { if ((softc->flags & DA_FLAG_WAS_OTAG) == 0) softc->flags |= DA_FLAG_NEED_OTAG; softc->flags &= ~DA_FLAG_WAS_OTAG; } } /* Queue us up again */ callout_reset(&softc->sendordered_c, (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, dasendorderedtag, softc); } /* * Step through all DA peripheral drivers, and if the device is still open, * sync the disk cache to physical media. 
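 *
 * (In SBC terms the flush issued below is a SYNCHRONIZE CACHE with
 * begin_lba == 0 and lb_count == 0; a count of 0 means "from
 * begin_lba to the end of the medium", so the whole device is
 * flushed.)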
*/ static void dashutdown(void * arg, int howto) { struct cam_periph *periph; struct da_softc *softc; union ccb *ccb; int error; CAM_PERIPH_FOREACH(periph, &dadriver) { softc = (struct da_softc *)periph->softc; if (SCHEDULER_STOPPED()) { /* If we panicked with the lock held, do not recurse. */ if (!cam_periph_owned(periph) && (softc->flags & DA_FLAG_OPEN)) { dadump(softc->disk, NULL, 0, 0, 0); } continue; } cam_periph_lock(periph); /* * We only sync the cache if the drive is still open, and * if the drive is capable of it. */ if (((softc->flags & DA_FLAG_OPEN) == 0) || (softc->quirks & DA_Q_NO_SYNC_CACHE)) { cam_periph_unlock(periph); continue; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_synchronize_cache(&ccb->csio, /*retries*/0, /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG, /*begin_lba*/0, /* whole disk */ /*lb_count*/0, SSD_FULL_SIZE, 60 * 60 * 1000); error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); xpt_release_ccb(ccb); cam_periph_unlock(periph); } } #else /* !_KERNEL */ /* * XXX These are only left out of the kernel build to silence warnings. If, * for some reason these functions are used in the kernel, the ifdefs should * be moved so they are included both in the kernel and userland. */ void scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_format_unit *scsi_cmd; scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = FORMAT_UNIT; scsi_cmd->byte2 = byte2; scsi_ulto2b(ileave, scsi_cmd->interleave); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t list_format, uint32_t addr_desc_index, uint8_t *data_ptr, uint32_t dxfer_len, int minimum_cmd_size, uint8_t sense_len, uint32_t timeout) { uint8_t cdb_len; /* * These conditions allow using the 10 byte command. Otherwise we * need to use the 12 byte command.
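 *
 * (Hypothetical example: a caller passing minimum_cmd_size <= 10,
 * an addr_desc_index of 0, and a dxfer_len that fits in 16 bits
 * gets READ DEFECT DATA(10); asking for a starting descriptor
 * index, as below, forces the 12 byte form:
 *
 *	scsi_read_defects(&ccb->csio, 5, cbfcnp, MSG_SIMPLE_Q_TAG,
 *	    list_format, 1000, buf, len, 10, SSD_FULL_SIZE, 60000);
 *
 * where 1000 is the addr_desc_index and 10 the minimum_cmd_size.)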
*/ if ((minimum_cmd_size <= 10) && (addr_desc_index == 0) && (dxfer_len <= SRDD10_MAX_LENGTH)) { struct scsi_read_defect_data_10 *cdb10; cdb10 = (struct scsi_read_defect_data_10 *) &csio->cdb_io.cdb_bytes; cdb_len = sizeof(*cdb10); bzero(cdb10, cdb_len); cdb10->opcode = READ_DEFECT_DATA_10; cdb10->format = list_format; scsi_ulto2b(dxfer_len, cdb10->alloc_length); } else { struct scsi_read_defect_data_12 *cdb12; cdb12 = (struct scsi_read_defect_data_12 *) &csio->cdb_io.cdb_bytes; cdb_len = sizeof(*cdb12); bzero(cdb12, cdb_len); cdb12->opcode = READ_DEFECT_DATA_12; cdb12->format = list_format; scsi_ulto4b(dxfer_len, cdb12->alloc_length); scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index); } cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, data_ptr, dxfer_len, sense_len, cdb_len, timeout); } void scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int16_t control, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_sanitize *scsi_cmd; scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = SANITIZE; scsi_cmd->byte2 = byte2; scsi_cmd->control = control; scsi_ulto2b(dxfer_len, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } #endif /* _KERNEL */ void scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t service_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len, uint32_t timeout) { struct scsi_zbc_out *scsi_cmd; scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = ZBC_OUT; scsi_cmd->service_action = service_action; scsi_u64to8b(zone_id, scsi_cmd->zone_id); scsi_cmd->zone_flags = zone_flags; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba, uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len, uint32_t timeout) { struct scsi_zbc_in *scsi_cmd; scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = ZBC_IN; scsi_cmd->service_action = service_action; scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba); scsi_cmd->zone_options = zone_options; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? 
CAM_DIR_IN : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } int scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int use_ncq, uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t *cdb_storage, size_t cdb_storage_len, uint8_t sense_len, uint32_t timeout) { uint8_t command_out, protocol, ata_flags; uint16_t features_out; uint32_t sectors_out, auxiliary; int retval; retval = 0; if (use_ncq == 0) { command_out = ATA_ZAC_MANAGEMENT_OUT; features_out = (zm_action & 0xf) | (zone_flags << 8); ata_flags = AP_FLAG_BYT_BLOK_BLOCKS; if (dxfer_len == 0) { protocol = AP_PROTO_NON_DATA; ata_flags |= AP_FLAG_TLEN_NO_DATA; sectors_out = 0; } else { protocol = AP_PROTO_DMA; ata_flags |= AP_FLAG_TLEN_SECT_CNT | AP_FLAG_TDIR_TO_DEV; sectors_out = ((dxfer_len >> 9) & 0xffff); } auxiliary = 0; } else { ata_flags = AP_FLAG_BYT_BLOK_BLOCKS; if (dxfer_len == 0) { command_out = ATA_NCQ_NON_DATA; features_out = ATA_NCQ_ZAC_MGMT_OUT; /* * We're assuming the SCSI to ATA translation layer * will set the NCQ tag number in the tag field. * That isn't clear from the SAT-4 spec (as of rev 05). */ sectors_out = 0; ata_flags |= AP_FLAG_TLEN_NO_DATA; } else { command_out = ATA_SEND_FPDMA_QUEUED; /* * Note that we're defaulting to normal priority, * and assuming that the SCSI to ATA translation * layer will insert the NCQ tag number in the tag * field. That isn't clear in the SAT-4 spec (as * of rev 05). */ sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8; ata_flags |= AP_FLAG_TLEN_FEAT | AP_FLAG_TDIR_TO_DEV; /* * For SEND FPDMA QUEUED, the transfer length is * encoded in the FEATURE register, and 0 means * that 65536 512 byte blocks are to be transferred. * In practice, it seems unlikely that we'll see * a transfer that large, and it may confuse the * SAT layer, because generally that means that * 0 bytes should be transferred. */ if (dxfer_len == (65536 * 512)) { features_out = 0; } else if (dxfer_len <= (65535 * 512)) { features_out = ((dxfer_len >> 9) & 0xffff); } else { /* The transfer is too big. */ retval = 1; goto bailout; } } auxiliary = (zm_action & 0xf) | (zone_flags << 8); protocol = AP_PROTO_FPDMA; } protocol |= AP_EXTEND; retval = scsi_ata_pass(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ?
CAM_DIR_OUT : CAM_DIR_NONE, tag_action, /*protocol*/ protocol, /*ata_flags*/ ata_flags, /*features*/ features_out, /*sector_count*/ sectors_out, /*lba*/ zone_id, /*command*/ command_out, /*device*/ 0, /*icc*/ 0, /*auxiliary*/ auxiliary, /*control*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*cdb_storage*/ cdb_storage, /*cdb_storage_len*/ cdb_storage_len, /*minimum_cmd_size*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout); bailout: return (retval); } int scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int use_ncq, uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t *cdb_storage, size_t cdb_storage_len, uint8_t sense_len, uint32_t timeout) { uint8_t command_out, protocol; uint16_t features_out, sectors_out; uint32_t auxiliary; int ata_flags; int retval; retval = 0; ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS; if (use_ncq == 0) { command_out = ATA_ZAC_MANAGEMENT_IN; /* XXX KDM put a macro here */ features_out = (zm_action & 0xf) | (zone_flags << 8); sectors_out = dxfer_len >> 9; /* XXX KDM macro */ protocol = AP_PROTO_DMA; ata_flags |= AP_FLAG_TLEN_SECT_CNT; auxiliary = 0; } else { ata_flags |= AP_FLAG_TLEN_FEAT; command_out = ATA_RECV_FPDMA_QUEUED; sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8; /* * For RECEIVE FPDMA QUEUED, the transfer length is * encoded in the FEATURE register, and 0 means * that 65536 512 byte blocks are to be transferred. * In practice, it seems unlikely that we'll see * a transfer that large, and it may confuse the * SAT layer, because generally that means that * 0 bytes should be transferred. */ if (dxfer_len == (65536 * 512)) { features_out = 0; } else if (dxfer_len <= (65535 * 512)) { features_out = ((dxfer_len >> 9) & 0xffff); } else { /* The transfer is too big. */ retval = 1; goto bailout; } auxiliary = (zm_action & 0xf) | (zone_flags << 8); protocol = AP_PROTO_FPDMA; } protocol |= AP_EXTEND; retval = scsi_ata_pass(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, /*protocol*/ protocol, /*ata_flags*/ ata_flags, /*features*/ features_out, /*sector_count*/ sectors_out, /*lba*/ zone_id, /*command*/ command_out, /*device*/ 0, /*icc*/ 0, /*auxiliary*/ auxiliary, /*control*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */ /*cdb_storage*/ cdb_storage, /*cdb_storage_len*/ cdb_storage_len, /*minimum_cmd_size*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout); bailout: return (retval); } Index: head/sys/cam/scsi/scsi_xpt.c =================================================================== --- head/sys/cam/scsi/scsi_xpt.c (revision 319116) +++ head/sys/cam/scsi/scsi_xpt.c (revision 319117) @@ -1,3232 +1,3240 @@ /*- * Implementation of the SCSI Transport * * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for xpt_print below */ #include "opt_cam.h" struct scsi_quirk_entry { struct scsi_inquiry_pattern inq_pat; u_int8_t quirks; #define CAM_QUIRK_NOLUNS 0x01 #define CAM_QUIRK_NOVPDS 0x02 #define CAM_QUIRK_HILUNS 0x04 #define CAM_QUIRK_NOHILUNS 0x08 #define CAM_QUIRK_NORPTLUNS 0x10 u_int mintags; u_int maxtags; }; #define SCSI_QUIRK(dev) ((struct scsi_quirk_entry *)((dev)->quirk)) static int cam_srch_hi = 0; static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS); SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT | CTLFLAG_RWTUN, 0, 0, sysctl_cam_search_luns, "I", "allow search above LUN 7 for SCSI3 and greater devices"); #define CAM_SCSI2_MAXLUN 8 #define CAM_CAN_GET_SIMPLE_LUN(x, i) \ ((((x)->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) == \ RPL_LUNDATA_ATYP_PERIPH) || \ (((x)->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) == \ RPL_LUNDATA_ATYP_FLAT)) #define CAM_GET_SIMPLE_LUN(lp, i, lval) \ if (((lp)->luns[(i)].lundata[0] & RPL_LUNDATA_ATYP_MASK) == \ RPL_LUNDATA_ATYP_PERIPH) { \ (lval) = (lp)->luns[(i)].lundata[1]; \ } else { \ (lval) = (lp)->luns[(i)].lundata[0]; \ (lval) &= RPL_LUNDATA_FLAT_LUN_MASK; \ (lval) <<= 8; \ (lval) |= (lp)->luns[(i)].lundata[1]; \ } #define CAM_GET_LUN(lp, i, lval) \ (lval) = scsi_8btou64((lp)->luns[(i)].lundata); \ (lval) = CAM_EXTLUN_BYTE_SWIZZLE(lval); /* * If we're not quirked to search <= the first 8 luns * and we are either quirked to search above lun 8, * or we're > SCSI-2 and we've enabled hilun searching, * or we're > SCSI-2 and the last lun was a success, * we can look for luns above lun 8. 
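 *
 * (In pseudocode, the sparse variant below reads:
 *
 *	!quirk(NOHILUNS) &&
 *	    (quirk(HILUNS) || (ANSI rev > SCSI-2 && cam_srch_hi))
 * )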
*/ #define CAN_SRCH_HI_SPARSE(dv) \ (((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_NOHILUNS) == 0) \ && ((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_HILUNS) \ || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi))) #define CAN_SRCH_HI_DENSE(dv) \ (((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_NOHILUNS) == 0) \ && ((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_HILUNS) \ || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2))) static periph_init_t probe_periph_init; static struct periph_driver probe_driver = { probe_periph_init, "probe", TAILQ_HEAD_INITIALIZER(probe_driver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(probe, probe_driver); typedef enum { PROBE_TUR, PROBE_INQUIRY, /* this counts as DV0 for Basic Domain Validation */ PROBE_FULL_INQUIRY, PROBE_REPORT_LUNS, PROBE_MODE_SENSE, PROBE_SUPPORTED_VPD_LIST, PROBE_DEVICE_ID, PROBE_EXTENDED_INQUIRY, PROBE_SERIAL_NUM, PROBE_TUR_FOR_NEGOTIATION, PROBE_INQUIRY_BASIC_DV1, PROBE_INQUIRY_BASIC_DV2, PROBE_DV_EXIT, PROBE_DONE, PROBE_INVALID } probe_action; static char *probe_action_text[] = { "PROBE_TUR", "PROBE_INQUIRY", "PROBE_FULL_INQUIRY", "PROBE_REPORT_LUNS", "PROBE_MODE_SENSE", "PROBE_SUPPORTED_VPD_LIST", "PROBE_DEVICE_ID", "PROBE_EXTENDED_INQUIRY", "PROBE_SERIAL_NUM", "PROBE_TUR_FOR_NEGOTIATION", "PROBE_INQUIRY_BASIC_DV1", "PROBE_INQUIRY_BASIC_DV2", "PROBE_DV_EXIT", "PROBE_DONE", "PROBE_INVALID" }; #define PROBE_SET_ACTION(softc, newaction) \ do { \ char **text; \ text = probe_action_text; \ CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE, \ ("Probe %s to %s\n", text[(softc)->action], \ text[(newaction)])); \ (softc)->action = (newaction); \ } while(0) typedef enum { PROBE_INQUIRY_CKSUM = 0x01, PROBE_SERIAL_CKSUM = 0x02, PROBE_NO_ANNOUNCE = 0x04, PROBE_EXTLUN = 0x08 } probe_flags; typedef struct { TAILQ_HEAD(, ccb_hdr) request_ccbs; probe_action action; union ccb saved_ccb; probe_flags flags; MD5_CTX context; u_int8_t digest[16]; struct cam_periph *periph; } probe_softc; static const char quantum[] = "QUANTUM"; static const char sony[] = "SONY"; static const char west_digital[] = "WDIGTL"; static const char samsung[] = "SAMSUNG"; static const char seagate[] = "SEAGATE"; static const char microp[] = "MICROP"; static struct scsi_quirk_entry scsi_quirk_table[] = { { /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* * Unfortunately, the Quantum Atlas III has the same * problem as the Atlas II drives above. * Reported by: "Johan Granlund" * * For future reference, the drive with the problem was: * QUANTUM QM39100TD-SW N1B0 * * It's possible that Quantum will fix the problem in later * firmware revisions. If that happens, the quirk entry * will need to be made specific to the firmware revisions * with the problem. 
* */ /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* * 18 Gig Atlas III, same problem as the 9G version. * Reported by: Andre Albsmeier * * * For future reference, the drive with the problem was: * QUANTUM QM318000TD-S N491 */ /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* * Broken tagged queuing drive * Reported by: Bret Ford * and: Martin Renters */ { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, /* * The Seagate Medalist Pro drives have very poor write * performance with anything more than 2 tags. * * Reported by: Paul van der Zwan * Drive: * * Reported by: Jeremy Lea * Drive: * * No one has actually reported that the 9G version * (ST39140*) of the Medalist Pro has the same problem, but * we're assuming that it does because the 4G and 6.5G * versions of the drive are broken. */ { { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"}, /*quirks*/0, /*mintags*/2, /*maxtags*/2 }, { { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"}, /*quirks*/0, /*mintags*/2, /*maxtags*/2 }, { { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"}, /*quirks*/0, /*mintags*/2, /*maxtags*/2 }, { /* * Experiences command timeouts under load with a * tag count higher than 55. */ { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST3146855LW", "*"}, /*quirks*/0, /*mintags*/2, /*maxtags*/55 }, { /* * Slow when tagged queueing is enabled. Write performance * steadily drops off with more and more concurrent * transactions. Best sequential write performance with * tagged queueing turned off and write caching turned on. * * PR: kern/10398 * Submitted by: Hideaki Okada * Drive: DCAS-34330 w/ "S65A" firmware. * * The drive with the problem had the "S65A" firmware * revision, and has also been reported (by Stephen J. * Roznowski ) for a drive with the "S61A" * firmware revision. * * Although no one has reported problems with the 2 gig * version of the DCAS drive, the assumption is that it * has the same problems as the 4 gig version. Therefore * this quirk entry disables tagged queueing for all * DCAS drives. */ { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* This does not support other than LUN 0 */ { T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255 }, { /* * Broken tagged queuing drive. * Submitted by: * NAKAJI Hiroyuki * in PR kern/9535 */ { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* * Slow when tagged queueing is enabled. (1.5MB/sec versus * 8MB/sec.) * Submitted by: Andrew Gallatin * Best performance with these drives is achieved with * tagged queueing turned off, and write caching turned on. */ { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* * Slow when tagged queueing is enabled. (1.5MB/sec versus * 8MB/sec.)
* Submitted by: Andrew Gallatin * Best performance with these drives is achieved with * tagged queueing turned off, and write caching turned on. */ { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* * Doesn't handle queue full condition correctly, * so we need to limit maxtags to what the device * can handle instead of determining this automatically. */ { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" }, /*quirks*/0, /*mintags*/2, /*maxtags*/32 }, { /* Really only one LUN */ { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* I can't believe we need a quirk for DPT volumes. */ { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/255 }, { /* * Many Sony CDROM drives don't like multi-LUN probing. */ { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * This drive doesn't like multiple LUN probing. * Submitted by: Parag Patel */ { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * The 8200 doesn't like multi-lun probing, and probably * doesn't like serial number requests either. */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE", "EXB-8200*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * Let's try the same as above, but for a drive that says * it's an IPL-6860 but is actually an EXB 8200. */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE", "IPL-6860*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * These Hitachi drives don't like multi-lun probing. * The PR submitter has a DK319H, but says that the Linux * kernel has a similar work-around for the DK312 and DK314, * so all DK31* drives are quirked here. * PR: misc/18793 * Submitted by: Paul Haddad */ { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255 }, { /* * The Hitachi CJ series with J8A8 firmware apparently has * problems with tagged commands. * PR: 23536 * Reported by: amagai@nue.org */ { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * These are the large storage arrays. * Submitted by: William Carrel */ { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" }, CAM_QUIRK_HILUNS, 2, 1024 }, { /* * This old revision of the TDC3600 is also SCSI-1, and * hangs upon serial number probing. */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 3600", "U07:" }, CAM_QUIRK_NOVPDS, /*mintags*/0, /*maxtags*/0 }, { /* * Would respond to all LUNs if asked for. */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER", "CP150", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * Would respond to all LUNs if asked for.
*/ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY", "96X2*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* Submitted by: Matthew Dodd */ { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* Submitted by: Matthew Dodd */ { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* TeraSolutions special settings for TRC-22 RAID */ { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" }, /*quirks*/0, /*mintags*/55, /*maxtags*/255 }, { /* Veritas Storage Appliance */ { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" }, CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024 }, { /* * Would respond to all LUNs. Device type and removable * flag are jumper-selectable. */ { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix", "Tahiti 1", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* EasyRAID E5A aka. areca ARC-6010 */ { T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" }, CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255 }, { { T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { { T_DIRECT, SIP_MEDIA_REMOVABLE, "Garmin", "*", "*" }, CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255 }, { + { T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic", "STORAGE DEVICE*", "120?" }, + CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255 + }, + { + { T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic", "MassStorageClass", "1533" }, + CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255 + }, + { /* Default tagged queuing parameters for all devices */ { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, /*vendor*/"*", /*product*/"*", /*revision*/"*" }, /*quirks*/0, /*mintags*/2, /*maxtags*/255 }, }; static cam_status proberegister(struct cam_periph *periph, void *arg); static void probeschedule(struct cam_periph *probe_periph); static void probestart(struct cam_periph *periph, union ccb *start_ccb); static void proberequestdefaultnegotiation(struct cam_periph *periph); static int proberequestbackoff(struct cam_periph *periph, struct cam_ed *device); static void probedone(struct cam_periph *periph, union ccb *done_ccb); static void probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new, probe_flags flags); static void probecleanup(struct cam_periph *periph); static void scsi_find_quirk(struct cam_ed *device); static void scsi_scan_bus(struct cam_periph *periph, union ccb *ccb); static void scsi_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *ccb); static void xptscandone(struct cam_periph *periph, union ccb *done_ccb); static struct cam_ed * scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); static void scsi_devise_transport(struct cam_path *path); static void scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path, int async_update); static void scsi_toggle_tags(struct cam_path *path); static void scsi_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg); static void scsi_action(union ccb *start_ccb); static void scsi_announce_periph(struct cam_periph *periph); static void scsi_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb); static void scsi_proto_announce(struct cam_ed *device); static void scsi_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb); static void scsi_proto_denounce(struct cam_ed *device); static void scsi_proto_denounce_sbuf(struct cam_ed *device, struct 
sbuf *sb); static void scsi_proto_debug_out(union ccb *ccb); static void _scsi_announce_periph(struct cam_periph *, u_int *, u_int *, struct ccb_trans_settings *); static struct xpt_xport_ops scsi_xport_ops = { .alloc_device = scsi_alloc_device, .action = scsi_action, .async = scsi_dev_async, .announce = scsi_announce_periph, .announce_sbuf = scsi_announce_periph_sbuf, }; #define SCSI_XPT_XPORT(x, X) \ static struct xpt_xport scsi_xport_ ## x = { \ .xport = XPORT_ ## X, \ .name = #x, \ .ops = &scsi_xport_ops, \ }; \ CAM_XPT_XPORT(scsi_xport_ ## x); SCSI_XPT_XPORT(spi, SPI); SCSI_XPT_XPORT(sas, SAS); SCSI_XPT_XPORT(fc, FC); SCSI_XPT_XPORT(usb, USB); SCSI_XPT_XPORT(iscsi, ISCSI); SCSI_XPT_XPORT(srp, SRP); SCSI_XPT_XPORT(ppb, PPB); #undef SCSI_XPT_XPORT static struct xpt_proto_ops scsi_proto_ops = { .announce = scsi_proto_announce, .announce_sbuf = scsi_proto_announce_sbuf, .denounce = scsi_proto_denounce, .denounce_sbuf = scsi_proto_denounce_sbuf, .debug_out = scsi_proto_debug_out, }; static struct xpt_proto scsi_proto = { .proto = PROTO_SCSI, .name = "scsi", .ops = &scsi_proto_ops, }; CAM_XPT_PROTO(scsi_proto); static void probe_periph_init() { } static cam_status proberegister(struct cam_periph *periph, void *arg) { union ccb *request_ccb; /* CCB representing the probe request */ cam_status status; probe_softc *softc; request_ccb = (union ccb *)arg; if (request_ccb == NULL) { printf("proberegister: no probe CCB, " "can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_NOWAIT); if (softc == NULL) { printf("proberegister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } TAILQ_INIT(&softc->request_ccbs); TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); softc->flags = 0; periph->softc = softc; softc->periph = periph; softc->action = PROBE_INVALID; status = cam_periph_acquire(periph); if (status != CAM_REQ_CMP) { return (status); } CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n")); scsi_devise_transport(periph->path); /* * Ensure we've waited at least a bus settle * delay before attempting to probe the device. * For HBAs that don't do bus resets, this won't make a difference. */ cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset, scsi_delay); probeschedule(periph); return(CAM_REQ_CMP); } static void probeschedule(struct cam_periph *periph) { struct ccb_pathinq cpi; union ccb *ccb; probe_softc *softc; softc = (probe_softc *)periph->softc; ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* * If a device has gone away and another device, or the same one, * is back in the same place, it should have a unit attention * condition pending. It will not report the unit attention in * response to an inquiry, which may leave invalid transfer * negotiations in effect. The TUR will reveal the unit attention * condition. Only send the TUR for lun 0, since some devices * will get confused by commands other than inquiry to non-existent * luns. If you think a device has gone away, start your scan from * lun 0. This will ensure that any bogus transfer settings are * invalidated. * * If we haven't seen the device before and the controller supports * some kind of transfer negotiation, negotiate with the first * sent command if no bus reset was performed at startup.
This * ensures that the device is not confused by transfer negotiation * settings left over by loader or BIOS action. */ if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0) && (ccb->ccb_h.target_lun == 0)) { PROBE_SET_ACTION(softc, PROBE_TUR); } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0 && (cpi.hba_misc & PIM_NOBUSRESET) != 0) { proberequestdefaultnegotiation(periph); PROBE_SET_ACTION(softc, PROBE_INQUIRY); } else { PROBE_SET_ACTION(softc, PROBE_INQUIRY); } if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE) softc->flags |= PROBE_NO_ANNOUNCE; else softc->flags &= ~PROBE_NO_ANNOUNCE; if (cpi.hba_misc & PIM_EXTLUNS) softc->flags |= PROBE_EXTLUN; else softc->flags &= ~PROBE_EXTLUN; xpt_schedule(periph, CAM_PRIORITY_XPT); } static void probestart(struct cam_periph *periph, union ccb *start_ccb) { /* Probe the device that our peripheral driver points to */ struct ccb_scsiio *csio; probe_softc *softc; CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n")); softc = (probe_softc *)periph->softc; csio = &start_ccb->csio; again: switch (softc->action) { case PROBE_TUR: case PROBE_TUR_FOR_NEGOTIATION: case PROBE_DV_EXIT: { scsi_test_unit_ready(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, /*timeout*/60000); break; } case PROBE_INQUIRY: case PROBE_FULL_INQUIRY: case PROBE_INQUIRY_BASIC_DV1: case PROBE_INQUIRY_BASIC_DV2: { u_int inquiry_len; struct scsi_inquiry_data *inq_buf; inq_buf = &periph->path->device->inq_data; /* * If the device is currently configured, we calculate an * MD5 checksum of the inquiry data, and if the serial number * length is greater than 0, add the serial number data * into the checksum as well. Once the inquiry and the * serial number check finish, we attempt to figure out * whether we still have the same device. */ if (((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) && ((softc->flags & PROBE_INQUIRY_CKSUM) == 0)) { MD5Init(&softc->context); MD5Update(&softc->context, (unsigned char *)inq_buf, sizeof(struct scsi_inquiry_data)); softc->flags |= PROBE_INQUIRY_CKSUM; if (periph->path->device->serial_num_len > 0) { MD5Update(&softc->context, periph->path->device->serial_num, periph->path->device->serial_num_len); softc->flags |= PROBE_SERIAL_CKSUM; } MD5Final(softc->digest, &softc->context); } if (softc->action == PROBE_INQUIRY) inquiry_len = SHORT_INQUIRY_LENGTH; else inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf); /* * Some parallel SCSI devices fail to send an * ignore wide residue message when dealing with * odd length inquiry requests. Round up to be * safe. 
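 *
 * (e.g. roundup2(37, 2) == 38, so a 37 byte inquiry request goes
 * out as a 38 byte one.)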
*/ inquiry_len = roundup2(inquiry_len, 2); if (softc->action == PROBE_INQUIRY_BASIC_DV1 || softc->action == PROBE_INQUIRY_BASIC_DV2) { inq_buf = malloc(inquiry_len, M_CAMXPT, M_NOWAIT); } if (inq_buf == NULL) { xpt_print(periph->path, "malloc failure - skipping Basic " "Domain Validation\n"); PROBE_SET_ACTION(softc, PROBE_DV_EXIT); scsi_test_unit_ready(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, /*timeout*/60000); break; } scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (u_int8_t *)inq_buf, inquiry_len, /*evpd*/FALSE, /*page_code*/0, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } case PROBE_REPORT_LUNS: { void *rp; rp = malloc(periph->path->target->rpl_size, M_CAMXPT, M_NOWAIT | M_ZERO); if (rp == NULL) { struct scsi_inquiry_data *inq_buf; inq_buf = &periph->path->device->inq_data; xpt_print(periph->path, "Unable to alloc report luns storage\n"); if (INQ_DATA_TQ_ENABLED(inq_buf)) PROBE_SET_ACTION(softc, PROBE_MODE_SENSE); else PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); goto again; } scsi_report_luns(csio, 5, probedone, MSG_SIMPLE_Q_TAG, RPL_REPORT_DEFAULT, rp, periph->path->target->rpl_size, SSD_FULL_SIZE, 60000); break; } case PROBE_MODE_SENSE: { void *mode_buf; int mode_buf_len; mode_buf_len = sizeof(struct scsi_mode_header_6) + sizeof(struct scsi_mode_blk_desc) + sizeof(struct scsi_control_page); mode_buf = malloc(mode_buf_len, M_CAMXPT, M_NOWAIT); if (mode_buf != NULL) { scsi_mode_sense(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, /*dbd*/FALSE, SMS_PAGE_CTRL_CURRENT, SMS_CONTROL_MODE_PAGE, mode_buf, mode_buf_len, SSD_FULL_SIZE, /*timeout*/60000); break; } xpt_print(periph->path, "Unable to mode sense control page - " "malloc failure\n"); PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); } /* FALLTHROUGH */ case PROBE_SUPPORTED_VPD_LIST: { struct scsi_vpd_supported_page_list *vpd_list; struct cam_ed *device; vpd_list = NULL; device = periph->path->device; if ((SCSI_QUIRK(device)->quirks & CAM_QUIRK_NOVPDS) == 0) vpd_list = malloc(sizeof(*vpd_list), M_CAMXPT, M_NOWAIT | M_ZERO); if (vpd_list != NULL) { scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (u_int8_t *)vpd_list, sizeof(*vpd_list), /*evpd*/TRUE, SVPD_SUPPORTED_PAGE_LIST, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } done: /* * We'll have to do without, let our probedone * routine finish up for us. */ start_ccb->csio.data_ptr = NULL; cam_freeze_devq(periph->path); cam_periph_doacquire(periph); probedone(periph, start_ccb); return; } case PROBE_DEVICE_ID: { struct scsi_vpd_device_id *devid; devid = NULL; if (scsi_vpd_supported_page(periph, SVPD_DEVICE_ID)) devid = malloc(SVPD_DEVICE_ID_MAX_SIZE, M_CAMXPT, M_NOWAIT | M_ZERO); if (devid != NULL) { scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (uint8_t *)devid, SVPD_DEVICE_ID_MAX_SIZE, /*evpd*/TRUE, SVPD_DEVICE_ID, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } goto done; } case PROBE_EXTENDED_INQUIRY: { struct scsi_vpd_extended_inquiry_data *ext_inq; ext_inq = NULL; if (scsi_vpd_supported_page(periph, SVPD_EXTENDED_INQUIRY_DATA)) ext_inq = malloc(sizeof(*ext_inq), M_CAMXPT, M_NOWAIT | M_ZERO); if (ext_inq != NULL) { scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (uint8_t *)ext_inq, sizeof(*ext_inq), /*evpd*/TRUE, SVPD_EXTENDED_INQUIRY_DATA, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } /* * We'll have to do without, let our probedone * routine finish up for us.
*/ goto done; } case PROBE_SERIAL_NUM: { struct scsi_vpd_unit_serial_number *serial_buf; struct cam_ed* device; serial_buf = NULL; device = periph->path->device; if (device->serial_num != NULL) { free(device->serial_num, M_CAMXPT); device->serial_num = NULL; device->serial_num_len = 0; } if (scsi_vpd_supported_page(periph, SVPD_UNIT_SERIAL_NUMBER)) serial_buf = (struct scsi_vpd_unit_serial_number *) malloc(sizeof(*serial_buf), M_CAMXPT, M_NOWAIT|M_ZERO); if (serial_buf != NULL) { scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (u_int8_t *)serial_buf, sizeof(*serial_buf), /*evpd*/TRUE, SVPD_UNIT_SERIAL_NUMBER, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } goto done; } default: panic("probestart: invalid action state 0x%x\n", softc->action); } start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; cam_periph_doacquire(periph); xpt_action(start_ccb); } static void proberequestdefaultnegotiation(struct cam_periph *periph) { struct ccb_trans_settings cts; xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) { return; } cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); } /* * Backoff Negotiation Code- only pertinent for SPI devices. */ static int proberequestbackoff(struct cam_periph *periph, struct cam_ed *device) { struct ccb_trans_settings cts; struct ccb_trans_settings_spi *spi; memset(&cts, 0, sizeof (cts)); xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) { if (bootverbose) { xpt_print(periph->path, "failed to get current device settings\n"); } return (0); } if (cts.transport != XPORT_SPI) { if (bootverbose) { xpt_print(periph->path, "not SPI transport\n"); } return (0); } spi = &cts.xport_specific.spi; /* * We cannot renegotiate sync rate if we don't have one. */ if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) { if (bootverbose) { xpt_print(periph->path, "no sync rate known\n"); } return (0); } /* * We'll assert that we don't have to touch PPR options- the * SIM will see what we do with period and offset and adjust * the PPR options as appropriate. */ /* * A sync rate with unknown or zero offset is nonsensical. * A sync period of zero means Async. */ if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0 || spi->sync_offset == 0 || spi->sync_period == 0) { if (bootverbose) { xpt_print(periph->path, "no sync rate available\n"); } return (0); } if (device->flags & CAM_DEV_DV_HIT_BOTTOM) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("hit async: giving up on DV\n")); return (0); } /* * Jump sync_period up by one, but stop at 5MHz and fall back to Async. * We don't try to remember 'last' settings to see if the SIM actually * gets into the speed we want to set. We check on the SIM telling * us that a requested speed is bad, but otherwise don't try and * check the speed due to the asynchronous and handshake nature * of speed setting. */ spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; for (;;) { spi->sync_period++; if (spi->sync_period >= 0xf) { spi->sync_period = 0; spi->sync_offset = 0; CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("setting to async for DV\n")); /* * Once we hit async, we don't want to try * any more settings. 
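 * (This is the 5MHz cutoff mentioned above: once the period factor
 * reaches 0xf, stop stepping and drop to async.)
 */
#if 0	/* Illustrative sketch only, not compiled: stepping the period
	 * factor toward slower rates.  scsi_calc_syncsrate() reports
	 * the rate in kHz, as _scsi_announce_periph() below assumes. */
	u_int factor;

	for (factor = spi->sync_period; factor < 0xf; factor++)
		printf("period factor 0x%x -> %u kHz\n",
		    factor, scsi_calc_syncsrate(factor));
#endif
/*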
*/ device->flags |= CAM_DEV_DV_HIT_BOTTOM; } else if (bootverbose) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("DV: period 0x%x\n", spi->sync_period)); printf("setting period to 0x%x\n", spi->sync_period); } cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (cam_ccb_status((union ccb *)&cts) == CAM_REQ_CMP) { break; } CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("DV: failed to set period 0x%x\n", spi->sync_period)); if (spi->sync_period == 0) { return (0); } } return (1); } #define CCB_COMPLETED_OK(ccb) (((ccb).status & CAM_STATUS_MASK) == CAM_REQ_CMP) static void probedone(struct cam_periph *periph, union ccb *done_ccb) { probe_softc *softc; struct cam_path *path; struct scsi_inquiry_data *inq_buf; u_int32_t priority; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n")); softc = (probe_softc *)periph->softc; path = done_ccb->ccb_h.path; priority = done_ccb->ccb_h.pinfo.priority; switch (softc->action) { case PROBE_TUR: { if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) { if (cam_periph_error(done_ccb, 0, SF_NO_PRINT, NULL) == ERESTART) { outr: /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); return; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } PROBE_SET_ACTION(softc, PROBE_INQUIRY); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); out: /* Drop freeze taken due to CAM_DEV_QFREEZE and release. */ cam_release_devq(path, 0, 0, 0, FALSE); cam_periph_release_locked(periph); return; } case PROBE_INQUIRY: case PROBE_FULL_INQUIRY: { if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) { u_int8_t periph_qual; path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID; scsi_find_quirk(path->device); inq_buf = &path->device->inq_data; periph_qual = SID_QUAL(inq_buf); if (periph_qual == SID_QUAL_LU_CONNECTED || periph_qual == SID_QUAL_LU_OFFLINE) { u_int8_t len; /* * We conservatively request only * SHORT_INQUIRY_LENGTH bytes of inquiry * information during our first try * at sending an INQUIRY. If the device * has more information to give, * perform a second request specifying * the amount of information the device * is willing to give. */ len = inq_buf->additional_length + offsetof(struct scsi_inquiry_data, additional_length) + 1; if (softc->action == PROBE_INQUIRY && len > SHORT_INQUIRY_LENGTH) { PROBE_SET_ACTION(softc, PROBE_FULL_INQUIRY); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } scsi_devise_transport(path); if (path->device->lun_id == 0 && SID_ANSI_REV(inq_buf) > SCSI_REV_SPC2 && (SCSI_QUIRK(path->device)->quirks & CAM_QUIRK_NORPTLUNS) == 0) { PROBE_SET_ACTION(softc, PROBE_REPORT_LUNS); /* * Start with room for *one* lun.
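 * (A REPORT LUNS response is an 8-byte header followed by one 8-byte
 * entry per lun, hence the 16 below; probedone() regrows the buffer
 * once the real count is known.)
 */
#if 0	/* Worked example, not compiled: sizing for n luns, matching
	 * the (nlun << 3) + 8 computation in probedone(). */
	u_int nlun = 1;
	u_int rpl_bytes = (nlun << 3) + 8;	/* 8 + 1 * 8 == 16 */
#endif
/*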
*/ periph->path->target->rpl_size = 16; } else if (INQ_DATA_TQ_ENABLED(inq_buf)) PROBE_SET_ACTION(softc, PROBE_MODE_SENSE); else PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); if (path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); } xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } else if (path->device->lun_id == 0 && SID_ANSI_REV(inq_buf) >= SCSI_REV_SPC2 && (SCSI_QUIRK(path->device)->quirks & CAM_QUIRK_NORPTLUNS) == 0) { PROBE_SET_ACTION(softc, PROBE_REPORT_LUNS); periph->path->target->rpl_size = 16; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } } else if (cam_periph_error(done_ccb, 0, done_ccb->ccb_h.target_lun > 0 ? SF_RETRY_UA|SF_QUIET_IR : SF_RETRY_UA, &softc->saved_ccb) == ERESTART) { goto outr; } else { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } path->device->flags &= ~CAM_DEV_INQUIRY_DATA_VALID; } /* * If we get to this point, we got an error status back * from the inquiry and the error status doesn't require * automatically retrying the command. Therefore, the * inquiry failed. If we had inquiry information before * for this device, but this latest inquiry command failed, * the device has probably gone away. If this device isn't * already marked unconfigured, notify the peripheral * drivers that this device is no more. */ if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) /* Send the async notification. */ xpt_async(AC_LOST_DEVICE, path, NULL); PROBE_SET_ACTION(softc, PROBE_INVALID); xpt_release_ccb(done_ccb); break; } case PROBE_REPORT_LUNS: { struct ccb_scsiio *csio; struct scsi_report_luns_data *lp; u_int nlun, maxlun; csio = &done_ccb->csio; lp = (struct scsi_report_luns_data *)csio->data_ptr; nlun = scsi_4btoul(lp->length) / 8; maxlun = (csio->dxfer_len / 8) - 1; if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) { if (cam_periph_error(done_ccb, 0, done_ccb->ccb_h.target_lun > 0 ? SF_RETRY_UA|SF_QUIET_IR : SF_RETRY_UA, &softc->saved_ccb) == ERESTART) { goto outr; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { xpt_release_devq(done_ccb->ccb_h.path, 1, TRUE); } free(lp, M_CAMXPT); lp = NULL; } else if (nlun > maxlun) { /* * Reallocate and retry to cover all luns */ CAM_DEBUG(path, CAM_DEBUG_PROBE, ("Probe: reallocating REPORT_LUNS for %u luns\n", nlun)); free(lp, M_CAMXPT); path->target->rpl_size = (nlun << 3) + 8; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } else if (nlun == 0) { /* * If there don't appear to be any luns, bail. */ free(lp, M_CAMXPT); lp = NULL; } else { lun_id_t lun; int idx; CAM_DEBUG(path, CAM_DEBUG_PROBE, ("Probe: %u lun(s) reported\n", nlun)); CAM_GET_LUN(lp, 0, lun); /* * If the first lun is not lun 0, then either there * is no lun 0 in the list, or the list is unsorted. */ if (lun != 0) { for (idx = 0; idx < nlun; idx++) { CAM_GET_LUN(lp, idx, lun); if (lun == 0) { break; } } if (idx != nlun) { uint8_t tlun[8]; memcpy(tlun, lp->luns[0].lundata, 8); memcpy(lp->luns[0].lundata, lp->luns[idx].lundata, 8); memcpy(lp->luns[idx].lundata, tlun, 8); CAM_DEBUG(path, CAM_DEBUG_PROBE, ("lun 0 in position %u\n", idx)); } } /* * If we have an old lun list, We can either * retest luns that appear to have been dropped, * or just nuke them. We'll opt for the latter. * This function will also install the new list * in the target structure. 
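 * (probe_purge_old(), defined further down, swaps the list pointer
 * under luns_mtx and then emits AC_LOST_DEVICE for each stale lun.)
 */
#if 0	/* Sketch of the hand-off inside probe_purge_old(); see the
	 * real function below.  Not compiled here. */
	struct scsi_report_luns_data *old;

	mtx_lock(&path->target->luns_mtx);
	old = path->target->luns;	/* previous list, may be NULL */
	path->target->luns = lp;	/* install the validated list */
	mtx_unlock(&path->target->luns_mtx);
	/* ...then AC_LOST_DEVICE is sent for each stale lun in old. */
#endif
/*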
*/ probe_purge_old(path, lp, softc->flags); lp = NULL; } inq_buf = &path->device->inq_data; if (path->device->flags & CAM_DEV_INQUIRY_DATA_VALID && (SID_QUAL(inq_buf) == SID_QUAL_LU_CONNECTED || SID_QUAL(inq_buf) == SID_QUAL_LU_OFFLINE)) { if (INQ_DATA_TQ_ENABLED(inq_buf)) PROBE_SET_ACTION(softc, PROBE_MODE_SENSE); else PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } if (lp) { free(lp, M_CAMXPT); } PROBE_SET_ACTION(softc, PROBE_INVALID); xpt_release_ccb(done_ccb); break; } case PROBE_MODE_SENSE: { struct ccb_scsiio *csio; struct scsi_mode_header_6 *mode_hdr; csio = &done_ccb->csio; mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr; if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) { struct scsi_control_page *page; u_int8_t *offset; offset = ((u_int8_t *)&mode_hdr[1]) + mode_hdr->blk_desc_len; page = (struct scsi_control_page *)offset; path->device->queue_flags = page->queue_flags; } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT, &softc->saved_ccb) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } xpt_release_ccb(done_ccb); free(mode_hdr, M_CAMXPT); PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); xpt_schedule(periph, priority); goto out; } case PROBE_SUPPORTED_VPD_LIST: { struct ccb_scsiio *csio; struct scsi_vpd_supported_page_list *page_list; csio = &done_ccb->csio; page_list = (struct scsi_vpd_supported_page_list *)csio->data_ptr; if (path->device->supported_vpds != NULL) { free(path->device->supported_vpds, M_CAMXPT); path->device->supported_vpds = NULL; path->device->supported_vpds_len = 0; } if (page_list == NULL) { /* * Don't process the command as it was never sent */ } else if (CCB_COMPLETED_OK(csio->ccb_h)) { /* Got vpd list */ path->device->supported_vpds_len = page_list->length + SVPD_SUPPORTED_PAGES_HDR_LEN; path->device->supported_vpds = (uint8_t *)page_list; xpt_release_ccb(done_ccb); PROBE_SET_ACTION(softc, PROBE_DEVICE_ID); xpt_schedule(periph, priority); goto out; } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT, &softc->saved_ccb) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } if (page_list) free(page_list, M_CAMXPT); /* No VPDs available, skip to device check. */ csio->data_ptr = NULL; goto probe_device_check; } case PROBE_DEVICE_ID: { struct scsi_vpd_device_id *devid; struct ccb_scsiio *csio; uint32_t length = 0; csio = &done_ccb->csio; devid = (struct scsi_vpd_device_id *)csio->data_ptr; /* Clean up from previous instance of this device */ if (path->device->device_id != NULL) { path->device->device_id_len = 0; free(path->device->device_id, M_CAMXPT); path->device->device_id = NULL; } if (devid == NULL) { /* Don't process the command as it was never sent */ } else if (CCB_COMPLETED_OK(csio->ccb_h)) { length = scsi_2btoul(devid->length); if (length != 0) { /* * NB: device_id_len is actual response * size, not buffer size. 
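 * (The buffer allocated in probestart() is always
 * SVPD_DEVICE_ID_MAX_SIZE bytes; only the payload the device
 * actually returned is counted.)
 */
#if 0	/* Worked example, not compiled; the descriptor length is
	 * hypothetical. */
	uint32_t desc_len = 24;		/* e.g. scsi_2btoul(devid->length) */
	uint32_t id_len = desc_len + SVPD_DEVICE_ID_HDR_LEN;
#endif
/*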
*/ path->device->device_id_len = length + SVPD_DEVICE_ID_HDR_LEN; path->device->device_id = (uint8_t *)devid; } } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA, &softc->saved_ccb) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } /* Free the device id space if we don't use it */ if (devid && length == 0) free(devid, M_CAMXPT); xpt_release_ccb(done_ccb); PROBE_SET_ACTION(softc, PROBE_EXTENDED_INQUIRY); xpt_schedule(periph, priority); goto out; } case PROBE_EXTENDED_INQUIRY: { struct scsi_vpd_extended_inquiry_data *ext_inq; struct ccb_scsiio *csio; int32_t length = 0; csio = &done_ccb->csio; ext_inq = (struct scsi_vpd_extended_inquiry_data *) csio->data_ptr; if (path->device->ext_inq != NULL) { path->device->ext_inq_len = 0; free(path->device->ext_inq, M_CAMXPT); path->device->ext_inq = NULL; } if (ext_inq == NULL) { /* Don't process the command as it was never sent */ } else if (CCB_COMPLETED_OK(csio->ccb_h)) { length = scsi_2btoul(ext_inq->page_length) + __offsetof(struct scsi_vpd_extended_inquiry_data, flags1); length = min(length, sizeof(*ext_inq)); length -= csio->resid; if (length > 0) { path->device->ext_inq_len = length; path->device->ext_inq = (uint8_t *)ext_inq; } } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA, &softc->saved_ccb) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } /* Free the device id space if we don't use it */ if (ext_inq && length <= 0) free(ext_inq, M_CAMXPT); xpt_release_ccb(done_ccb); PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM); xpt_schedule(periph, priority); goto out; } probe_device_check: case PROBE_SERIAL_NUM: { struct ccb_scsiio *csio; struct scsi_vpd_unit_serial_number *serial_buf; u_int32_t priority; int changed; int have_serialnum; changed = 1; have_serialnum = 0; csio = &done_ccb->csio; priority = done_ccb->ccb_h.pinfo.priority; serial_buf = (struct scsi_vpd_unit_serial_number *)csio->data_ptr; if (serial_buf == NULL) { /* * Don't process the command as it was never sent */ } else if (cam_ccb_status(done_ccb) == CAM_REQ_CMP && (serial_buf->length > 0)) { have_serialnum = 1; path->device->serial_num = (u_int8_t *)malloc((serial_buf->length + 1), M_CAMXPT, M_NOWAIT); if (path->device->serial_num != NULL) { int start, slen; start = strspn(serial_buf->serial_num, " "); slen = serial_buf->length - start; if (slen <= 0) { /* * SPC5r05 says that an all-space serial * number means no product serial number * is available */ slen = 0; } memcpy(path->device->serial_num, &serial_buf->serial_num[start], slen); path->device->serial_num_len = slen; path->device->serial_num[slen] = '\0'; } } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT, &softc->saved_ccb) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } /* * Let's see if we have seen this device before. 
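 * (The block below recomputes the MD5 fingerprint that probestart()
 * took over the inquiry data plus serial number; matching digests
 * mean the same device is still answering at this path.)
 */
#if 0	/* Sketch of the fingerprint scheme in one place, not compiled. */
	MD5_CTX ctx;
	u_int8_t digest[16];

	MD5Init(&ctx);
	MD5Update(&ctx, (unsigned char *)&path->device->inq_data,
	    sizeof(struct scsi_inquiry_data));
	MD5Update(&ctx, path->device->serial_num,
	    path->device->serial_num_len);
	MD5Final(digest, &ctx);
	/* bcmp(softc->digest, digest, 16) == 0 means "unchanged" */
#endif
/*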
*/ if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) { MD5_CTX context; u_int8_t digest[16]; MD5Init(&context); MD5Update(&context, (unsigned char *)&path->device->inq_data, sizeof(struct scsi_inquiry_data)); if (have_serialnum) MD5Update(&context, path->device->serial_num, path->device->serial_num_len); MD5Final(digest, &context); if (bcmp(softc->digest, digest, 16) == 0) changed = 0; /* * XXX Do we need to do a TUR in order to ensure * that the device really hasn't changed??? */ if ((changed != 0) && ((softc->flags & PROBE_NO_ANNOUNCE) == 0)) xpt_async(AC_LOST_DEVICE, path, NULL); } if (serial_buf != NULL) free(serial_buf, M_CAMXPT); if (changed != 0) { /* * Now that we have all the necessary * information to safely perform transfer * negotiations... Controllers don't perform * any negotiation or tagged queuing until * after the first XPT_SET_TRAN_SETTINGS ccb is * received. So, on a new device, just retrieve * the user settings, and set them as the current * settings to set the device up. */ proberequestdefaultnegotiation(periph); xpt_release_ccb(done_ccb); /* * Perform a TUR to allow the controller to * perform any necessary transfer negotiation. */ PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION); xpt_schedule(periph, priority); goto out; } xpt_release_ccb(done_ccb); break; } case PROBE_TUR_FOR_NEGOTIATION: case PROBE_DV_EXIT: if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) { cam_periph_error(done_ccb, 0, SF_NO_PRINT | SF_NO_RECOVERY | SF_NO_RETRY, NULL); } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } /* * Do Domain Validation for lun 0 on devices that claim * to support Synchronous Transfer modes. */ if (softc->action == PROBE_TUR_FOR_NEGOTIATION && done_ccb->ccb_h.target_lun == 0 && (path->device->inq_data.flags & SID_Sync) != 0 && (path->device->flags & CAM_DEV_IN_DV) == 0) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Begin Domain Validation\n")); path->device->flags |= CAM_DEV_IN_DV; xpt_release_ccb(done_ccb); PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV1); xpt_schedule(periph, priority); goto out; } if (softc->action == PROBE_DV_EXIT) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Leave Domain Validation\n")); } if (path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); } path->device->flags &= ~(CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM); if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) { /* Inform the XPT that a new device has been found */ done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path, done_ccb); } PROBE_SET_ACTION(softc, PROBE_DONE); xpt_release_ccb(done_ccb); break; case PROBE_INQUIRY_BASIC_DV1: case PROBE_INQUIRY_BASIC_DV2: { struct scsi_inquiry_data *nbuf; struct ccb_scsiio *csio; if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) { cam_periph_error(done_ccb, 0, SF_NO_PRINT | SF_NO_RECOVERY | SF_NO_RETRY, NULL); } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } csio = &done_ccb->csio; nbuf = (struct scsi_inquiry_data *)csio->data_ptr; if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) { xpt_print(path, "inquiry data fails comparison at DV%d step\n", softc->action == PROBE_INQUIRY_BASIC_DV1 ? 
1 : 2); if (proberequestbackoff(periph, path->device)) { path->device->flags &= ~CAM_DEV_IN_DV; PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION); } else { /* give up */ PROBE_SET_ACTION(softc, PROBE_DV_EXIT); } free(nbuf, M_CAMXPT); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } free(nbuf, M_CAMXPT); if (softc->action == PROBE_INQUIRY_BASIC_DV1) { PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV2); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } if (softc->action == PROBE_INQUIRY_BASIC_DV2) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Leave Domain Validation Successfully\n")); } if (path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); } path->device->flags &= ~(CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM); if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) { /* Inform the XPT that a new device has been found */ done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path, done_ccb); } PROBE_SET_ACTION(softc, PROBE_DONE); xpt_release_ccb(done_ccb); break; } default: panic("probedone: invalid action state 0x%x\n", softc->action); } done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe); done_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(done_ccb); if (TAILQ_FIRST(&softc->request_ccbs) == NULL) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n")); /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */ cam_release_devq(path, 0, 0, 0, FALSE); cam_periph_release_locked(periph); cam_periph_invalidate(periph); cam_periph_release_locked(periph); } else { probeschedule(periph); goto out; } } static void probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new, probe_flags flags) { struct cam_path *tp; struct scsi_report_luns_data *old; u_int idx1, idx2, nlun_old, nlun_new; lun_id_t this_lun; u_int8_t *ol, *nl; if (path->target == NULL) { return; } mtx_lock(&path->target->luns_mtx); old = path->target->luns; path->target->luns = new; mtx_unlock(&path->target->luns_mtx); if (old == NULL) return; nlun_old = scsi_4btoul(old->length) / 8; nlun_new = scsi_4btoul(new->length) / 8; /* * We are not going to assume sorted lists. Deal. */ for (idx1 = 0; idx1 < nlun_old; idx1++) { ol = old->luns[idx1].lundata; for (idx2 = 0; idx2 < nlun_new; idx2++) { nl = new->luns[idx2].lundata; if (memcmp(nl, ol, 8) == 0) { break; } } if (idx2 < nlun_new) { continue; } /* * An 'old' item not in the 'new' list. * Nuke it. Except that if it is lun 0, * that would be what the probe state * machine is currently working on, * so we won't do that. */ CAM_GET_LUN(old, idx1, this_lun); if (this_lun == 0) { continue; } /* * We also cannot nuke it if it is * not in a lun format we understand * and replace the LUN with a "simple" LUN * if that is all the HBA supports. 
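 * (A "simple" lun is one using peripheral addressing on bus 0, the
 * only form that maps onto a flat lun_id for HBAs without
 * PIM_EXTLUNS.)
 */
#if 0	/* Sketch, not compiled: lun 5 as a simple 8-byte REPORT LUNS
	 * entry; the layout shown is illustrative. */
	uint8_t lundata[8] = { 0x00, 0x05, 0, 0, 0, 0, 0, 0 };
#endif
/*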
*/ if (!(flags & PROBE_EXTLUN)) { if (!CAM_CAN_GET_SIMPLE_LUN(old, idx1)) continue; CAM_GET_SIMPLE_LUN(old, idx1, this_lun); } if (xpt_create_path(&tp, NULL, xpt_path_path_id(path), xpt_path_target_id(path), this_lun) == CAM_REQ_CMP) { xpt_async(AC_LOST_DEVICE, tp, NULL); xpt_free_path(tp); } } free(old, M_CAMXPT); } static void probecleanup(struct cam_periph *periph) { free(periph->softc, M_CAMXPT); } static void scsi_find_quirk(struct cam_ed *device) { struct scsi_quirk_entry *quirk; caddr_t match; match = cam_quirkmatch((caddr_t)&device->inq_data, (caddr_t)scsi_quirk_table, nitems(scsi_quirk_table), sizeof(*scsi_quirk_table), scsi_inquiry_match); if (match == NULL) panic("xpt_find_quirk: device didn't match wildcard entry!!"); quirk = (struct scsi_quirk_entry *)match; device->quirk = quirk; device->mintags = quirk->mintags; device->maxtags = quirk->maxtags; } static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS) { int error, val; val = cam_srch_hi; error = sysctl_handle_int(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (val == 0 || val == 1) { cam_srch_hi = val; return (0); } else { return (EINVAL); } } typedef struct { union ccb *request_ccb; struct ccb_pathinq *cpi; int counter; int lunindex[0]; } scsi_scan_bus_info; /* * To start a scan, request_ccb is an XPT_SCAN_BUS ccb. * As the scan progresses, scsi_scan_bus is used as the * callback on completion function. */ static void scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) { struct mtx *mtx; CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("scsi_scan_bus\n")); switch (request_ccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_SCAN_TGT: { scsi_scan_bus_info *scan_info; union ccb *work_ccb, *reset_ccb; struct cam_path *path; u_int i; u_int low_target, max_target; u_int initiator_id; /* Find out the characteristics of the bus */ work_ccb = xpt_alloc_ccb_nowait(); if (work_ccb == NULL) { request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(request_ccb); return; } xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path, request_ccb->ccb_h.pinfo.priority); work_ccb->ccb_h.func_code = XPT_PATH_INQ; xpt_action(work_ccb); if (work_ccb->ccb_h.status != CAM_REQ_CMP) { request_ccb->ccb_h.status = work_ccb->ccb_h.status; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) { /* * Can't scan the bus on an adapter that * cannot perform the initiator role. */ request_ccb->ccb_h.status = CAM_REQ_CMP; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } /* We may need to reset bus first, if we haven't done it yet. 
*/ if ((work_ccb->cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) && !(work_ccb->cpi.hba_misc & PIM_NOBUSRESET) && !timevalisset(&request_ccb->ccb_h.path->bus->last_reset) && (reset_ccb = xpt_alloc_ccb_nowait()) != NULL) { xpt_setup_ccb(&reset_ccb->ccb_h, request_ccb->ccb_h.path, CAM_PRIORITY_NONE); reset_ccb->ccb_h.func_code = XPT_RESET_BUS; xpt_action(reset_ccb); if (reset_ccb->ccb_h.status != CAM_REQ_CMP) { request_ccb->ccb_h.status = reset_ccb->ccb_h.status; xpt_free_ccb(reset_ccb); xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } xpt_free_ccb(reset_ccb); } /* Save some state for use while we probe for devices */ scan_info = (scsi_scan_bus_info *) malloc(sizeof(scsi_scan_bus_info) + (work_ccb->cpi.max_target * sizeof (u_int)), M_CAMXPT, M_ZERO|M_NOWAIT); if (scan_info == NULL) { request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("SCAN start for %p\n", scan_info)); scan_info->request_ccb = request_ccb; scan_info->cpi = &work_ccb->cpi; /* Cache on our stack so we can work asynchronously */ max_target = scan_info->cpi->max_target; low_target = 0; initiator_id = scan_info->cpi->initiator_id; /* * We can scan all targets in parallel, or do it sequentially. */ if (request_ccb->ccb_h.func_code == XPT_SCAN_TGT) { max_target = low_target = request_ccb->ccb_h.target_id; scan_info->counter = 0; } else if (scan_info->cpi->hba_misc & PIM_SEQSCAN) { max_target = 0; scan_info->counter = 0; } else { scan_info->counter = scan_info->cpi->max_target + 1; if (scan_info->cpi->initiator_id < scan_info->counter) { scan_info->counter--; } } mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path); mtx_unlock(mtx); for (i = low_target; i <= max_target; i++) { cam_status status; if (i == initiator_id) continue; status = xpt_create_path(&path, NULL, request_ccb->ccb_h.path_id, i, 0); if (status != CAM_REQ_CMP) { printf("scsi_scan_bus: xpt_create_path failed" " with status %#x, bus scan halted\n", status); free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = status; xpt_free_ccb(work_ccb); xpt_done(request_ccb); break; } work_ccb = xpt_alloc_ccb_nowait(); if (work_ccb == NULL) { xpt_free_ccb((union ccb *)scan_info->cpi); free(scan_info, M_CAMXPT); xpt_free_path(path); request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(request_ccb); break; } xpt_setup_ccb(&work_ccb->ccb_h, path, request_ccb->ccb_h.pinfo.priority); work_ccb->ccb_h.func_code = XPT_SCAN_LUN; work_ccb->ccb_h.cbfcnp = scsi_scan_bus; work_ccb->ccb_h.flags |= CAM_UNLOCKED; work_ccb->ccb_h.ppriv_ptr0 = scan_info; work_ccb->crcn.flags = request_ccb->crcn.flags; xpt_action(work_ccb); } mtx_lock(mtx); break; } case XPT_SCAN_LUN: { cam_status status; struct cam_path *path, *oldpath; scsi_scan_bus_info *scan_info; struct cam_et *target; struct cam_ed *device, *nextdev; int next_target; path_id_t path_id; target_id_t target_id; lun_id_t lun_id; oldpath = request_ccb->ccb_h.path; status = cam_ccb_status(request_ccb); scan_info = (scsi_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0; path_id = request_ccb->ccb_h.path_id; target_id = request_ccb->ccb_h.target_id; lun_id = request_ccb->ccb_h.target_lun; target = request_ccb->ccb_h.path->target; next_target = 1; mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path); mtx_lock(mtx); mtx_lock(&target->luns_mtx); if (target->luns) { lun_id_t first; u_int nluns = scsi_4btoul(target->luns->length) / 8; /* * Make sure we skip over lun 0 if it's the first member * of the list as we've actually just 
finished probing * it. */ CAM_GET_LUN(target->luns, 0, first); if (first == 0 && scan_info->lunindex[target_id] == 0) { scan_info->lunindex[target_id]++; } /* * Skip any LUNs that the HBA can't deal with. */ while (scan_info->lunindex[target_id] < nluns) { if (scan_info->cpi->hba_misc & PIM_EXTLUNS) { CAM_GET_LUN(target->luns, scan_info->lunindex[target_id], lun_id); break; } if (CAM_CAN_GET_SIMPLE_LUN(target->luns, scan_info->lunindex[target_id])) { CAM_GET_SIMPLE_LUN(target->luns, scan_info->lunindex[target_id], lun_id); break; } scan_info->lunindex[target_id]++; } if (scan_info->lunindex[target_id] < nluns) { mtx_unlock(&target->luns_mtx); next_target = 0; CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("next lun to try at index %u is %jx\n", scan_info->lunindex[target_id], (uintmax_t)lun_id)); scan_info->lunindex[target_id]++; } else { mtx_unlock(&target->luns_mtx); /* We're done with scanning all luns. */ } } else { mtx_unlock(&target->luns_mtx); device = request_ccb->ccb_h.path->device; /* Continue sequential LUN scan if: */ /* -- we have more LUNs that need recheck */ mtx_lock(&target->bus->eb_mtx); nextdev = device; while ((nextdev = TAILQ_NEXT(nextdev, links)) != NULL) if ((nextdev->flags & CAM_DEV_UNCONFIGURED) == 0) break; mtx_unlock(&target->bus->eb_mtx); if (nextdev != NULL) { next_target = 0; /* -- stop if CAM_QUIRK_NOLUNS is set. */ } else if (SCSI_QUIRK(device)->quirks & CAM_QUIRK_NOLUNS) { next_target = 1; /* -- this LUN is connected and its SCSI version * allows more LUNs. */ } else if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) { if (lun_id < (CAM_SCSI2_MAXLUN-1) || CAN_SRCH_HI_DENSE(device)) next_target = 0; /* -- this LUN is disconnected, its SCSI version * allows more LUNs and we guess they may be. */ } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) { if (lun_id < (CAM_SCSI2_MAXLUN-1) || CAN_SRCH_HI_SPARSE(device)) next_target = 0; } if (next_target == 0) { lun_id++; if (lun_id > scan_info->cpi->max_lun) next_target = 1; } } /* * Check to see if we scan any further luns. */ if (next_target) { int done; /* * Free the current request path- we're done with it. 
*/ xpt_free_path(oldpath); hop_again: done = 0; if (scan_info->request_ccb->ccb_h.func_code == XPT_SCAN_TGT) { done = 1; } else if (scan_info->cpi->hba_misc & PIM_SEQSCAN) { scan_info->counter++; if (scan_info->counter == scan_info->cpi->initiator_id) { scan_info->counter++; } if (scan_info->counter >= scan_info->cpi->max_target+1) { done = 1; } } else { scan_info->counter--; if (scan_info->counter == 0) { done = 1; } } if (done) { mtx_unlock(mtx); xpt_free_ccb(request_ccb); xpt_free_ccb((union ccb *)scan_info->cpi); request_ccb = scan_info->request_ccb; CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("SCAN done for %p\n", scan_info)); free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(request_ccb); break; } if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) { mtx_unlock(mtx); xpt_free_ccb(request_ccb); break; } status = xpt_create_path(&path, NULL, scan_info->request_ccb->ccb_h.path_id, scan_info->counter, 0); if (status != CAM_REQ_CMP) { mtx_unlock(mtx); printf("scsi_scan_bus: xpt_create_path failed" " with status %#x, bus scan halted\n", status); xpt_free_ccb(request_ccb); xpt_free_ccb((union ccb *)scan_info->cpi); request_ccb = scan_info->request_ccb; free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = status; xpt_done(request_ccb); break; } xpt_setup_ccb(&request_ccb->ccb_h, path, request_ccb->ccb_h.pinfo.priority); request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->ccb_h.cbfcnp = scsi_scan_bus; request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->ccb_h.ppriv_ptr0 = scan_info; request_ccb->crcn.flags = scan_info->request_ccb->crcn.flags; } else { status = xpt_create_path(&path, NULL, path_id, target_id, lun_id); /* * Free the old request path- we're done with it. We * do this *after* creating the new path so that * we don't remove a target that has our lun list * in the case that lun 0 is not present. */ xpt_free_path(oldpath); if (status != CAM_REQ_CMP) { printf("scsi_scan_bus: xpt_create_path failed " "with status %#x, halting LUN scan\n", status); goto hop_again; } xpt_setup_ccb(&request_ccb->ccb_h, path, request_ccb->ccb_h.pinfo.priority); request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->ccb_h.cbfcnp = scsi_scan_bus; request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->ccb_h.ppriv_ptr0 = scan_info; request_ccb->crcn.flags = scan_info->request_ccb->crcn.flags; } mtx_unlock(mtx); xpt_action(request_ccb); break; } default: break; } } static void scsi_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *request_ccb) { struct ccb_pathinq cpi; cam_status status; struct cam_path *new_path; struct cam_periph *old_periph; int lock; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("scsi_scan_lun\n")); xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.ccb_h.status != CAM_REQ_CMP) { if (request_ccb != NULL) { request_ccb->ccb_h.status = cpi.ccb_h.status; xpt_done(request_ccb); } return; } if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) { /* * Can't scan the bus on an adapter that * cannot perform the initiator role. 
*/ if (request_ccb != NULL) { request_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(request_ccb); } return; } if (request_ccb == NULL) { request_ccb = xpt_alloc_ccb_nowait(); if (request_ccb == NULL) { xpt_print(path, "scsi_scan_lun: can't allocate CCB, " "can't continue\n"); return; } status = xpt_create_path(&new_path, NULL, path->bus->path_id, path->target->target_id, path->device->lun_id); if (status != CAM_REQ_CMP) { xpt_print(path, "scsi_scan_lun: can't create path, " "can't continue\n"); xpt_free_ccb(request_ccb); return; } xpt_setup_ccb(&request_ccb->ccb_h, new_path, CAM_PRIORITY_XPT); request_ccb->ccb_h.cbfcnp = xptscandone; request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->crcn.flags = flags; } lock = (xpt_path_owned(path) == 0); if (lock) xpt_path_lock(path); if ((old_periph = cam_periph_find(path, "probe")) != NULL) { if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) { probe_softc *softc; softc = (probe_softc *)old_periph->softc; TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); } else { request_ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(request_ccb); } } else { status = cam_periph_alloc(proberegister, NULL, probecleanup, probestart, "probe", CAM_PERIPH_BIO, request_ccb->ccb_h.path, NULL, 0, request_ccb); if (status != CAM_REQ_CMP) { xpt_print(path, "scsi_scan_lun: cam_alloc_periph " "returned an error, can't continue probe\n"); request_ccb->ccb_h.status = status; xpt_done(request_ccb); } } if (lock) xpt_path_unlock(path); } static void xptscandone(struct cam_periph *periph, union ccb *done_ccb) { xpt_free_path(done_ccb->ccb_h.path); xpt_free_ccb(done_ccb); } static struct cam_ed * scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct scsi_quirk_entry *quirk; struct cam_ed *device; device = xpt_alloc_device(bus, target, lun_id); if (device == NULL) return (NULL); /* * Take the default quirk entry until we have inquiry * data and can determine a better quirk to use. */ quirk = &scsi_quirk_table[nitems(scsi_quirk_table) - 1]; device->quirk = (void *)quirk; device->mintags = quirk->mintags; device->maxtags = quirk->maxtags; bzero(&device->inq_data, sizeof(device->inq_data)); device->inq_flags = 0; device->queue_flags = 0; device->serial_num = NULL; device->serial_num_len = 0; device->device_id = NULL; device->device_id_len = 0; device->supported_vpds = NULL; device->supported_vpds_len = 0; return (device); } static void scsi_devise_transport(struct cam_path *path) { struct ccb_pathinq cpi; struct ccb_trans_settings cts; struct scsi_inquiry_data *inq_buf; /* Get transport information from the SIM */ xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); inq_buf = NULL; if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) inq_buf = &path->device->inq_data; path->device->protocol = PROTO_SCSI; path->device->protocol_version = inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version; path->device->transport = cpi.transport; path->device->transport_version = cpi.transport_version; /* * Any device not using SPI3 features should * be considered SPI2 or lower. 
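 * (SID_SPI_MASK covers the SPI3 feature bits in spi3data, e.g. DT
 * clocking; a drive advertising none of them is clamped to
 * transport version 2 below.)
 */
#if 0	/* Worked example, not compiled: spi3data == 0 forces SPI-2. */
	if ((inq_buf->spi3data & SID_SPI_MASK) == 0 &&
	    path->device->transport_version > 2)
		path->device->transport_version = 2;
#endif
/*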
*/ if (inq_buf != NULL) { if (path->device->transport == XPORT_SPI && (inq_buf->spi3data & SID_SPI_MASK) == 0 && path->device->transport_version > 2) path->device->transport_version = 2; } else { struct cam_ed* otherdev; for (otherdev = TAILQ_FIRST(&path->target->ed_entries); otherdev != NULL; otherdev = TAILQ_NEXT(otherdev, links)) { if (otherdev != path->device) break; } if (otherdev != NULL) { /* * Initially assume the same versioning as * prior luns for this target. */ path->device->protocol_version = otherdev->protocol_version; path->device->transport_version = otherdev->transport_version; } else { /* Until we know better, opt for safety */ path->device->protocol_version = 2; if (path->device->transport == XPORT_SPI) path->device->transport_version = 2; else path->device->transport_version = 0; } } /* * XXX * For a device compliant with SPC-2 we should be able * to determine the transport version supported by * scrutinizing the version descriptors in the * inquiry buffer. */ /* Tell the controller what we think */ xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.transport = path->device->transport; cts.transport_version = path->device->transport_version; cts.protocol = path->device->protocol; cts.protocol_version = path->device->protocol_version; cts.proto_specific.valid = 0; cts.xport_specific.valid = 0; xpt_action((union ccb *)&cts); } static void scsi_dev_advinfo(union ccb *start_ccb) { struct cam_ed *device; struct ccb_dev_advinfo *cdai; off_t amt; start_ccb->ccb_h.status = CAM_REQ_INVALID; device = start_ccb->ccb_h.path->device; cdai = &start_ccb->cdai; switch(cdai->buftype) { case CDAI_TYPE_SCSI_DEVID: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->device_id_len; if (device->device_id_len == 0) break; amt = device->device_id_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->device_id, amt); break; case CDAI_TYPE_SERIAL_NUM: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->serial_num_len; if (device->serial_num_len == 0) break; amt = device->serial_num_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->serial_num, amt); break; case CDAI_TYPE_PHYS_PATH: if (cdai->flags & CDAI_FLAG_STORE) { if (device->physpath != NULL) { free(device->physpath, M_CAMXPT); device->physpath = NULL; } device->physpath_len = cdai->bufsiz; /* Clear existing buffer if zero length */ if (cdai->bufsiz == 0) break; device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT); if (device->physpath == NULL) { start_ccb->ccb_h.status = CAM_REQ_ABORTED; return; } memcpy(device->physpath, cdai->buf, cdai->bufsiz); } else { cdai->provsiz = device->physpath_len; if (device->physpath_len == 0) break; amt = device->physpath_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->physpath, amt); } break; case CDAI_TYPE_RCAPLONG: if (cdai->flags & CDAI_FLAG_STORE) { if (device->rcap_buf != NULL) { free(device->rcap_buf, M_CAMXPT); device->rcap_buf = NULL; } device->rcap_len = cdai->bufsiz; /* Clear existing buffer if zero length */ if (cdai->bufsiz == 0) break; device->rcap_buf = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT); if (device->rcap_buf == NULL) { start_ccb->ccb_h.status = CAM_REQ_ABORTED; return; } memcpy(device->rcap_buf, cdai->buf, cdai->bufsiz); } else { cdai->provsiz = device->rcap_len; if (device->rcap_len == 0) break; amt = device->rcap_len; if (cdai->provsiz > cdai->bufsiz) amt = 
cdai->bufsiz; memcpy(cdai->buf, device->rcap_buf, amt); } break; case CDAI_TYPE_EXT_INQ: /* * We fetch extended inquiry data during probe, if * available. We don't allow changing it. */ if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->ext_inq_len; if (device->ext_inq_len == 0) break; amt = device->ext_inq_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->ext_inq, amt); break; default: return; } start_ccb->ccb_h.status = CAM_REQ_CMP; if (cdai->flags & CDAI_FLAG_STORE) { xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path, (void *)(uintptr_t)cdai->buftype); } } static void scsi_action(union ccb *start_ccb) { switch (start_ccb->ccb_h.func_code) { case XPT_SET_TRAN_SETTINGS: { scsi_set_transfer_settings(&start_ccb->cts, start_ccb->ccb_h.path, /*async_update*/FALSE); break; } case XPT_SCAN_BUS: case XPT_SCAN_TGT: scsi_scan_bus(start_ccb->ccb_h.path->periph, start_ccb); break; case XPT_SCAN_LUN: scsi_scan_lun(start_ccb->ccb_h.path->periph, start_ccb->ccb_h.path, start_ccb->crcn.flags, start_ccb); break; case XPT_DEV_ADVINFO: { scsi_dev_advinfo(start_ccb); break; } default: xpt_action_default(start_ccb); break; } } static void scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path, int async_update) { struct ccb_pathinq cpi; struct ccb_trans_settings cur_cts; struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_scsi *cur_scsi; struct scsi_inquiry_data *inq_data; struct cam_ed *device; if (path == NULL || (device = path->device) == NULL) { cts->ccb_h.status = CAM_PATH_INVALID; xpt_done((union ccb *)cts); return; } if (cts->protocol == PROTO_UNKNOWN || cts->protocol == PROTO_UNSPECIFIED) { cts->protocol = device->protocol; cts->protocol_version = device->protocol_version; } if (cts->protocol_version == PROTO_VERSION_UNKNOWN || cts->protocol_version == PROTO_VERSION_UNSPECIFIED) cts->protocol_version = device->protocol_version; if (cts->protocol != device->protocol) { xpt_print(path, "Uninitialized Protocol %x:%x?\n", cts->protocol, device->protocol); cts->protocol = device->protocol; } if (cts->protocol_version > device->protocol_version) { if (bootverbose) { xpt_print(path, "Down reving Protocol " "Version from %d to %d?\n", cts->protocol_version, device->protocol_version); } cts->protocol_version = device->protocol_version; } if (cts->transport == XPORT_UNKNOWN || cts->transport == XPORT_UNSPECIFIED) { cts->transport = device->transport; cts->transport_version = device->transport_version; } if (cts->transport_version == XPORT_VERSION_UNKNOWN || cts->transport_version == XPORT_VERSION_UNSPECIFIED) cts->transport_version = device->transport_version; if (cts->transport != device->transport) { xpt_print(path, "Uninitialized Transport %x:%x?\n", cts->transport, device->transport); cts->transport = device->transport; } if (cts->transport_version > device->transport_version) { if (bootverbose) { xpt_print(path, "Down reving Transport " "Version from %d to %d?\n", cts->transport_version, device->transport_version); } cts->transport_version = device->transport_version; } /* * Nothing more of interest to do unless * this is a device connected via the * SCSI protocol. 
*/ if (cts->protocol != PROTO_SCSI) { if (async_update == FALSE) xpt_action_default((union ccb *)cts); return; } inq_data = &device->inq_data; scsi = &cts->proto_specific.scsi; xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* SCSI specific sanity checking */ if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0 || (device->queue_flags & SCP_QUEUE_DQUE) != 0 || (device->mintags == 0)) { /* * Can't tag on hardware that doesn't support tags, * doesn't have it enabled, or has broken tag support. */ scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } if (async_update == FALSE) { /* * Perform sanity checking against what the * controller and device can do. */ xpt_setup_ccb(&cur_cts.ccb_h, path, CAM_PRIORITY_NONE); cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cur_cts.type = cts->type; xpt_action((union ccb *)&cur_cts); if (cam_ccb_status((union ccb *)&cur_cts) != CAM_REQ_CMP) { return; } cur_scsi = &cur_cts.proto_specific.scsi; if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) { scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB; } if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0) scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } /* SPI specific sanity checking */ if (cts->transport == XPORT_SPI && async_update == FALSE) { u_int spi3caps; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings_spi *cur_spi; spi = &cts->xport_specific.spi; cur_spi = &cur_cts.xport_specific.spi; /* Fill in any gaps in what the user gave us */ if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) spi->sync_period = cur_spi->sync_period; if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) spi->sync_period = 0; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) spi->sync_offset = cur_spi->sync_offset; if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) spi->sync_offset = 0; if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) spi->ppr_options = cur_spi->ppr_options; if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) spi->ppr_options = 0; if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0) spi->bus_width = cur_spi->bus_width; if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0) spi->bus_width = 0; if ((spi->valid & CTS_SPI_VALID_DISC) == 0) { spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB; } if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0) spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0 && (inq_data->flags & SID_Sync) == 0 && cts->type == CTS_TYPE_CURRENT_SETTINGS) || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)) { /* Force async */ spi->sync_period = 0; spi->sync_offset = 0; } switch (spi->bus_width) { case MSG_EXT_WDTR_BUS_32_BIT: if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0 || (inq_data->flags & SID_WBus32) != 0 || cts->type == CTS_TYPE_USER_SETTINGS) && (cpi.hba_inquiry & PI_WIDE_32) != 0) break; /* Fall Through to 16-bit */ case MSG_EXT_WDTR_BUS_16_BIT: if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0 || (inq_data->flags & SID_WBus16) != 0 || cts->type == CTS_TYPE_USER_SETTINGS) && (cpi.hba_inquiry & PI_WIDE_16) != 0) { spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; } /* Fall Through to 8-bit */ default: /* New bus width?? 
*/ case MSG_EXT_WDTR_BUS_8_BIT: /* All targets can do this */ spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; } spi3caps = cpi.xport_specific.spi.ppr_options; if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0 && cts->type == CTS_TYPE_CURRENT_SETTINGS) spi3caps &= inq_data->spi3data; if ((spi3caps & SID_SPI_CLOCK_DT) == 0) spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ; if ((spi3caps & SID_SPI_IUS) == 0) spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ; if ((spi3caps & SID_SPI_QAS) == 0) spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ; /* No SPI Transfer settings are allowed unless we are wide */ if (spi->bus_width == 0) spi->ppr_options = 0; if ((spi->valid & CTS_SPI_VALID_DISC) && ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0)) { /* * Can't tag queue without disconnection. */ scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; scsi->valid |= CTS_SCSI_VALID_TQ; } /* * If we are currently performing tagged transactions to * this device and want to change its negotiation parameters, * go non-tagged for a bit to give the controller a chance to * negotiate unhampered by tag messages. */ if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (device->inq_flags & SID_CmdQue) != 0 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE| CTS_SPI_VALID_SYNC_OFFSET| CTS_SPI_VALID_BUS_WIDTH)) != 0) scsi_toggle_tags(path); } if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) { int device_tagenb; /* * If we are transitioning from tags to no-tags or * vice-versa, we need to carefully freeze and restart * the queue so that we don't overlap tagged and non-tagged * commands. We also temporarily stop tags if there is * a change in transfer negotiation settings to allow * "tag-less" negotiation. */ if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || (device->inq_flags & SID_CmdQue) != 0) device_tagenb = TRUE; else device_tagenb = FALSE; if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0 && device_tagenb == FALSE) || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0 && device_tagenb == TRUE)) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) { /* * Delay change to use tags until after a * few commands have gone to this device so * the controller has time to perform transfer * negotiations without tagged messages getting * in the way. */ device->tag_delay_count = CAM_TAG_DELAY_COUNT; device->flags |= CAM_DEV_TAG_AFTER_COUNT; } else { xpt_stop_tags(path); } } } if (async_update == FALSE) xpt_action_default((union ccb *)cts); } static void scsi_toggle_tags(struct cam_path *path) { struct cam_ed *dev; /* * Give controllers a chance to renegotiate * before starting tag operations. We * "toggle" tagged queuing off then on * which causes the tag enable command delay * counter to come into effect. */ dev = path->device; if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || ((dev->inq_flags & SID_CmdQue) != 0 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) { struct ccb_trans_settings cts; xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.protocol = PROTO_SCSI; cts.protocol_version = PROTO_VERSION_UNSPECIFIED; cts.transport = XPORT_UNSPECIFIED; cts.transport_version = XPORT_VERSION_UNSPECIFIED; cts.proto_specific.scsi.flags = 0; cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ; scsi_set_transfer_settings(&cts, path, /*async_update*/TRUE); cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB; scsi_set_transfer_settings(&cts, path, /*async_update*/TRUE); } } /* * Handle any per-device event notifications that require action by the XPT. 
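 * One such action is re-running transfer negotiation after a bus
 * reset or BDR, for which scsi_dev_async() below reuses the
 * scsi_toggle_tags() off/on sequence defined above.
 */
#if 0	/* Sketch, not compiled: the toggle is two asynchronous
	 * scsi_set_transfer_settings() updates; re-enabling tags arms
	 * the CAM_TAG_DELAY_COUNT delay before tags resume. */
	cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
	cts.proto_specific.scsi.flags = 0;		/* tags off */
	scsi_set_transfer_settings(&cts, path, /*async_update*/TRUE);
	cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
	scsi_set_transfer_settings(&cts, path, /*async_update*/TRUE);
#endif
/*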
*/ static void scsi_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg) { cam_status status; struct cam_path newpath; /* * We only need to handle events for real devices. */ if (target->target_id == CAM_TARGET_WILDCARD || device->lun_id == CAM_LUN_WILDCARD) return; /* * We need our own path with wildcards expanded to * handle certain types of events. */ if ((async_code == AC_SENT_BDR) || (async_code == AC_BUS_RESET) || (async_code == AC_INQ_CHANGED)) status = xpt_compile_path(&newpath, NULL, bus->path_id, target->target_id, device->lun_id); else status = CAM_REQ_CMP_ERR; if (status == CAM_REQ_CMP) { /* * Allow transfer negotiation to occur in a * tag free environment and after settle delay. */ if (async_code == AC_SENT_BDR || async_code == AC_BUS_RESET) { cam_freeze_devq(&newpath); cam_release_devq(&newpath, RELSIM_RELEASE_AFTER_TIMEOUT, /*reduction*/0, /*timeout*/scsi_delay, /*getcount_only*/0); scsi_toggle_tags(&newpath); } if (async_code == AC_INQ_CHANGED) { /* * We've sent a start unit command, or * something similar to a device that * may have caused its inquiry data to * change. So we re-scan the device to * refresh the inquiry data for it. */ scsi_scan_lun(newpath.periph, &newpath, CAM_EXPECT_INQ_CHANGE, NULL); } xpt_release_path(&newpath); } else if (async_code == AC_LOST_DEVICE && (device->flags & CAM_DEV_UNCONFIGURED) == 0) { device->flags |= CAM_DEV_UNCONFIGURED; xpt_release_device(device); } else if (async_code == AC_TRANSFER_NEG) { struct ccb_trans_settings *settings; struct cam_path path; settings = (struct ccb_trans_settings *)async_arg; xpt_compile_path(&path, NULL, bus->path_id, target->target_id, device->lun_id); scsi_set_transfer_settings(settings, &path, /*async_update*/TRUE); xpt_release_path(&path); } } static void _scsi_announce_periph(struct cam_periph *periph, u_int *speed, u_int *freq, struct ccb_trans_settings *cts) { struct ccb_pathinq cpi; struct cam_path *path = periph->path; cam_periph_assert(periph, MA_OWNED); xpt_setup_ccb(&cts->ccb_h, path, CAM_PRIORITY_NORMAL); cts->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts->type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb*)cts); if (cam_ccb_status((union ccb *)cts) != CAM_REQ_CMP) return; /* Ask the SIM for its base transfer speed */ xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* Report connection speed */ *speed = cpi.base_transfer_speed; *freq = 0; if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0 && spi->sync_offset != 0) { *freq = scsi_calc_syncsrate(spi->sync_period); *speed = *freq; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) *speed *= (0x01 << spi->bus_width); } if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; if (fc->valid & CTS_FC_VALID_SPEED) *speed = fc->bitrate; } if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_SAS) { struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; if (sas->valid & CTS_SAS_VALID_SPEED) *speed = sas->bitrate; } } static void scsi_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb) { struct ccb_trans_settings cts; u_int speed, freq, mb; _scsi_announce_periph(periph, &speed, &freq, &cts); if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) return; mb = speed / 1000; if (mb > 0) 
sbuf_printf(sb, "%s%d: %d.%03dMB/s transfers", periph->periph_name, periph->unit_number, mb, speed % 1000); else sbuf_printf(sb, "%s%d: %dKB/s transfers", periph->periph_name, periph->unit_number, speed); /* Report additional information about SPI connections */ if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi; spi = &cts.xport_specific.spi; if (freq != 0) { sbuf_printf(sb, " (%d.%03dMHz%s, offset %d", freq / 1000, freq % 1000, (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0 ? " DT" : "", spi->sync_offset); } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0 && spi->bus_width > 0) { if (freq != 0) { sbuf_printf(sb, ", "); } else { sbuf_printf(sb, " ("); } sbuf_printf(sb, "%dbit)", 8 * (0x01 << spi->bus_width)); } else if (freq != 0) { sbuf_printf(sb, ")"); } } if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc; fc = &cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWNN) sbuf_printf(sb, " WWNN 0x%llx", (long long) fc->wwnn); if (fc->valid & CTS_FC_VALID_WWPN) sbuf_printf(sb, " WWPN 0x%llx", (long long) fc->wwpn); if (fc->valid & CTS_FC_VALID_PORT) sbuf_printf(sb, " PortID 0x%x", fc->port); } sbuf_printf(sb, "\n"); } static void scsi_announce_periph(struct cam_periph *periph) { struct ccb_trans_settings cts; u_int speed, freq, mb; _scsi_announce_periph(periph, &speed, &freq, &cts); if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) return; mb = speed / 1000; if (mb > 0) printf("%s%d: %d.%03dMB/s transfers", periph->periph_name, periph->unit_number, mb, speed % 1000); else printf("%s%d: %dKB/s transfers", periph->periph_name, periph->unit_number, speed); /* Report additional information about SPI connections */ if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi; spi = &cts.xport_specific.spi; if (freq != 0) { printf(" (%d.%03dMHz%s, offset %d", freq / 1000, freq % 1000, (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0 ? " DT" : "", spi->sync_offset); } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0 && spi->bus_width > 0) { if (freq != 0) { printf(", "); } else { printf(" ("); } printf("%dbit)", 8 * (0x01 << spi->bus_width)); } else if (freq != 0) { printf(")"); } } if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc; fc = &cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWNN) printf(" WWNN 0x%llx", (long long) fc->wwnn); if (fc->valid & CTS_FC_VALID_WWPN) printf(" WWPN 0x%llx", (long long) fc->wwpn); if (fc->valid & CTS_FC_VALID_PORT) printf(" PortID 0x%x", fc->port); } printf("\n"); } static void scsi_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb) { scsi_print_inquiry_sbuf(sb, &device->inq_data); } static void scsi_proto_announce(struct cam_ed *device) { scsi_print_inquiry(&device->inq_data); } static void scsi_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb) { scsi_print_inquiry_short_sbuf(sb, &device->inq_data); } static void scsi_proto_denounce(struct cam_ed *device) { scsi_print_inquiry_short(&device->inq_data); } static void scsi_proto_debug_out(union ccb *ccb) { char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; struct cam_ed *device; if (ccb->ccb_h.func_code != XPT_SCSI_IO) return; device = ccb->ccb_h.path->device; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_CDB,("%s. CDB: %s\n", scsi_op_desc(scsiio_cdb_ptr(&ccb->csio)[0], &device->inq_data), scsi_cdb_string(scsiio_cdb_ptr(&ccb->csio), cdb_str, sizeof(cdb_str)))); }