Index: head/sys/cam/ata/ata_da.c =================================================================== --- head/sys/cam/ata/ata_da.c (revision 328917) +++ head/sys/cam/ata/ata_da.c (revision 328918) @@ -1,3584 +1,3584 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ada.h" #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif /* _KERNEL */ #ifndef _KERNEL #include #include #endif /* _KERNEL */ #include #include #include #include #include #include #include #include #include #include /* geometry translation */ #ifdef _KERNEL #define ATA_MAX_28BIT_LBA 268435455UL extern int iosched_debug; typedef enum { ADA_STATE_RAHEAD, ADA_STATE_WCACHE, ADA_STATE_LOGDIR, ADA_STATE_IDDIR, ADA_STATE_SUP_CAP, ADA_STATE_ZONE, ADA_STATE_NORMAL } ada_state; typedef enum { ADA_FLAG_CAN_48BIT = 0x00000002, ADA_FLAG_CAN_FLUSHCACHE = 0x00000004, ADA_FLAG_CAN_NCQ = 0x00000008, ADA_FLAG_CAN_DMA = 0x00000010, ADA_FLAG_NEED_OTAG = 0x00000020, ADA_FLAG_WAS_OTAG = 0x00000040, ADA_FLAG_CAN_TRIM = 0x00000080, ADA_FLAG_OPEN = 0x00000100, ADA_FLAG_SCTX_INIT = 0x00000200, ADA_FLAG_CAN_CFA = 0x00000400, ADA_FLAG_CAN_POWERMGT = 0x00000800, ADA_FLAG_CAN_DMA48 = 0x00001000, ADA_FLAG_CAN_LOG = 0x00002000, ADA_FLAG_CAN_IDLOG = 0x00004000, ADA_FLAG_CAN_SUPCAP = 0x00008000, ADA_FLAG_CAN_ZONE = 0x00010000, ADA_FLAG_CAN_WCACHE = 0x00020000, ADA_FLAG_CAN_RAHEAD = 0x00040000, ADA_FLAG_PROBED = 0x00080000, ADA_FLAG_ANNOUNCED = 0x00100000, ADA_FLAG_DIRTY = 0x00200000, ADA_FLAG_CAN_NCQ_TRIM = 0x00400000, /* CAN_TRIM also set */ ADA_FLAG_PIM_ATA_EXT = 0x00800000 } ada_flags; typedef enum { ADA_Q_NONE = 0x00, ADA_Q_4K = 0x01, ADA_Q_NCQ_TRIM_BROKEN = 0x02, ADA_Q_LOG_BROKEN = 0x04, ADA_Q_SMR_DM = 0x08 } ada_quirks; #define ADA_Q_BIT_STRING \ "\020" \ "\0014K" \ "\002NCQ_TRIM_BROKEN" \ "\003LOG_BROKEN" \ "\004SMR_DM" typedef enum { ADA_CCB_RAHEAD = 0x01, ADA_CCB_WCACHE = 0x02, ADA_CCB_BUFFER_IO = 0x03, ADA_CCB_DUMP = 0x05, ADA_CCB_TRIM = 0x06, ADA_CCB_LOGDIR = 0x07, ADA_CCB_IDDIR = 0x08, ADA_CCB_SUP_CAP = 0x09, ADA_CCB_ZONE = 0x0a, 
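        /*
         * (Added note: the CCB type codes above all fit in the low
         * nibble, so the completion path in adadone() can recover the
         * type by masking the per-CCB ccb_state field with
         * ADA_CCB_TYPE_MASK below; any future flag bits must stay
         * above bit 3.)
         */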
ADA_CCB_TYPE_MASK = 0x0F, } ada_ccb_state; typedef enum { ADA_ZONE_NONE = 0x00, ADA_ZONE_DRIVE_MANAGED = 0x01, ADA_ZONE_HOST_AWARE = 0x02, ADA_ZONE_HOST_MANAGED = 0x03 } ada_zone_mode; typedef enum { ADA_ZONE_FLAG_RZ_SUP = 0x0001, ADA_ZONE_FLAG_OPEN_SUP = 0x0002, ADA_ZONE_FLAG_CLOSE_SUP = 0x0004, ADA_ZONE_FLAG_FINISH_SUP = 0x0008, ADA_ZONE_FLAG_RWP_SUP = 0x0010, ADA_ZONE_FLAG_SUP_MASK = (ADA_ZONE_FLAG_RZ_SUP | ADA_ZONE_FLAG_OPEN_SUP | ADA_ZONE_FLAG_CLOSE_SUP | ADA_ZONE_FLAG_FINISH_SUP | ADA_ZONE_FLAG_RWP_SUP), ADA_ZONE_FLAG_URSWRZ = 0x0020, ADA_ZONE_FLAG_OPT_SEQ_SET = 0x0040, ADA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080, ADA_ZONE_FLAG_MAX_SEQ_SET = 0x0100, ADA_ZONE_FLAG_SET_MASK = (ADA_ZONE_FLAG_OPT_SEQ_SET | ADA_ZONE_FLAG_OPT_NONSEQ_SET | ADA_ZONE_FLAG_MAX_SEQ_SET) } ada_zone_flags; static struct ada_zone_desc { ada_zone_flags value; const char *desc; } ada_zone_desc_table[] = { {ADA_ZONE_FLAG_RZ_SUP, "Report Zones" }, {ADA_ZONE_FLAG_OPEN_SUP, "Open" }, {ADA_ZONE_FLAG_CLOSE_SUP, "Close" }, {ADA_ZONE_FLAG_FINISH_SUP, "Finish" }, {ADA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" }, }; /* Offsets into our private area for storing information */ #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 typedef enum { ADA_DELETE_NONE, ADA_DELETE_DISABLE, ADA_DELETE_CFA_ERASE, ADA_DELETE_DSM_TRIM, ADA_DELETE_NCQ_DSM_TRIM, ADA_DELETE_MIN = ADA_DELETE_CFA_ERASE, ADA_DELETE_MAX = ADA_DELETE_NCQ_DSM_TRIM, } ada_delete_methods; static const char *ada_delete_method_names[] = { "NONE", "DISABLE", "CFA_ERASE", "DSM_TRIM", "NCQ_DSM_TRIM" }; #if 0 static const char *ada_delete_method_desc[] = { "NONE", "DISABLED", "CFA Erase", "DSM Trim", "DSM Trim via NCQ" }; #endif struct disk_params { u_int8_t heads; u_int8_t secs_per_track; u_int32_t cylinders; u_int32_t secsize; /* Number of bytes/logical sector */ u_int64_t sectors; /* Total number sectors */ }; #define TRIM_MAX_BLOCKS 8 #define TRIM_MAX_RANGES (TRIM_MAX_BLOCKS * ATA_DSM_BLK_RANGES) struct trim_request { uint8_t data[TRIM_MAX_RANGES * ATA_DSM_RANGE_SIZE]; TAILQ_HEAD(, bio) bps; }; struct ada_softc { struct cam_iosched_softc *cam_iosched; int outstanding_cmds; /* Number of active commands */ int refcount; /* Active xpt_action() calls */ ada_state state; ada_flags flags; ada_zone_mode zone_mode; ada_zone_flags zone_flags; struct ata_gp_log_dir ata_logdir; int valid_logdir_len; struct ata_identify_log_pages ata_iddir; int valid_iddir_len; uint64_t optimal_seq_zones; uint64_t optimal_nonseq_zones; uint64_t max_seq_zones; ada_quirks quirks; ada_delete_methods delete_method; int trim_max_ranges; int read_ahead; int write_cache; int unmappedio; int rotating; #ifdef ADA_TEST_FAILURE int force_read_error; int force_write_error; int periodic_read_error; int periodic_read_count; #endif struct disk_params params; struct disk *disk; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; struct callout sendordered_c; struct trim_request trim_req; #ifdef CAM_IO_STATS struct sysctl_ctx_list sysctl_stats_ctx; struct sysctl_oid *sysctl_stats_tree; u_int timeouts; u_int errors; u_int invalidations; #endif #define ADA_ANNOUNCETMP_SZ 80 char announce_temp[ADA_ANNOUNCETMP_SZ]; #define ADA_ANNOUNCE_SZ 400 char announce_buffer[ADA_ANNOUNCE_SZ]; }; struct ada_quirk_entry { struct scsi_inquiry_pattern inq_pat; ada_quirks quirks; }; static struct ada_quirk_entry ada_quirk_table[] = { { /* Hitachi Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Hitachi H??????????E3*", "*" }, /*quirks*/ADA_Q_4K }, { /* Samsung Advanced Format (4k) 
drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD155UI*", "*" }, /*quirks*/ADA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD204UI*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DL*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Barracuda Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???DM*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Barracuda Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DM*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500423AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500424AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640423AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640424AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750420AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750422AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750423AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???LT*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Red Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????CX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RS*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Green/Red Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Red Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????CX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????AZEX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????FZEX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RS*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PKT*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PKT*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PVT*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PVT*", "*" }, /*quirks*/ADA_Q_4K }, /* SSDs */ { /* * Corsair Force 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair CSSD-F*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Corsair Force 3 SSDs * 4k optimised & trim only works in 4k requests + 4k 
aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force 3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Corsair Neutron GTX SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Corsair Force GT & GS SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force G*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Crucial M4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "M4-CT???M4SSD2*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Crucial M500 SSDs MU07 firmware * NCQ Trim works */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M500*", "MU07" }, /*quirks*/0 }, { /* * Crucial M500 SSDs all other firmware * NCQ Trim doesn't work */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M500*", "*" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Crucial M550 SSDs * NCQ Trim doesn't work, but only on MU01 firmware */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M550*", "MU01" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Crucial MX100 SSDs * NCQ Trim doesn't work, but only on MU01 firmware */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*MX100*", "MU01" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Crucial RealSSD C300 SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "C300-CTFDDAC???MAG*", "*" }, /*quirks*/ADA_Q_4K }, { /* * FCCT M500 SSDs * NCQ Trim doesn't work */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "FCCT*M500*", "*" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Intel 320 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSA2CW*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel 330 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2CT*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel 510 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2MH*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel 520 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2BW*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel S3610 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2BX*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel X25-M Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSA2M*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Kingston E100 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SE100S3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Kingston HyperX 3k SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SH103S3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Marvell SSDs (entry taken from OpenSolaris) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "MARVELL SD88SA02*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Micron M500 SSDs firmware MU07 * NCQ Trim works? 
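                 * (MU07 is left unquirked here; every other M500
                 * firmware revision gets ADA_Q_NCQ_TRIM_BROKEN in the
                 * next entry.)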
*/ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M500*", "MU07" }, /*quirks*/0 }, { /* * Micron M500 SSDs all other firmware * NCQ Trim doesn't work */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M500*", "*" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Micron M5[15]0 SSDs * NCQ Trim doesn't work, but only MU01 firmware */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M5[15]0*", "MU01" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Micron 5100 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron 5100 MTFDDAK*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Agility 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Agility 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Deneva R Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "DENRSTE251M45*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Vertex 2 SSDs (inc pro series) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ?VERTEX2*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Vertex 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Vertex 4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX4*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Samsung 750 SSDs * 4k optimised, NCQ TRIM seems to work */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 750*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Samsung 830 Series SSDs * 4k optimised, NCQ TRIM Broken (normal TRIM is fine) */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG SSD 830 Series*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Samsung 840 SSDs * 4k optimised, NCQ TRIM Broken (normal TRIM is fine) */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 840*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Samsung 845 SSDs * 4k optimised, NCQ TRIM Broken (normal TRIM is fine) */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 845*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Samsung 850 SSDs * 4k optimised, NCQ TRIM broken (normal TRIM fine) */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 850*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Samsung SM863 Series SSDs (MZ7KM*) * 4k optimised, NCQ believed to be working */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7KM*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Samsung 843T Series SSDs (MZ7WD*) * Samsung PM851 Series SSDs (MZ7TE*) * Samsung PM853T Series SSDs (MZ7GE*) * 4k optimised, NCQ believed to be broken since these are * appear to be built with the same controllers as the 840/850. 
*/ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Same as for SAMSUNG MZ7* but enable the quirks for SSD * starting with MZ7* too */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "MZ7*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Samsung PM851 Series SSDs Dell OEM * device model "SAMSUNG SSD PM851 mSATA 256GB" * 4k optimised, NCQ broken */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG SSD PM851*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * SuperTalent TeraDrive CT SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "FTM??CT25H*", "*" }, /*quirks*/ADA_Q_4K }, { /* * XceedIOPS SATA SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SG9XCS2D*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Samsung drive that doesn't support READ LOG EXT or * READ LOG DMA EXT, despite reporting that it does in * ATA identify data: * SAMSUNG HD200HJ KF100-06 */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD200*", "*" }, /*quirks*/ADA_Q_LOG_BROKEN }, { /* * Samsung drive that doesn't support READ LOG EXT or * READ LOG DMA EXT, despite reporting that it does in * ATA identify data: * SAMSUNG HD501LJ CR100-10 */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD501*", "*" }, /*quirks*/ADA_Q_LOG_BROKEN }, { /* * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR) * Drive Managed SATA hard drive. This drive doesn't report * in firmware that it is a drive managed SMR drive. */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST8000AS000[23]*", "*" }, /*quirks*/ADA_Q_SMR_DM }, { /* Default */ { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, /*vendor*/"*", /*product*/"*", /*revision*/"*" }, /*quirks*/0 }, }; static disk_strategy_t adastrategy; static dumper_t adadump; static periph_init_t adainit; static void adadiskgonecb(struct disk *dp); static periph_oninv_t adaoninvalidate; static periph_dtor_t adacleanup; static void adaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static int adazonemodesysctl(SYSCTL_HANDLER_ARGS); static int adazonesupsysctl(SYSCTL_HANDLER_ARGS); static void adasysctlinit(void *context, int pending); static int adagetattr(struct bio *bp); static void adasetflags(struct ada_softc *softc, struct ccb_getdev *cgd); static periph_ctor_t adaregister; static void ada_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio); static void ada_cfaerase(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio); static int ada_zone_bio_to_ata(int disk_zone_cmd); static int ada_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp, int *queue_ccb); static periph_start_t adastart; static void adaprobedone(struct cam_periph *periph, union ccb *ccb); static void adazonedone(struct cam_periph *periph, union ccb *ccb); static void adadone(struct cam_periph *periph, union ccb *done_ccb); static int adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static void adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd); static timeout_t adasendorderedtag; static void adashutdown(void *arg, int howto); static void adasuspend(void *arg); static void adaresume(void *arg); #ifndef ADA_DEFAULT_TIMEOUT #define ADA_DEFAULT_TIMEOUT 30 /* Timeout in seconds */ #endif #ifndef ADA_DEFAULT_RETRY #define ADA_DEFAULT_RETRY 4 #endif #ifndef ADA_DEFAULT_SEND_ORDERED #define ADA_DEFAULT_SEND_ORDERED 1 #endif #ifndef ADA_DEFAULT_SPINDOWN_SHUTDOWN #define ADA_DEFAULT_SPINDOWN_SHUTDOWN 1 #endif #ifndef ADA_DEFAULT_SPINDOWN_SUSPEND #define 
ADA_DEFAULT_SPINDOWN_SUSPEND 1 #endif #ifndef ADA_DEFAULT_READ_AHEAD #define ADA_DEFAULT_READ_AHEAD 1 #endif #ifndef ADA_DEFAULT_WRITE_CACHE #define ADA_DEFAULT_WRITE_CACHE 1 #endif #define ADA_RA (softc->read_ahead >= 0 ? \ softc->read_ahead : ada_read_ahead) #define ADA_WC (softc->write_cache >= 0 ? \ softc->write_cache : ada_write_cache) /* * Most platforms map firmware geometry to actual, but some don't. If * not overridden, default to nothing. */ #ifndef ata_disk_firmware_geom_adjust #define ata_disk_firmware_geom_adjust(disk) #endif static int ada_retry_count = ADA_DEFAULT_RETRY; static int ada_default_timeout = ADA_DEFAULT_TIMEOUT; static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED; static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN; static int ada_spindown_suspend = ADA_DEFAULT_SPINDOWN_SUSPEND; static int ada_read_ahead = ADA_DEFAULT_READ_AHEAD; static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE; static SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0, "CAM Direct Access Disk driver"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RWTUN, &ada_retry_count, 0, "Normal I/O retry count"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RWTUN, &ada_default_timeout, 0, "Normal I/O timeout (in seconds)"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, send_ordered, CTLFLAG_RWTUN, &ada_send_ordered, 0, "Send Ordered Tags"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RWTUN, &ada_spindown_shutdown, 0, "Spin down upon shutdown"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RWTUN, &ada_spindown_suspend, 0, "Spin down upon suspend"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RWTUN, &ada_read_ahead, 0, "Enable disk read-ahead"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RWTUN, &ada_write_cache, 0, "Enable disk write cache"); /* * ADA_ORDEREDTAG_INTERVAL determines how often, relative * to the default timeout, we check to see whether an ordered * tagged transaction is appropriate to prevent simple tag * starvation. Since we'd like to ensure that there is at least * 1/2 of the timeout length left for a starved transaction to * complete after we've sent an ordered tag, we must poll at least * four times in every timeout period. This takes care of the worst * case where a starved transaction starts during an interval that * meets the requirement "don't send an ordered tag" test so it takes * us two intervals to determine that a tag must be sent. 
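 *
 * For example, with the default 30 second timeout and an interval of 4,
 * adasendorderedtag() fires every (30 * hz) / 4 ticks, i.e. every 7.5
 * seconds, so even the two-interval worst case flags a starved
 * transaction within about 15 seconds, leaving half of the timeout for
 * it to complete.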
*/ #ifndef ADA_ORDEREDTAG_INTERVAL #define ADA_ORDEREDTAG_INTERVAL 4 #endif static struct periph_driver adadriver = { adainit, "ada", TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0 }; static int adadeletemethodsysctl(SYSCTL_HANDLER_ARGS); PERIPHDRIVER_DECLARE(ada, adadriver); static MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers"); static int adaopen(struct disk *dp) { struct cam_periph *periph; struct ada_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; - if (cam_periph_acquire(periph) != CAM_REQ_CMP) { + if (cam_periph_acquire(periph) != 0) { return(ENXIO); } cam_periph_lock(periph); if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("adaopen\n")); softc = (struct ada_softc *)periph->softc; softc->flags |= ADA_FLAG_OPEN; cam_periph_unhold(periph); cam_periph_unlock(periph); return (0); } static int adaclose(struct disk *dp) { struct cam_periph *periph; struct ada_softc *softc; union ccb *ccb; int error; periph = (struct cam_periph *)dp->d_drv1; softc = (struct ada_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("adaclose\n")); /* We only sync the cache if the drive is capable of it. */ if ((softc->flags & ADA_FLAG_DIRTY) != 0 && (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) != 0 && (periph->flags & CAM_PERIPH_INVALID) == 0 && cam_periph_hold(periph, PRIBIO) == 0) { ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); cam_fill_ataio(&ccb->ataio, 1, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (softc->flags & ADA_FLAG_CAN_48BIT) ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0); else ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0); error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0, /*sense_flags*/0, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); softc->flags &= ~ADA_FLAG_DIRTY; xpt_release_ccb(ccb); cam_periph_unhold(periph); } softc->flags &= ~ADA_FLAG_OPEN; while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "adaclose", 1); cam_periph_unlock(periph); cam_periph_release(periph); return (0); } static void adaschedule(struct cam_periph *periph) { struct ada_softc *softc = (struct ada_softc *)periph->softc; if (softc->state != ADA_STATE_NORMAL) return; cam_iosched_schedule(softc->cam_iosched, periph); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void adastrategy(struct bio *bp) { struct cam_periph *periph; struct ada_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; softc = (struct ada_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastrategy(%p)\n", bp)); /* * If the device has been made invalid, error out */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } /* * Zone commands must be ordered, because they can depend on the * effects of previously issued commands, and they may affect * commands after them. */ if (bp->bio_cmd == BIO_ZONE) bp->bio_flags |= BIO_ORDERED; /* * Place it in the queue of disk activities for this disk */ cam_iosched_queue_work(softc->cam_iosched, bp); /* * Schedule ourselves for performing the work. 
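         * (adaschedule() is deliberately a no-op while the softc is in
         * any probe state; the bio queued above is picked up once the
         * state machine returns to ADA_STATE_NORMAL.)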
         */
        adaschedule(periph);
        cam_periph_unlock(periph);
        return;
}

static int
adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
        struct      cam_periph *periph;
        struct      ada_softc *softc;
        u_int       secsize;
        struct      ccb_ataio ataio;
        struct      disk *dp;
        uint64_t    lba;
        uint16_t    count;
        int         error = 0;

        dp = arg;
        periph = dp->d_drv1;
        softc = (struct ada_softc *)periph->softc;
        secsize = softc->params.secsize;
        lba = offset / secsize;
        count = length / secsize;

        if ((periph->flags & CAM_PERIPH_INVALID) != 0)
                return (ENXIO);

        memset(&ataio, 0, sizeof(ataio));
        if (length > 0) {
                xpt_setup_ccb(&ataio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
                ataio.ccb_h.ccb_state = ADA_CCB_DUMP;
                cam_fill_ataio(&ataio, 0, adadone, CAM_DIR_OUT, 0,
                    (u_int8_t *) virtual, length, ada_default_timeout*1000);
                if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
                    (lba + count >= ATA_MAX_28BIT_LBA || count >= 256)) {
                        ata_48bit_cmd(&ataio, ATA_WRITE_DMA48, 0, lba, count);
                } else {
                        ata_28bit_cmd(&ataio, ATA_WRITE_DMA, 0, lba, count);
                }
                error = cam_periph_runccb((union ccb *)&ataio, adaerror, 0,
                    SF_NO_RECOVERY | SF_NO_RETRY, NULL);
                if (error != 0)
                        printf("Aborting dump due to I/O error.\n");
                return (error);
        }

        if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
                xpt_setup_ccb(&ataio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

                /*
                 * Tell the drive to flush its internal cache. If we
                 * can't flush in 5s we have big problems. No need to
                 * wait the default 60s to detect problems.
                 */
                ataio.ccb_h.ccb_state = ADA_CCB_DUMP;
                cam_fill_ataio(&ataio, 0, adadone, CAM_DIR_NONE, 0, NULL, 0,
                    5*1000);
                if (softc->flags & ADA_FLAG_CAN_48BIT)
                        ata_48bit_cmd(&ataio, ATA_FLUSHCACHE48, 0, 0, 0);
                else
                        ata_28bit_cmd(&ataio, ATA_FLUSHCACHE, 0, 0, 0);
                error = cam_periph_runccb((union ccb *)&ataio, adaerror, 0,
                    SF_NO_RECOVERY | SF_NO_RETRY, NULL);
                if (error != 0)
                        xpt_print(periph->path, "Synchronize cache failed\n");
        }
        return (error);
}

static void
adainit(void)
{
        cam_status status;

        /*
         * Install a global async callback.  This callback will
         * receive async callbacks like "new device found".
         */
        status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);

        if (status != CAM_REQ_CMP) {
                printf("ada: Failed to attach master async callback "
                       "due to status 0x%x!\n", status);
        } else if (ada_send_ordered) {
                /* Register our event handlers */
                if ((EVENTHANDLER_REGISTER(power_suspend, adasuspend,
                    NULL, EVENTHANDLER_PRI_LAST)) == NULL)
                        printf("adainit: power event registration failed!\n");
                if ((EVENTHANDLER_REGISTER(power_resume, adaresume,
                    NULL, EVENTHANDLER_PRI_LAST)) == NULL)
                        printf("adainit: power event registration failed!\n");
                if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
                    NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
                        printf("adainit: shutdown event registration failed!\n");
        }
}

/*
 * Callback from GEOM, called when it has finished cleaning up its
 * resources.
 */
static void
adadiskgonecb(struct disk *dp)
{
        struct cam_periph *periph;

        periph = (struct cam_periph *)dp->d_drv1;
        cam_periph_release(periph);
}

static void
adaoninvalidate(struct cam_periph *periph)
{
        struct ada_softc *softc;

        softc = (struct ada_softc *)periph->softc;

        /*
         * De-register any async callbacks.
         */
        xpt_register_async(0, adaasync, periph, periph->path);
#ifdef CAM_IO_STATS
        softc->invalidations++;
#endif

        /*
         * Return all queued I/O with ENXIO.
         * XXX Handle any transactions queued to the card
         * with XPT_ABORT_CCB.
*/ cam_iosched_flush(softc->cam_iosched, NULL, ENXIO); disk_gone(softc->disk); } static void adacleanup(struct cam_periph *periph) { struct ada_softc *softc; softc = (struct ada_softc *)periph->softc; cam_periph_unlock(periph); cam_iosched_fini(softc->cam_iosched); /* * If we can't free the sysctl tree, oh well... */ if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0) { #ifdef CAM_IO_STATS if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0) xpt_print(periph->path, "can't remove sysctl stats context\n"); #endif if (sysctl_ctx_free(&softc->sysctl_ctx) != 0) xpt_print(periph->path, "can't remove sysctl context\n"); } disk_destroy(softc->disk); callout_drain(&softc->sendordered_c); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void adasetdeletemethod(struct ada_softc *softc) { if (softc->flags & ADA_FLAG_CAN_NCQ_TRIM) softc->delete_method = ADA_DELETE_NCQ_DSM_TRIM; else if (softc->flags & ADA_FLAG_CAN_TRIM) softc->delete_method = ADA_DELETE_DSM_TRIM; else if ((softc->flags & ADA_FLAG_CAN_CFA) && !(softc->flags & ADA_FLAG_CAN_48BIT)) softc->delete_method = ADA_DELETE_CFA_ERASE; else softc->delete_method = ADA_DELETE_NONE; } static void adaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct ccb_getdev cgd; struct cam_periph *periph; struct ada_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_ATA) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(adaregister, adaoninvalidate, adacleanup, adastart, "ada", CAM_PERIPH_BIO, path, adaasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("adaasync: Unable to attach to new device " "due to status 0x%x\n", status); break; } case AC_GETDEV_CHANGED: { softc = (struct ada_softc *)periph->softc; xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); /* * Set/clear support flags based on the new Identify data. 
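                 * (adasetflags() below recomputes every ADA_FLAG_CAN_*
                 * bit from the fresh XPT_GDEV_TYPE data fetched just
                 * above.)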
*/ adasetflags(softc, &cgd); cam_periph_async(periph, code, path, arg); break; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct ada_softc *softc; softc = periph->softc; disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); } break; } case AC_SENT_BDR: case AC_BUS_RESET: { softc = (struct ada_softc *)periph->softc; cam_periph_async(periph, code, path, arg); if (softc->state != ADA_STATE_NORMAL) break; xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if (ADA_RA >= 0 && softc->flags & ADA_FLAG_CAN_RAHEAD) softc->state = ADA_STATE_RAHEAD; else if (ADA_WC >= 0 && softc->flags & ADA_FLAG_CAN_WCACHE) softc->state = ADA_STATE_WCACHE; else if ((softc->flags & ADA_FLAG_CAN_LOG) && (softc->zone_mode != ADA_ZONE_NONE)) softc->state = ADA_STATE_LOGDIR; else break; - if (cam_periph_acquire(periph) != CAM_REQ_CMP) + if (cam_periph_acquire(periph) != 0) softc->state = ADA_STATE_NORMAL; else xpt_schedule(periph, CAM_PRIORITY_DEV); } default: cam_periph_async(periph, code, path, arg); break; } } static int adazonemodesysctl(SYSCTL_HANDLER_ARGS) { char tmpbuf[40]; struct ada_softc *softc; int error; softc = (struct ada_softc *)arg1; switch (softc->zone_mode) { case ADA_ZONE_DRIVE_MANAGED: snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed"); break; case ADA_ZONE_HOST_AWARE: snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware"); break; case ADA_ZONE_HOST_MANAGED: snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed"); break; case ADA_ZONE_NONE: default: snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned"); break; } error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req); return (error); } static int adazonesupsysctl(SYSCTL_HANDLER_ARGS) { char tmpbuf[180]; struct ada_softc *softc; struct sbuf sb; int error, first; unsigned int i; softc = (struct ada_softc *)arg1; error = 0; first = 1; sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0); for (i = 0; i < sizeof(ada_zone_desc_table) / sizeof(ada_zone_desc_table[0]); i++) { if (softc->zone_flags & ada_zone_desc_table[i].value) { if (first == 0) sbuf_printf(&sb, ", "); else first = 0; sbuf_cat(&sb, ada_zone_desc_table[i].desc); } } if (first == 1) sbuf_printf(&sb, "None"); sbuf_finish(&sb); error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); return (error); } static void adasysctlinit(void *context, int pending) { struct cam_periph *periph; struct ada_softc *softc; char tmpstr[32], tmpstr2[16]; periph = (struct cam_periph *)context; /* periph was held for us when this task was enqueued */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_release(periph); return; } softc = (struct ada_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d",periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= ADA_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) { printf("adasysctlinit: unable to allocate sysctl tree\n"); cam_periph_release(periph); return; } SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RW, softc, 0, adadeletemethodsysctl, "A", "BIO_DELETE execution method"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "read_ahead", CTLFLAG_RW | 
CTLFLAG_MPSAFE, &softc->read_ahead, 0, "Enable disk read ahead."); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "write_cache", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->write_cache, 0, "Enable disk write cache."); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "unmapped_io", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->unmappedio, 0, "Unmapped I/O leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "rotating", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->rotating, 0, "Rotating media"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD, softc, 0, adazonemodesysctl, "A", "Zone Mode"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD, softc, 0, adazonesupsysctl, "A", "Zone Support"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones, "Optimal Number of Open Sequential Write Preferred Zones"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "optimal_nonseq_zones", CTLFLAG_RD, &softc->optimal_nonseq_zones, "Optimal Number of Non-Sequentially Written Sequential Write " "Preferred Zones"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones, "Maximum Number of Open Sequential Write Required Zones"); #ifdef ADA_TEST_FAILURE /* * Add a 'door bell' sysctl which allows one to set it from userland * and cause something bad to happen. For the moment, we only allow * whacking the next read or write. */ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "force_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->force_read_error, 0, "Force a read error for the next N reads."); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "force_write_error", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->force_write_error, 0, "Force a write error for the next N writes."); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "periodic_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->periodic_read_error, 0, "Force a read error every N reads (don't set too low)."); #endif #ifdef CAM_IO_STATS softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats", CTLFLAG_RD, 0, "Statistics"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "timeouts", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->timeouts, 0, "Device timeouts reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "errors", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->errors, 0, "Transport errors reported by the SIM."); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "pack_invalidations", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->invalidations, 0, "Device pack invalidations."); #endif cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx, softc->sysctl_tree); cam_periph_release(periph); } static int adagetattr(struct bio *bp) { int ret; struct cam_periph *periph; periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, periph->path); 
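        /*
         * xpt_getattr() returns 0 only when it filled in the attribute;
         * only then is the request marked fully completed below.
         */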
        cam_periph_unlock(periph);
        if (ret == 0)
                bp->bio_completed = bp->bio_length;
        return ret;
}

static int
adadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
{
        char buf[16];
        const char *p;
        struct ada_softc *softc;
        int i, error, value, methods;

        softc = (struct ada_softc *)arg1;

        value = softc->delete_method;
        if (value < 0 || value > ADA_DELETE_MAX)
                p = "UNKNOWN";
        else
                p = ada_delete_method_names[value];
        strncpy(buf, p, sizeof(buf));
        error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        methods = 1 << ADA_DELETE_DISABLE;
        if ((softc->flags & ADA_FLAG_CAN_CFA) &&
            !(softc->flags & ADA_FLAG_CAN_48BIT))
                methods |= 1 << ADA_DELETE_CFA_ERASE;
        if (softc->flags & ADA_FLAG_CAN_TRIM)
                methods |= 1 << ADA_DELETE_DSM_TRIM;
        if (softc->flags & ADA_FLAG_CAN_NCQ_TRIM)
                methods |= 1 << ADA_DELETE_NCQ_DSM_TRIM;
        for (i = 0; i <= ADA_DELETE_MAX; i++) {
                if (!(methods & (1 << i)) ||
                    strcmp(buf, ada_delete_method_names[i]) != 0)
                        continue;
                softc->delete_method = i;
                return (0);
        }
        return (EINVAL);
}

static void
adasetflags(struct ada_softc *softc, struct ccb_getdev *cgd)
{
        if ((cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
            (cgd->inq_flags & SID_DMA))
                softc->flags |= ADA_FLAG_CAN_DMA;
        else
                softc->flags &= ~ADA_FLAG_CAN_DMA;

        if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
                softc->flags |= ADA_FLAG_CAN_48BIT;
                if (cgd->inq_flags & SID_DMA48)
                        softc->flags |= ADA_FLAG_CAN_DMA48;
                else
                        softc->flags &= ~ADA_FLAG_CAN_DMA48;
        } else
                softc->flags &= ~(ADA_FLAG_CAN_48BIT | ADA_FLAG_CAN_DMA48);

        if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
                softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
        else
                softc->flags &= ~ADA_FLAG_CAN_FLUSHCACHE;

        if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
                softc->flags |= ADA_FLAG_CAN_POWERMGT;
        else
                softc->flags &= ~ADA_FLAG_CAN_POWERMGT;

        if ((cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
            (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue))
                softc->flags |= ADA_FLAG_CAN_NCQ;
        else
                softc->flags &= ~ADA_FLAG_CAN_NCQ;

        if ((cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
            (cgd->inq_flags & SID_DMA)) {
                softc->flags |= ADA_FLAG_CAN_TRIM;
                softc->trim_max_ranges = TRIM_MAX_RANGES;
                if (cgd->ident_data.max_dsm_blocks != 0) {
                        softc->trim_max_ranges =
                            min(cgd->ident_data.max_dsm_blocks *
                                ATA_DSM_BLK_RANGES, softc->trim_max_ranges);
                }
                /*
                 * If we can do RCVSND_FPDMA_QUEUED commands, we may be able
                 * to do NCQ trims, if we support trims at all. We also need
                 * support from the SIM to do things properly. Perhaps we
                 * should also check whether log 13 dword 0 bit 0 and
                 * dword 1 bit 0 are set...
                 */
                if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 &&
                    (softc->flags & ADA_FLAG_PIM_ATA_EXT) != 0 &&
                    (cgd->ident_data.satacapabilities2 &
                     ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 &&
                    (softc->flags & ADA_FLAG_CAN_TRIM) != 0)
                        softc->flags |= ADA_FLAG_CAN_NCQ_TRIM;
                else
                        softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM;
        } else
                softc->flags &= ~(ADA_FLAG_CAN_TRIM | ADA_FLAG_CAN_NCQ_TRIM);

        if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
                softc->flags |= ADA_FLAG_CAN_CFA;
        else
                softc->flags &= ~ADA_FLAG_CAN_CFA;

        /*
         * Now that we've set the appropriate flags, setup the delete
         * method.
*/ adasetdeletemethod(softc); if ((cgd->ident_data.support.extension & ATA_SUPPORT_GENLOG) && ((softc->quirks & ADA_Q_LOG_BROKEN) == 0)) softc->flags |= ADA_FLAG_CAN_LOG; else softc->flags &= ~ADA_FLAG_CAN_LOG; if ((cgd->ident_data.support3 & ATA_SUPPORT_ZONE_MASK) == ATA_SUPPORT_ZONE_HOST_AWARE) softc->zone_mode = ADA_ZONE_HOST_AWARE; else if (((cgd->ident_data.support3 & ATA_SUPPORT_ZONE_MASK) == ATA_SUPPORT_ZONE_DEV_MANAGED) || (softc->quirks & ADA_Q_SMR_DM)) softc->zone_mode = ADA_ZONE_DRIVE_MANAGED; else softc->zone_mode = ADA_ZONE_NONE; if (cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD) softc->flags |= ADA_FLAG_CAN_RAHEAD; else softc->flags &= ~ADA_FLAG_CAN_RAHEAD; if (cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) softc->flags |= ADA_FLAG_CAN_WCACHE; else softc->flags &= ~ADA_FLAG_CAN_WCACHE; } static cam_status adaregister(struct cam_periph *periph, void *arg) { struct ada_softc *softc; struct ccb_pathinq cpi; struct ccb_getdev *cgd; struct disk_params *dp; struct sbuf sb; char *announce_buf; caddr_t match; u_int maxio; int quirks; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("adaregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc == NULL) { printf("adaregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } announce_buf = softc->announce_temp; bzero(announce_buf, ADA_ANNOUNCETMP_SZ); if (cam_iosched_init(&softc->cam_iosched, periph) != 0) { printf("adaregister: Unable to probe new device. " "Unable to allocate iosched memory\n"); free(softc, M_DEVBUF); return(CAM_REQ_CMP_ERR); } periph->softc = softc; /* * See if this device has any quirks. */ match = cam_quirkmatch((caddr_t)&cgd->ident_data, (caddr_t)ada_quirk_table, nitems(ada_quirk_table), sizeof(*ada_quirk_table), ata_identify_match); if (match != NULL) softc->quirks = ((struct ada_quirk_entry *)match)->quirks; else softc->quirks = ADA_Q_NONE; xpt_path_inq(&cpi, periph->path); TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph); /* * Register this media as a disk */ (void)cam_periph_hold(periph, PRIBIO); cam_periph_unlock(periph); snprintf(announce_buf, ADA_ANNOUNCETMP_SZ, "kern.cam.ada.%d.quirks", periph->unit_number); quirks = softc->quirks; TUNABLE_INT_FETCH(announce_buf, &quirks); softc->quirks = quirks; softc->read_ahead = -1; snprintf(announce_buf, ADA_ANNOUNCETMP_SZ, "kern.cam.ada.%d.read_ahead", periph->unit_number); TUNABLE_INT_FETCH(announce_buf, &softc->read_ahead); softc->write_cache = -1; snprintf(announce_buf, ADA_ANNOUNCETMP_SZ, "kern.cam.ada.%d.write_cache", periph->unit_number); TUNABLE_INT_FETCH(announce_buf, &softc->write_cache); /* * Set support flags based on the Identify data and quirks. */ adasetflags(softc, cgd); /* Disable queue sorting for non-rotational media by default. */ if (cgd->ident_data.media_rotation_rate == ATA_RATE_NON_ROTATING) { softc->rotating = 0; } else { softc->rotating = 1; } cam_iosched_set_sort_queue(softc->cam_iosched, softc->rotating ? 
-1 : 0); adagetparams(periph, cgd); softc->disk = disk_alloc(); softc->disk->d_rotation_rate = cgd->ident_data.media_rotation_rate; softc->disk->d_devstat = devstat_new_entry(periph->periph_name, periph->unit_number, softc->params.secsize, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_DISK); softc->disk->d_open = adaopen; softc->disk->d_close = adaclose; softc->disk->d_strategy = adastrategy; softc->disk->d_getattr = adagetattr; softc->disk->d_dump = adadump; softc->disk->d_gone = adadiskgonecb; softc->disk->d_name = "ada"; softc->disk->d_drv1 = periph; maxio = cpi.maxio; /* Honor max I/O size of SIM */ if (maxio == 0) maxio = DFLTPHYS; /* traditional default */ else if (maxio > MAXPHYS) maxio = MAXPHYS; /* for safety */ if (softc->flags & ADA_FLAG_CAN_48BIT) maxio = min(maxio, 65536 * softc->params.secsize); else /* 28bit ATA command limit */ maxio = min(maxio, 256 * softc->params.secsize); softc->disk->d_maxsize = maxio; softc->disk->d_unit = periph->unit_number; softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE; if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE; if (softc->flags & ADA_FLAG_CAN_TRIM) { softc->disk->d_flags |= DISKFLAG_CANDELETE; softc->disk->d_delmaxsize = softc->params.secsize * ATA_DSM_RANGE_MAX * softc->trim_max_ranges; } else if ((softc->flags & ADA_FLAG_CAN_CFA) && !(softc->flags & ADA_FLAG_CAN_48BIT)) { softc->disk->d_flags |= DISKFLAG_CANDELETE; softc->disk->d_delmaxsize = 256 * softc->params.secsize; } else softc->disk->d_delmaxsize = maxio; if ((cpi.hba_misc & PIM_UNMAPPED) != 0) { softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO; softc->unmappedio = 1; } if (cpi.hba_misc & PIM_ATA_EXT) softc->flags |= ADA_FLAG_PIM_ATA_EXT; strlcpy(softc->disk->d_descr, cgd->ident_data.model, MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model))); strlcpy(softc->disk->d_ident, cgd->ident_data.serial, MIN(sizeof(softc->disk->d_ident), sizeof(cgd->ident_data.serial))); softc->disk->d_hba_vendor = cpi.hba_vendor; softc->disk->d_hba_device = cpi.hba_device; softc->disk->d_hba_subvendor = cpi.hba_subvendor; softc->disk->d_hba_subdevice = cpi.hba_subdevice; softc->disk->d_sectorsize = softc->params.secsize; softc->disk->d_mediasize = (off_t)softc->params.sectors * softc->params.secsize; if (ata_physical_sector_size(&cgd->ident_data) != softc->params.secsize) { softc->disk->d_stripesize = ata_physical_sector_size(&cgd->ident_data); softc->disk->d_stripeoffset = (softc->disk->d_stripesize - ata_logical_sector_offset(&cgd->ident_data)) % softc->disk->d_stripesize; } else if (softc->quirks & ADA_Q_4K) { softc->disk->d_stripesize = 4096; softc->disk->d_stripeoffset = 0; } softc->disk->d_fwsectors = softc->params.secs_per_track; softc->disk->d_fwheads = softc->params.heads; ata_disk_firmware_geom_adjust(softc->disk); /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * adadiskgonecb()) telling us that our provider has been freed. 
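         * (adadiskgonecb() above simply calls cam_periph_release() to
         * drop this reference.)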
*/ - if (cam_periph_acquire(periph) != CAM_REQ_CMP) { + if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); dp = &softc->params; snprintf(announce_buf, ADA_ANNOUNCETMP_SZ, "%juMB (%ju %u byte sectors)", ((uintmax_t)dp->secsize * dp->sectors) / (1024 * 1024), (uintmax_t)dp->sectors, dp->secsize); sbuf_new(&sb, softc->announce_buffer, ADA_ANNOUNCE_SZ, SBUF_FIXEDLEN); xpt_announce_periph_sbuf(periph, &sb, announce_buf); xpt_announce_quirks_sbuf(periph, &sb, softc->quirks, ADA_Q_BIT_STRING); sbuf_finish(&sb); sbuf_putbuf(&sb); /* * Create our sysctl variables, now that we know * we have successfully attached. */ - if (cam_periph_acquire(periph) == CAM_REQ_CMP) + if (cam_periph_acquire(periph) == 0) taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); /* * Add async callbacks for bus reset and * bus device reset calls. I don't bother * checking if this fails as, in most cases, * the system will function just fine without * them and the only alternative would be to * not attach the device on failure. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_GETDEV_CHANGED | AC_ADVINFO_CHANGED, adaasync, periph, periph->path); /* * Schedule a periodic event to occasionally send an * ordered tag to a device. */ callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0); callout_reset(&softc->sendordered_c, (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL, adasendorderedtag, softc); if (ADA_RA >= 0 && softc->flags & ADA_FLAG_CAN_RAHEAD) { softc->state = ADA_STATE_RAHEAD; } else if (ADA_WC >= 0 && softc->flags & ADA_FLAG_CAN_WCACHE) { softc->state = ADA_STATE_WCACHE; } else if ((softc->flags & ADA_FLAG_CAN_LOG) && (softc->zone_mode != ADA_ZONE_NONE)) { softc->state = ADA_STATE_LOGDIR; } else { /* * Nothing to probe, so we can just transition to the * normal state. */ adaprobedone(periph, NULL); return(CAM_REQ_CMP); } xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static int ada_dsmtrim_req_create(struct ada_softc *softc, struct bio *bp, struct trim_request *req) { uint64_t lastlba = (uint64_t)-1; int c, lastcount = 0, off, ranges = 0; bzero(req, sizeof(*req)); TAILQ_INIT(&req->bps); do { uint64_t lba = bp->bio_pblkno; int count = bp->bio_bcount / softc->params.secsize; /* Try to extend the previous range. 
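                 * Each DSM range below is 8 bytes: a 48-bit starting
                 * LBA and a 16-bit sector count, so one range covers at
                 * most ATA_DSM_RANGE_MAX (65535) sectors; e.g. a
                 * 300000-sector delete packs into four full ranges plus
                 * a fifth of 37860 sectors.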
                 */
                if (lba == lastlba) {
                        c = min(count, ATA_DSM_RANGE_MAX - lastcount);
                        lastcount += c;
                        off = (ranges - 1) * ATA_DSM_RANGE_SIZE;
                        req->data[off + 6] = lastcount & 0xff;
                        req->data[off + 7] = (lastcount >> 8) & 0xff;
                        count -= c;
                        lba += c;
                }

                while (count > 0) {
                        c = min(count, ATA_DSM_RANGE_MAX);
                        off = ranges * ATA_DSM_RANGE_SIZE;
                        req->data[off + 0] = lba & 0xff;
                        req->data[off + 1] = (lba >> 8) & 0xff;
                        req->data[off + 2] = (lba >> 16) & 0xff;
                        req->data[off + 3] = (lba >> 24) & 0xff;
                        req->data[off + 4] = (lba >> 32) & 0xff;
                        req->data[off + 5] = (lba >> 40) & 0xff;
                        req->data[off + 6] = c & 0xff;
                        req->data[off + 7] = (c >> 8) & 0xff;
                        lba += c;
                        count -= c;
                        lastcount = c;
                        ranges++;
                        /*
                         * It's the caller's responsibility to ensure the
                         * request will fit so we don't need to check for
                         * overrun here.
                         */
                }
                lastlba = lba;
                TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue);
                bp = cam_iosched_next_trim(softc->cam_iosched);
                if (bp == NULL)
                        break;
                if (bp->bio_bcount / softc->params.secsize >
                    (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) {
                        cam_iosched_put_back_trim(softc->cam_iosched, bp);
                        break;
                }
        } while (1);

        return (ranges);
}

static void
ada_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
{
        struct trim_request *req = &softc->trim_req;
        int ranges;

        ranges = ada_dsmtrim_req_create(softc, bp, req);
        cam_fill_ataio(ataio, ada_retry_count, adadone, CAM_DIR_OUT, 0,
            req->data, howmany(ranges, ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE,
            ada_default_timeout * 1000);
        ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT, ATA_DSM_TRIM, 0,
            howmany(ranges, ATA_DSM_BLK_RANGES));
}

static void
ada_ncq_dsmtrim(struct ada_softc *softc, struct bio *bp,
    struct ccb_ataio *ataio)
{
        struct trim_request *req = &softc->trim_req;
        int ranges;

        ranges = ada_dsmtrim_req_create(softc, bp, req);
        cam_fill_ataio(ataio, ada_retry_count, adadone, CAM_DIR_OUT, 0,
            req->data, howmany(ranges, ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE,
            ada_default_timeout * 1000);
        ata_ncq_cmd(ataio, ATA_SEND_FPDMA_QUEUED, 0,
            howmany(ranges, ATA_DSM_BLK_RANGES));
        ataio->cmd.sector_count_exp = ATA_SFPDMA_DSM;
        ataio->ata_flags |= ATA_FLAG_AUX;
        ataio->aux = 1;
}

static void
ada_cfaerase(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
{
        struct trim_request *req = &softc->trim_req;
        uint64_t lba = bp->bio_pblkno;
        uint16_t count = bp->bio_bcount / softc->params.secsize;

        bzero(req, sizeof(*req));
        TAILQ_INIT(&req->bps);
        TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue);

        cam_fill_ataio(ataio, ada_retry_count, adadone, CAM_DIR_NONE, 0,
            NULL, 0, ada_default_timeout*1000);

        if (count >= 256)
                count = 0;
        ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
}

static int
ada_zone_bio_to_ata(int disk_zone_cmd)
{
        switch (disk_zone_cmd) {
        case DISK_ZONE_OPEN:
                return ATA_ZM_OPEN_ZONE;
        case DISK_ZONE_CLOSE:
                return ATA_ZM_CLOSE_ZONE;
        case DISK_ZONE_FINISH:
                return ATA_ZM_FINISH_ZONE;
        case DISK_ZONE_RWP:
                return ATA_ZM_RWP;
        }

        return -1;
}

static int
ada_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
    int *queue_ccb)
{
        struct ada_softc *softc;
        int error;

        error = 0;

        if (bp->bio_cmd != BIO_ZONE) {
                error = EINVAL;
                goto bailout;
        }

        softc = periph->softc;

        switch (bp->bio_zone.zone_cmd) {
        case DISK_ZONE_OPEN:
        case DISK_ZONE_CLOSE:
        case DISK_ZONE_FINISH:
        case DISK_ZONE_RWP: {
                int zone_flags;
                int zone_sa;
                uint64_t lba;

                zone_sa = ada_zone_bio_to_ata(bp->bio_zone.zone_cmd);
                if (zone_sa == -1) {
                        xpt_print(periph->path, "Cannot translate zone "
                            "cmd %#x to ATA\n", bp->bio_zone.zone_cmd);
                        error = EINVAL;
                        goto bailout;
                }

                zone_flags = 0;
                lba = bp->bio_zone.zone_params.rwp.id;

                if (bp->bio_zone.zone_params.rwp.flags &
                    DISK_ZONE_RWP_FLAG_ALL)
                        zone_flags |= ZBC_OUT_ALL;

                ata_zac_mgmt_out(&ccb->ataio,
                    /*retries*/ ada_retry_count,
                    /*cbfcnp*/ adadone,
                    /*use_ncq*/ (softc->flags & ADA_FLAG_PIM_ATA_EXT) ? 1 : 0,
                    /*zm_action*/ zone_sa,
                    /*zone_id*/ lba,
                    /*zone_flags*/ zone_flags,
                    /*sector_count*/ 0,
                    /*data_ptr*/ NULL,
                    /*dxfer_len*/ 0,
                    /*timeout*/ ada_default_timeout * 1000);
                *queue_ccb = 1;

                break;
        }
        case DISK_ZONE_REPORT_ZONES: {
                uint8_t *rz_ptr;
                uint32_t num_entries, alloc_size;
                struct disk_zone_report *rep;

                rep = &bp->bio_zone.zone_params.report;

                num_entries = rep->entries_allocated;
                if (num_entries == 0) {
                        xpt_print(periph->path, "No entries allocated for "
                            "Report Zones request\n");
                        error = EINVAL;
                        goto bailout;
                }
                alloc_size = sizeof(struct scsi_report_zones_hdr) +
                    (sizeof(struct scsi_report_zones_desc) * num_entries);
                alloc_size = min(alloc_size, softc->disk->d_maxsize);
                rz_ptr = malloc(alloc_size, M_ATADA, M_NOWAIT | M_ZERO);
                if (rz_ptr == NULL) {
                        xpt_print(periph->path, "Unable to allocate memory "
                            "for Report Zones request\n");
                        error = ENOMEM;
                        goto bailout;
                }

                ata_zac_mgmt_in(&ccb->ataio,
                    /*retries*/ ada_retry_count,
                    /*cbfcnp*/ adadone,
                    /*use_ncq*/ (softc->flags & ADA_FLAG_PIM_ATA_EXT) ? 1 : 0,
                    /*zm_action*/ ATA_ZM_REPORT_ZONES,
                    /*zone_id*/ rep->starting_id,
                    /*zone_flags*/ rep->rep_options,
                    /*data_ptr*/ rz_ptr,
                    /*dxfer_len*/ alloc_size,
                    /*timeout*/ ada_default_timeout * 1000);

                /*
                 * For BIO_ZONE, this isn't normally needed. However, it
                 * is used by devstat_end_transaction_bio() to determine
                 * how much data was transferred.
                 */
                /*
                 * XXX KDM we have a problem.  But I'm not sure how to fix
                 * it.  devstat uses bio_bcount - bio_resid to calculate
                 * the amount of data transferred.  The GEOM disk code
                 * uses bio_length - bio_resid to calculate the amount of
                 * data in bio_completed.  We have different structure
                 * sizes above and below the ada(4) driver.  So, if we
                 * use the sizes above, the amount transferred won't be
                 * quite accurate for devstat.  If we use different sizes
                 * for bio_bcount and bio_length (above and below
                 * respectively), then the residual needs to match one or
                 * the other.  Everything is calculated after the bio
                 * leaves the driver, so changing the values around isn't
                 * really an option.  For now, just set the count to the
                 * passed in length.  This means that the calculations
                 * above (e.g. bio_completed) will be correct, but the
                 * amount of data reported to devstat will be slightly
                 * under or overstated.
*/ bp->bio_bcount = bp->bio_length; *queue_ccb = 1; break; } case DISK_ZONE_GET_PARAMS: { struct disk_zone_disk_params *params; params = &bp->bio_zone.zone_params.disk_params; bzero(params, sizeof(*params)); switch (softc->zone_mode) { case ADA_ZONE_DRIVE_MANAGED: params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED; break; case ADA_ZONE_HOST_AWARE: params->zone_mode = DISK_ZONE_MODE_HOST_AWARE; break; case ADA_ZONE_HOST_MANAGED: params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED; break; default: case ADA_ZONE_NONE: params->zone_mode = DISK_ZONE_MODE_NONE; break; } if (softc->zone_flags & ADA_ZONE_FLAG_URSWRZ) params->flags |= DISK_ZONE_DISK_URSWRZ; if (softc->zone_flags & ADA_ZONE_FLAG_OPT_SEQ_SET) { params->optimal_seq_zones = softc->optimal_seq_zones; params->flags |= DISK_ZONE_OPT_SEQ_SET; } if (softc->zone_flags & ADA_ZONE_FLAG_OPT_NONSEQ_SET) { params->optimal_nonseq_zones = softc->optimal_nonseq_zones; params->flags |= DISK_ZONE_OPT_NONSEQ_SET; } if (softc->zone_flags & ADA_ZONE_FLAG_MAX_SEQ_SET) { params->max_seq_zones = softc->max_seq_zones; params->flags |= DISK_ZONE_MAX_SEQ_SET; } if (softc->zone_flags & ADA_ZONE_FLAG_RZ_SUP) params->flags |= DISK_ZONE_RZ_SUP; if (softc->zone_flags & ADA_ZONE_FLAG_OPEN_SUP) params->flags |= DISK_ZONE_OPEN_SUP; if (softc->zone_flags & ADA_ZONE_FLAG_CLOSE_SUP) params->flags |= DISK_ZONE_CLOSE_SUP; if (softc->zone_flags & ADA_ZONE_FLAG_FINISH_SUP) params->flags |= DISK_ZONE_FINISH_SUP; if (softc->zone_flags & ADA_ZONE_FLAG_RWP_SUP) params->flags |= DISK_ZONE_RWP_SUP; break; } default: break; } bailout: return (error); } static void adastart(struct cam_periph *periph, union ccb *start_ccb) { struct ada_softc *softc = (struct ada_softc *)periph->softc; struct ccb_ataio *ataio = &start_ccb->ataio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastart\n")); switch (softc->state) { case ADA_STATE_NORMAL: { struct bio *bp; u_int8_t tag_code; bp = cam_iosched_next_bio(softc->cam_iosched); if (bp == NULL) { xpt_release_ccb(start_ccb); break; } if ((bp->bio_flags & BIO_ORDERED) != 0 || (bp->bio_cmd != BIO_DELETE && (softc->flags & ADA_FLAG_NEED_OTAG) != 0)) { softc->flags &= ~ADA_FLAG_NEED_OTAG; softc->flags |= ADA_FLAG_WAS_OTAG; tag_code = 0; } else { tag_code = 1; } switch (bp->bio_cmd) { case BIO_WRITE: case BIO_READ: { uint64_t lba = bp->bio_pblkno; uint16_t count = bp->bio_bcount / softc->params.secsize; void *data_ptr; int rw_op; if (bp->bio_cmd == BIO_WRITE) { softc->flags |= ADA_FLAG_DIRTY; rw_op = CAM_DIR_OUT; } else { rw_op = CAM_DIR_IN; } data_ptr = bp->bio_data; if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) { rw_op |= CAM_DATA_BIO; data_ptr = bp; } #ifdef ADA_TEST_FAILURE int fail = 0; /* * Support the failure ioctls. If the command is a * read, and there are pending forced read errors, or * if a write and pending write errors, then fail this * operation with EIO. This is useful for testing * purposes. Also, support having every Nth read fail. * * This is a rather blunt tool. 
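                         * A typical (hypothetical) smoke test from
                         * userland, with "ada0" standing in for the
                         * unit under test:
                         *   sysctl kern.cam.ada.0.force_read_error=1
                         *   dd if=/dev/ada0 of=/dev/null bs=64k count=1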
*/ if (bp->bio_cmd == BIO_READ) { if (softc->force_read_error) { softc->force_read_error--; fail = 1; } if (softc->periodic_read_error > 0) { if (++softc->periodic_read_count >= softc->periodic_read_error) { softc->periodic_read_count = 0; fail = 1; } } } else { if (softc->force_write_error) { softc->force_write_error--; fail = 1; } } if (fail) { biofinish(bp, NULL, EIO); xpt_release_ccb(start_ccb); adaschedule(periph); return; } #endif KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 || round_page(bp->bio_bcount + bp->bio_ma_offset) / PAGE_SIZE == bp->bio_ma_n, ("Short bio %p", bp)); cam_fill_ataio(ataio, ada_retry_count, adadone, rw_op, 0, data_ptr, bp->bio_bcount, ada_default_timeout*1000); if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) { if (bp->bio_cmd == BIO_READ) { ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED, lba, count); } else { ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED, lba, count); } } else if ((softc->flags & ADA_FLAG_CAN_48BIT) && (lba + count >= ATA_MAX_28BIT_LBA || count > 256)) { if (softc->flags & ADA_FLAG_CAN_DMA48) { if (bp->bio_cmd == BIO_READ) { ata_48bit_cmd(ataio, ATA_READ_DMA48, 0, lba, count); } else { ata_48bit_cmd(ataio, ATA_WRITE_DMA48, 0, lba, count); } } else { if (bp->bio_cmd == BIO_READ) { ata_48bit_cmd(ataio, ATA_READ_MUL48, 0, lba, count); } else { ata_48bit_cmd(ataio, ATA_WRITE_MUL48, 0, lba, count); } } } else { if (count == 256) count = 0; if (softc->flags & ADA_FLAG_CAN_DMA) { if (bp->bio_cmd == BIO_READ) { ata_28bit_cmd(ataio, ATA_READ_DMA, 0, lba, count); } else { ata_28bit_cmd(ataio, ATA_WRITE_DMA, 0, lba, count); } } else { if (bp->bio_cmd == BIO_READ) { ata_28bit_cmd(ataio, ATA_READ_MUL, 0, lba, count); } else { ata_28bit_cmd(ataio, ATA_WRITE_MUL, 0, lba, count); } } } break; } case BIO_DELETE: switch (softc->delete_method) { case ADA_DELETE_NCQ_DSM_TRIM: ada_ncq_dsmtrim(softc, bp, ataio); break; case ADA_DELETE_DSM_TRIM: ada_dsmtrim(softc, bp, ataio); break; case ADA_DELETE_CFA_ERASE: ada_cfaerase(softc, bp, ataio); break; default: biofinish(bp, NULL, EOPNOTSUPP); xpt_release_ccb(start_ccb); adaschedule(periph); return; } start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM; start_ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); goto out; case BIO_FLUSH: cam_fill_ataio(ataio, 1, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (softc->flags & ADA_FLAG_CAN_48BIT) ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0); else ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0); break; case BIO_ZONE: { int error, queue_ccb; queue_ccb = 0; error = ada_zone_cmd(periph, start_ccb, bp, &queue_ccb); if ((error != 0) || (queue_ccb == 0)) { biofinish(bp, NULL, error); xpt_release_ccb(start_ccb); return; } break; } } start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO; start_ccb->ccb_h.flags |= CAM_UNLOCKED; out: start_ccb->ccb_h.ccb_bp = bp; softc->outstanding_cmds++; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); softc->refcount--; /* May have more work to do, so ensure we stay scheduled */ adaschedule(periph); break; } case ADA_STATE_RAHEAD: case ADA_STATE_WCACHE: { cam_fill_ataio(ataio, 1, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (softc->state == ADA_STATE_RAHEAD) { ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_RA ? ATA_SF_ENAB_RCACHE : ATA_SF_DIS_RCACHE, 0, 0); start_ccb->ccb_h.ccb_state = ADA_CCB_RAHEAD; } else { ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_WC ? 
ATA_SF_ENAB_WCACHE : ATA_SF_DIS_WCACHE, 0, 0); start_ccb->ccb_h.ccb_state = ADA_CCB_WCACHE; } start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; xpt_action(start_ccb); break; } case ADA_STATE_LOGDIR: { struct ata_gp_log_dir *log_dir; if ((softc->flags & ADA_FLAG_CAN_LOG) == 0) { adaprobedone(periph, start_ccb); break; } log_dir = malloc(sizeof(*log_dir), M_ATADA, M_NOWAIT|M_ZERO); if (log_dir == NULL) { xpt_print(periph->path, "Couldn't malloc log_dir " "data\n"); softc->state = ADA_STATE_NORMAL; xpt_release_ccb(start_ccb); break; } ata_read_log(ataio, /*retries*/1, /*cbfcnp*/adadone, /*log_address*/ ATA_LOG_DIRECTORY, /*page_number*/ 0, /*block_count*/ 1, /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ? CAM_ATAIO_DMA : 0, /*data_ptr*/ (uint8_t *)log_dir, /*dxfer_len*/sizeof(*log_dir), /*timeout*/ada_default_timeout*1000); start_ccb->ccb_h.ccb_state = ADA_CCB_LOGDIR; xpt_action(start_ccb); break; } case ADA_STATE_IDDIR: { struct ata_identify_log_pages *id_dir; id_dir = malloc(sizeof(*id_dir), M_ATADA, M_NOWAIT | M_ZERO); if (id_dir == NULL) { xpt_print(periph->path, "Couldn't malloc id_dir " "data\n"); adaprobedone(periph, start_ccb); break; } ata_read_log(ataio, /*retries*/1, /*cbfcnp*/adadone, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_PAGE_LIST, /*block_count*/ 1, /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ? CAM_ATAIO_DMA : 0, /*data_ptr*/ (uint8_t *)id_dir, /*dxfer_len*/ sizeof(*id_dir), /*timeout*/ada_default_timeout*1000); start_ccb->ccb_h.ccb_state = ADA_CCB_IDDIR; xpt_action(start_ccb); break; } case ADA_STATE_SUP_CAP: { struct ata_identify_log_sup_cap *sup_cap; sup_cap = malloc(sizeof(*sup_cap), M_ATADA, M_NOWAIT|M_ZERO); if (sup_cap == NULL) { xpt_print(periph->path, "Couldn't malloc sup_cap " "data\n"); adaprobedone(periph, start_ccb); break; } ata_read_log(ataio, /*retries*/1, /*cbfcnp*/adadone, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_SUP_CAP, /*block_count*/ 1, /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ? CAM_ATAIO_DMA : 0, /*data_ptr*/ (uint8_t *)sup_cap, /*dxfer_len*/ sizeof(*sup_cap), /*timeout*/ada_default_timeout*1000); start_ccb->ccb_h.ccb_state = ADA_CCB_SUP_CAP; xpt_action(start_ccb); break; } case ADA_STATE_ZONE: { struct ata_zoned_info_log *ata_zone; ata_zone = malloc(sizeof(*ata_zone), M_ATADA, M_NOWAIT|M_ZERO); if (ata_zone == NULL) { xpt_print(periph->path, "Couldn't malloc ata_zone " "data\n"); adaprobedone(periph, start_ccb); break; } ata_read_log(ataio, /*retries*/1, /*cbfcnp*/adadone, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_ZDI, /*block_count*/ 1, /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ? 
CAM_ATAIO_DMA : 0, /*data_ptr*/ (uint8_t *)ata_zone, /*dxfer_len*/ sizeof(*ata_zone), /*timeout*/ada_default_timeout*1000); start_ccb->ccb_h.ccb_state = ADA_CCB_ZONE; xpt_action(start_ccb); break; } } } static void adaprobedone(struct cam_periph *periph, union ccb *ccb) { struct ada_softc *softc; softc = (struct ada_softc *)periph->softc; if (ccb != NULL) xpt_release_ccb(ccb); softc->state = ADA_STATE_NORMAL; softc->flags |= ADA_FLAG_PROBED; adaschedule(periph); if ((softc->flags & ADA_FLAG_ANNOUNCED) == 0) { softc->flags |= ADA_FLAG_ANNOUNCED; cam_periph_unhold(periph); } else { cam_periph_release_locked(periph); } } static void adazonedone(struct cam_periph *periph, union ccb *ccb) { struct bio *bp; bp = (struct bio *)ccb->ccb_h.ccb_bp; switch (bp->bio_zone.zone_cmd) { case DISK_ZONE_OPEN: case DISK_ZONE_CLOSE: case DISK_ZONE_FINISH: case DISK_ZONE_RWP: break; case DISK_ZONE_REPORT_ZONES: { uint32_t avail_len; struct disk_zone_report *rep; struct scsi_report_zones_hdr *hdr; struct scsi_report_zones_desc *desc; struct disk_zone_rep_entry *entry; uint32_t hdr_len, num_avail; uint32_t num_to_fill, i; rep = &bp->bio_zone.zone_params.report; avail_len = ccb->ataio.dxfer_len - ccb->ataio.resid; /* * Note that bio_resid isn't normally used for zone * commands, but it is used by devstat_end_transaction_bio() * to determine how much data was transferred. Because * the size of the SCSI/ATA data structures is different * than the size of the BIO interface structures, the * amount of data actually transferred from the drive will * be different than the amount of data transferred to * the user. */ hdr = (struct scsi_report_zones_hdr *)ccb->ataio.data_ptr; if (avail_len < sizeof(*hdr)) { /* * Is there a better error than EIO here? We asked * for at least the header, and we got less than * that. */ bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; break; } hdr_len = le32dec(hdr->length); if (hdr_len > 0) rep->entries_available = hdr_len / sizeof(*desc); else rep->entries_available = 0; /* * NOTE: using the same values for the BIO version of the * same field as the SCSI/ATA values. This means we could * get some additional values that aren't defined in bio.h * if more values of the same field are defined later. */ rep->header.same = hdr->byte4 & SRZ_SAME_MASK; rep->header.maximum_lba = le64dec(hdr->maximum_lba); /* * If the drive reports no entries that match the query, * we're done. */ if (hdr_len == 0) { rep->entries_filled = 0; bp->bio_resid = bp->bio_bcount; break; } num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc), hdr_len / sizeof(*desc)); /* * If the drive didn't return any data, then we're done. */ if (num_avail == 0) { rep->entries_filled = 0; bp->bio_resid = bp->bio_bcount; break; } num_to_fill = min(num_avail, rep->entries_allocated); /* * If the user didn't allocate any entries for us to fill, * we're done. */ if (num_to_fill == 0) { rep->entries_filled = 0; bp->bio_resid = bp->bio_bcount; break; } for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0]; i < num_to_fill; i++, desc++, entry++) { /* * NOTE: we're mapping the values here directly * from the SCSI/ATA bit definitions to the bio.h * definitions. There is also a warning in * disk_zone.h, but the impact is that if * additional values are added in the SCSI/ATA * specs these will be visible to consumers of * this interface. 
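The three-way clamp above is the heart of the Report Zones copy-out. A minimal sketch with placeholder structure sizes (the real ones come from the SCSI header and descriptor definitions) showing how avail_len, the drive's hdr_len, and the caller's entries_allocated together bound entries_filled:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* Placeholder sizes; the driver uses the real SCSI structures. */
#define HDR_SIZE	64u	/* stand-in for sizeof(scsi_report_zones_hdr) */
#define DESC_SIZE	64u	/* stand-in for sizeof(scsi_report_zones_desc) */

int
main(void)
{
	uint32_t avail_len = 4096;		/* dxfer_len - resid */
	uint32_t hdr_len = 100 * DESC_SIZE;	/* drive: 100 zones match */
	uint32_t entries_allocated = 32;	/* caller supplied this many */
	uint32_t num_avail, num_to_fill;

	/* Entries actually in the buffer vs. what the drive reports. */
	num_avail = MIN((avail_len - HDR_SIZE) / DESC_SIZE,
	    hdr_len / DESC_SIZE);
	/* Never write past what the caller allocated. */
	num_to_fill = MIN(num_avail, entries_allocated);

	printf("num_avail=%u num_to_fill=%u\n", num_avail, num_to_fill);
	return (0);
}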
*/ entry->zone_type = desc->zone_type & SRZ_TYPE_MASK; entry->zone_condition = (desc->zone_flags & SRZ_ZONE_COND_MASK) >> SRZ_ZONE_COND_SHIFT; entry->zone_flags |= desc->zone_flags & (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET); entry->zone_length = le64dec(desc->zone_length); entry->zone_start_lba = le64dec(desc->zone_start_lba); entry->write_pointer_lba = le64dec(desc->write_pointer_lba); } rep->entries_filled = num_to_fill; /* * Note that this residual is accurate from the user's * standpoint, but the amount transferred isn't accurate * from the standpoint of what actually came back from the * drive. */ bp->bio_resid = bp->bio_bcount - (num_to_fill * sizeof(*entry)); break; } case DISK_ZONE_GET_PARAMS: default: /* * In theory we should not get a GET_PARAMS bio, since it * should be handled without queueing the command to the * drive. */ panic("%s: Invalid zone command %d", __func__, bp->bio_zone.zone_cmd); break; } if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) free(ccb->ataio.data_ptr, M_ATADA); } static void adadone(struct cam_periph *periph, union ccb *done_ccb) { struct ada_softc *softc; struct ccb_ataio *ataio; struct cam_path *path; uint32_t priority; int state; softc = (struct ada_softc *)periph->softc; ataio = &done_ccb->ataio; path = done_ccb->ccb_h.path; priority = done_ccb->ccb_h.pinfo.priority; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("adadone\n")); state = ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK; switch (state) { case ADA_CCB_BUFFER_IO: case ADA_CCB_TRIM: { struct bio *bp; int error; cam_periph_lock(periph); bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = adaerror(done_ccb, 0, 0); if (error == ERESTART) { /* A retry was scheduled, so just return. */ cam_periph_unlock(periph); return; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); /* * If we get an error on an NCQ DSM TRIM, fall back * to a non-NCQ DSM TRIM forever. Please note that if * CAN_NCQ_TRIM is set, CAN_TRIM is necessarily set too. * However, for this one trim, we treat it as advisory * and return success up the stack. */ if (state == ADA_CCB_TRIM && error != 0 && (softc->flags & ADA_FLAG_CAN_NCQ_TRIM) != 0) { softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM; error = 0; adasetdeletemethod(softc); } } else { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) panic("REQ_CMP with QFRZN"); error = 0; } bp->bio_error = error; if (error != 0) { bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } else { if (bp->bio_cmd == BIO_ZONE) adazonedone(periph, done_ccb); else if (state == ADA_CCB_TRIM) bp->bio_resid = 0; else bp->bio_resid = ataio->resid; if ((bp->bio_resid > 0) && (bp->bio_cmd != BIO_ZONE)) bp->bio_flags |= BIO_ERROR; } softc->outstanding_cmds--; if (softc->outstanding_cmds == 0) softc->flags |= ADA_FLAG_WAS_OTAG; /* * We need to call cam_iosched before we call biodone so that we * don't measure any activity that happens in the completion * routine, which in the case of sendfile can be quite * extensive. */ cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb); xpt_release_ccb(done_ccb); if (state == ADA_CCB_TRIM) { TAILQ_HEAD(, bio) queue; struct bio *bp1; TAILQ_INIT(&queue); TAILQ_CONCAT(&queue, &softc->trim_req.bps, bio_queue); /* * Normally, the xpt_release_ccb() above would make sure * that when we have more work to do, that work would * get kicked off. 
However, we specifically keep * trim_running set to 0 before the call above to allow * other I/O to progress when many BIO_DELETE requests * are pushed down. We set trim_running to 0 and call * adaschedule again so that we don't stall if there are * no other I/Os pending apart from BIO_DELETEs. */ cam_iosched_trim_done(softc->cam_iosched); adaschedule(periph); cam_periph_unlock(periph); while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, bp1, bio_queue); bp1->bio_error = error; if (error != 0) { bp1->bio_flags |= BIO_ERROR; bp1->bio_resid = bp1->bio_bcount; } else bp1->bio_resid = 0; biodone(bp1); } } else { adaschedule(periph); cam_periph_unlock(periph); biodone(bp); } return; } case ADA_CCB_RAHEAD: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (adaerror(done_ccb, 0, 0) == ERESTART) { /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); return; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the reference on the peripheral. * The peripheral will only go away once the last reference * is removed, and we need it around for the CCB release * operation. */ xpt_release_ccb(done_ccb); softc->state = ADA_STATE_WCACHE; xpt_schedule(periph, priority); /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); return; } case ADA_CCB_WCACHE: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (adaerror(done_ccb, 0, 0) == ERESTART) { /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); return; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); if ((softc->flags & ADA_FLAG_CAN_LOG) && (softc->zone_mode != ADA_ZONE_NONE)) { xpt_release_ccb(done_ccb); softc->state = ADA_STATE_LOGDIR; xpt_schedule(periph, priority); } else { adaprobedone(periph, done_ccb); } return; } case ADA_CCB_LOGDIR: { int error; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { error = 0; softc->valid_logdir_len = 0; bzero(&softc->ata_logdir, sizeof(softc->ata_logdir)); softc->valid_logdir_len = ataio->dxfer_len - ataio->resid; if (softc->valid_logdir_len > 0) bcopy(ataio->data_ptr, &softc->ata_logdir, min(softc->valid_logdir_len, sizeof(softc->ata_logdir))); /* * Figure out whether the Identify Device log is * supported. The General Purpose log directory * has a header, and lists the number of pages * available for each GP log identified by the * offset into the list. */ if ((softc->valid_logdir_len >= ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t))) && (le16dec(softc->ata_logdir.header) == ATA_GP_LOG_DIR_VERSION) && (le16dec(&softc->ata_logdir.num_pages[ (ATA_IDENTIFY_DATA_LOG * sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)) { softc->flags |= ADA_FLAG_CAN_IDLOG; } else { softc->flags &= ~ADA_FLAG_CAN_IDLOG; } } else { error = adaerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA log directory, * then ATA logs are effectively not * supported even if the bit is set in the * identify data.
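The log-directory test above packs several checks into one expression. As a hypothetical refactoring (the helper name and standalone layout are mine), the lookup amounts to: a 16-bit little-endian version word, then one 16-bit page count per log address; the driver's num_pages[] array starts two bytes into the directory, which is why it subtracts sizeof(uint16_t) from the offset. A sketch indexing from the base of the directory instead:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Local copy of le16dec() so the sketch is self-contained. */
static uint16_t
le16dec_local(const void *p)
{
	const uint8_t *b = p;

	return (((uint16_t)b[1] << 8) | b[0]);
}

#define GP_LOG_DIR_VERSION	0x0001	/* as in ATA_GP_LOG_DIR_VERSION */
#define IDENTIFY_DATA_LOG	0x30	/* as in ATA_IDENTIFY_DATA_LOG */

/*
 * dir points at the GP log directory: a version word, then a 16-bit
 * page count for each log address.  Returns -1 if the directory is
 * too short or has an unexpected version.
 */
static int
gp_log_pages(const uint8_t *dir, size_t valid_len, int log_addr)
{
	size_t off = (size_t)log_addr * sizeof(uint16_t);

	if (valid_len < off + sizeof(uint16_t))
		return (-1);
	if (le16dec_local(dir) != GP_LOG_DIR_VERSION)
		return (-1);
	return (le16dec_local(dir + off));
}

int
main(void)
{
	uint8_t dir[512] = { 0x01, 0x00 };	/* version 0x0001 */

	dir[IDENTIFY_DATA_LOG * 2] = 1;		/* 1 page for log 0x30 */
	printf("pages for log 0x30: %d\n",
	    gp_log_pages(dir, sizeof(dir), IDENTIFY_DATA_LOG));
	return (0);
}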
*/ softc->flags &= ~(ADA_FLAG_CAN_LOG | ADA_FLAG_CAN_IDLOG); if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(ataio->data_ptr, M_ATADA); if ((error == 0) && (softc->flags & ADA_FLAG_CAN_IDLOG)) { softc->state = ADA_STATE_IDDIR; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); } else adaprobedone(periph, done_ccb); return; } case ADA_CCB_IDDIR: { int error; if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { off_t entries_offset, max_entries; error = 0; softc->valid_iddir_len = 0; bzero(&softc->ata_iddir, sizeof(softc->ata_iddir)); softc->flags &= ~(ADA_FLAG_CAN_SUPCAP | ADA_FLAG_CAN_ZONE); softc->valid_iddir_len = ataio->dxfer_len - ataio->resid; if (softc->valid_iddir_len > 0) bcopy(ataio->data_ptr, &softc->ata_iddir, min(softc->valid_iddir_len, sizeof(softc->ata_iddir))); entries_offset = __offsetof(struct ata_identify_log_pages, entries); max_entries = softc->valid_iddir_len - entries_offset; if ((softc->valid_iddir_len > (entries_offset + 1)) && (le64dec(softc->ata_iddir.header) == ATA_IDLOG_REVISION) && (softc->ata_iddir.entry_count > 0)) { int num_entries, i; num_entries = softc->ata_iddir.entry_count; num_entries = min(num_entries, softc->valid_iddir_len - entries_offset); for (i = 0; i < num_entries && i < max_entries; i++) { if (softc->ata_iddir.entries[i] == ATA_IDL_SUP_CAP) softc->flags |= ADA_FLAG_CAN_SUPCAP; else if (softc->ata_iddir.entries[i] == ATA_IDL_ZDI) softc->flags |= ADA_FLAG_CAN_ZONE; if ((softc->flags & ADA_FLAG_CAN_SUPCAP) && (softc->flags & ADA_FLAG_CAN_ZONE)) break; } } } else { error = adaerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA Identify Data log * directory, then it effectively isn't * supported even if the ATA Log directory * has a non-zero number of pages present * for this log. */ softc->flags &= ~ADA_FLAG_CAN_IDLOG; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(ataio->data_ptr, M_ATADA); if ((error == 0) && (softc->flags & ADA_FLAG_CAN_SUPCAP)) { softc->state = ADA_STATE_SUP_CAP; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); } else adaprobedone(periph, done_ccb); return; } case ADA_CCB_SUP_CAP: { int error; if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; size_t needed_size; struct ata_identify_log_sup_cap *sup_cap; error = 0; sup_cap = (struct ata_identify_log_sup_cap *) ataio->data_ptr; valid_len = ataio->dxfer_len - ataio->resid; needed_size = __offsetof(struct ata_identify_log_sup_cap, sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap); if (valid_len >= needed_size) { uint64_t zoned, zac_cap; zoned = le64dec(sup_cap->zoned_cap); if (zoned & ATA_ZONED_VALID) { /* * This should have already been * set, because this is also in the * ATA identify data.
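The ADA_CCB_IDDIR case above walks the Identify Device log's page list looking for the two pages this driver cares about. A condensed user-space sketch of that scan; the structure and the numeric stand-ins for the ATA_IDL_* constants are illustrative only:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define IDL_SUP_CAP	0x03	/* stand-in for ATA_IDL_SUP_CAP */
#define IDL_ZDI		0x09	/* stand-in for ATA_IDL_ZDI */

/* Hypothetical condensed form of the Identify Data log page list. */
struct id_log_dir {
	uint8_t entry_count;
	uint8_t entries[32];	/* one page identifier per entry */
};

int
main(void)
{
	struct id_log_dir d;
	int can_supcap = 0, can_zone = 0;

	memset(&d, 0, sizeof(d));
	d.entry_count = 3;
	d.entries[0] = 0x00;		/* the page list itself */
	d.entries[1] = IDL_SUP_CAP;
	d.entries[2] = IDL_ZDI;

	for (int i = 0; i < d.entry_count; i++) {
		if (d.entries[i] == IDL_SUP_CAP)
			can_supcap = 1;
		else if (d.entries[i] == IDL_ZDI)
			can_zone = 1;
		if (can_supcap && can_zone)
			break;	/* found everything we care about */
	}
	printf("supcap=%d zone=%d\n", can_supcap, can_zone);
	return (0);
}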
*/ if ((zoned & ATA_ZONED_MASK) == ATA_SUPPORT_ZONE_HOST_AWARE) softc->zone_mode = ADA_ZONE_HOST_AWARE; else if ((zoned & ATA_ZONED_MASK) == ATA_SUPPORT_ZONE_DEV_MANAGED) softc->zone_mode = ADA_ZONE_DRIVE_MANAGED; } zac_cap = le64dec(sup_cap->sup_zac_cap); if (zac_cap & ATA_SUP_ZAC_CAP_VALID) { if (zac_cap & ATA_REPORT_ZONES_SUP) softc->zone_flags |= ADA_ZONE_FLAG_RZ_SUP; if (zac_cap & ATA_ND_OPEN_ZONE_SUP) softc->zone_flags |= ADA_ZONE_FLAG_OPEN_SUP; if (zac_cap & ATA_ND_CLOSE_ZONE_SUP) softc->zone_flags |= ADA_ZONE_FLAG_CLOSE_SUP; if (zac_cap & ATA_ND_FINISH_ZONE_SUP) softc->zone_flags |= ADA_ZONE_FLAG_FINISH_SUP; if (zac_cap & ATA_ND_RWP_SUP) softc->zone_flags |= ADA_ZONE_FLAG_RWP_SUP; } else { /* * This field was introduced in * ACS-4, r08 on April 28th, 2015. * If the drive firmware was written * to an earlier spec, it won't have * the field. So, assume all * commands are supported. */ softc->zone_flags |= ADA_ZONE_FLAG_SUP_MASK; } } } else { error = adaerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA Identify Data * Supported Capabilities page, clear the * flag... */ softc->flags &= ~ADA_FLAG_CAN_SUPCAP; /* * And clear zone capabilities. */ softc->zone_flags &= ~ADA_ZONE_FLAG_SUP_MASK; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(ataio->data_ptr, M_ATADA); if ((error == 0) && (softc->flags & ADA_FLAG_CAN_ZONE)) { softc->state = ADA_STATE_ZONE; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); } else adaprobedone(periph, done_ccb); return; } case ADA_CCB_ZONE: { int error; if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { struct ata_zoned_info_log *zi_log; uint32_t valid_len; size_t needed_size; zi_log = (struct ata_zoned_info_log *)ataio->data_ptr; valid_len = ataio->dxfer_len - ataio->resid; needed_size = __offsetof(struct ata_zoned_info_log, version_info) + 1 + sizeof(zi_log->version_info); if (valid_len >= needed_size) { uint64_t tmpvar; tmpvar = le64dec(zi_log->zoned_cap); if (tmpvar & ATA_ZDI_CAP_VALID) { if (tmpvar & ATA_ZDI_CAP_URSWRZ) softc->zone_flags |= ADA_ZONE_FLAG_URSWRZ; else softc->zone_flags &= ~ADA_ZONE_FLAG_URSWRZ; } tmpvar = le64dec(zi_log->optimal_seq_zones); if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) { softc->zone_flags |= ADA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_seq_zones = (tmpvar & ATA_ZDI_OPT_SEQ_MASK); } else { softc->zone_flags &= ~ADA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_seq_zones = 0; } tmpvar =le64dec(zi_log->optimal_nonseq_zones); if (tmpvar & ATA_ZDI_OPT_NS_VALID) { softc->zone_flags |= ADA_ZONE_FLAG_OPT_NONSEQ_SET; softc->optimal_nonseq_zones = (tmpvar & ATA_ZDI_OPT_NS_MASK); } else { softc->zone_flags &= ~ADA_ZONE_FLAG_OPT_NONSEQ_SET; softc->optimal_nonseq_zones = 0; } tmpvar = le64dec(zi_log->max_seq_req_zones); if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) { softc->zone_flags |= ADA_ZONE_FLAG_MAX_SEQ_SET; softc->max_seq_zones = (tmpvar & ATA_ZDI_MAX_SEQ_MASK); } else { softc->zone_flags &= ~ADA_ZONE_FLAG_MAX_SEQ_SET; softc->max_seq_zones = 0; } } } else { error = adaerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { softc->flags &= ~ADA_FLAG_CAN_ZONE; softc->flags &= ~ADA_ZONE_FLAG_SET_MASK; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, 
/*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(ataio->data_ptr, M_ATADA); adaprobedone(periph, done_ccb); return; } case ADA_CCB_DUMP: /* No-op. We're polling */ return; default: break; } xpt_release_ccb(done_ccb); } static int adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { #ifdef CAM_IO_STATS struct ada_softc *softc; struct cam_periph *periph; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct ada_softc *)periph->softc; switch (ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_CMD_TIMEOUT: softc->timeouts++; break; case CAM_REQ_ABORTED: case CAM_REQ_CMP_ERR: case CAM_REQ_TERMIO: case CAM_UNREC_HBA_ERROR: case CAM_DATA_RUN_ERR: case CAM_ATA_STATUS_ERROR: softc->errors++; break; default: break; } #endif return(cam_periph_error(ccb, cam_flags, sense_flags)); } static void adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd) { struct ada_softc *softc = (struct ada_softc *)periph->softc; struct disk_params *dp = &softc->params; u_int64_t lbasize48; u_int32_t lbasize; dp->secsize = ata_logical_sector_size(&cgd->ident_data); if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) && cgd->ident_data.current_heads && cgd->ident_data.current_sectors) { dp->heads = cgd->ident_data.current_heads; dp->secs_per_track = cgd->ident_data.current_sectors; dp->cylinders = cgd->ident_data.cylinders; dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 | ((u_int32_t)cgd->ident_data.current_size_2 << 16); } else { dp->heads = cgd->ident_data.heads; dp->secs_per_track = cgd->ident_data.sectors; dp->cylinders = cgd->ident_data.cylinders; dp->sectors = cgd->ident_data.cylinders * (u_int32_t)(dp->heads * dp->secs_per_track); } lbasize = (u_int32_t)cgd->ident_data.lba_size_1 | ((u_int32_t)cgd->ident_data.lba_size_2 << 16); /* use the 28bit LBA size if valid or bigger than the CHS mapping */ if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize) dp->sectors = lbasize; /* use the 48bit LBA size if valid */ lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) | ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) | ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) | ((u_int64_t)cgd->ident_data.lba_size48_4 << 48); if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) && lbasize48 > ATA_MAX_28BIT_LBA) dp->sectors = lbasize48; } static void adasendorderedtag(void *arg) { struct ada_softc *softc = arg; if (ada_send_ordered) { if (softc->outstanding_cmds > 0) { if ((softc->flags & ADA_FLAG_WAS_OTAG) == 0) softc->flags |= ADA_FLAG_NEED_OTAG; softc->flags &= ~ADA_FLAG_WAS_OTAG; } } /* Queue us up again */ callout_reset(&softc->sendordered_c, (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL, adasendorderedtag, softc); } /* * Step through all ADA peripheral drivers, and if the device is still open, * sync the disk cache to physical media. */ static void adaflush(void) { struct cam_periph *periph; struct ada_softc *softc; union ccb *ccb; int error; CAM_PERIPH_FOREACH(periph, &adadriver) { softc = (struct ada_softc *)periph->softc; if (SCHEDULER_STOPPED()) { /* If we paniced with the lock held, do not recurse. */ if (!cam_periph_owned(periph) && (softc->flags & ADA_FLAG_OPEN)) { adadump(softc->disk, NULL, 0, 0, 0); } continue; } cam_periph_lock(periph); /* * We only sync the cache if the drive is still open, and * if the drive is capable of it.. 
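Stepping back to adagetparams() above: the capacity selection amounts to taking the CHS product, preferring the 28-bit LBA count when the geometry is saturated (16383 cylinders) or smaller than it, and preferring the 48-bit count only when the drive supports 48-bit addressing and the value exceeds the 28-bit limit. A self-contained sketch of just that decision (the helper and the sample numbers are invented):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define MAX_28BIT_LBA	268435455UL	/* same value as ATA_MAX_28BIT_LBA */

static uint64_t
pick_capacity(uint32_t chs_sectors, uint16_t cylinders,
    uint32_t lba28, bool has_lba48, uint64_t lba48)
{
	uint64_t sectors = chs_sectors;

	/* 28-bit LBA wins if the CHS geometry is saturated or smaller. */
	if (cylinders == 16383 || sectors < lba28)
		sectors = lba28;
	/* 48-bit LBA wins only when supported and actually needed. */
	if (has_lba48 && lba48 > MAX_28BIT_LBA)
		sectors = lba48;
	return (sectors);
}

int
main(void)
{
	/* A 2TB-class drive: CHS maxed out, LBA28 capped, LBA48 real. */
	printf("%ju sectors\n", (uintmax_t)pick_capacity(
	    16383u * 16 * 63, 16383, MAX_28BIT_LBA, true, 3907029168ULL));
	return (0);
}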
*/ if (((softc->flags & ADA_FLAG_OPEN) == 0) || (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) { cam_periph_unlock(periph); continue; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); cam_fill_ataio(&ccb->ataio, 0, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (softc->flags & ADA_FLAG_CAN_48BIT) ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0); else ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0); error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); xpt_release_ccb(ccb); cam_periph_unlock(periph); } } static void adaspindown(uint8_t cmd, int flags) { struct cam_periph *periph; struct ada_softc *softc; struct ccb_ataio local_ccb; int error; CAM_PERIPH_FOREACH(periph, &adadriver) { /* If we paniced with the lock held, do not recurse here. */ if (cam_periph_owned(periph)) continue; cam_periph_lock(periph); softc = (struct ada_softc *)periph->softc; /* * We only spin down the drive if it is capable of it. */ if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) { cam_periph_unlock(periph); continue; } if (bootverbose) xpt_print(periph->path, "spin-down\n"); memset(&local_ccb, 0, sizeof(local_ccb)); xpt_setup_ccb(&local_ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); local_ccb.ccb_h.ccb_state = ADA_CCB_DUMP; cam_fill_ataio(&local_ccb, 0, adadone, CAM_DIR_NONE | flags, 0, NULL, 0, ada_default_timeout*1000); ata_28bit_cmd(&local_ccb, cmd, 0, 0, 0); error = cam_periph_runccb((union ccb *)&local_ccb, adaerror, /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Spin-down disk failed\n"); cam_periph_unlock(periph); } } static void adashutdown(void *arg, int howto) { int how; adaflush(); /* * STANDBY IMMEDIATE saves any volatile data to the drive. It also spins * down hard drives. IDLE IMMEDIATE also saves the volatile data without * a spindown. We send the former when we expect to lose power soon. For * a warm boot, we send the latter to avoid a thundering herd of spinups * just after the kernel loads while probing. We have to do something to * flush the data because the BIOS in many systems resets the HBA * causing a COMINIT/COMRESET negotiation, which some drives interpret * as license to toss the volatile data, and others count as unclean * shutdown when in the Active PM state in SMART attributes. * * adaspindown will ensure that we don't send this to a drive that * doesn't support it. */ if (ada_spindown_shutdown != 0) { how = (howto & (RB_HALT | RB_POWEROFF | RB_POWERCYCLE)) ? ATA_STANDBY_IMMEDIATE : ATA_IDLE_IMMEDIATE; adaspindown(how, 0); } } static void adasuspend(void *arg) { adaflush(); /* * SLEEP also flushes any volatile data, like STANDBY IMMEDIATE, * so we don't need to send it as well. */ if (ada_spindown_suspend != 0) adaspindown(ATA_SLEEP, CAM_DEV_QFREEZE); } static void adaresume(void *arg) { struct cam_periph *periph; struct ada_softc *softc; if (ada_spindown_suspend == 0) return; CAM_PERIPH_FOREACH(periph, &adadriver) { cam_periph_lock(periph); softc = (struct ada_softc *)periph->softc; /* * We only spun down drives capable of power management, * so only those need to be resumed. */ if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) { cam_periph_unlock(periph); continue; } if (bootverbose) xpt_print(periph->path, "resume\n"); /* * Drop freeze taken due to CAM_DEV_QFREEZE flag set on * sleep request.
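Returning to adashutdown() above, the howto test reduces to: any shutdown path that removes power gets STANDBY IMMEDIATE, everything else (a warm reboot) gets IDLE IMMEDIATE. A tiny model with stand-in values for the RB_* and ATA_* constants (treat the numbers as illustrative, not authoritative):

#include <stdio.h>

/* Stand-ins for the kernel constants; values are illustrative only. */
#define RB_HALT			0x008
#define RB_POWEROFF		0x4000
#define RB_POWERCYCLE		0x400000
#define STANDBY_IMMEDIATE	0xe0
#define IDLE_IMMEDIATE		0xe1

static int
spindown_cmd(int howto)
{
	/* Losing power soon: park the heads.  Warm boot: just flush. */
	return ((howto & (RB_HALT | RB_POWEROFF | RB_POWERCYCLE)) ?
	    STANDBY_IMMEDIATE : IDLE_IMMEDIATE);
}

int
main(void)
{
	printf("poweroff    -> %#x\n", spindown_cmd(RB_POWEROFF));
	printf("warm reboot -> %#x\n", spindown_cmd(0));
	return (0);
}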
*/ cam_release_devq(periph->path, /*relsim_flags*/0, /*openings*/0, /*timeout*/0, /*getcount_only*/0); cam_periph_unlock(periph); } } #endif /* _KERNEL */ Index: head/sys/cam/ata/ata_pmp.c =================================================================== --- head/sys/cam/ata/ata_pmp.c (revision 328917) +++ head/sys/cam/ata/ata_pmp.c (revision 328918) @@ -1,863 +1,863 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #endif /* _KERNEL */ #ifndef _KERNEL #include #include #endif /* _KERNEL */ #include #include #include #include #include #include #include #ifdef _KERNEL typedef enum { PMP_STATE_NORMAL, PMP_STATE_PORTS, PMP_STATE_PM_QUIRKS_1, PMP_STATE_PM_QUIRKS_2, PMP_STATE_PM_QUIRKS_3, PMP_STATE_PRECONFIG, PMP_STATE_RESET, PMP_STATE_CONNECT, PMP_STATE_CHECK, PMP_STATE_CLEAR, PMP_STATE_CONFIG, PMP_STATE_SCAN } pmp_state; typedef enum { PMP_FLAG_SCTX_INIT = 0x200 } pmp_flags; typedef enum { PMP_CCB_PROBE = 0x01, } pmp_ccb_state; /* Offsets into our private area for storing information */ #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct pmp_softc { SLIST_ENTRY(pmp_softc) links; pmp_state state; pmp_flags flags; uint32_t pm_pid; uint32_t pm_prv; int pm_ports; int pm_step; int pm_try; int found; int reset; int frozen; int restart; int events; #define PMP_EV_RESET 1 #define PMP_EV_RESCAN 2 u_int caps; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; }; static periph_init_t pmpinit; static void pmpasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void pmpsysctlinit(void *context, int pending); static periph_ctor_t pmpregister; static periph_dtor_t pmpcleanup; static periph_start_t pmpstart; static periph_oninv_t pmponinvalidate; static void pmpdone(struct cam_periph *periph, union ccb *done_ccb); #ifndef PMP_DEFAULT_TIMEOUT #define PMP_DEFAULT_TIMEOUT 30 /* Timeout in seconds */ #endif #ifndef PMP_DEFAULT_RETRY #define PMP_DEFAULT_RETRY 1 #endif #ifndef PMP_DEFAULT_HIDE_SPECIAL #define 
PMP_DEFAULT_HIDE_SPECIAL 1 #endif static int pmp_retry_count = PMP_DEFAULT_RETRY; static int pmp_default_timeout = PMP_DEFAULT_TIMEOUT; static int pmp_hide_special = PMP_DEFAULT_HIDE_SPECIAL; static SYSCTL_NODE(_kern_cam, OID_AUTO, pmp, CTLFLAG_RD, 0, "CAM SATA Port Multiplier driver"); SYSCTL_INT(_kern_cam_pmp, OID_AUTO, retry_count, CTLFLAG_RWTUN, &pmp_retry_count, 0, "Normal I/O retry count"); SYSCTL_INT(_kern_cam_pmp, OID_AUTO, default_timeout, CTLFLAG_RWTUN, &pmp_default_timeout, 0, "Normal I/O timeout (in seconds)"); SYSCTL_INT(_kern_cam_pmp, OID_AUTO, hide_special, CTLFLAG_RWTUN, &pmp_hide_special, 0, "Hide extra ports"); static struct periph_driver pmpdriver = { pmpinit, "pmp", TAILQ_HEAD_INITIALIZER(pmpdriver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(pmp, pmpdriver); static void pmpinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, pmpasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("pmp: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void pmpfreeze(struct cam_periph *periph, int mask) { struct pmp_softc *softc = (struct pmp_softc *)periph->softc; struct cam_path *dpath; int i; mask &= ~softc->frozen; for (i = 0; i < 15; i++) { if ((mask & (1 << i)) == 0) continue; if (xpt_create_path(&dpath, periph, xpt_path_path_id(periph->path), i, 0) == CAM_REQ_CMP) { softc->frozen |= (1 << i); xpt_acquire_device(dpath->device); cam_freeze_devq(dpath); xpt_free_path(dpath); } } } static void pmprelease(struct cam_periph *periph, int mask) { struct pmp_softc *softc = (struct pmp_softc *)periph->softc; struct cam_path *dpath; int i; mask &= softc->frozen; for (i = 0; i < 15; i++) { if ((mask & (1 << i)) == 0) continue; if (xpt_create_path(&dpath, periph, xpt_path_path_id(periph->path), i, 0) == CAM_REQ_CMP) { softc->frozen &= ~(1 << i); cam_release_devq(dpath, 0, 0, 0, FALSE); xpt_release_device(dpath->device); xpt_free_path(dpath); } } } static void pmponinvalidate(struct cam_periph *periph) { struct cam_path *dpath; int i; /* * De-register any async callbacks. */ xpt_register_async(0, pmpasync, periph, periph->path); for (i = 0; i < 15; i++) { if (xpt_create_path(&dpath, periph, xpt_path_path_id(periph->path), i, 0) == CAM_REQ_CMP) { xpt_async(AC_LOST_DEVICE, dpath, NULL); xpt_free_path(dpath); } } pmprelease(periph, -1); } static void pmpcleanup(struct cam_periph *periph) { struct pmp_softc *softc; softc = (struct pmp_softc *)periph->softc; cam_periph_unlock(periph); /* * If we can't free the sysctl tree, oh well... */ if ((softc->flags & PMP_FLAG_SCTX_INIT) != 0 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) { xpt_print(periph->path, "can't remove sysctl context\n"); } free(softc, M_DEVBUF); cam_periph_lock(periph); } static void pmpasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; struct pmp_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SATAPM) break; /* * Allocate a peripheral instance for * this device and start the probe * process.
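pmpfreeze() and pmprelease() above keep a per-port bitmask so that each fan-out port is frozen at most once, and only ports this driver froze are ever released. A user-space model of just the mask bookkeeping, with the CAM path and devq calls stubbed out as comments:

#include <stdio.h>

static int frozen;	/* bit i set => port i currently frozen */

static void
freeze_ports(int mask)
{
	mask &= ~frozen;		/* skip ports already frozen */
	for (int i = 0; i < 15; i++)
		if (mask & (1 << i)) {
			frozen |= (1 << i);
			/* cam_freeze_devq() on port i would go here */
		}
}

static void
release_ports(int mask)
{
	mask &= frozen;			/* only release what we froze */
	for (int i = 0; i < 15; i++)
		if (mask & (1 << i)) {
			frozen &= ~(1 << i);
			/* cam_release_devq() on port i would go here */
		}
}

int
main(void)
{
	freeze_ports(0x1f);		/* freeze ports 0-4 */
	freeze_ports(0x07);		/* no-op: already frozen */
	release_ports(-1);		/* release everything we hold */
	printf("frozen=%#x\n", frozen);
	return (0);
}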
*/ status = cam_periph_alloc(pmpregister, pmponinvalidate, pmpcleanup, pmpstart, "pmp", CAM_PERIPH_BIO, path, pmpasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("pmpasync: Unable to attach to new device " "due to status 0x%x\n", status); break; } case AC_SCSI_AEN: case AC_SENT_BDR: case AC_BUS_RESET: softc = (struct pmp_softc *)periph->softc; cam_periph_async(periph, code, path, arg); if (code == AC_SCSI_AEN) softc->events |= PMP_EV_RESCAN; else softc->events |= PMP_EV_RESET; if (code == AC_SCSI_AEN && softc->state != PMP_STATE_NORMAL) break; xpt_hold_boot(); pmpfreeze(periph, softc->found); if (code == AC_SENT_BDR || code == AC_BUS_RESET) softc->found = 0; /* We have to reset everything. */ if (softc->state == PMP_STATE_NORMAL) { - if (cam_periph_acquire(periph) == CAM_REQ_CMP) { + if (cam_periph_acquire(periph) == 0) { if (softc->pm_pid == 0x37261095 || softc->pm_pid == 0x38261095) softc->state = PMP_STATE_PM_QUIRKS_1; else softc->state = PMP_STATE_PRECONFIG; xpt_schedule(periph, CAM_PRIORITY_DEV); } else { pmprelease(periph, softc->found); xpt_release_boot(); } } else softc->restart = 1; break; default: cam_periph_async(periph, code, path, arg); break; } } static void pmpsysctlinit(void *context, int pending) { struct cam_periph *periph; struct pmp_softc *softc; char tmpstr[32], tmpstr2[16]; periph = (struct cam_periph *)context; - if (cam_periph_acquire(periph) != CAM_REQ_CMP) + if (cam_periph_acquire(periph) != 0) return; softc = (struct pmp_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM PMP unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= PMP_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_pmp), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) { printf("pmpsysctlinit: unable to allocate sysctl tree\n"); cam_periph_release(periph); return; } cam_periph_release(periph); } static cam_status pmpregister(struct cam_periph *periph, void *arg) { struct pmp_softc *softc; struct ccb_getdev *cgd; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("pmpregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct pmp_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc == NULL) { printf("pmpregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } periph->softc = softc; softc->pm_pid = ((uint32_t *)&cgd->ident_data)[0]; softc->pm_prv = ((uint32_t *)&cgd->ident_data)[1]; TASK_INIT(&softc->sysctl_task, 0, pmpsysctlinit, periph); xpt_announce_periph(periph, NULL); /* * Add async callbacks for bus reset and * bus device reset calls. I don't bother * checking if this fails as, in most cases, * the system will function just fine without * them and the only alternative would be to * not attach the device on failure. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_SCSI_AEN, pmpasync, periph, periph->path); /* * Take an exclusive refcount on the periph while pmpstart is called * to finish the probe. The reference will be dropped in pmpdone at * the end of probe. 
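The hunks in this revision (see pmpasync() and pmpsysctlinit() above, and proberegister() further down) all make the same switch: cam_periph_acquire() is now tested against 0 rather than CAM_REQ_CMP, i.e. it returns an errno-style int instead of a cam_status. A stub model of the new convention; the struct and the specific errno are stand-ins, not the real cam_periph.c implementation:

#include <stdio.h>
#include <errno.h>

struct periph_stub {
	int invalidated;
	int refcount;
};

/*
 * Stand-in with the new calling convention: 0 on success, an errno
 * value (EINVAL here, as a placeholder) when the periph cannot take
 * another reference.
 */
static int
periph_acquire(struct periph_stub *p)
{
	if (p == NULL || p->invalidated)
		return (EINVAL);
	p->refcount++;
	return (0);
}

int
main(void)
{
	struct periph_stub p = { 0, 0 };

	if (periph_acquire(&p) != 0)	/* new style: test against 0 */
		return (1);
	printf("acquired, refcount=%d\n", p.refcount);
	return (0);
}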
*/ (void)cam_periph_acquire(periph); xpt_hold_boot(); softc->state = PMP_STATE_PORTS; softc->events = PMP_EV_RESCAN; xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static void pmpstart(struct cam_periph *periph, union ccb *start_ccb) { struct ccb_trans_settings cts; struct ccb_ataio *ataio; struct pmp_softc *softc; struct cam_path *dpath; int revision = 0; softc = (struct pmp_softc *)periph->softc; ataio = &start_ccb->ataio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("pmpstart\n")); if (softc->restart) { softc->restart = 0; if (softc->pm_pid == 0x37261095 || softc->pm_pid == 0x38261095) softc->state = min(softc->state, PMP_STATE_PM_QUIRKS_1); else softc->state = min(softc->state, PMP_STATE_PRECONFIG); } /* Fetch user wanted device speed. */ if (softc->state == PMP_STATE_RESET || softc->state == PMP_STATE_CONNECT) { if (xpt_create_path(&dpath, periph, xpt_path_path_id(periph->path), softc->pm_step, 0) == CAM_REQ_CMP) { bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, dpath, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if (cts.xport_specific.sata.valid & CTS_SATA_VALID_REVISION) revision = cts.xport_specific.sata.revision; xpt_free_path(dpath); } } switch (softc->state) { case PMP_STATE_PORTS: cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_read_cmd(ataio, 2, 15); break; case PMP_STATE_PM_QUIRKS_1: case PMP_STATE_PM_QUIRKS_3: cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_read_cmd(ataio, 129, 15); break; case PMP_STATE_PM_QUIRKS_2: cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_write_cmd(ataio, 129, 15, softc->caps & ~0x1); break; case PMP_STATE_PRECONFIG: /* Get/update host SATA capabilities. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (cts.xport_specific.sata.valid & CTS_SATA_VALID_CAPS) softc->caps = cts.xport_specific.sata.caps; else softc->caps = 0; cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_write_cmd(ataio, 0x60, 15, 0x0); break; case PMP_STATE_RESET: cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_write_cmd(ataio, 2, softc->pm_step, (revision << 4) | ((softc->found & (1 << softc->pm_step)) ? 
0 : 1)); break; case PMP_STATE_CONNECT: cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_write_cmd(ataio, 2, softc->pm_step, (revision << 4)); break; case PMP_STATE_CHECK: cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_read_cmd(ataio, 0, softc->pm_step); break; case PMP_STATE_CLEAR: softc->reset = 0; cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_write_cmd(ataio, 1, softc->pm_step, 0xFFFFFFFF); break; case PMP_STATE_CONFIG: cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_write_cmd(ataio, 0x60, 15, 0x07 | ((softc->caps & CTS_SATA_CAPS_H_AN) ? 0x08 : 0)); break; default: break; } xpt_action(start_ccb); } static void pmpdone(struct cam_periph *periph, union ccb *done_ccb) { struct ccb_trans_settings cts; struct pmp_softc *softc; struct ccb_ataio *ataio; struct cam_path *dpath; u_int32_t priority, res; int i; softc = (struct pmp_softc *)periph->softc; ataio = &done_ccb->ataio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("pmpdone\n")); priority = done_ccb->ccb_h.pinfo.priority; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (cam_periph_error(done_ccb, 0, 0) == ERESTART) { return; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } goto done; } if (softc->restart) { softc->restart = 0; xpt_release_ccb(done_ccb); if (softc->pm_pid == 0x37261095 || softc->pm_pid == 0x38261095) softc->state = min(softc->state, PMP_STATE_PM_QUIRKS_1); else softc->state = min(softc->state, PMP_STATE_PRECONFIG); xpt_schedule(periph, priority); return; } switch (softc->state) { case PMP_STATE_PORTS: softc->pm_ports = (ataio->res.lba_high << 24) + (ataio->res.lba_mid << 16) + (ataio->res.lba_low << 8) + ataio->res.sector_count; if (pmp_hide_special) { /* * This PMP declares 6 ports, while only 5 of them * are real. Port 5 is a SEMB port, probing which * causes timeouts if external SEP is not connected * to PMP over I2C. */ if ((softc->pm_pid == 0x37261095 || softc->pm_pid == 0x38261095) && softc->pm_ports == 6) softc->pm_ports = 5; /* * This PMP declares 7 ports, while only 5 of them * are real. Port 5 is a fake "Config Disk" with * 640 sectors size. Port 6 is a SEMB port. */ if (softc->pm_pid == 0x47261095 && softc->pm_ports == 7) softc->pm_ports = 5; /* * These PMPs have extra configuration port. 
*/ if (softc->pm_pid == 0x57231095 || softc->pm_pid == 0x57331095 || softc->pm_pid == 0x57341095 || softc->pm_pid == 0x57441095) softc->pm_ports--; } printf("%s%d: %d fan-out ports\n", periph->periph_name, periph->unit_number, softc->pm_ports); if (softc->pm_pid == 0x37261095 || softc->pm_pid == 0x38261095) softc->state = PMP_STATE_PM_QUIRKS_1; else softc->state = PMP_STATE_PRECONFIG; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_PM_QUIRKS_1: softc->caps = (ataio->res.lba_high << 24) + (ataio->res.lba_mid << 16) + (ataio->res.lba_low << 8) + ataio->res.sector_count; if (softc->caps & 0x1) softc->state = PMP_STATE_PM_QUIRKS_2; else softc->state = PMP_STATE_PRECONFIG; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_PM_QUIRKS_2: if (bootverbose) softc->state = PMP_STATE_PM_QUIRKS_3; else softc->state = PMP_STATE_PRECONFIG; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_PM_QUIRKS_3: res = (ataio->res.lba_high << 24) + (ataio->res.lba_mid << 16) + (ataio->res.lba_low << 8) + ataio->res.sector_count; printf("%s%d: Disabling SiI3x26 R_OK in GSCR_POLL: %x->%x\n", periph->periph_name, periph->unit_number, softc->caps, res); softc->state = PMP_STATE_PRECONFIG; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_PRECONFIG: softc->pm_step = 0; softc->state = PMP_STATE_RESET; softc->reset |= ~softc->found; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_RESET: softc->pm_step++; if (softc->pm_step >= softc->pm_ports) { softc->pm_step = 0; cam_freeze_devq(periph->path); cam_release_devq(periph->path, RELSIM_RELEASE_AFTER_TIMEOUT, /*reduction*/0, /*timeout*/5, /*getcount_only*/0); softc->state = PMP_STATE_CONNECT; } xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_CONNECT: softc->pm_step++; if (softc->pm_step >= softc->pm_ports) { softc->pm_step = 0; softc->pm_try = 0; cam_freeze_devq(periph->path); cam_release_devq(periph->path, RELSIM_RELEASE_AFTER_TIMEOUT, /*reduction*/0, /*timeout*/10, /*getcount_only*/0); softc->state = PMP_STATE_CHECK; } xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_CHECK: res = (ataio->res.lba_high << 24) + (ataio->res.lba_mid << 16) + (ataio->res.lba_low << 8) + ataio->res.sector_count; if (((res & 0xf0f) == 0x103 && (res & 0x0f0) != 0) || (res & 0x600) != 0) { if (bootverbose) { printf("%s%d: port %d status: %08x\n", periph->periph_name, periph->unit_number, softc->pm_step, res); } /* Report device speed if it is online. 
*/ if ((res & 0xf0f) == 0x103 && xpt_create_path(&dpath, periph, xpt_path_path_id(periph->path), softc->pm_step, 0) == CAM_REQ_CMP) { bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, dpath, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.xport_specific.sata.revision = (res & 0x0f0) >> 4; cts.xport_specific.sata.valid = CTS_SATA_VALID_REVISION; cts.xport_specific.sata.caps = softc->caps & (CTS_SATA_CAPS_H_PMREQ | CTS_SATA_CAPS_H_DMAAA | CTS_SATA_CAPS_H_AN); cts.xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; xpt_action((union ccb *)&cts); xpt_free_path(dpath); } softc->found |= (1 << softc->pm_step); softc->pm_step++; } else { if (softc->pm_try < 10) { cam_freeze_devq(periph->path); cam_release_devq(periph->path, RELSIM_RELEASE_AFTER_TIMEOUT, /*reduction*/0, /*timeout*/10, /*getcount_only*/0); softc->pm_try++; } else { if (bootverbose) { printf("%s%d: port %d status: %08x\n", periph->periph_name, periph->unit_number, softc->pm_step, res); } softc->found &= ~(1 << softc->pm_step); if (xpt_create_path(&dpath, periph, done_ccb->ccb_h.path_id, softc->pm_step, 0) == CAM_REQ_CMP) { xpt_async(AC_LOST_DEVICE, dpath, NULL); xpt_free_path(dpath); } softc->pm_step++; } } if (softc->pm_step >= softc->pm_ports) { if (softc->reset & softc->found) { cam_freeze_devq(periph->path); cam_release_devq(periph->path, RELSIM_RELEASE_AFTER_TIMEOUT, /*reduction*/0, /*timeout*/1000, /*getcount_only*/0); } softc->state = PMP_STATE_CLEAR; softc->pm_step = 0; } xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_CLEAR: softc->pm_step++; if (softc->pm_step >= softc->pm_ports) { softc->state = PMP_STATE_CONFIG; softc->pm_step = 0; } xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_CONFIG: for (i = 0; i < softc->pm_ports; i++) { union ccb *ccb; if ((softc->found & (1 << i)) == 0) continue; if (xpt_create_path(&dpath, periph, xpt_path_path_id(periph->path), i, 0) != CAM_REQ_CMP) { printf("pmpdone: xpt_create_path failed\n"); continue; } /* If we did hard reset to this device, inform XPT. */ if ((softc->reset & softc->found & (1 << i)) != 0) xpt_async(AC_SENT_BDR, dpath, NULL); /* If rescan requested, scan this device. */ if (softc->events & PMP_EV_RESCAN) { ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { xpt_free_path(dpath); goto done; } xpt_setup_ccb(&ccb->ccb_h, dpath, CAM_PRIORITY_XPT); xpt_rescan(ccb); } else xpt_free_path(dpath); } break; default: break; } done: xpt_release_ccb(done_ccb); softc->state = PMP_STATE_NORMAL; softc->events = 0; xpt_release_boot(); pmprelease(periph, -1); cam_periph_release_locked(periph); } #endif /* _KERNEL */ Index: head/sys/cam/ata/ata_xpt.c =================================================================== --- head/sys/cam/ata/ata_xpt.c (revision 328917) +++ head/sys/cam/ata/ata_xpt.c (revision 328918) @@ -1,2282 +1,2280 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for xpt_print below */ #include "opt_cam.h" struct ata_quirk_entry { struct scsi_inquiry_pattern inq_pat; u_int8_t quirks; #define CAM_QUIRK_MAXTAGS 0x01 u_int mintags; u_int maxtags; }; static periph_init_t probe_periph_init; static struct periph_driver probe_driver = { probe_periph_init, "aprobe", TAILQ_HEAD_INITIALIZER(probe_driver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(aprobe, probe_driver); typedef enum { PROBE_RESET, PROBE_IDENTIFY, PROBE_SPINUP, PROBE_SETMODE, PROBE_SETPM, PROBE_SETAPST, PROBE_SETDMAAA, PROBE_SETAN, PROBE_SET_MULTI, PROBE_INQUIRY, PROBE_FULL_INQUIRY, PROBE_PM_PID, PROBE_PM_PRV, PROBE_IDENTIFY_SES, PROBE_IDENTIFY_SAFTE, PROBE_DONE, PROBE_INVALID } probe_action; static char *probe_action_text[] = { "PROBE_RESET", "PROBE_IDENTIFY", "PROBE_SPINUP", "PROBE_SETMODE", "PROBE_SETPM", "PROBE_SETAPST", "PROBE_SETDMAAA", "PROBE_SETAN", "PROBE_SET_MULTI", "PROBE_INQUIRY", "PROBE_FULL_INQUIRY", "PROBE_PM_PID", "PROBE_PM_PRV", "PROBE_IDENTIFY_SES", "PROBE_IDENTIFY_SAFTE", "PROBE_DONE", "PROBE_INVALID" }; #define PROBE_SET_ACTION(softc, newaction) \ do { \ char **text; \ text = probe_action_text; \ CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE, \ ("Probe %s to %s\n", text[(softc)->action], \ text[(newaction)])); \ (softc)->action = (newaction); \ } while(0) typedef enum { PROBE_NO_ANNOUNCE = 0x04 } probe_flags; typedef struct { TAILQ_HEAD(, ccb_hdr) request_ccbs; struct ata_params ident_data; probe_action action; probe_flags flags; uint32_t pm_pid; uint32_t pm_prv; int restart; int spinup; int faults; u_int caps; struct cam_periph *periph; } probe_softc; static struct ata_quirk_entry ata_quirk_table[] = { { /* Default tagged queuing parameters for all devices */ { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, /*vendor*/"*", /*product*/"*", /*revision*/"*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, }; static cam_status proberegister(struct cam_periph *periph, void *arg); static void probeschedule(struct cam_periph *probe_periph); static void probestart(struct cam_periph *periph, union ccb *start_ccb); static void proberequestdefaultnegotiation(struct cam_periph *periph); static void probedone(struct cam_periph *periph, union ccb *done_ccb); static void probecleanup(struct cam_periph *periph); static void ata_find_quirk(struct 
cam_ed *device); static void ata_scan_bus(struct cam_periph *periph, union ccb *ccb); static void ata_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *ccb); static void xptscandone(struct cam_periph *periph, union ccb *done_ccb); static struct cam_ed * ata_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); static void ata_device_transport(struct cam_path *path); static void ata_get_transfer_settings(struct ccb_trans_settings *cts); static void ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path, int async_update); static void ata_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg); static void ata_action(union ccb *start_ccb); static void ata_announce_periph(struct cam_periph *periph); static void ata_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb); static void ata_proto_announce(struct cam_ed *device); static void ata_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb); static void ata_proto_denounce(struct cam_ed *device); static void ata_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb); static void ata_proto_debug_out(union ccb *ccb); static void semb_proto_announce(struct cam_ed *device); static void semb_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb); static void semb_proto_denounce(struct cam_ed *device); static void semb_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb); static int ata_dma = 1; static int atapi_dma = 1; TUNABLE_INT("hw.ata.ata_dma", &ata_dma); TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma); static struct xpt_xport_ops ata_xport_ops = { .alloc_device = ata_alloc_device, .action = ata_action, .async = ata_dev_async, .announce = ata_announce_periph, .announce_sbuf = ata_announce_periph_sbuf, }; #define ATA_XPT_XPORT(x, X) \ static struct xpt_xport ata_xport_ ## x = { \ .xport = XPORT_ ## X, \ .name = #x, \ .ops = &ata_xport_ops, \ }; \ CAM_XPT_XPORT(ata_xport_ ## x); ATA_XPT_XPORT(ata, ATA); ATA_XPT_XPORT(sata, SATA); #undef ATA_XPORT_XPORT static struct xpt_proto_ops ata_proto_ops_ata = { .announce = ata_proto_announce, .announce_sbuf = ata_proto_announce_sbuf, .denounce = ata_proto_denounce, .denounce_sbuf = ata_proto_denounce_sbuf, .debug_out = ata_proto_debug_out, }; static struct xpt_proto ata_proto_ata = { .proto = PROTO_ATA, .name = "ata", .ops = &ata_proto_ops_ata, }; static struct xpt_proto_ops ata_proto_ops_satapm = { .announce = ata_proto_announce, .announce_sbuf = ata_proto_announce_sbuf, .denounce = ata_proto_denounce, .denounce_sbuf = ata_proto_denounce_sbuf, .debug_out = ata_proto_debug_out, }; static struct xpt_proto ata_proto_satapm = { .proto = PROTO_SATAPM, .name = "satapm", .ops = &ata_proto_ops_satapm, }; static struct xpt_proto_ops ata_proto_ops_semb = { .announce = semb_proto_announce, .announce_sbuf = semb_proto_announce_sbuf, .denounce = semb_proto_denounce, .denounce_sbuf = semb_proto_denounce_sbuf, .debug_out = ata_proto_debug_out, }; static struct xpt_proto ata_proto_semb = { .proto = PROTO_SEMB, .name = "semb", .ops = &ata_proto_ops_semb, }; CAM_XPT_PROTO(ata_proto_ata); CAM_XPT_PROTO(ata_proto_satapm); CAM_XPT_PROTO(ata_proto_semb); static void probe_periph_init() { } static cam_status proberegister(struct cam_periph *periph, void *arg) { union ccb *request_ccb; /* CCB representing the probe request */ - cam_status status; probe_softc *softc; request_ccb = (union ccb *)arg; if (request_ccb == NULL) { printf("proberegister: no probe 
CCB, " "can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_ZERO | M_NOWAIT); if (softc == NULL) { printf("proberegister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } TAILQ_INIT(&softc->request_ccbs); TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); softc->flags = 0; periph->softc = softc; softc->periph = periph; softc->action = PROBE_INVALID; - status = cam_periph_acquire(periph); - if (status != CAM_REQ_CMP) { - return (status); - } + if (cam_periph_acquire(periph) != 0) + return (CAM_REQ_CMP_ERR); + CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n")); ata_device_transport(periph->path); probeschedule(periph); return(CAM_REQ_CMP); } static void probeschedule(struct cam_periph *periph) { union ccb *ccb; probe_softc *softc; softc = (probe_softc *)periph->softc; ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) || periph->path->device->protocol == PROTO_SATAPM || periph->path->device->protocol == PROTO_SEMB) PROBE_SET_ACTION(softc, PROBE_RESET); else PROBE_SET_ACTION(softc, PROBE_IDENTIFY); if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE) softc->flags |= PROBE_NO_ANNOUNCE; else softc->flags &= ~PROBE_NO_ANNOUNCE; xpt_schedule(periph, CAM_PRIORITY_XPT); } static void probestart(struct cam_periph *periph, union ccb *start_ccb) { struct ccb_trans_settings cts; struct ccb_ataio *ataio; struct ccb_scsiio *csio; probe_softc *softc; struct cam_path *path; struct ata_params *ident_buf; CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n")); softc = (probe_softc *)periph->softc; path = start_ccb->ccb_h.path; ataio = &start_ccb->ataio; csio = &start_ccb->csio; ident_buf = &periph->path->device->ident_data; if (softc->restart) { softc->restart = 0; if ((path->device->flags & CAM_DEV_UNCONFIGURED) || path->device->protocol == PROTO_SATAPM || path->device->protocol == PROTO_SEMB) softc->action = PROBE_RESET; else softc->action = PROBE_IDENTIFY; } switch (softc->action) { case PROBE_RESET: cam_fill_ataio(ataio, 0, probedone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, 15 * 1000); ata_reset_cmd(ataio); break; case PROBE_IDENTIFY: cam_fill_ataio(ataio, 1, probedone, /*flags*/CAM_DIR_IN, 0, /*data_ptr*/(u_int8_t *)&softc->ident_data, /*dxfer_len*/sizeof(softc->ident_data), 30 * 1000); if (periph->path->device->protocol == PROTO_ATA) ata_28bit_cmd(ataio, ATA_ATA_IDENTIFY, 0, 0, 0); else ata_28bit_cmd(ataio, ATA_ATAPI_IDENTIFY, 0, 0, 0); break; case PROBE_SPINUP: if (bootverbose) xpt_print(path, "Spinning up device\n"); cam_fill_ataio(ataio, 1, probedone, /*flags*/CAM_DIR_NONE | CAM_HIGH_POWER, 0, /*data_ptr*/NULL, /*dxfer_len*/0, 30 * 1000); ata_28bit_cmd(ataio, ATA_SETFEATURES, ATA_SF_PUIS_SPINUP, 0, 0); break; case PROBE_SETMODE: { int mode, wantmode; mode = 0; /* Fetch user modes from SIM. 
*/ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if (path->device->transport == XPORT_ATA) { if (cts.xport_specific.ata.valid & CTS_ATA_VALID_MODE) mode = cts.xport_specific.ata.mode; } else { if (cts.xport_specific.sata.valid & CTS_SATA_VALID_MODE) mode = cts.xport_specific.sata.mode; } if (periph->path->device->protocol == PROTO_ATA) { if (ata_dma == 0 && (mode == 0 || mode > ATA_PIO_MAX)) mode = ATA_PIO_MAX; } else { if (atapi_dma == 0 && (mode == 0 || mode > ATA_PIO_MAX)) mode = ATA_PIO_MAX; } negotiate: /* Honor device capabilities. */ wantmode = mode = ata_max_mode(ident_buf, mode); /* Report modes to SIM. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; if (path->device->transport == XPORT_ATA) { cts.xport_specific.ata.mode = mode; cts.xport_specific.ata.valid = CTS_ATA_VALID_MODE; } else { cts.xport_specific.sata.mode = mode; cts.xport_specific.sata.valid = CTS_SATA_VALID_MODE; } xpt_action((union ccb *)&cts); /* Fetch current modes from SIM. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (path->device->transport == XPORT_ATA) { if (cts.xport_specific.ata.valid & CTS_ATA_VALID_MODE) mode = cts.xport_specific.ata.mode; } else { if (cts.xport_specific.sata.valid & CTS_SATA_VALID_MODE) mode = cts.xport_specific.sata.mode; } /* If the SIM disagrees, renegotiate. */ if (mode != wantmode) goto negotiate; /* Remember what transport thinks about DMA. */ if (mode < ATA_DMA) path->device->inq_flags &= ~SID_DMA; else path->device->inq_flags |= SID_DMA; xpt_async(AC_GETDEV_CHANGED, path, NULL); cam_fill_ataio(ataio, 1, probedone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, 30 * 1000); ata_28bit_cmd(ataio, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); break; } case PROBE_SETPM: cam_fill_ataio(ataio, 1, probedone, CAM_DIR_NONE, 0, NULL, 0, 30*1000); ata_28bit_cmd(ataio, ATA_SETFEATURES, (softc->caps & CTS_SATA_CAPS_H_PMREQ) ? 0x10 : 0x90, 0, 0x03); break; case PROBE_SETAPST: cam_fill_ataio(ataio, 1, probedone, CAM_DIR_NONE, 0, NULL, 0, 30*1000); ata_28bit_cmd(ataio, ATA_SETFEATURES, (softc->caps & CTS_SATA_CAPS_H_APST) ? 0x10 : 0x90, 0, 0x07); break; case PROBE_SETDMAAA: cam_fill_ataio(ataio, 1, probedone, CAM_DIR_NONE, 0, NULL, 0, 30*1000); ata_28bit_cmd(ataio, ATA_SETFEATURES, (softc->caps & CTS_SATA_CAPS_H_DMAAA) ? 0x10 : 0x90, 0, 0x02); break; case PROBE_SETAN: /* Remember what transport thinks about AEN. */ if (softc->caps & CTS_SATA_CAPS_H_AN) path->device->inq_flags |= SID_AEN; else path->device->inq_flags &= ~SID_AEN; xpt_async(AC_GETDEV_CHANGED, path, NULL); cam_fill_ataio(ataio, 1, probedone, CAM_DIR_NONE, 0, NULL, 0, 30*1000); ata_28bit_cmd(ataio, ATA_SETFEATURES, (softc->caps & CTS_SATA_CAPS_H_AN) ? 0x10 : 0x90, 0, 0x05); break; case PROBE_SET_MULTI: { u_int sectors, bytecount; bytecount = 8192; /* SATA maximum */ /* Fetch user bytecount from SIM.
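 * The resulting READ/WRITE MULTIPLE sector count is the smaller of
 * what the device advertises (the low byte of IDENTIFY word 47,
 * sectors_intr) and what the SIM's byte budget allows, clamped to
 * at least 1. A worked example with hypothetical values -- 512-byte
 * logical sectors, an 8192-byte budget and sectors_intr == 8:
 *
 *	sectors = max(1, min(8, 8192 / 512));	// == 8
 *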
*/ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if (path->device->transport == XPORT_ATA) { if (cts.xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT) bytecount = cts.xport_specific.ata.bytecount; } else { if (cts.xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) bytecount = cts.xport_specific.sata.bytecount; } /* Honor device capabilities. */ sectors = max(1, min(ident_buf->sectors_intr & 0xff, bytecount / ata_logical_sector_size(ident_buf))); /* Report bytecount to SIM. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; if (path->device->transport == XPORT_ATA) { cts.xport_specific.ata.bytecount = sectors * ata_logical_sector_size(ident_buf); cts.xport_specific.ata.valid = CTS_ATA_VALID_BYTECOUNT; } else { cts.xport_specific.sata.bytecount = sectors * ata_logical_sector_size(ident_buf); cts.xport_specific.sata.valid = CTS_SATA_VALID_BYTECOUNT; } xpt_action((union ccb *)&cts); /* Fetch current bytecount from SIM. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (path->device->transport == XPORT_ATA) { if (cts.xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT) bytecount = cts.xport_specific.ata.bytecount; } else { if (cts.xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) bytecount = cts.xport_specific.sata.bytecount; } sectors = bytecount / ata_logical_sector_size(ident_buf); cam_fill_ataio(ataio, 1, probedone, CAM_DIR_NONE, 0, NULL, 0, 30*1000); ata_28bit_cmd(ataio, ATA_SET_MULTI, 0, 0, sectors); break; } case PROBE_INQUIRY: { u_int bytecount; bytecount = 8192; /* SATA maximum */ /* Fetch user bytecount from SIM. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if (path->device->transport == XPORT_ATA) { if (cts.xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT) bytecount = cts.xport_specific.ata.bytecount; } else { if (cts.xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) bytecount = cts.xport_specific.sata.bytecount; } /* Honor device capabilities. */ bytecount &= ~1; bytecount = max(2, min(65534, bytecount)); if (ident_buf->satacapabilities != 0x0000 && ident_buf->satacapabilities != 0xffff) { bytecount = min(8192, bytecount); } /* Report bytecount to SIM. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; if (path->device->transport == XPORT_ATA) { cts.xport_specific.ata.bytecount = bytecount; cts.xport_specific.ata.valid = CTS_ATA_VALID_BYTECOUNT; } else { cts.xport_specific.sata.bytecount = bytecount; cts.xport_specific.sata.valid = CTS_SATA_VALID_BYTECOUNT; } xpt_action((union ccb *)&cts); /* FALLTHROUGH */ } case PROBE_FULL_INQUIRY: { u_int inquiry_len; struct scsi_inquiry_data *inq_buf = &periph->path->device->inq_data; if (softc->action == PROBE_INQUIRY) inquiry_len = SHORT_INQUIRY_LENGTH; else inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf); /* * Some parallel SCSI devices fail to send an * ignore wide residue message when dealing with * odd length inquiry requests. Round up to be * safe. 
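 * roundup2() rounds its first argument up to the given power-of-two
 * boundary, so an odd length such as 37 becomes 38 while an even
 * length passes through unchanged:
 *
 *	inquiry_len = roundup2(37, 2);	// == 38
 *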
*/ inquiry_len = roundup2(inquiry_len, 2); scsi_inquiry(csio, /*retries*/1, probedone, MSG_SIMPLE_Q_TAG, (u_int8_t *)inq_buf, inquiry_len, /*evpd*/FALSE, /*page_code*/0, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } case PROBE_PM_PID: cam_fill_ataio(ataio, 1, probedone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, 10 * 1000); ata_pm_read_cmd(ataio, 0, 15); break; case PROBE_PM_PRV: cam_fill_ataio(ataio, 1, probedone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, 10 * 1000); ata_pm_read_cmd(ataio, 1, 15); break; case PROBE_IDENTIFY_SES: cam_fill_ataio(ataio, 1, probedone, /*flags*/CAM_DIR_IN, 0, /*data_ptr*/(u_int8_t *)&softc->ident_data, /*dxfer_len*/sizeof(softc->ident_data), 30 * 1000); ata_28bit_cmd(ataio, ATA_SEP_ATTN, 0xEC, 0x02, sizeof(softc->ident_data) / 4); break; case PROBE_IDENTIFY_SAFTE: cam_fill_ataio(ataio, 1, probedone, /*flags*/CAM_DIR_IN, 0, /*data_ptr*/(u_int8_t *)&softc->ident_data, /*dxfer_len*/sizeof(softc->ident_data), 30 * 1000); ata_28bit_cmd(ataio, ATA_SEP_ATTN, 0xEC, 0x00, sizeof(softc->ident_data) / 4); break; default: panic("probestart: invalid action state 0x%x\n", softc->action); } start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; xpt_action(start_ccb); } static void proberequestdefaultnegotiation(struct cam_periph *periph) { struct ccb_trans_settings cts; xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) return; cts.xport_specific.valid = 0; cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); } static void probedone(struct cam_periph *periph, union ccb *done_ccb) { struct ccb_trans_settings cts; struct ata_params *ident_buf; struct scsi_inquiry_data *inq_buf; probe_softc *softc; struct cam_path *path; cam_status status; u_int32_t priority; u_int caps; int changed = 1, found = 1; static const uint8_t fake_device_id_hdr[8] = {0, SVPD_DEVICE_ID, 0, 12, SVPD_ID_CODESET_BINARY, SVPD_ID_TYPE_NAA, 0, 8}; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n")); softc = (probe_softc *)periph->softc; path = done_ccb->ccb_h.path; priority = done_ccb->ccb_h.pinfo.priority; ident_buf = &path->device->ident_data; inq_buf = &path->device->inq_data; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (cam_periph_error(done_ccb, 0, softc->restart ? (SF_NO_RECOVERY | SF_NO_RETRY) : 0 ) == ERESTART) { out: /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */ cam_release_devq(path, 0, 0, 0, FALSE); return; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); } status = done_ccb->ccb_h.status & CAM_STATUS_MASK; if (softc->restart) { softc->faults++; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) softc->faults += 4; if (softc->faults < 10) goto done; else softc->restart = 0; /* Old PIO2 devices may not support mode setting. */ } else if (softc->action == PROBE_SETMODE && status == CAM_ATA_STATUS_ERROR && ata_max_pmode(ident_buf) <= ATA_PIO2 && (ident_buf->capabilities1 & ATA_SUPPORT_IORDY) == 0) { goto noerror; /* * Some old WD SATA disks report supported and enabled * device-initiated interface power management, but return * ABORT on attempt to disable it. 
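 * The "goto noerror" cases below all follow the same pattern: an
 * ABORT from a known-quirky device class is deliberately ignored so
 * the probe can proceed with its remaining steps instead of the
 * device being dropped.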
*/ } else if (softc->action == PROBE_SETPM && status == CAM_ATA_STATUS_ERROR) { goto noerror; /* * Some old WD SATA disks have broken SPINUP handling. * If we really fail to spin up the disk, then there will be * some media access errors later on, but at least we will * have a device to interact with for recovery attempts. */ } else if (softc->action == PROBE_SPINUP && status == CAM_ATA_STATUS_ERROR) { goto noerror; /* * Some HP SATA disks report supported DMA Auto-Activation, * but return ABORT on attempt to enable it. */ } else if (softc->action == PROBE_SETDMAAA && status == CAM_ATA_STATUS_ERROR) { goto noerror; /* * SES and SAF-TE SEPs have different IDENTIFY commands, * but the SATA specification doesn't say how to tell them apart. * Until a better way is found, just try the other one if the * first fails. */ } else if (softc->action == PROBE_IDENTIFY_SES && status == CAM_ATA_STATUS_ERROR) { PROBE_SET_ACTION(softc, PROBE_IDENTIFY_SAFTE); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } /* * If we get to this point, we got an error status back * from the inquiry and the error status doesn't require * automatically retrying the command. Therefore, the * inquiry failed. If we had inquiry information before * for this device, but this latest inquiry command failed, * the device has probably gone away. If this device isn't * already marked unconfigured, notify the peripheral * drivers that this device is no more. */ device_fail: if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) xpt_async(AC_LOST_DEVICE, path, NULL); PROBE_SET_ACTION(softc, PROBE_INVALID); found = 0; goto done; } noerror: if (softc->restart) goto done; switch (softc->action) { case PROBE_RESET: { int sign = (done_ccb->ataio.res.lba_high << 8) + done_ccb->ataio.res.lba_mid; CAM_DEBUG(path, CAM_DEBUG_PROBE, ("SIGNATURE: %04x\n", sign)); if (sign == 0x0000 && done_ccb->ccb_h.target_id != 15) { path->device->protocol = PROTO_ATA; PROBE_SET_ACTION(softc, PROBE_IDENTIFY); } else if (sign == 0x9669 && done_ccb->ccb_h.target_id == 15) { /* Report SIM that PM is present. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.xport_specific.sata.pm_present = 1; cts.xport_specific.sata.valid = CTS_SATA_VALID_PM; xpt_action((union ccb *)&cts); path->device->protocol = PROTO_SATAPM; PROBE_SET_ACTION(softc, PROBE_PM_PID); } else if (sign == 0xc33c && done_ccb->ccb_h.target_id != 15) { path->device->protocol = PROTO_SEMB; PROBE_SET_ACTION(softc, PROBE_IDENTIFY_SES); } else if (sign == 0xeb14 && done_ccb->ccb_h.target_id != 15) { path->device->protocol = PROTO_SCSI; PROBE_SET_ACTION(softc, PROBE_IDENTIFY); } else { if (done_ccb->ccb_h.target_id != 15) { xpt_print(path, "Unexpected signature 0x%04x\n", sign); } goto device_fail; } xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } case PROBE_IDENTIFY: { struct ccb_pathinq cpi; int16_t *ptr; int veto = 0; ident_buf = &softc->ident_data; for (ptr = (int16_t *)ident_buf; ptr < (int16_t *)ident_buf + sizeof(struct ata_params)/2; ptr++) { *ptr = le16toh(*ptr); } /* * Allow others to veto this ATA disk attachment. This * is mainly used by VMs, whose disk controllers may * share the disks with the simulated ATA controllers.
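 *
 * A consumer registers for this event and bumps *veto to claim a
 * disk. A minimal, hypothetical handler -- the function name and
 * the model string are illustrative assumptions, not part of this
 * file -- could look like:
 *
 *	static void
 *	my_probe_veto(void *arg, struct cam_path *path,
 *	    struct ata_params *ident_buf, int *veto)
 *	{
 *		// Claim every disk whose model begins with "VBOX".
 *		if (strncmp(ident_buf->model, "VBOX", 4) == 0)
 *			(*veto)++;
 *	}
 *
 * registered once at the consumer's setup time with:
 *
 *	EVENTHANDLER_REGISTER(ada_probe_veto, my_probe_veto, NULL,
 *	    EVENTHANDLER_PRI_ANY);
 *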
*/ EVENTHANDLER_INVOKE(ada_probe_veto, path, ident_buf, &veto); if (veto) { goto device_fail; } if (strncmp(ident_buf->model, "FX", 2) && strncmp(ident_buf->model, "NEC", 3) && strncmp(ident_buf->model, "Pioneer", 7) && strncmp(ident_buf->model, "SHARP", 5)) { ata_bswap(ident_buf->model, sizeof(ident_buf->model)); ata_bswap(ident_buf->revision, sizeof(ident_buf->revision)); ata_bswap(ident_buf->serial, sizeof(ident_buf->serial)); } ata_btrim(ident_buf->model, sizeof(ident_buf->model)); ata_bpack(ident_buf->model, ident_buf->model, sizeof(ident_buf->model)); ata_btrim(ident_buf->revision, sizeof(ident_buf->revision)); ata_bpack(ident_buf->revision, ident_buf->revision, sizeof(ident_buf->revision)); ata_btrim(ident_buf->serial, sizeof(ident_buf->serial)); ata_bpack(ident_buf->serial, ident_buf->serial, sizeof(ident_buf->serial)); /* Device may need spin-up before IDENTIFY become valid. */ if ((ident_buf->specconf == 0x37c8 || ident_buf->specconf == 0x738c) && ((ident_buf->config & ATA_RESP_INCOMPLETE) || softc->spinup == 0)) { PROBE_SET_ACTION(softc, PROBE_SPINUP); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } ident_buf = &path->device->ident_data; if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) { /* Check that it is the same device. */ if (bcmp(softc->ident_data.model, ident_buf->model, sizeof(ident_buf->model)) || bcmp(softc->ident_data.revision, ident_buf->revision, sizeof(ident_buf->revision)) || bcmp(softc->ident_data.serial, ident_buf->serial, sizeof(ident_buf->serial))) { /* Device changed. */ xpt_async(AC_LOST_DEVICE, path, NULL); } else { bcopy(&softc->ident_data, ident_buf, sizeof(struct ata_params)); changed = 0; } } if (changed) { bcopy(&softc->ident_data, ident_buf, sizeof(struct ata_params)); /* Clean up from previous instance of this device */ if (path->device->serial_num != NULL) { free(path->device->serial_num, M_CAMXPT); path->device->serial_num = NULL; path->device->serial_num_len = 0; } if (path->device->device_id != NULL) { free(path->device->device_id, M_CAMXPT); path->device->device_id = NULL; path->device->device_id_len = 0; } path->device->serial_num = (u_int8_t *)malloc((sizeof(ident_buf->serial) + 1), M_CAMXPT, M_NOWAIT); if (path->device->serial_num != NULL) { bcopy(ident_buf->serial, path->device->serial_num, sizeof(ident_buf->serial)); path->device->serial_num[sizeof(ident_buf->serial)] = '\0'; path->device->serial_num_len = strlen(path->device->serial_num); } if (ident_buf->enabled.extension & ATA_SUPPORT_64BITWWN) { path->device->device_id = malloc(16, M_CAMXPT, M_NOWAIT); if (path->device->device_id != NULL) { path->device->device_id_len = 16; bcopy(&fake_device_id_hdr, path->device->device_id, 8); bcopy(ident_buf->wwn, path->device->device_id + 8, 8); ata_bswap(path->device->device_id + 8, 8); } } path->device->flags |= CAM_DEV_IDENTIFY_DATA_VALID; xpt_async(AC_GETDEV_CHANGED, path, NULL); } if (ident_buf->satacapabilities & ATA_SUPPORT_NCQ) { path->device->mintags = 2; path->device->maxtags = ATA_QUEUE_LEN(ident_buf->queue) + 1; } ata_find_quirk(path->device); if (path->device->mintags != 0 && path->bus->sim->max_tagged_dev_openings != 0) { /* Check if the SIM does not want queued commands. */ xpt_path_inq(&cpi, path); if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_inquiry & PI_TAG_ABLE)) { /* Report SIM which tags are allowed. 
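 * maxtags was derived above from IDENTIFY word 75, which encodes
 * the NCQ queue depth minus one; a device reporting 31 there
 * therefore ends up with:
 *
 *	path->device->maxtags = ATA_QUEUE_LEN(ident_buf->queue) + 1;
 *	// == 32 outstanding commands
 *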
*/ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.xport_specific.sata.tags = path->device->maxtags; cts.xport_specific.sata.valid = CTS_SATA_VALID_TAGS; xpt_action((union ccb *)&cts); } } ata_device_transport(path); if (changed) proberequestdefaultnegotiation(periph); PROBE_SET_ACTION(softc, PROBE_SETMODE); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } case PROBE_SPINUP: if (bootverbose) xpt_print(path, "Spin-up done\n"); softc->spinup = 1; PROBE_SET_ACTION(softc, PROBE_IDENTIFY); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; case PROBE_SETMODE: /* Set supported bits. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (path->device->transport == XPORT_SATA && cts.xport_specific.sata.valid & CTS_SATA_VALID_CAPS) caps = cts.xport_specific.sata.caps & CTS_SATA_CAPS_H; else if (path->device->transport == XPORT_ATA && cts.xport_specific.ata.valid & CTS_ATA_VALID_CAPS) caps = cts.xport_specific.ata.caps & CTS_ATA_CAPS_H; else caps = 0; if (path->device->transport == XPORT_SATA && ident_buf->satacapabilities != 0xffff) { if (ident_buf->satacapabilities & ATA_SUPPORT_IFPWRMNGTRCV) caps |= CTS_SATA_CAPS_D_PMREQ; if (ident_buf->satacapabilities & ATA_SUPPORT_HAPST) caps |= CTS_SATA_CAPS_D_APST; } /* Mask unwanted bits. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if (path->device->transport == XPORT_SATA && cts.xport_specific.sata.valid & CTS_SATA_VALID_CAPS) caps &= cts.xport_specific.sata.caps; else if (path->device->transport == XPORT_ATA && cts.xport_specific.ata.valid & CTS_ATA_VALID_CAPS) caps &= cts.xport_specific.ata.caps; else caps = 0; /* * Remember what transport thinks about 48-bit DMA. If * capability information is not provided or transport is * SATA, we take support for granted. */ if (!(path->device->inq_flags & SID_DMA) || (path->device->transport == XPORT_ATA && (cts.xport_specific.ata.valid & CTS_ATA_VALID_CAPS) && !(caps & CTS_ATA_CAPS_H_DMA48))) path->device->inq_flags &= ~SID_DMA48; else path->device->inq_flags |= SID_DMA48; /* Store result to SIM. 
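 * Note that the negotiation just performed is intersection-based:
 * start from the host capabilities the SIM reports, add bits
 * implied by the IDENTIFY data, then AND with the user settings so
 * individual features can be disabled administratively.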
*/ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; if (path->device->transport == XPORT_SATA) { cts.xport_specific.sata.caps = caps; cts.xport_specific.sata.valid = CTS_SATA_VALID_CAPS; } else { cts.xport_specific.ata.caps = caps; cts.xport_specific.ata.valid = CTS_ATA_VALID_CAPS; } xpt_action((union ccb *)&cts); softc->caps = caps; if (path->device->transport != XPORT_SATA) goto notsata; if ((ident_buf->satasupport & ATA_SUPPORT_IFPWRMNGT) && (!(softc->caps & CTS_SATA_CAPS_H_PMREQ)) != (!(ident_buf->sataenabled & ATA_SUPPORT_IFPWRMNGT))) { PROBE_SET_ACTION(softc, PROBE_SETPM); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } /* FALLTHROUGH */ case PROBE_SETPM: if (ident_buf->satacapabilities != 0xffff && (ident_buf->satacapabilities & ATA_SUPPORT_DAPST) && (!(softc->caps & CTS_SATA_CAPS_H_APST)) != (!(ident_buf->sataenabled & ATA_ENABLED_DAPST))) { PROBE_SET_ACTION(softc, PROBE_SETAPST); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } /* FALLTHROUGH */ case PROBE_SETAPST: if ((ident_buf->satasupport & ATA_SUPPORT_AUTOACTIVATE) && (!(softc->caps & CTS_SATA_CAPS_H_DMAAA)) != (!(ident_buf->sataenabled & ATA_SUPPORT_AUTOACTIVATE))) { PROBE_SET_ACTION(softc, PROBE_SETDMAAA); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } /* FALLTHROUGH */ case PROBE_SETDMAAA: if (path->device->protocol != PROTO_ATA && (ident_buf->satasupport & ATA_SUPPORT_ASYNCNOTIF) && (!(softc->caps & CTS_SATA_CAPS_H_AN)) != (!(ident_buf->sataenabled & ATA_SUPPORT_ASYNCNOTIF))) { PROBE_SET_ACTION(softc, PROBE_SETAN); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } /* FALLTHROUGH */ case PROBE_SETAN: notsata: if (path->device->protocol == PROTO_ATA) { PROBE_SET_ACTION(softc, PROBE_SET_MULTI); } else { PROBE_SET_ACTION(softc, PROBE_INQUIRY); } xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; case PROBE_SET_MULTI: if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, path, done_ccb); } PROBE_SET_ACTION(softc, PROBE_DONE); break; case PROBE_INQUIRY: case PROBE_FULL_INQUIRY: { u_int8_t periph_qual, len; path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID; periph_qual = SID_QUAL(inq_buf); if (periph_qual != SID_QUAL_LU_CONNECTED && periph_qual != SID_QUAL_LU_OFFLINE) break; /* * We conservatively request only * SHORT_INQUIRY_LEN bytes of inquiry * information during our first try * at sending an INQUIRY. If the device * has more information to give, * perform a second request specifying * the amount of information the device * is willing to give. 
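 * The full length is the device-reported additional_length plus
 * the bytes up to and including that field. With a hypothetical
 * additional_length of 146:
 *
 *	len = 146 + offsetof(struct scsi_inquiry_data,
 *	    additional_length) + 1;	// == 151 > SHORT_INQUIRY_LENGTH
 *
 * which is what triggers the second, PROBE_FULL_INQUIRY pass.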
*/ len = inq_buf->additional_length + offsetof(struct scsi_inquiry_data, additional_length) + 1; if (softc->action == PROBE_INQUIRY && len > SHORT_INQUIRY_LENGTH) { PROBE_SET_ACTION(softc, PROBE_FULL_INQUIRY); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } ata_device_transport(path); if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, path, done_ccb); } PROBE_SET_ACTION(softc, PROBE_DONE); break; } case PROBE_PM_PID: if ((path->device->flags & CAM_DEV_IDENTIFY_DATA_VALID) == 0) bzero(ident_buf, sizeof(*ident_buf)); softc->pm_pid = (done_ccb->ataio.res.lba_high << 24) + (done_ccb->ataio.res.lba_mid << 16) + (done_ccb->ataio.res.lba_low << 8) + done_ccb->ataio.res.sector_count; ((uint32_t *)ident_buf)[0] = softc->pm_pid; snprintf(ident_buf->model, sizeof(ident_buf->model), "Port Multiplier %08x", softc->pm_pid); PROBE_SET_ACTION(softc, PROBE_PM_PRV); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; case PROBE_PM_PRV: softc->pm_prv = (done_ccb->ataio.res.lba_high << 24) + (done_ccb->ataio.res.lba_mid << 16) + (done_ccb->ataio.res.lba_low << 8) + done_ccb->ataio.res.sector_count; ((uint32_t *)ident_buf)[1] = softc->pm_prv; snprintf(ident_buf->revision, sizeof(ident_buf->revision), "%04x", softc->pm_prv); path->device->flags |= CAM_DEV_IDENTIFY_DATA_VALID; ata_device_transport(path); if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) proberequestdefaultnegotiation(periph); /* Set supported bits. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (cts.xport_specific.sata.valid & CTS_SATA_VALID_CAPS) caps = cts.xport_specific.sata.caps & CTS_SATA_CAPS_H; else caps = 0; /* All PMPs must support PM requests. */ caps |= CTS_SATA_CAPS_D_PMREQ; /* Mask unwanted bits. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if (cts.xport_specific.sata.valid & CTS_SATA_VALID_CAPS) caps &= cts.xport_specific.sata.caps; else caps = 0; /* Remember what transport thinks about AEN. */ if ((caps & CTS_SATA_CAPS_H_AN) && path->device->protocol != PROTO_ATA) path->device->inq_flags |= SID_AEN; else path->device->inq_flags &= ~SID_AEN; /* Store result to SIM. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.xport_specific.sata.caps = caps; cts.xport_specific.sata.valid = CTS_SATA_VALID_CAPS; xpt_action((union ccb *)&cts); softc->caps = caps; xpt_async(AC_GETDEV_CHANGED, path, NULL); if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, path, done_ccb); } else { done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_SCSI_AEN, path, done_ccb); } PROBE_SET_ACTION(softc, PROBE_DONE); break; case PROBE_IDENTIFY_SES: case PROBE_IDENTIFY_SAFTE: if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) { /* Check that it is the same device. 
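 * "Same" is judged by comparing the first 53 bytes of the old and
 * new SEP identify data; a mismatch is treated as a device swap
 * and the old device is announced as lost.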
*/ if (bcmp(&softc->ident_data, ident_buf, 53)) { /* Device changed. */ xpt_async(AC_LOST_DEVICE, path, NULL); } else { bcopy(&softc->ident_data, ident_buf, sizeof(struct ata_params)); changed = 0; } } if (changed) { bcopy(&softc->ident_data, ident_buf, sizeof(struct ata_params)); /* Clean up from previous instance of this device */ if (path->device->device_id != NULL) { free(path->device->device_id, M_CAMXPT); path->device->device_id = NULL; path->device->device_id_len = 0; } path->device->device_id = malloc(16, M_CAMXPT, M_NOWAIT); if (path->device->device_id != NULL) { path->device->device_id_len = 16; bcopy(&fake_device_id_hdr, path->device->device_id, 8); bcopy(((uint8_t*)ident_buf) + 2, path->device->device_id + 8, 8); } path->device->flags |= CAM_DEV_IDENTIFY_DATA_VALID; } ata_device_transport(path); if (changed) proberequestdefaultnegotiation(periph); if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, path, done_ccb); } PROBE_SET_ACTION(softc, PROBE_DONE); break; default: panic("probedone: invalid action state 0x%x\n", softc->action); } done: if (softc->restart) { softc->restart = 0; xpt_release_ccb(done_ccb); probeschedule(periph); goto out; } xpt_release_ccb(done_ccb); CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n")); while ((done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs))) { TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe); done_ccb->ccb_h.status = found ? CAM_REQ_CMP : CAM_REQ_CMP_ERR; xpt_done(done_ccb); } /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */ cam_release_devq(path, 0, 0, 0, FALSE); cam_periph_invalidate(periph); cam_periph_release_locked(periph); } static void probecleanup(struct cam_periph *periph) { free(periph->softc, M_CAMXPT); } static void ata_find_quirk(struct cam_ed *device) { struct ata_quirk_entry *quirk; caddr_t match; match = cam_quirkmatch((caddr_t)&device->ident_data, (caddr_t)ata_quirk_table, nitems(ata_quirk_table), sizeof(*ata_quirk_table), ata_identify_match); if (match == NULL) panic("xpt_find_quirk: device didn't match wildcard entry!!"); quirk = (struct ata_quirk_entry *)match; device->quirk = quirk; if (quirk->quirks & CAM_QUIRK_MAXTAGS) { device->mintags = quirk->mintags; device->maxtags = quirk->maxtags; } } typedef struct { union ccb *request_ccb; struct ccb_pathinq *cpi; int counter; } ata_scan_bus_info; /* * To start a scan, request_ccb is an XPT_SCAN_BUS ccb. * As the scan progresses, xpt_scan_bus is used as the * callback on completion function. */ static void ata_scan_bus(struct cam_periph *periph, union ccb *request_ccb) { struct cam_path *path; ata_scan_bus_info *scan_info; union ccb *work_ccb, *reset_ccb; struct mtx *mtx; cam_status status; CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_scan_bus\n")); switch (request_ccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_SCAN_TGT: /* Find out the characteristics of the bus */ work_ccb = xpt_alloc_ccb_nowait(); if (work_ccb == NULL) { request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(request_ccb); return; } xpt_path_inq(&work_ccb->cpi, request_ccb->ccb_h.path); if (work_ccb->ccb_h.status != CAM_REQ_CMP) { request_ccb->ccb_h.status = work_ccb->ccb_h.status; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } /* We may need to reset bus first, if we haven't done it yet. 
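 * The reset is only issued when the bus looks like parallel SCSI
 * (wide or SDTR capable), the SIM does not forbid bus resets via
 * PIM_NOBUSRESET, and last_reset shows no reset has happened yet.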
*/ if ((work_ccb->cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) && !(work_ccb->cpi.hba_misc & PIM_NOBUSRESET) && !timevalisset(&request_ccb->ccb_h.path->bus->last_reset)) { reset_ccb = xpt_alloc_ccb_nowait(); if (reset_ccb == NULL) { request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } xpt_setup_ccb(&reset_ccb->ccb_h, request_ccb->ccb_h.path, CAM_PRIORITY_NONE); reset_ccb->ccb_h.func_code = XPT_RESET_BUS; xpt_action(reset_ccb); if (reset_ccb->ccb_h.status != CAM_REQ_CMP) { request_ccb->ccb_h.status = reset_ccb->ccb_h.status; xpt_free_ccb(reset_ccb); xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } xpt_free_ccb(reset_ccb); } /* Save some state for use while we probe for devices */ scan_info = (ata_scan_bus_info *) malloc(sizeof(ata_scan_bus_info), M_CAMXPT, M_NOWAIT); if (scan_info == NULL) { request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } scan_info->request_ccb = request_ccb; scan_info->cpi = &work_ccb->cpi; /* If PM supported, probe it first. */ if (scan_info->cpi->hba_inquiry & PI_SATAPM) scan_info->counter = scan_info->cpi->max_target; else scan_info->counter = 0; work_ccb = xpt_alloc_ccb_nowait(); if (work_ccb == NULL) { free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(request_ccb); break; } mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path); goto scan_next; case XPT_SCAN_LUN: work_ccb = request_ccb; /* Reuse the same CCB to query if a device was really found */ scan_info = (ata_scan_bus_info *)work_ccb->ccb_h.ppriv_ptr0; mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path); mtx_lock(mtx); /* If there is PMP... */ if ((scan_info->cpi->hba_inquiry & PI_SATAPM) && (scan_info->counter == scan_info->cpi->max_target)) { if (work_ccb->ccb_h.status == CAM_REQ_CMP) { /* everything else will be probed by it */ /* Free the current request path- we're done with it. */ xpt_free_path(work_ccb->ccb_h.path); goto done; } else { struct ccb_trans_settings cts; /* Report SIM that PM is absent. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, work_ccb->ccb_h.path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.xport_specific.sata.pm_present = 0; cts.xport_specific.sata.valid = CTS_SATA_VALID_PM; xpt_action((union ccb *)&cts); } } /* Free the current request path- we're done with it. */ xpt_free_path(work_ccb->ccb_h.path); if (scan_info->counter == ((scan_info->cpi->hba_inquiry & PI_SATAPM) ? 0 : scan_info->cpi->max_target)) { done: mtx_unlock(mtx); xpt_free_ccb(work_ccb); xpt_free_ccb((union ccb *)scan_info->cpi); request_ccb = scan_info->request_ccb; free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(request_ccb); break; } /* Take next device. Wrap from max (PMP) to 0. 
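 * With a port multiplier the scan starts at target max_target (the
 * PMP itself) and then wraps around; e.g. with max_target == 15 the
 * visit order is 15, 0, 1, ..., 14, produced by:
 *
 *	counter = (counter + 1) % (max_target + 1);
 *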
*/ scan_info->counter = (scan_info->counter + 1 ) % (scan_info->cpi->max_target + 1); scan_next: status = xpt_create_path(&path, NULL, scan_info->request_ccb->ccb_h.path_id, scan_info->counter, 0); if (status != CAM_REQ_CMP) { if (request_ccb->ccb_h.func_code == XPT_SCAN_LUN) mtx_unlock(mtx); printf("xpt_scan_bus: xpt_create_path failed" " with status %#x, bus scan halted\n", status); xpt_free_ccb(work_ccb); xpt_free_ccb((union ccb *)scan_info->cpi); request_ccb = scan_info->request_ccb; free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = status; xpt_done(request_ccb); break; } xpt_setup_ccb(&work_ccb->ccb_h, path, scan_info->request_ccb->ccb_h.pinfo.priority); work_ccb->ccb_h.func_code = XPT_SCAN_LUN; work_ccb->ccb_h.cbfcnp = ata_scan_bus; work_ccb->ccb_h.flags |= CAM_UNLOCKED; work_ccb->ccb_h.ppriv_ptr0 = scan_info; work_ccb->crcn.flags = scan_info->request_ccb->crcn.flags; mtx_unlock(mtx); if (request_ccb->ccb_h.func_code == XPT_SCAN_LUN) mtx = NULL; xpt_action(work_ccb); if (mtx != NULL) mtx_lock(mtx); break; default: break; } } static void ata_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *request_ccb) { struct ccb_pathinq cpi; cam_status status; struct cam_path *new_path; struct cam_periph *old_periph; int lock; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_scan_lun\n")); xpt_path_inq(&cpi, path); if (cpi.ccb_h.status != CAM_REQ_CMP) { if (request_ccb != NULL) { request_ccb->ccb_h.status = cpi.ccb_h.status; xpt_done(request_ccb); } return; } if (request_ccb == NULL) { request_ccb = xpt_alloc_ccb_nowait(); if (request_ccb == NULL) { xpt_print(path, "xpt_scan_lun: can't allocate CCB, " "can't continue\n"); return; } status = xpt_create_path(&new_path, NULL, path->bus->path_id, path->target->target_id, path->device->lun_id); if (status != CAM_REQ_CMP) { xpt_print(path, "xpt_scan_lun: can't create path, " "can't continue\n"); xpt_free_ccb(request_ccb); return; } xpt_setup_ccb(&request_ccb->ccb_h, new_path, CAM_PRIORITY_XPT); request_ccb->ccb_h.cbfcnp = xptscandone; request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->crcn.flags = flags; } lock = (xpt_path_owned(path) == 0); if (lock) xpt_path_lock(path); if ((old_periph = cam_periph_find(path, "aprobe")) != NULL) { if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) { probe_softc *softc; softc = (probe_softc *)old_periph->softc; TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); softc->restart = 1; } else { request_ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(request_ccb); } } else { status = cam_periph_alloc(proberegister, NULL, probecleanup, probestart, "aprobe", CAM_PERIPH_BIO, request_ccb->ccb_h.path, NULL, 0, request_ccb); if (status != CAM_REQ_CMP) { xpt_print(path, "xpt_scan_lun: cam_alloc_periph " "returned an error, can't continue probe\n"); request_ccb->ccb_h.status = status; xpt_done(request_ccb); } } if (lock) xpt_path_unlock(path); } static void xptscandone(struct cam_periph *periph, union ccb *done_ccb) { xpt_free_path(done_ccb->ccb_h.path); xpt_free_ccb(done_ccb); } static struct cam_ed * ata_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct ata_quirk_entry *quirk; struct cam_ed *device; device = xpt_alloc_device(bus, target, lun_id); if (device == NULL) return (NULL); /* * Take the default quirk entry until we have inquiry * data and can determine a better quirk to use. 
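 * The wildcard entry is the last one in ata_quirk_table, so the
 * lookup in ata_find_quirk() can never fail. A hypothetical
 * device-specific entry (the model string is invented for
 * illustration) would be placed above the wildcard:
 *
 *	{
 *		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ACME SSD*", "*" },
 *		CAM_QUIRK_MAXTAGS, 2, 32	// quirks, mintags, maxtags
 *	},
 *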
*/ quirk = &ata_quirk_table[nitems(ata_quirk_table) - 1]; device->quirk = (void *)quirk; device->mintags = 0; device->maxtags = 0; bzero(&device->inq_data, sizeof(device->inq_data)); device->inq_flags = 0; device->queue_flags = 0; device->serial_num = NULL; device->serial_num_len = 0; return (device); } static void ata_device_transport(struct cam_path *path) { struct ccb_pathinq cpi; struct ccb_trans_settings cts; struct scsi_inquiry_data *inq_buf = NULL; struct ata_params *ident_buf = NULL; /* Get transport information from the SIM */ xpt_path_inq(&cpi, path); path->device->transport = cpi.transport; if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) inq_buf = &path->device->inq_data; if ((path->device->flags & CAM_DEV_IDENTIFY_DATA_VALID) != 0) ident_buf = &path->device->ident_data; if (path->device->protocol == PROTO_ATA) { path->device->protocol_version = ident_buf ? ata_version(ident_buf->version_major) : cpi.protocol_version; } else if (path->device->protocol == PROTO_SCSI) { path->device->protocol_version = inq_buf ? SID_ANSI_REV(inq_buf) : cpi.protocol_version; } path->device->transport_version = ident_buf ? ata_version(ident_buf->version_major) : cpi.transport_version; /* Tell the controller what we think */ xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.transport = path->device->transport; cts.transport_version = path->device->transport_version; cts.protocol = path->device->protocol; cts.protocol_version = path->device->protocol_version; cts.proto_specific.valid = 0; if (ident_buf) { if (path->device->transport == XPORT_ATA) { cts.xport_specific.ata.atapi = (ident_buf->config == ATA_PROTO_CFA) ? 0 : ((ident_buf->config & ATA_PROTO_MASK) == ATA_PROTO_ATAPI_16) ? 16 : ((ident_buf->config & ATA_PROTO_MASK) == ATA_PROTO_ATAPI_12) ? 12 : 0; cts.xport_specific.ata.valid = CTS_ATA_VALID_ATAPI; } else { cts.xport_specific.sata.atapi = (ident_buf->config == ATA_PROTO_CFA) ? 0 : ((ident_buf->config & ATA_PROTO_MASK) == ATA_PROTO_ATAPI_16) ? 16 : ((ident_buf->config & ATA_PROTO_MASK) == ATA_PROTO_ATAPI_12) ? 
12 : 0; cts.xport_specific.sata.valid = CTS_SATA_VALID_ATAPI; } } else cts.xport_specific.valid = 0; xpt_action((union ccb *)&cts); } static void ata_dev_advinfo(union ccb *start_ccb) { struct cam_ed *device; struct ccb_dev_advinfo *cdai; off_t amt; start_ccb->ccb_h.status = CAM_REQ_INVALID; device = start_ccb->ccb_h.path->device; cdai = &start_ccb->cdai; switch(cdai->buftype) { case CDAI_TYPE_SCSI_DEVID: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->device_id_len; if (device->device_id_len == 0) break; amt = device->device_id_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->device_id, amt); break; case CDAI_TYPE_SERIAL_NUM: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->serial_num_len; if (device->serial_num_len == 0) break; amt = device->serial_num_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->serial_num, amt); break; case CDAI_TYPE_PHYS_PATH: if (cdai->flags & CDAI_FLAG_STORE) { if (device->physpath != NULL) free(device->physpath, M_CAMXPT); device->physpath_len = cdai->bufsiz; /* Clear existing buffer if zero length */ if (cdai->bufsiz == 0) break; device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT); if (device->physpath == NULL) { start_ccb->ccb_h.status = CAM_REQ_ABORTED; return; } memcpy(device->physpath, cdai->buf, cdai->bufsiz); } else { cdai->provsiz = device->physpath_len; if (device->physpath_len == 0) break; amt = device->physpath_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->physpath, amt); } break; default: return; } start_ccb->ccb_h.status = CAM_REQ_CMP; if (cdai->flags & CDAI_FLAG_STORE) { xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path, (void *)(uintptr_t)cdai->buftype); } } static void ata_action(union ccb *start_ccb) { switch (start_ccb->ccb_h.func_code) { case XPT_SET_TRAN_SETTINGS: { ata_set_transfer_settings(&start_ccb->cts, start_ccb->ccb_h.path, /*async_update*/FALSE); break; } case XPT_SCAN_BUS: case XPT_SCAN_TGT: ata_scan_bus(start_ccb->ccb_h.path->periph, start_ccb); break; case XPT_SCAN_LUN: ata_scan_lun(start_ccb->ccb_h.path->periph, start_ccb->ccb_h.path, start_ccb->crcn.flags, start_ccb); break; case XPT_GET_TRAN_SETTINGS: { ata_get_transfer_settings(&start_ccb->cts); break; } case XPT_SCSI_IO: { struct cam_ed *device; u_int maxlen = 0; device = start_ccb->ccb_h.path->device; if (device->protocol == PROTO_SCSI && (device->flags & CAM_DEV_IDENTIFY_DATA_VALID)) { uint16_t p = device->ident_data.config & ATA_PROTO_MASK; maxlen = (device->ident_data.config == ATA_PROTO_CFA) ? 0 : (p == ATA_PROTO_ATAPI_16) ? 16 : (p == ATA_PROTO_ATAPI_12) ? 
12 : 0; } if (start_ccb->csio.cdb_len > maxlen) { start_ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(start_ccb); break; } xpt_action_default(start_ccb); break; } case XPT_DEV_ADVINFO: { ata_dev_advinfo(start_ccb); break; } default: xpt_action_default(start_ccb); break; } } static void ata_get_transfer_settings(struct ccb_trans_settings *cts) { struct ccb_trans_settings_ata *ata; struct ccb_trans_settings_scsi *scsi; struct cam_ed *device; device = cts->ccb_h.path->device; xpt_action_default((union ccb *)cts); if (cts->protocol == PROTO_UNKNOWN || cts->protocol == PROTO_UNSPECIFIED) { cts->protocol = device->protocol; cts->protocol_version = device->protocol_version; } if (cts->protocol == PROTO_ATA) { ata = &cts->proto_specific.ata; if ((ata->valid & CTS_ATA_VALID_TQ) == 0) { ata->valid |= CTS_ATA_VALID_TQ; if (cts->type == CTS_TYPE_USER_SETTINGS || (device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || (device->inq_flags & SID_CmdQue) != 0) ata->flags |= CTS_ATA_FLAGS_TAG_ENB; } } if (cts->protocol == PROTO_SCSI) { scsi = &cts->proto_specific.scsi; if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) { scsi->valid |= CTS_SCSI_VALID_TQ; if (cts->type == CTS_TYPE_USER_SETTINGS || (device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || (device->inq_flags & SID_CmdQue) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } } if (cts->transport == XPORT_UNKNOWN || cts->transport == XPORT_UNSPECIFIED) { cts->transport = device->transport; cts->transport_version = device->transport_version; } } static void ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path, int async_update) { struct ccb_pathinq cpi; struct ccb_trans_settings_ata *ata; struct ccb_trans_settings_scsi *scsi; struct ata_params *ident_data; struct scsi_inquiry_data *inq_data; struct cam_ed *device; if (path == NULL || (device = path->device) == NULL) { cts->ccb_h.status = CAM_PATH_INVALID; xpt_done((union ccb *)cts); return; } if (cts->protocol == PROTO_UNKNOWN || cts->protocol == PROTO_UNSPECIFIED) { cts->protocol = device->protocol; cts->protocol_version = device->protocol_version; } if (cts->protocol_version == PROTO_VERSION_UNKNOWN || cts->protocol_version == PROTO_VERSION_UNSPECIFIED) cts->protocol_version = device->protocol_version; if (cts->protocol != device->protocol) { xpt_print(path, "Uninitialized Protocol %x:%x?\n", cts->protocol, device->protocol); cts->protocol = device->protocol; } if (cts->protocol_version > device->protocol_version) { if (bootverbose) { xpt_print(path, "Down reving Protocol " "Version from %d to %d?\n", cts->protocol_version, device->protocol_version); } cts->protocol_version = device->protocol_version; } if (cts->transport == XPORT_UNKNOWN || cts->transport == XPORT_UNSPECIFIED) { cts->transport = device->transport; cts->transport_version = device->transport_version; } if (cts->transport_version == XPORT_VERSION_UNKNOWN || cts->transport_version == XPORT_VERSION_UNSPECIFIED) cts->transport_version = device->transport_version; if (cts->transport != device->transport) { xpt_print(path, "Uninitialized Transport %x:%x?\n", cts->transport, device->transport); cts->transport = device->transport; } if (cts->transport_version > device->transport_version) { if (bootverbose) { xpt_print(path, "Down reving Transport " "Version from %d to %d?\n", cts->transport_version, device->transport_version); } cts->transport_version = device->transport_version; } ident_data = &device->ident_data; inq_data = &device->inq_data; if (cts->protocol == PROTO_ATA) ata = &cts->proto_specific.ata; else ata = NULL; if 
(cts->protocol == PROTO_SCSI) scsi = &cts->proto_specific.scsi; else scsi = NULL; xpt_path_inq(&cpi, path); /* Sanity checking */ if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0 || (ata && (ident_data->satacapabilities & ATA_SUPPORT_NCQ) == 0) || (scsi && (INQ_DATA_TQ_ENABLED(inq_data)) == 0) || (device->queue_flags & SCP_QUEUE_DQUE) != 0 || (device->mintags == 0)) { /* * Can't tag on hardware that doesn't support tags, * doesn't have it enabled, or has broken tag support. */ if (ata) ata->flags &= ~CTS_ATA_FLAGS_TAG_ENB; if (scsi) scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } /* Start/stop tags use. */ if (cts->type == CTS_TYPE_CURRENT_SETTINGS && ((ata && (ata->valid & CTS_ATA_VALID_TQ) != 0) || (scsi && (scsi->valid & CTS_SCSI_VALID_TQ) != 0))) { int nowt, newt = 0; nowt = ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || (device->inq_flags & SID_CmdQue) != 0); if (ata) newt = (ata->flags & CTS_ATA_FLAGS_TAG_ENB) != 0; if (scsi) newt = (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0; if (newt && !nowt) { /* * Delay change to use tags until after a * few commands have gone to this device so * the controller has time to perform transfer * negotiations without tagged messages getting * in the way. */ device->tag_delay_count = CAM_TAG_DELAY_COUNT; device->flags |= CAM_DEV_TAG_AFTER_COUNT; } else if (nowt && !newt) xpt_stop_tags(path); } if (async_update == FALSE) xpt_action_default((union ccb *)cts); } /* * Handle any per-device event notifications that require action by the XPT. */ static void ata_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg) { cam_status status; struct cam_path newpath; /* * We only need to handle events for real devices. */ if (target->target_id == CAM_TARGET_WILDCARD || device->lun_id == CAM_LUN_WILDCARD) return; /* * We need our own path with wildcards expanded to * handle certain types of events. */ if ((async_code == AC_SENT_BDR) || (async_code == AC_BUS_RESET) || (async_code == AC_INQ_CHANGED)) status = xpt_compile_path(&newpath, NULL, bus->path_id, target->target_id, device->lun_id); else status = CAM_REQ_CMP_ERR; if (status == CAM_REQ_CMP) { if (async_code == AC_INQ_CHANGED) { /* * We've sent a start unit command, or * something similar to a device that * may have caused its inquiry data to * change. So we re-scan the device to * refresh the inquiry data for it. */ ata_scan_lun(newpath.periph, &newpath, CAM_EXPECT_INQ_CHANGE, NULL); } else { /* We need to reinitialize device after reset. 
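 * AC_SENT_BDR and AC_BUS_RESET land here: device state is unknown
 * after a reset, so a plain rescan (flags == 0) re-runs the probe
 * state machine for the device.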
*/ ata_scan_lun(newpath.periph, &newpath, 0, NULL); } xpt_release_path(&newpath); } else if (async_code == AC_LOST_DEVICE && (device->flags & CAM_DEV_UNCONFIGURED) == 0) { device->flags |= CAM_DEV_UNCONFIGURED; xpt_release_device(device); } else if (async_code == AC_TRANSFER_NEG) { struct ccb_trans_settings *settings; struct cam_path path; settings = (struct ccb_trans_settings *)async_arg; xpt_compile_path(&path, NULL, bus->path_id, target->target_id, device->lun_id); ata_set_transfer_settings(settings, &path, /*async_update*/TRUE); xpt_release_path(&path); } } static void _ata_announce_periph(struct cam_periph *periph, struct ccb_trans_settings *cts, u_int *speed) { struct ccb_pathinq cpi; struct cam_path *path = periph->path; cam_periph_assert(periph, MA_OWNED); xpt_setup_ccb(&cts->ccb_h, path, CAM_PRIORITY_NORMAL); cts->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts->type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb*)cts); if ((cts->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) return; /* Ask the SIM for its base transfer speed */ xpt_path_inq(&cpi, path); /* Report connection speed */ *speed = cpi.base_transfer_speed; if (cts->transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &cts->xport_specific.ata; if (pata->valid & CTS_ATA_VALID_MODE) *speed = ata_mode2speed(pata->mode); } if (cts->transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &cts->xport_specific.sata; if (sata->valid & CTS_SATA_VALID_REVISION) *speed = ata_revision2speed(sata->revision); } } static void ata_announce_periph(struct cam_periph *periph) { struct ccb_trans_settings cts; u_int speed, mb; _ata_announce_periph(periph, &cts, &speed); if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) return; mb = speed / 1000; if (mb > 0) printf("%s%d: %d.%03dMB/s transfers", periph->periph_name, periph->unit_number, mb, speed % 1000); else printf("%s%d: %dKB/s transfers", periph->periph_name, periph->unit_number, speed); /* Report additional information about connection */ if (cts.transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &cts.xport_specific.ata; printf(" ("); if (pata->valid & CTS_ATA_VALID_MODE) printf("%s, ", ata_mode2string(pata->mode)); if ((pata->valid & CTS_ATA_VALID_ATAPI) && pata->atapi != 0) printf("ATAPI %dbytes, ", pata->atapi); if (pata->valid & CTS_ATA_VALID_BYTECOUNT) printf("PIO %dbytes", pata->bytecount); printf(")"); } if (cts.transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &cts.xport_specific.sata; printf(" ("); if (sata->valid & CTS_SATA_VALID_REVISION) printf("SATA %d.x, ", sata->revision); else printf("SATA, "); if (sata->valid & CTS_SATA_VALID_MODE) printf("%s, ", ata_mode2string(sata->mode)); if ((sata->valid & CTS_ATA_VALID_ATAPI) && sata->atapi != 0) printf("ATAPI %dbytes, ", sata->atapi); if (sata->valid & CTS_SATA_VALID_BYTECOUNT) printf("PIO %dbytes", sata->bytecount); printf(")"); } printf("\n"); } static void ata_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb) { struct ccb_trans_settings cts; u_int speed, mb; _ata_announce_periph(periph, &cts, &speed); if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) return; mb = speed / 1000; if (mb > 0) sbuf_printf(sb, "%s%d: %d.%03dMB/s transfers", periph->periph_name, periph->unit_number, mb, speed % 1000); else sbuf_printf(sb, "%s%d: %dKB/s transfers", periph->periph_name, periph->unit_number, speed); /* Report additional information about connection */ if (cts.transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &cts.xport_specific.ata; 
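		/*
		 * From here on this mirrors ata_announce_periph() above,
		 * but accumulates into the caller-supplied sbuf instead
		 * of printing directly to the console.
		 */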
sbuf_printf(sb, " ("); if (pata->valid & CTS_ATA_VALID_MODE) sbuf_printf(sb, "%s, ", ata_mode2string(pata->mode)); if ((pata->valid & CTS_ATA_VALID_ATAPI) && pata->atapi != 0) sbuf_printf(sb, "ATAPI %dbytes, ", pata->atapi); if (pata->valid & CTS_ATA_VALID_BYTECOUNT) sbuf_printf(sb, "PIO %dbytes", pata->bytecount); sbuf_printf(sb, ")"); } if (cts.transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &cts.xport_specific.sata; sbuf_printf(sb, " ("); if (sata->valid & CTS_SATA_VALID_REVISION) sbuf_printf(sb, "SATA %d.x, ", sata->revision); else sbuf_printf(sb, "SATA, "); if (sata->valid & CTS_SATA_VALID_MODE) sbuf_printf(sb, "%s, ", ata_mode2string(sata->mode)); if ((sata->valid & CTS_ATA_VALID_ATAPI) && sata->atapi != 0) sbuf_printf(sb, "ATAPI %dbytes, ", sata->atapi); if (sata->valid & CTS_SATA_VALID_BYTECOUNT) sbuf_printf(sb, "PIO %dbytes", sata->bytecount); sbuf_printf(sb, ")"); } sbuf_printf(sb, "\n"); } static void ata_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb) { ata_print_ident_sbuf(&device->ident_data, sb); } static void ata_proto_announce(struct cam_ed *device) { ata_print_ident(&device->ident_data); } static void ata_proto_denounce(struct cam_ed *device) { ata_print_ident_short(&device->ident_data); } static void ata_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb) { ata_print_ident_short_sbuf(&device->ident_data, sb); } static void semb_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb) { semb_print_ident_sbuf((struct sep_identify_data *)&device->ident_data, sb); } static void semb_proto_announce(struct cam_ed *device) { semb_print_ident((struct sep_identify_data *)&device->ident_data); } static void semb_proto_denounce(struct cam_ed *device) { semb_print_ident_short((struct sep_identify_data *)&device->ident_data); } static void semb_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb) { semb_print_ident_short_sbuf((struct sep_identify_data *)&device->ident_data, sb); } static void ata_proto_debug_out(union ccb *ccb) { char cdb_str[(sizeof(struct ata_cmd) * 3) + 1]; if (ccb->ccb_h.func_code != XPT_ATA_IO) return; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_CDB,("%s. ACB: %s\n", ata_op_string(&ccb->ataio.cmd), ata_cmd_string(&ccb->ataio.cmd, cdb_str, sizeof(cdb_str)))); } Index: head/sys/cam/cam_periph.c =================================================================== --- head/sys/cam/cam_periph.c (revision 328917) +++ head/sys/cam/cam_periph.c (revision 328918) @@ -1,2065 +1,2065 @@ /*- * Common functions for CAM "type" (peripheral) drivers. * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, 1998 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static u_int camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired, path_id_t pathid, target_id_t target, lun_id_t lun); static u_int camperiphunit(struct periph_driver *p_drv, path_id_t pathid, target_id_t target, lun_id_t lun); static void camperiphdone(struct cam_periph *periph, union ccb *done_ccb); static void camperiphfree(struct cam_periph *periph); static int camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb, cam_flags camflags, u_int32_t sense_flags, int *openings, u_int32_t *relsim_flags, u_int32_t *timeout, u_int32_t *action, const char **action_string); static int camperiphscsisenseerror(union ccb *ccb, union ccb **orig_ccb, cam_flags camflags, u_int32_t sense_flags, int *openings, u_int32_t *relsim_flags, u_int32_t *timeout, u_int32_t *action, const char **action_string); static void cam_periph_devctl_notify(union ccb *ccb); static int nperiph_drivers; static int initialized = 0; struct periph_driver **periph_drivers; static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers"); static int periph_selto_delay = 1000; TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay); static int periph_noresrc_delay = 500; TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay); static int periph_busy_delay = 500; TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay); void periphdriver_register(void *data) { struct periph_driver *drv = (struct periph_driver *)data; struct periph_driver **newdrivers, **old; int ndrivers; again: ndrivers = nperiph_drivers + 2; newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH, M_WAITOK); xpt_lock_buses(); if (ndrivers != nperiph_drivers + 2) { /* * Lost race against itself; go around. */ xpt_unlock_buses(); free(newdrivers, M_CAMPERIPH); goto again; } if (periph_drivers) bcopy(periph_drivers, newdrivers, sizeof(*newdrivers) * nperiph_drivers); newdrivers[nperiph_drivers] = drv; newdrivers[nperiph_drivers + 1] = NULL; old = periph_drivers; periph_drivers = newdrivers; nperiph_drivers++; xpt_unlock_buses(); if (old) free(old, M_CAMPERIPH); /* If driver marked as early or it is late now, initialize it. */ if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) || initialized > 1) (*drv->init)(); } int periphdriver_unregister(void *data) { struct periph_driver *drv = (struct periph_driver *)data; int error, n; /* If driver marked as early or it is late now, deinitialize it. 
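 * Initialization is two-level: periphdriver_init(1) runs drivers
 * flagged CAM_PERIPH_DRV_EARLY, periphdriver_init(2) runs the rest,
 * and `initialized' records the highest level reached so drivers
 * registered later are initialized immediately. The same test, in
 * the same form, gates deinitialization here.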
*/ if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) || initialized > 1) { if (drv->deinit == NULL) { printf("CAM periph driver '%s' doesn't have deinit.\n", drv->driver_name); return (EOPNOTSUPP); } error = drv->deinit(); if (error != 0) return (error); } xpt_lock_buses(); for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++) ; KASSERT(n < nperiph_drivers, ("Periph driver '%s' was not registered", drv->driver_name)); for (; n + 1 < nperiph_drivers; n++) periph_drivers[n] = periph_drivers[n + 1]; periph_drivers[n + 1] = NULL; nperiph_drivers--; xpt_unlock_buses(); return (0); } void periphdriver_init(int level) { int i, early; initialized = max(initialized, level); for (i = 0; periph_drivers[i] != NULL; i++) { early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2; if (early == initialized) (*periph_drivers[i]->init)(); } } cam_status cam_periph_alloc(periph_ctor_t *periph_ctor, periph_oninv_t *periph_oninvalidate, periph_dtor_t *periph_dtor, periph_start_t *periph_start, char *name, cam_periph_type type, struct cam_path *path, ac_callback_t *ac_callback, ac_code code, void *arg) { struct periph_driver **p_drv; struct cam_sim *sim; struct cam_periph *periph; struct cam_periph *cur_periph; path_id_t path_id; target_id_t target_id; lun_id_t lun_id; cam_status status; u_int init_level; init_level = 0; /* * Handle Hot-Plug scenarios. If there is already a peripheral * of our type assigned to this path, we are likely waiting for * final close on an old, invalidated, peripheral. If this is * the case, queue up a deferred call to the peripheral's async * handler. If it looks like a mistaken re-allocation, complain. */ if ((periph = cam_periph_find(path, name)) != NULL) { if ((periph->flags & CAM_PERIPH_INVALID) != 0 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) { periph->flags |= CAM_PERIPH_NEW_DEV_FOUND; periph->deferred_callback = ac_callback; periph->deferred_ac = code; return (CAM_REQ_INPROG); } else { printf("cam_periph_alloc: attempt to re-allocate " "valid device %s%d rejected flags %#x " "refcount %d\n", periph->periph_name, periph->unit_number, periph->flags, periph->refcount); } return (CAM_REQ_INVALID); } periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH, M_NOWAIT|M_ZERO); if (periph == NULL) return (CAM_RESRC_UNAVAIL); init_level++; sim = xpt_path_sim(path); path_id = xpt_path_path_id(path); target_id = xpt_path_target_id(path); lun_id = xpt_path_lun_id(path); periph->periph_start = periph_start; periph->periph_dtor = periph_dtor; periph->periph_oninval = periph_oninvalidate; periph->type = type; periph->periph_name = name; periph->scheduled_priority = CAM_PRIORITY_NONE; periph->immediate_priority = CAM_PRIORITY_NONE; periph->refcount = 1; /* Dropped by invalidation. 
*/ periph->sim = sim; SLIST_INIT(&periph->ccb_list); status = xpt_create_path(&path, periph, path_id, target_id, lun_id); if (status != CAM_REQ_CMP) goto failure; periph->path = path; xpt_lock_buses(); for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { if (strcmp((*p_drv)->driver_name, name) == 0) break; } if (*p_drv == NULL) { printf("cam_periph_alloc: invalid periph name '%s'\n", name); xpt_unlock_buses(); xpt_free_path(periph->path); free(periph, M_CAMPERIPH); return (CAM_REQ_INVALID); } periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id); cur_periph = TAILQ_FIRST(&(*p_drv)->units); while (cur_periph != NULL && cur_periph->unit_number < periph->unit_number) cur_periph = TAILQ_NEXT(cur_periph, unit_links); if (cur_periph != NULL) { KASSERT(cur_periph->unit_number != periph->unit_number, ("duplicate units on periph list")); TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links); } else { TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links); (*p_drv)->generation++; } xpt_unlock_buses(); init_level++; status = xpt_add_periph(periph); if (status != CAM_REQ_CMP) goto failure; init_level++; CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n")); status = periph_ctor(periph, arg); if (status == CAM_REQ_CMP) init_level++; failure: switch (init_level) { case 4: /* Initialized successfully */ break; case 3: CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n")); xpt_remove_periph(periph); /* FALLTHROUGH */ case 2: xpt_lock_buses(); TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links); xpt_unlock_buses(); xpt_free_path(periph->path); /* FALLTHROUGH */ case 1: free(periph, M_CAMPERIPH); /* FALLTHROUGH */ case 0: /* No cleanup to perform. */ break; default: panic("%s: Unknown init level", __func__); } return(status); } /* * Find a peripheral structure with the specified path, target, lun, * and (optionally) type. If the name is NULL, this function will return * the first peripheral driver that matches the specified path. */ struct cam_periph * cam_periph_find(struct cam_path *path, char *name) { struct periph_driver **p_drv; struct cam_periph *periph; xpt_lock_buses(); for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0)) continue; TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) { if (xpt_path_comp(periph->path, path) == 0) { xpt_unlock_buses(); cam_periph_assert(periph, MA_OWNED); return(periph); } } if (name != NULL) { xpt_unlock_buses(); return(NULL); } } xpt_unlock_buses(); return(NULL); } /* * Find peripheral driver instances attached to the specified path. 
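 */

/*
 * [Editor's illustrative sketch, not part of this change.]  A periph
 * driver normally calls cam_periph_alloc() from its async callback when
 * AC_FOUND_DEVICE fires; the CAM_REQ_INPROG result covers the hot-plug
 * case described above.  A hypothetical "xx" driver would do roughly:
 *
 *	status = cam_periph_alloc(xxregister, xxoninvalidate, xxcleanup,
 *	    xxstart, "xx", CAM_PERIPH_BIO, path, xxasync,
 *	    AC_FOUND_DEVICE, cgd);
 *	if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
 *		printf("xxasync: unable to attach, status %#x\n", status);
 */

/*
 * cam_periph_list(), below, reports those attached instances into an sbuf.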
*/ int cam_periph_list(struct cam_path *path, struct sbuf *sb) { struct sbuf local_sb; struct periph_driver **p_drv; struct cam_periph *periph; int count; int sbuf_alloc_len; sbuf_alloc_len = 16; retry: sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN); count = 0; xpt_lock_buses(); for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) { if (xpt_path_comp(periph->path, path) != 0) continue; if (sbuf_len(&local_sb) != 0) sbuf_cat(&local_sb, ","); sbuf_printf(&local_sb, "%s%d", periph->periph_name, periph->unit_number); if (sbuf_error(&local_sb) == ENOMEM) { sbuf_alloc_len *= 2; xpt_unlock_buses(); sbuf_delete(&local_sb); goto retry; } count++; } } xpt_unlock_buses(); sbuf_finish(&local_sb); sbuf_cpy(sb, sbuf_data(&local_sb)); sbuf_delete(&local_sb); return (count); } -cam_status +int cam_periph_acquire(struct cam_periph *periph) { - cam_status status; + int status; - status = CAM_REQ_CMP_ERR; if (periph == NULL) - return (status); + return (EINVAL); + status = ENOENT; xpt_lock_buses(); if ((periph->flags & CAM_PERIPH_INVALID) == 0) { periph->refcount++; - status = CAM_REQ_CMP; + status = 0; } xpt_unlock_buses(); return (status); } void cam_periph_doacquire(struct cam_periph *periph) { xpt_lock_buses(); KASSERT(periph->refcount >= 1, ("cam_periph_doacquire() with refcount == %d", periph->refcount)); periph->refcount++; xpt_unlock_buses(); } void cam_periph_release_locked_buses(struct cam_periph *periph) { cam_periph_assert(periph, MA_OWNED); KASSERT(periph->refcount >= 1, ("periph->refcount >= 1")); if (--periph->refcount == 0) camperiphfree(periph); } void cam_periph_release_locked(struct cam_periph *periph) { if (periph == NULL) return; xpt_lock_buses(); cam_periph_release_locked_buses(periph); xpt_unlock_buses(); } void cam_periph_release(struct cam_periph *periph) { struct mtx *mtx; if (periph == NULL) return; cam_periph_assert(periph, MA_NOTOWNED); mtx = cam_periph_mtx(periph); mtx_lock(mtx); cam_periph_release_locked(periph); mtx_unlock(mtx); } int cam_periph_hold(struct cam_periph *periph, int priority) { int error; /* * Increment the reference count on the peripheral * while we wait for our lock attempt to succeed * to ensure the peripheral doesn't disappear out * from under us while we sleep. */ - if (cam_periph_acquire(periph) != CAM_REQ_CMP) + if (cam_periph_acquire(periph) != 0) return (ENXIO); cam_periph_assert(periph, MA_OWNED); while ((periph->flags & CAM_PERIPH_LOCKED) != 0) { periph->flags |= CAM_PERIPH_LOCK_WANTED; if ((error = cam_periph_sleep(periph, periph, priority, "caplck", 0)) != 0) { cam_periph_release_locked(periph); return (error); } if (periph->flags & CAM_PERIPH_INVALID) { cam_periph_release_locked(periph); return (ENXIO); } } periph->flags |= CAM_PERIPH_LOCKED; return (0); } void cam_periph_unhold(struct cam_periph *periph) { cam_periph_assert(periph, MA_OWNED); periph->flags &= ~CAM_PERIPH_LOCKED; if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) { periph->flags &= ~CAM_PERIPH_LOCK_WANTED; wakeup(periph); } cam_periph_release_locked(periph); } /* * Look for the next unit number that is not currently in use for this * peripheral type starting at "newunit". Also exclude unit numbers that * are reserved for future "hardwiring" unless we already know that this * is a potential wired device. Only assume that the device is "wired" the * first time through the loop since after that we'll be looking at unit * numbers that did not match a wiring entry.
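 */

/*
 * [Editor's illustrative sketch, not part of this change.]  With
 * cam_periph_acquire() now returning an errno value instead of a
 * cam_status, callers compare against 0.  A hypothetical "xx" driver's
 * open routine (modeled on the da(4)/ada(4) pattern) would look like:
 */
#if 0
static int
xxopen(struct disk *dp)
{
	struct cam_periph *periph;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (cam_periph_acquire(periph) != 0)	/* was != CAM_REQ_CMP */
		return (ENXIO);

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}
	/* ... driver-specific open work goes here ... */
	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	return (0);
}
#endif

/*
 * camperiphnextunit(), below, implements the wired-unit search described
 * above.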
*/ static u_int camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired, path_id_t pathid, target_id_t target, lun_id_t lun) { struct cam_periph *periph; char *periph_name; int i, val, dunit, r; const char *dname, *strval; periph_name = p_drv->driver_name; for (;;newunit++) { for (periph = TAILQ_FIRST(&p_drv->units); periph != NULL && periph->unit_number != newunit; periph = TAILQ_NEXT(periph, unit_links)) ; if (periph != NULL && periph->unit_number == newunit) { if (wired != 0) { xpt_print(periph->path, "Duplicate Wired " "Device entry!\n"); xpt_print(periph->path, "Second device (%s " "device at scbus%d target %d lun %d) will " "not be wired\n", periph_name, pathid, target, lun); wired = 0; } continue; } if (wired) break; /* * Don't match entries like "da 4" as a wired down * device, but do match entries like "da 4 target 5" * or even "da 4 scbus 1". */ i = 0; dname = periph_name; for (;;) { r = resource_find_dev(&i, dname, &dunit, NULL, NULL); if (r != 0) break; /* if no "target" and no specific scbus, skip */ if (resource_int_value(dname, dunit, "target", &val) && (resource_string_value(dname, dunit, "at",&strval)|| strcmp(strval, "scbus") == 0)) continue; if (newunit == dunit) break; } if (r != 0) break; } return (newunit); } static u_int camperiphunit(struct periph_driver *p_drv, path_id_t pathid, target_id_t target, lun_id_t lun) { u_int unit; int wired, i, val, dunit; const char *dname, *strval; char pathbuf[32], *periph_name; periph_name = p_drv->driver_name; snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid); unit = 0; i = 0; dname = periph_name; for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0; wired = 0) { if (resource_string_value(dname, dunit, "at", &strval) == 0) { if (strcmp(strval, pathbuf) != 0) continue; wired++; } if (resource_int_value(dname, dunit, "target", &val) == 0) { if (val != target) continue; wired++; } if (resource_int_value(dname, dunit, "lun", &val) == 0) { if (val != lun) continue; wired++; } if (wired != 0) { unit = dunit; break; } } /* * Either start from 0 looking for the next unit or from * the unit number given in the resource config. This way, * if we have wildcard matches, we don't return the same * unit number twice. */ unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun); return (unit); } void cam_periph_invalidate(struct cam_periph *periph) { cam_periph_assert(periph, MA_OWNED); /* * We only call this routine the first time a peripheral is * invalidated. 
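 * The self-reference taken in cam_periph_alloc() ("Dropped by
 * invalidation" above) is released at the end of this routine, which
 * may trigger camperiphfree() once all other references drain.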
*/ if ((periph->flags & CAM_PERIPH_INVALID) != 0) return; CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n")); if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) { struct sbuf sb; char buffer[160]; sbuf_new(&sb, buffer, 160, SBUF_FIXEDLEN); xpt_denounce_periph_sbuf(periph, &sb); sbuf_finish(&sb); sbuf_putbuf(&sb); } periph->flags |= CAM_PERIPH_INVALID; periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND; if (periph->periph_oninval != NULL) periph->periph_oninval(periph); cam_periph_release_locked(periph); } static void camperiphfree(struct cam_periph *periph) { struct periph_driver **p_drv; struct periph_driver *drv; cam_periph_assert(periph, MA_OWNED); KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating", periph->periph_name, periph->unit_number)); for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0) break; } if (*p_drv == NULL) { printf("camperiphfree: attempt to free non-existent periph\n"); return; } /* * Cache a pointer to the periph_driver structure. If a * periph_driver is added or removed from the array (see * periphdriver_register()) while we drop the topology lock * below, p_drv may change. This doesn't protect against this * particular periph_driver going away. That will require full * reference counting in the periph_driver infrastructure. */ drv = *p_drv; /* * We need to set this flag before dropping the topology lock, to * let anyone who is traversing the list know that this peripheral is * about to be freed, and that there will be no more reference count * checks. */ periph->flags |= CAM_PERIPH_FREE; /* * The peripheral destructor semantics dictate calling with only the * SIM mutex held. Since it might sleep, it should not be called * with the topology lock held. */ xpt_unlock_buses(); /* * We need to call the peripheral destructor prior to removing the * peripheral from the list. Otherwise, we risk running into a * scenario where the peripheral unit number may get reused * (because it has been removed from the list), but some resources * used by the peripheral are still hanging around. In particular, * the devfs nodes used by some peripherals like the pass(4) driver * aren't fully cleaned up until the destructor is run. If the * unit number is reused before the devfs instance is fully gone, * devfs will panic. */ if (periph->periph_dtor != NULL) periph->periph_dtor(periph); /* * The peripheral list is protected by the topology lock. */ xpt_lock_buses(); TAILQ_REMOVE(&drv->units, periph, unit_links); drv->generation++; xpt_remove_periph(periph); xpt_unlock_buses(); if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) xpt_print(periph->path, "Periph destroyed\n"); else CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n")); if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) { union ccb ccb; void *arg; switch (periph->deferred_ac) { case AC_FOUND_DEVICE: ccb.ccb_h.func_code = XPT_GDEV_TYPE; xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); xpt_action(&ccb); arg = &ccb; break; case AC_PATH_REGISTERED: xpt_path_inq(&ccb.cpi, periph->path); arg = &ccb; break; default: arg = NULL; break; } periph->deferred_callback(NULL, periph->deferred_ac, periph->path, arg); } xpt_free_path(periph->path); free(periph, M_CAMPERIPH); xpt_lock_buses(); } /* * Map user virtual pointers into kernel virtual address space, so we can * access the memory. This is now a generic function that centralizes most * of the sanity checks on the data flags, if any.
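 */

/*
 * [Editor's illustrative sketch, not part of this change.]  A caller
 * such as a pass(4)-style ioctl handler brackets cam_periph_runccb()
 * with the map/unmap pair; names prefixed "xx" are hypothetical:
 */
#if 0
static int
xxrunccb(struct cam_periph *periph, union ccb *ccb)
{
	struct cam_periph_map_info mapinfo;
	int error;

	bzero(&mapinfo, sizeof(mapinfo));
	/* Map any user buffers referenced by the CCB (done unlocked). */
	error = cam_periph_mapmem(ccb, &mapinfo, /*maxmap*/ 0);
	if (error != 0)
		return (error);

	cam_periph_lock(periph);
	error = cam_periph_runccb(ccb, NULL, CAM_RETRY_SELTO, SF_RETRY_UA,
	    /*devstat*/ NULL);
	cam_periph_unlock(periph);

	/* Restore the user pointers and release the mapping buffers. */
	cam_periph_unmapmem(ccb, &mapinfo);
	return (error);
}
#endif

/*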
* This also only works for up to MAXPHYS memory. Since we use * buffers to map stuff in and out, we're limited to the buffer size. */ int cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo, u_int maxmap) { int numbufs, i, j; int flags[CAM_PERIPH_MAXMAPS]; u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; u_int32_t lengths[CAM_PERIPH_MAXMAPS]; u_int32_t dirs[CAM_PERIPH_MAXMAPS]; if (maxmap == 0) maxmap = DFLTPHYS; /* traditional default */ else if (maxmap > MAXPHYS) maxmap = MAXPHYS; /* for safety */ switch(ccb->ccb_h.func_code) { case XPT_DEV_MATCH: if (ccb->cdm.match_buf_len == 0) { printf("cam_periph_mapmem: invalid match buffer " "length 0\n"); return(EINVAL); } if (ccb->cdm.pattern_buf_len > 0) { data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; lengths[0] = ccb->cdm.pattern_buf_len; dirs[0] = CAM_DIR_OUT; data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; lengths[1] = ccb->cdm.match_buf_len; dirs[1] = CAM_DIR_IN; numbufs = 2; } else { data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; lengths[0] = ccb->cdm.match_buf_len; dirs[0] = CAM_DIR_IN; numbufs = 1; } /* * This request will not go to the hardware, no reason * to be so strict. vmapbuf() is able to map up to MAXPHYS. */ maxmap = MAXPHYS; break; case XPT_SCSI_IO: case XPT_CONT_TARGET_IO: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return(0); if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) return (EINVAL); data_ptrs[0] = &ccb->csio.data_ptr; lengths[0] = ccb->csio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; numbufs = 1; break; case XPT_ATA_IO: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return(0); if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) return (EINVAL); data_ptrs[0] = &ccb->ataio.data_ptr; lengths[0] = ccb->ataio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; numbufs = 1; break; case XPT_MMC_IO: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return(0); /* Two mappings: one for cmd->data and one for cmd->data->data */ data_ptrs[0] = (unsigned char **)&ccb->mmcio.cmd.data; lengths[0] = sizeof(struct mmc_data *); dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; data_ptrs[1] = (unsigned char **)&ccb->mmcio.cmd.data->data; lengths[1] = ccb->mmcio.cmd.data->len; dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK; numbufs = 2; break; case XPT_SMP_IO: data_ptrs[0] = &ccb->smpio.smp_request; lengths[0] = ccb->smpio.smp_request_len; dirs[0] = CAM_DIR_OUT; data_ptrs[1] = &ccb->smpio.smp_response; lengths[1] = ccb->smpio.smp_response_len; dirs[1] = CAM_DIR_IN; numbufs = 2; break; case XPT_NVME_IO: case XPT_NVME_ADMIN: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return (0); if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) return (EINVAL); data_ptrs[0] = &ccb->nvmeio.data_ptr; lengths[0] = ccb->nvmeio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; numbufs = 1; break; case XPT_DEV_ADVINFO: if (ccb->cdai.bufsiz == 0) return (0); data_ptrs[0] = (uint8_t **)&ccb->cdai.buf; lengths[0] = ccb->cdai.bufsiz; dirs[0] = CAM_DIR_IN; numbufs = 1; /* * This request will not go to the hardware, no reason * to be so strict. vmapbuf() is able to map up to MAXPHYS. */ maxmap = MAXPHYS; break; default: return(EINVAL); break; /* NOTREACHED */ } /* * Check the transfer length and permissions first, so we don't * have to unmap any previously mapped buffers. */ for (i = 0; i < numbufs; i++) { flags[i] = 0; /* * The userland data pointer passed in may not be page * aligned. 
vmapbuf() truncates the address to a page * boundary, so if the address isn't page aligned, we'll * need enough space for the given transfer length, plus * whatever extra space is necessary to make it to the page * boundary. */ if ((lengths[i] + (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap){ printf("cam_periph_mapmem: attempt to map %lu bytes, " "which is greater than %lu\n", (long)(lengths[i] + (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)), (u_long)maxmap); return(E2BIG); } if (dirs[i] & CAM_DIR_OUT) { flags[i] = BIO_WRITE; } if (dirs[i] & CAM_DIR_IN) { flags[i] = BIO_READ; } } /* * This keeps the kernel stack of current thread from getting * swapped. In low-memory situations where the kernel stack might * otherwise get swapped out, this holds it and allows the thread * to make progress and release the kernel mapped pages sooner. * * XXX KDM should I use P_NOSWAP instead? */ PHOLD(curproc); for (i = 0; i < numbufs; i++) { /* * Get the buffer. */ mapinfo->bp[i] = getpbuf(NULL); /* put our pointer in the data slot */ mapinfo->bp[i]->b_data = *data_ptrs[i]; /* save the user's data address */ mapinfo->bp[i]->b_caller1 = *data_ptrs[i]; /* set the transfer length, we know it's < MAXPHYS */ mapinfo->bp[i]->b_bufsize = lengths[i]; /* set the direction */ mapinfo->bp[i]->b_iocmd = flags[i]; /* * Map the buffer into kernel memory. * * Note that useracc() alone is not a sufficient test. * vmapbuf() can still fail due to a smaller file mapped * into a larger area of VM, or if userland races against * vmapbuf() after the useracc() check. */ if (vmapbuf(mapinfo->bp[i], 1) < 0) { for (j = 0; j < i; ++j) { *data_ptrs[j] = mapinfo->bp[j]->b_caller1; vunmapbuf(mapinfo->bp[j]); relpbuf(mapinfo->bp[j], NULL); } relpbuf(mapinfo->bp[i], NULL); PRELE(curproc); return(EACCES); } /* set our pointer to the new mapped area */ *data_ptrs[i] = mapinfo->bp[i]->b_data; mapinfo->num_bufs_used++; } /* * Now that we've gotten this far, change ownership to the kernel * of the buffers so that we don't run afoul of returning to user * space with locks (on the buffer) held. */ for (i = 0; i < numbufs; i++) { BUF_KERNPROC(mapinfo->bp[i]); } return(0); } /* * Unmap memory segments mapped into kernel virtual address space by * cam_periph_mapmem(). */ void cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo) { int numbufs, i; u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; if (mapinfo->num_bufs_used <= 0) { /* nothing to free and the process wasn't held. 
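 * (cam_periph_mapmem() takes its PHOLD() only after the sanity checks
 * pass, and drops it itself on any mapping failure, so a zero
 * num_bufs_used means there is no hold to undo here.)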
*/ return; } switch (ccb->ccb_h.func_code) { case XPT_DEV_MATCH: numbufs = min(mapinfo->num_bufs_used, 2); if (numbufs == 1) { data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; } else { data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; } break; case XPT_SCSI_IO: case XPT_CONT_TARGET_IO: data_ptrs[0] = &ccb->csio.data_ptr; numbufs = min(mapinfo->num_bufs_used, 1); break; case XPT_ATA_IO: data_ptrs[0] = &ccb->ataio.data_ptr; numbufs = min(mapinfo->num_bufs_used, 1); break; case XPT_SMP_IO: numbufs = min(mapinfo->num_bufs_used, 2); data_ptrs[0] = &ccb->smpio.smp_request; data_ptrs[1] = &ccb->smpio.smp_response; break; case XPT_DEV_ADVINFO: numbufs = min(mapinfo->num_bufs_used, 1); data_ptrs[0] = (uint8_t **)&ccb->cdai.buf; break; case XPT_NVME_IO: case XPT_NVME_ADMIN: data_ptrs[0] = &ccb->nvmeio.data_ptr; numbufs = min(mapinfo->num_bufs_used, 1); break; default: /* allow ourselves to be swapped once again */ PRELE(curproc); return; break; /* NOTREACHED */ } for (i = 0; i < numbufs; i++) { /* Set the user's pointer back to the original value */ *data_ptrs[i] = mapinfo->bp[i]->b_caller1; /* unmap the buffer */ vunmapbuf(mapinfo->bp[i]); /* release the buffer */ relpbuf(mapinfo->bp[i], NULL); } /* allow ourselves to be swapped once again */ PRELE(curproc); } int cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr, int (*error_routine)(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags)) { union ccb *ccb; int error; int found; error = found = 0; switch(cmd){ case CAMGETPASSTHRU: ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL); ccb->ccb_h.func_code = XPT_GDEVLIST; /* * Basically, the point of this is that we go through * getting the list of devices, until we find a passthrough * device. In the current version of the CAM code, the * only way to determine what type of device we're dealing * with is by its name. 
*/ while (found == 0) { ccb->cgdl.index = 0; ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS; while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) { /* we want the next device in the list */ xpt_action(ccb); if (strncmp(ccb->cgdl.periph_name, "pass", 4) == 0){ found = 1; break; } } if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) && (found == 0)) { ccb->cgdl.periph_name[0] = '\0'; ccb->cgdl.unit_number = 0; break; } } /* copy the result back out */ bcopy(ccb, addr, sizeof(union ccb)); /* and release the ccb */ xpt_release_ccb(ccb); break; default: error = ENOTTY; break; } return(error); } static void cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb) { panic("%s: already done with ccb %p", __func__, done_ccb); } static void cam_periph_done(struct cam_periph *periph, union ccb *done_ccb) { /* Caller will release the CCB */ xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED); done_ccb->ccb_h.cbfcnp = cam_periph_done_panic; wakeup(&done_ccb->ccb_h.cbfcnp); } static void cam_periph_ccbwait(union ccb *ccb) { if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) { while (ccb->ccb_h.cbfcnp != cam_periph_done_panic) xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp, PRIBIO, "cbwait", 0); } KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX && (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG, ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, " "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code, ccb->ccb_h.status, ccb->ccb_h.pinfo.index)); } int cam_periph_runccb(union ccb *ccb, int (*error_routine)(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags), cam_flags camflags, u_int32_t sense_flags, struct devstat *ds) { struct bintime *starttime; struct bintime ltime; int error; bool must_poll; uint32_t timeout = 1; starttime = NULL; xpt_path_assert(ccb->ccb_h.path, MA_OWNED); KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0, ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb, ccb->ccb_h.func_code, ccb->ccb_h.flags)); /* * If the user has supplied a stats structure, and if we understand * this particular type of ccb, record the transaction start. */ if (ds != NULL && (ccb->ccb_h.func_code == XPT_SCSI_IO || ccb->ccb_h.func_code == XPT_ATA_IO || ccb->ccb_h.func_code == XPT_NVME_IO)) { starttime = &ltime; binuptime(starttime); devstat_start_transaction(ds, starttime); } /* * We must poll the I/O while we're dumping. The scheduler is normally * stopped for dumping, except when we call doadump from ddb. While the * scheduler is running in this case, we still need to poll the I/O to * avoid sleeping waiting for the ccb to complete. * * A panic triggered dump stops the scheduler, any callback from the * shutdown_post_sync event will run with the scheduler stopped, but * before we're officially dumping. To avoid hanging in adashutdown * initiated commands (or other similar situations), we have to test for * either dumping or SCHEDULER_STOPPED() here as well. * * To avoid locking problems, dumping/polling callers must call * without a periph lock held. */ must_poll = dumping || SCHEDULER_STOPPED(); ccb->ccb_h.cbfcnp = cam_periph_done; /* * If we're polling, then we need to ensure that we have ample resources * in the periph. * cam_periph_error can reschedule the ccb by calling xpt_action and returning * ERESTART, so we have to effect the polling in the do loop below.
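 *
 * [Editor's note, hypothetical usage:] a periph driver typically issues
 * a synchronous request as, e.g.,
 *
 *	error = cam_periph_runccb(ccb, xxerror, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA, softc->disk->d_devstat);
 *
 * where xxerror() is the driver's cam_periph_error() wrapper; during a
 * dump the very same call completes via the polling path below.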
*/ if (must_poll) { timeout = xpt_poll_setup(ccb); } if (timeout == 0) { ccb->ccb_h.status = CAM_RESRC_UNAVAIL; error = EBUSY; } else { xpt_action(ccb); do { if (must_poll) { xpt_pollwait(ccb, timeout); timeout = ccb->ccb_h.timeout * 10; } else { cam_periph_ccbwait(ccb); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) error = 0; else if (error_routine != NULL) { ccb->ccb_h.cbfcnp = cam_periph_done; error = (*error_routine)(ccb, camflags, sense_flags); } else error = 0; } while (error == ERESTART); } if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(ccb->ccb_h.path, /* relsim_flags */0, /* openings */0, /* timeout */0, /* getcount_only */ FALSE); ccb->ccb_h.status &= ~CAM_DEV_QFRZN; } if (ds != NULL) { uint32_t bytes; devstat_tag_type tag; bool valid = true; if (ccb->ccb_h.func_code == XPT_SCSI_IO) { bytes = ccb->csio.dxfer_len - ccb->csio.resid; tag = (devstat_tag_type)(ccb->csio.tag_action & 0x3); } else if (ccb->ccb_h.func_code == XPT_ATA_IO) { bytes = ccb->ataio.dxfer_len - ccb->ataio.resid; tag = (devstat_tag_type)0; } else if (ccb->ccb_h.func_code == XPT_NVME_IO) { bytes = ccb->nvmeio.dxfer_len; /* NB: resid not possible */ tag = (devstat_tag_type)0; } else { valid = false; } if (valid) devstat_end_transaction(ds, bytes, tag, ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ? DEVSTAT_NO_DATA : (ccb->ccb_h.flags & CAM_DIR_OUT) ? DEVSTAT_WRITE : DEVSTAT_READ, NULL, starttime); } return(error); } void cam_freeze_devq(struct cam_path *path) { struct ccb_hdr ccb_h; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n")); xpt_setup_ccb(&ccb_h, path, /*priority*/1); ccb_h.func_code = XPT_NOOP; ccb_h.flags = CAM_DEV_QFREEZE; xpt_action((union ccb *)&ccb_h); } u_int32_t cam_release_devq(struct cam_path *path, u_int32_t relsim_flags, u_int32_t openings, u_int32_t arg, int getcount_only) { struct ccb_relsim crs; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n", relsim_flags, openings, arg, getcount_only)); xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0; crs.release_flags = relsim_flags; crs.openings = openings; crs.release_timeout = arg; xpt_action((union ccb *)&crs); return (crs.qfrozen_cnt); } #define saved_ccb_ptr ppriv_ptr0 static void camperiphdone(struct cam_periph *periph, union ccb *done_ccb) { union ccb *saved_ccb; cam_status status; struct scsi_start_stop_unit *scsi_cmd; int error_code, sense_key, asc, ascq; scsi_cmd = (struct scsi_start_stop_unit *) &done_ccb->csio.cdb_io.cdb_bytes; status = done_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (scsi_extract_sense_ccb(done_ccb, &error_code, &sense_key, &asc, &ascq)) { /* * If the error is "invalid field in CDB", * and the load/eject flag is set, turn the * flag off and try again. This is just in * case the drive in question barfs on the * load eject flag. The CAM code should set * the load/eject flag by default for * removable media.
*/ if ((scsi_cmd->opcode == START_STOP_UNIT) && ((scsi_cmd->how & SSS_LOEJ) != 0) && (asc == 0x24) && (ascq == 0x00)) { scsi_cmd->how &= ~SSS_LOEJ; if (status & CAM_DEV_QFRZN) { cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0); done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; } xpt_action(done_ccb); goto out; } } if (cam_periph_error(done_ccb, 0, SF_RETRY_UA | SF_NO_PRINT) == ERESTART) goto out; if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) { cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0); done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; } } else { /* * If we have successfully taken a device from the not * ready to ready state, re-scan the device and re-get * the inquiry information. Many devices (mostly disks) * don't properly report their inquiry information unless * they are spun up. */ if (scsi_cmd->opcode == START_STOP_UNIT) xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL); } /* * Perform the final retry with the original CCB so that final * error processing is performed by the owner of the CCB. */ saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr; bcopy(saved_ccb, done_ccb, sizeof(*done_ccb)); xpt_free_ccb(saved_ccb); if (done_ccb->ccb_h.cbfcnp != camperiphdone) periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG; xpt_action(done_ccb); out: /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */ cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0); } /* * Generic Async Event handler. Peripheral drivers usually * filter out the events that require personal attention, * and leave the rest to this function. */ void cam_periph_async(struct cam_periph *periph, u_int32_t code, struct cam_path *path, void *arg) { switch (code) { case AC_LOST_DEVICE: cam_periph_invalidate(periph); break; default: break; } } void cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle) { struct ccb_getdevstats cgds; xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgds.ccb_h.func_code = XPT_GDEV_STATS; xpt_action((union ccb *)&cgds); cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle); } void cam_periph_freeze_after_event(struct cam_periph *periph, struct timeval* event_time, u_int duration_ms) { struct timeval delta; struct timeval duration_tv; if (!timevalisset(event_time)) return; microtime(&delta); timevalsub(&delta, event_time); duration_tv.tv_sec = duration_ms / 1000; duration_tv.tv_usec = (duration_ms % 1000) * 1000; if (timevalcmp(&delta, &duration_tv, <)) { timevalsub(&duration_tv, &delta); duration_ms = duration_tv.tv_sec * 1000; duration_ms += duration_tv.tv_usec / 1000; cam_freeze_devq(periph->path); cam_release_devq(periph->path, RELSIM_RELEASE_AFTER_TIMEOUT, /*reduction*/0, /*timeout*/duration_ms, /*getcount_only*/0); } } static int camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb, cam_flags camflags, u_int32_t sense_flags, int *openings, u_int32_t *relsim_flags, u_int32_t *timeout, u_int32_t *action, const char **action_string) { int error; switch (ccb->csio.scsi_status) { case SCSI_STATUS_OK: case SCSI_STATUS_COND_MET: case SCSI_STATUS_INTERMED: case SCSI_STATUS_INTERMED_COND_MET: error = 0; break; case SCSI_STATUS_CMD_TERMINATED: case SCSI_STATUS_CHECK_COND: error = camperiphscsisenseerror(ccb, orig_ccb, camflags, sense_flags, openings, relsim_flags, timeout, action, action_string); break; case SCSI_STATUS_QUEUE_FULL: { /* no decrement */ struct ccb_getdevstats cgds; /* * First off, find out what the current * transaction counts are. 
*/ xpt_setup_ccb(&cgds.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgds.ccb_h.func_code = XPT_GDEV_STATS; xpt_action((union ccb *)&cgds); /* * If we were the only transaction active, treat * the QUEUE FULL as if it were a BUSY condition. */ if (cgds.dev_active != 0) { int total_openings; /* * Reduce the number of openings to * be 1 less than the amount it took * to get a queue full bounded by the * minimum allowed tag count for this * device. */ total_openings = cgds.dev_active + cgds.dev_openings; *openings = cgds.dev_active; if (*openings < cgds.mintags) *openings = cgds.mintags; if (*openings < total_openings) *relsim_flags = RELSIM_ADJUST_OPENINGS; else { /* * Some devices report queue full for * temporary resource shortages. For * this reason, we allow a minimum * tag count to be entered via a * quirk entry to prevent the queue * count on these devices from falling * to a pessimistically low value. We * still wait for the next successful * completion, however, before queueing * more transactions to the device. */ *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT; } *timeout = 0; error = ERESTART; *action &= ~SSQ_PRINT_SENSE; break; } /* FALLTHROUGH */ } case SCSI_STATUS_BUSY: /* * Restart the queue after either another * command completes or a 1 second timeout. */ if ((sense_flags & SF_RETRY_BUSY) != 0 || (ccb->ccb_h.retry_count--) > 0) { error = ERESTART; *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT | RELSIM_RELEASE_AFTER_CMDCMPLT; *timeout = 1000; } else { error = EIO; } break; case SCSI_STATUS_RESERV_CONFLICT: default: error = EIO; break; } return (error); } static int camperiphscsisenseerror(union ccb *ccb, union ccb **orig, cam_flags camflags, u_int32_t sense_flags, int *openings, u_int32_t *relsim_flags, u_int32_t *timeout, u_int32_t *action, const char **action_string) { struct cam_periph *periph; union ccb *orig_ccb = ccb; int error, recoveryccb; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (ccb->ccb_h.func_code == XPT_SCSI_IO && ccb->csio.bio != NULL) biotrack(ccb->csio.bio, __func__); #endif periph = xpt_path_periph(ccb->ccb_h.path); recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone); if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) { /* * If error recovery is already in progress, don't attempt * to process this error, but requeue it unconditionally * and attempt to process it once error recovery has * completed. This failed command is probably related to * the error that caused the currently active error recovery * action so our current recovery efforts should also * address this command. Be aware that the error recovery * code assumes that only one recovery action is in progress * on a particular peripheral instance at any given time * (e.g. only one saved CCB for error recovery) so it is * imperative that we don't violate this assumption. */ error = ERESTART; *action &= ~SSQ_PRINT_SENSE; } else { scsi_sense_action err_action; struct ccb_getdev cgd; /* * Grab the inquiry data for this device. */ xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); err_action = scsi_error_action(&ccb->csio, &cgd.inq_data, sense_flags); error = err_action & SS_ERRMASK; /* * Do not autostart sequential access devices * to avoid unexpected tape loading.
*/ if ((err_action & SS_MASK) == SS_START && SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) { *action_string = "Will not autostart a " "sequential access device"; goto sense_error_done; } /* * Avoid recovery recursion if recovery action is the same. */ if ((err_action & SS_MASK) >= SS_START && recoveryccb) { if (((err_action & SS_MASK) == SS_START && ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) || ((err_action & SS_MASK) == SS_TUR && (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) { err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO; *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; *timeout = 500; } } /* * If the recovery action will consume a retry, * make sure we actually have retries available. */ if ((err_action & SSQ_DECREMENT_COUNT) != 0) { if (ccb->ccb_h.retry_count > 0 && (periph->flags & CAM_PERIPH_INVALID) == 0) ccb->ccb_h.retry_count--; else { *action_string = "Retries exhausted"; goto sense_error_done; } } if ((err_action & SS_MASK) >= SS_START) { /* * Do common portions of commands that * use recovery CCBs. */ orig_ccb = xpt_alloc_ccb_nowait(); if (orig_ccb == NULL) { *action_string = "Can't allocate recovery CCB"; goto sense_error_done; } /* * Clear freeze flag for original request here, as * this freeze will be dropped as part of ERESTART. */ ccb->ccb_h.status &= ~CAM_DEV_QFRZN; bcopy(ccb, orig_ccb, sizeof(*orig_ccb)); } switch (err_action & SS_MASK) { case SS_NOP: *action_string = "No recovery action needed"; error = 0; break; case SS_RETRY: *action_string = "Retrying command (per sense data)"; error = ERESTART; break; case SS_FAIL: *action_string = "Unretryable error"; break; case SS_START: { int le; /* * Send a start unit command to the device, and * then retry the command. */ *action_string = "Attempting to start unit"; periph->flags |= CAM_PERIPH_RECOVERY_INPROG; /* * Check for removable media and set * load/eject flag appropriately. */ if (SID_IS_REMOVABLE(&cgd.inq_data)) le = TRUE; else le = FALSE; scsi_start_stop(&ccb->csio, /*retries*/1, camperiphdone, MSG_SIMPLE_Q_TAG, /*start*/TRUE, /*load/eject*/le, /*immediate*/FALSE, SSD_FULL_SIZE, /*timeout*/50000); break; } case SS_TUR: { /* * Send a Test Unit Ready to the device. * If the 'many' flag is set, we send 120 * test unit ready commands, one every half * second. Otherwise, we just send one TUR. * We only want to do this if the retry * count has not been exhausted. */ int retries; if ((err_action & SSQ_MANY) != 0) { *action_string = "Polling device for readiness"; retries = 120; } else { *action_string = "Testing device for readiness"; retries = 1; } periph->flags |= CAM_PERIPH_RECOVERY_INPROG; scsi_test_unit_ready(&ccb->csio, retries, camperiphdone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, /*timeout*/5000); /* * Accomplish our 500ms delay by deferring * the release of our device queue appropriately. */ *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; *timeout = 500; break; } default: panic("Unhandled error action %x", err_action); } if ((err_action & SS_MASK) >= SS_START) { /* * Drop the priority, so that the recovery * CCB is the first to execute. Freeze the queue * after this command is sent so that we can * restore the old csio and have it queued in * the proper order before we release normal * transactions to the device. */ ccb->ccb_h.pinfo.priority--; ccb->ccb_h.flags |= CAM_DEV_QFREEZE; ccb->ccb_h.saved_ccb_ptr = orig_ccb; error = ERESTART; *orig = orig_ccb; } sense_error_done: *action = err_action; } return (error); } /* * Generic error handler. 
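 */

/*
 * [Editor's illustrative sketch, not part of this change.]  A minimal
 * driver error callback (hypothetical "xx" driver) just defers here:
 */
#if 0
static int
xxerror(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags)
{

	/* Driver-specific filtering would go here first. */
	return (cam_periph_error(ccb, camflags, sense_flags));
}
#endif

/*
 * (Continuing the description of cam_periph_error():)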
Peripheral drivers usually filter * out the errors that they handle in a unique manner, then * call this function. */ int cam_periph_error(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags) { struct cam_path *newpath; union ccb *orig_ccb, *scan_ccb; struct cam_periph *periph; const char *action_string; cam_status status; int frozen, error, openings, devctl_err; u_int32_t action, relsim_flags, timeout; action = SSQ_PRINT_SENSE; periph = xpt_path_periph(ccb->ccb_h.path); action_string = NULL; status = ccb->ccb_h.status; frozen = (status & CAM_DEV_QFRZN) != 0; status &= CAM_STATUS_MASK; devctl_err = openings = relsim_flags = timeout = 0; orig_ccb = ccb; /* Filter the errors that should be reported via devctl */ switch (ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_CMD_TIMEOUT: case CAM_REQ_ABORTED: case CAM_REQ_CMP_ERR: case CAM_REQ_TERMIO: case CAM_UNREC_HBA_ERROR: case CAM_DATA_RUN_ERR: case CAM_SCSI_STATUS_ERROR: case CAM_ATA_STATUS_ERROR: case CAM_SMP_STATUS_ERROR: devctl_err++; break; default: break; } switch (status) { case CAM_REQ_CMP: error = 0; action &= ~SSQ_PRINT_SENSE; break; case CAM_SCSI_STATUS_ERROR: error = camperiphscsistatuserror(ccb, &orig_ccb, camflags, sense_flags, &openings, &relsim_flags, &timeout, &action, &action_string); break; case CAM_AUTOSENSE_FAIL: error = EIO; /* we have to kill the command */ break; case CAM_UA_ABORT: case CAM_UA_TERMIO: case CAM_MSG_REJECT_REC: /* XXX Don't know that these are correct */ error = EIO; break; case CAM_SEL_TIMEOUT: if ((camflags & CAM_RETRY_SELTO) != 0) { if (ccb->ccb_h.retry_count > 0 && (periph->flags & CAM_PERIPH_INVALID) == 0) { ccb->ccb_h.retry_count--; error = ERESTART; /* * Wait a bit to give the device * time to recover before we try again. */ relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; timeout = periph_selto_delay; break; } action_string = "Retries exhausted"; } /* FALLTHROUGH */ case CAM_DEV_NOT_THERE: error = ENXIO; action = SSQ_LOST; break; case CAM_REQ_INVALID: case CAM_PATH_INVALID: case CAM_NO_HBA: case CAM_PROVIDE_FAIL: case CAM_REQ_TOO_BIG: case CAM_LUN_INVALID: case CAM_TID_INVALID: case CAM_FUNC_NOTAVAIL: error = EINVAL; break; case CAM_SCSI_BUS_RESET: case CAM_BDR_SENT: /* * Commands that repeatedly timeout and cause these * kinds of error recovery actions, should return * CAM_CMD_TIMEOUT, which allows us to safely assume * that this command was an innocent bystander to * these events and should be unconditionally * retried. */ case CAM_REQUEUE_REQ: /* Unconditional requeue if device is still there */ if (periph->flags & CAM_PERIPH_INVALID) { action_string = "Periph was invalidated"; error = EIO; } else if (sense_flags & SF_NO_RETRY) { error = EIO; action_string = "Retry was blocked"; } else { error = ERESTART; action &= ~SSQ_PRINT_SENSE; } break; case CAM_RESRC_UNAVAIL: /* Wait a bit for the resource shortage to abate. */ timeout = periph_noresrc_delay; /* FALLTHROUGH */ case CAM_BUSY: if (timeout == 0) { /* Wait a bit for the busy condition to abate. 
*/ timeout = periph_busy_delay; } relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; /* FALLTHROUGH */ case CAM_ATA_STATUS_ERROR: case CAM_REQ_CMP_ERR: case CAM_CMD_TIMEOUT: case CAM_UNEXP_BUSFREE: case CAM_UNCOR_PARITY: case CAM_DATA_RUN_ERR: default: if (periph->flags & CAM_PERIPH_INVALID) { error = EIO; action_string = "Periph was invalidated"; } else if (ccb->ccb_h.retry_count == 0) { error = EIO; action_string = "Retries exhausted"; } else if (sense_flags & SF_NO_RETRY) { error = EIO; action_string = "Retry was blocked"; } else { ccb->ccb_h.retry_count--; error = ERESTART; } break; } if ((sense_flags & SF_PRINT_ALWAYS) || CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO)) action |= SSQ_PRINT_SENSE; else if (sense_flags & SF_NO_PRINT) action &= ~SSQ_PRINT_SENSE; if ((action & SSQ_PRINT_SENSE) != 0) cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL); if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) { if (error != ERESTART) { if (action_string == NULL) action_string = "Unretryable error"; xpt_print(ccb->ccb_h.path, "Error %d, %s\n", error, action_string); } else if (action_string != NULL) xpt_print(ccb->ccb_h.path, "%s\n", action_string); else xpt_print(ccb->ccb_h.path, "Retrying command\n"); } if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0)) cam_periph_devctl_notify(orig_ccb); if ((action & SSQ_LOST) != 0) { lun_id_t lun_id; /* * For a selection timeout, we consider all of the LUNs on * the target to be gone. If the status is CAM_DEV_NOT_THERE, * then we only get rid of the device(s) specified by the * path in the original CCB. */ if (status == CAM_SEL_TIMEOUT) lun_id = CAM_LUN_WILDCARD; else lun_id = xpt_path_lun_id(ccb->ccb_h.path); /* Should we do more if we can't create the path?? */ if (xpt_create_path(&newpath, periph, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), lun_id) == CAM_REQ_CMP) { /* * Let peripheral drivers know that this * device has gone away. */ xpt_async(AC_LOST_DEVICE, newpath, NULL); xpt_free_path(newpath); } } /* Broadcast UNIT ATTENTIONs to all periphs. 
*/ if ((action & SSQ_UA) != 0) xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb); /* Rescan target on "Reported LUNs data has changed" */ if ((action & SSQ_RESCAN) != 0) { if (xpt_create_path(&newpath, NULL, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), CAM_LUN_WILDCARD) == CAM_REQ_CMP) { scan_ccb = xpt_alloc_ccb_nowait(); if (scan_ccb != NULL) { scan_ccb->ccb_h.path = newpath; scan_ccb->ccb_h.func_code = XPT_SCAN_TGT; scan_ccb->crcn.flags = 0; xpt_rescan(scan_ccb); } else { xpt_print(newpath, "Can't allocate CCB to rescan target\n"); xpt_free_path(newpath); } } } /* Attempt a retry */ if (error == ERESTART || error == 0) { if (frozen != 0) ccb->ccb_h.status &= ~CAM_DEV_QFRZN; if (error == ERESTART) xpt_action(ccb); if (frozen != 0) cam_release_devq(ccb->ccb_h.path, relsim_flags, openings, timeout, /*getcount_only*/0); } return (error); } #define CAM_PERIPH_DEVD_MSG_SIZE 256 static void cam_periph_devctl_notify(union ccb *ccb) { struct cam_periph *periph; struct ccb_getdev *cgd; struct sbuf sb; int serr, sk, asc, ascq; char *sbmsg, *type; sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT); if (sbmsg == NULL) return; sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN); periph = xpt_path_periph(ccb->ccb_h.path); sbuf_printf(&sb, "device=%s%d ", periph->periph_name, periph->unit_number); sbuf_printf(&sb, "serial=\""); if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) { xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgd->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)cgd); if (cgd->ccb_h.status == CAM_REQ_CMP) sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len); xpt_free_ccb((union ccb *)cgd); } sbuf_printf(&sb, "\" "); sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status); switch (ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_CMD_TIMEOUT: sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout); type = "timeout"; break; case CAM_SCSI_STATUS_ERROR: sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status); if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq)) sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ", serr, sk, asc, ascq); type = "error"; break; case CAM_ATA_STATUS_ERROR: sbuf_printf(&sb, "RES=\""); ata_res_sbuf(&ccb->ataio.res, &sb); sbuf_printf(&sb, "\" "); type = "error"; break; default: type = "error"; break; } if (ccb->ccb_h.func_code == XPT_SCSI_IO) { sbuf_printf(&sb, "CDB=\""); scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb); sbuf_printf(&sb, "\" "); } else if (ccb->ccb_h.func_code == XPT_ATA_IO) { sbuf_printf(&sb, "ACB=\""); ata_cmd_sbuf(&ccb->ataio.cmd, &sb); sbuf_printf(&sb, "\" "); } if (sbuf_finish(&sb) == 0) devctl_notify("CAM", "periph", type, sbuf_data(&sb)); sbuf_delete(&sb); free(sbmsg, M_CAMPERIPH); } Index: head/sys/cam/cam_periph.h =================================================================== --- head/sys/cam/cam_periph.h (revision 328917) +++ head/sys/cam/cam_periph.h (revision 328918) @@ -1,267 +1,267 @@ /*- * Data structures and definitions for CAM peripheral ("type") drivers. * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. 
The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_PERIPH_H #define _CAM_CAM_PERIPH_H 1 #include #include #ifdef _KERNEL #include #include struct devstat; extern struct cam_periph *xpt_periph; extern struct periph_driver **periph_drivers; void periphdriver_register(void *); int periphdriver_unregister(void *); void periphdriver_init(int level); #include #define PERIPHDRIVER_DECLARE(name, driver) \ static int name ## _modevent(module_t mod, int type, void *data) \ { \ switch (type) { \ case MOD_LOAD: \ periphdriver_register(data); \ break; \ case MOD_UNLOAD: \ return (periphdriver_unregister(data)); \ default: \ return EOPNOTSUPP; \ } \ return 0; \ } \ static moduledata_t name ## _mod = { \ #name, \ name ## _modevent, \ (void *)&driver \ }; \ DECLARE_MODULE(name, name ## _mod, SI_SUB_DRIVERS, SI_ORDER_ANY); \ MODULE_DEPEND(name, cam, 1, 1, 1) /* * Callback informing the peripheral driver it can perform its * initialization since the XPT is now fully initialized. */ typedef void (periph_init_t)(void); /* * Callback requesting the peripheral driver to remove its instances * and shut down, if possible.
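 */

/*
 * [Editor's illustrative sketch, not part of this change; it relies on
 * the declarations that follow.]  A driver opts in to unloading by
 * supplying a deinit hook in its periph_driver, as ctlfe_driver does in
 * scsi_ctl.c; "xx" names are hypothetical:
 */
#if 0
static periph_init_t	xxinit;
static periph_deinit_t	xxdeinit;

static struct periph_driver xxdriver = {
	xxinit, "xx",
	TAILQ_HEAD_INITIALIZER(xxdriver.units), /* generation */ 0,
	/* flags */ 0,
	xxdeinit
};

PERIPHDRIVER_DECLARE(xx, xxdriver);
#endif

/*
 * (The declarations referenced above follow.)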
*/ typedef int (periph_deinit_t)(void); struct periph_driver { periph_init_t *init; char *driver_name; TAILQ_HEAD(,cam_periph) units; u_int generation; u_int flags; #define CAM_PERIPH_DRV_EARLY 0x01 periph_deinit_t *deinit; }; typedef enum { CAM_PERIPH_BIO } cam_periph_type; /* Generically useful offsets into the peripheral private area */ #define ppriv_ptr0 periph_priv.entries[0].ptr #define ppriv_ptr1 periph_priv.entries[1].ptr #define ppriv_field0 periph_priv.entries[0].field #define ppriv_field1 periph_priv.entries[1].field typedef void periph_start_t (struct cam_periph *periph, union ccb *start_ccb); typedef cam_status periph_ctor_t (struct cam_periph *periph, void *arg); typedef void periph_oninv_t (struct cam_periph *periph); typedef void periph_dtor_t (struct cam_periph *periph); struct cam_periph { periph_start_t *periph_start; periph_oninv_t *periph_oninval; periph_dtor_t *periph_dtor; char *periph_name; struct cam_path *path; /* Compiled path to device */ void *softc; struct cam_sim *sim; u_int32_t unit_number; cam_periph_type type; u_int32_t flags; #define CAM_PERIPH_RUNNING 0x01 #define CAM_PERIPH_LOCKED 0x02 #define CAM_PERIPH_LOCK_WANTED 0x04 #define CAM_PERIPH_INVALID 0x08 #define CAM_PERIPH_NEW_DEV_FOUND 0x10 #define CAM_PERIPH_RECOVERY_INPROG 0x20 #define CAM_PERIPH_RUN_TASK 0x40 #define CAM_PERIPH_FREE 0x80 #define CAM_PERIPH_ANNOUNCED 0x100 uint32_t scheduled_priority; uint32_t immediate_priority; int periph_allocating; int periph_allocated; u_int32_t refcount; SLIST_HEAD(, ccb_hdr) ccb_list; /* For "immediate" requests */ SLIST_ENTRY(cam_periph) periph_links; TAILQ_ENTRY(cam_periph) unit_links; ac_callback_t *deferred_callback; ac_code deferred_ac; struct task periph_run_task; }; #define CAM_PERIPH_MAXMAPS 2 struct cam_periph_map_info { int num_bufs_used; struct buf *bp[CAM_PERIPH_MAXMAPS]; }; cam_status cam_periph_alloc(periph_ctor_t *periph_ctor, periph_oninv_t *periph_oninvalidate, periph_dtor_t *periph_dtor, periph_start_t *periph_start, char *name, cam_periph_type type, struct cam_path *, ac_callback_t *, ac_code, void *arg); struct cam_periph *cam_periph_find(struct cam_path *path, char *name); int cam_periph_list(struct cam_path *, struct sbuf *); -cam_status cam_periph_acquire(struct cam_periph *periph); +int cam_periph_acquire(struct cam_periph *periph); void cam_periph_doacquire(struct cam_periph *periph); void cam_periph_release(struct cam_periph *periph); void cam_periph_release_locked(struct cam_periph *periph); void cam_periph_release_locked_buses(struct cam_periph *periph); int cam_periph_hold(struct cam_periph *periph, int priority); void cam_periph_unhold(struct cam_periph *periph); void cam_periph_invalidate(struct cam_periph *periph); int cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo, u_int maxmap); void cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo); union ccb *cam_periph_getccb(struct cam_periph *periph, u_int32_t priority); int cam_periph_runccb(union ccb *ccb, int (*error_routine)(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags), cam_flags camflags, u_int32_t sense_flags, struct devstat *ds); int cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr, int (*error_routine)(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags)); void cam_freeze_devq(struct cam_path *path); u_int32_t cam_release_devq(struct cam_path *path, u_int32_t relsim_flags, u_int32_t opening_reduction, u_int32_t arg, int getcount_only); void cam_periph_async(struct cam_periph *periph, u_int32_t 
code, struct cam_path *path, void *arg); void cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle_ms); void cam_periph_freeze_after_event(struct cam_periph *periph, struct timeval* event_time, u_int duration_ms); int cam_periph_error(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags); static __inline struct mtx * cam_periph_mtx(struct cam_periph *periph) { if (periph != NULL) return (xpt_path_mtx(periph->path)); else return (NULL); } #define cam_periph_owned(periph) \ mtx_owned(xpt_path_mtx((periph)->path)) #define cam_periph_lock(periph) \ mtx_lock(xpt_path_mtx((periph)->path)) #define cam_periph_unlock(periph) \ mtx_unlock(xpt_path_mtx((periph)->path)) #define cam_periph_assert(periph, what) \ mtx_assert(xpt_path_mtx((periph)->path), (what)) #define cam_periph_sleep(periph, chan, priority, wmesg, timo) \ xpt_path_sleep((periph)->path, (chan), (priority), (wmesg), (timo)) static inline struct cam_periph * cam_periph_acquire_first(struct periph_driver *driver) { struct cam_periph *periph; xpt_lock_buses(); periph = TAILQ_FIRST(&driver->units); while (periph != NULL && (periph->flags & CAM_PERIPH_INVALID) != 0) periph = TAILQ_NEXT(periph, unit_links); if (periph != NULL) periph->refcount++; xpt_unlock_buses(); return (periph); } static inline struct cam_periph * cam_periph_acquire_next(struct cam_periph *pperiph) { struct cam_periph *periph = pperiph; cam_periph_assert(pperiph, MA_NOTOWNED); xpt_lock_buses(); do { periph = TAILQ_NEXT(periph, unit_links); } while (periph != NULL && (periph->flags & CAM_PERIPH_INVALID) != 0); if (periph != NULL) periph->refcount++; xpt_unlock_buses(); cam_periph_release(pperiph); return (periph); } #define CAM_PERIPH_FOREACH(periph, driver) \ for ((periph) = cam_periph_acquire_first(driver); \ (periph) != NULL; \ (periph) = cam_periph_acquire_next(periph)) #define CAM_PERIPH_PRINT(p, msg, args...) \ printf("%s%d:" msg, (periph)->periph_name, (periph)->unit_number, ##args) #endif /* _KERNEL */ #endif /* _CAM_CAM_PERIPH_H */ Index: head/sys/cam/ctl/scsi_ctl.c =================================================================== --- head/sys/cam/ctl/scsi_ctl.c (revision 328917) +++ head/sys/cam/ctl/scsi_ctl.c (revision 328918) @@ -1,1995 +1,1995 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008, 2009 Silicon Graphics International Corp. * Copyright (c) 2014-2015 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $ */ /* * Peripheral driver interface between CAM and CTL (CAM Target Layer). * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct ctlfe_softc { struct ctl_port port; path_id_t path_id; target_id_t target_id; uint32_t hba_misc; u_int maxio; struct cam_sim *sim; char port_name[DEV_IDLEN]; struct mtx lun_softc_mtx; STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list; STAILQ_ENTRY(ctlfe_softc) links; }; STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list; struct mtx ctlfe_list_mtx; static char ctlfe_mtx_desc[] = "ctlfelist"; typedef enum { CTLFE_LUN_NONE = 0x00, CTLFE_LUN_WILDCARD = 0x01 } ctlfe_lun_flags; struct ctlfe_lun_softc { struct ctlfe_softc *parent_softc; struct cam_periph *periph; ctlfe_lun_flags flags; int ctios_sent; /* Number of active CTIOs */ int refcount; /* Number of active xpt_action() */ int atios_alloced; /* Number of ATIOs not freed */ int inots_alloced; /* Number of INOTs not freed */ struct task refdrain_task; STAILQ_HEAD(, ccb_hdr) work_queue; LIST_HEAD(, ccb_hdr) atio_list; /* List of ATIOs queued to SIM. */ LIST_HEAD(, ccb_hdr) inot_list; /* List of INOTs queued to SIM. */ STAILQ_ENTRY(ctlfe_lun_softc) links; }; typedef enum { CTLFE_CMD_NONE = 0x00, CTLFE_CMD_PIECEWISE = 0x01 } ctlfe_cmd_flags; struct ctlfe_cmd_info { int cur_transfer_index; size_t cur_transfer_off; ctlfe_cmd_flags flags; /* * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16 * bytes on amd64. So with 32 elements, this is 256 bytes on * i386 and 512 bytes on amd64. */ #define CTLFE_MAX_SEGS 32 bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS]; }; /* * When we register the adapter/bus, request that this many ctl_ios be * allocated. This should be the maximum supported by the adapter, but we * currently don't have a way to get that back from the path inquiry. * XXX KDM add that to the path inquiry. */ #define CTLFE_REQ_CTL_IO 4096 /* * Number of Accept Target I/O CCBs to allocate and queue down to the * adapter per LUN. * XXX KDM should this be controlled by CTL? */ #define CTLFE_ATIO_PER_LUN 1024 /* * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to * allocate and queue down to the adapter per LUN. * XXX KDM should this be controlled by CTL? */ #define CTLFE_IN_PER_LUN 1024 /* * Timeout (in seconds) on CTIO CCB doing DMA or sending status */ #define CTLFE_TIMEOUT 5 /* * Turn this on to enable extra debugging prints. 
*/ #if 0 #define CTLFE_DEBUG #endif MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface"); #define io_ptr ppriv_ptr0 /* This is only used in the CTIO */ #define ccb_atio ppriv_ptr1 #define PRIV_CCB(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[0]) #define PRIV_INFO(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[1]) static int ctlfeinitialize(void); static int ctlfeshutdown(void); static periph_init_t ctlfeperiphinit; static periph_deinit_t ctlfeperiphdeinit; static void ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); static periph_ctor_t ctlferegister; static periph_oninv_t ctlfeoninvalidate; static periph_dtor_t ctlfecleanup; static periph_start_t ctlfestart; static void ctlfedone(struct cam_periph *periph, union ccb *done_ccb); static void ctlfe_onoffline(void *arg, int online); static void ctlfe_online(void *arg); static void ctlfe_offline(void *arg); static int ctlfe_lun_enable(void *arg, int lun_id); static int ctlfe_lun_disable(void *arg, int lun_id); static void ctlfe_dump_sim(struct cam_sim *sim); static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc); static void ctlfe_datamove(union ctl_io *io); static void ctlfe_done(union ctl_io *io); static void ctlfe_dump(void); static void ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb); static void ctlfe_requeue_ccb(struct cam_periph *periph, union ccb *ccb, int unlock); static struct periph_driver ctlfe_driver = { ctlfeperiphinit, "ctl", TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0, CAM_PERIPH_DRV_EARLY, ctlfeperiphdeinit }; static struct ctl_frontend ctlfe_frontend = { .name = "camtgt", .init = ctlfeinitialize, .fe_dump = ctlfe_dump, .shutdown = ctlfeshutdown, }; CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend); static int ctlfeinitialize(void) { STAILQ_INIT(&ctlfe_softc_list); mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF); periphdriver_register(&ctlfe_driver); return (0); } static int ctlfeshutdown(void) { int error; error = periphdriver_unregister(&ctlfe_driver); if (error != 0) return (error); mtx_destroy(&ctlfe_list_mtx); return (0); } static void ctlfeperiphinit(void) { cam_status status; status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED | AC_CONTRACT, ctlfeasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("ctl: Failed to attach async callback due to CAM " "status 0x%x!\n", status); } } static int ctlfeperiphdeinit(void) { /* XXX: It would be good to tear down active ports here. */ if (!TAILQ_EMPTY(&ctlfe_driver.units)) return (EBUSY); xpt_register_async(0, ctlfeasync, NULL, NULL); return (0); } static void ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct ctlfe_softc *softc; #ifdef CTLFEDEBUG printf("%s: entered\n", __func__); #endif mtx_lock(&ctlfe_list_mtx); STAILQ_FOREACH(softc, &ctlfe_softc_list, links) { if (softc->path_id == xpt_path_path_id(path)) break; } mtx_unlock(&ctlfe_list_mtx); /* * When a new path gets registered, and it is capable of target * mode, go ahead and attach. Later on, we may need to be more * selective, but for now this will be sufficient. 
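 * In outline: AC_PATH_REGISTERED allocates a ctlfe_softc and registers
 * a CTL port for the new SIM, AC_PATH_DEREGISTERED tears that port
 * down again, and AC_CONTRACT forwards initiator arrival/departure
 * events to CTL via ctl_add_initiator()/ctl_remove_initiator().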
*/ switch (code) { case AC_PATH_REGISTERED: { struct ctl_port *port; struct ccb_pathinq *cpi; int retval; cpi = (struct ccb_pathinq *)arg; /* Don't attach if it doesn't support target mode */ if ((cpi->target_sprt & PIT_PROCESSOR) == 0) { #ifdef CTLFEDEBUG printf("%s: SIM %s%d doesn't support target mode\n", __func__, cpi->dev_name, cpi->unit_number); #endif break; } if (softc != NULL) { #ifdef CTLFEDEBUG printf("%s: CTL port for CAM path %u already exists\n", __func__, xpt_path_path_id(path)); #endif break; } /* * We're in an interrupt context here, so we have to * use M_NOWAIT. Of course this means trouble if we * can't allocate memory. */ softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO); if (softc == NULL) { printf("%s: unable to malloc %zd bytes for softc\n", __func__, sizeof(*softc)); return; } softc->path_id = cpi->ccb_h.path_id; softc->target_id = cpi->initiator_id; softc->sim = xpt_path_sim(path); softc->hba_misc = cpi->hba_misc; if (cpi->maxio != 0) softc->maxio = cpi->maxio; else softc->maxio = DFLTPHYS; mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF); STAILQ_INIT(&softc->lun_softc_list); port = &softc->port; port->frontend = &ctlfe_frontend; /* * XXX KDM should we be more accurate here ? */ if (cpi->transport == XPORT_FC) port->port_type = CTL_PORT_FC; else if (cpi->transport == XPORT_SAS) port->port_type = CTL_PORT_SAS; else port->port_type = CTL_PORT_SCSI; /* XXX KDM what should the real number be here? */ port->num_requested_ctl_io = CTLFE_REQ_CTL_IO; snprintf(softc->port_name, sizeof(softc->port_name), "%s%d", cpi->dev_name, cpi->unit_number); /* * XXX KDM it would be nice to allocate storage in the * frontend structure itself. */ port->port_name = softc->port_name; port->physical_port = cpi->bus_id; port->virtual_port = 0; port->port_online = ctlfe_online; port->port_offline = ctlfe_offline; port->onoff_arg = softc; port->lun_enable = ctlfe_lun_enable; port->lun_disable = ctlfe_lun_disable; port->targ_lun_arg = softc; port->fe_datamove = ctlfe_datamove; port->fe_done = ctlfe_done; port->targ_port = -1; retval = ctl_port_register(port); if (retval != 0) { printf("%s: ctl_port_register() failed with " "error %d!\n", __func__, retval); mtx_destroy(&softc->lun_softc_mtx); free(softc, M_CTLFE); break; } else { mtx_lock(&ctlfe_list_mtx); STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links); mtx_unlock(&ctlfe_list_mtx); } break; } case AC_PATH_DEREGISTERED: { if (softc != NULL) { /* * XXX KDM are we certain at this point that there * are no outstanding commands for this frontend? */ mtx_lock(&ctlfe_list_mtx); STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc, links); mtx_unlock(&ctlfe_list_mtx); ctl_port_deregister(&softc->port); mtx_destroy(&softc->lun_softc_mtx); free(softc, M_CTLFE); } break; } case AC_CONTRACT: { struct ac_contract *ac; ac = (struct ac_contract *)arg; switch (ac->contract_number) { case AC_CONTRACT_DEV_CHG: { struct ac_device_changed *dev_chg; int retval; dev_chg = (struct ac_device_changed *)ac->contract_data; printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n", __func__, dev_chg->wwpn, dev_chg->port, xpt_path_path_id(path), dev_chg->target, (dev_chg->arrived == 0) ? 
"left" : "arrived"); if (softc == NULL) { printf("%s: CTL port for CAM path %u not " "found!\n", __func__, xpt_path_path_id(path)); break; } if (dev_chg->arrived != 0) { retval = ctl_add_initiator(&softc->port, dev_chg->target, dev_chg->wwpn, NULL); } else { retval = ctl_remove_initiator(&softc->port, dev_chg->target); } if (retval < 0) { printf("%s: could not %s port %d iid %u " "WWPN %#jx!\n", __func__, (dev_chg->arrived != 0) ? "add" : "remove", softc->port.targ_port, dev_chg->target, (uintmax_t)dev_chg->wwpn); } break; } default: printf("%s: unsupported contract number %ju\n", __func__, (uintmax_t)ac->contract_number); break; } break; } default: break; } } static cam_status ctlferegister(struct cam_periph *periph, void *arg) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; union ccb ccb; cam_status status; - int i; + int i, acstatus; softc = (struct ctlfe_lun_softc *)arg; bus_softc = softc->parent_softc; STAILQ_INIT(&softc->work_queue); LIST_INIT(&softc->atio_list); LIST_INIT(&softc->inot_list); softc->periph = periph; periph->softc = softc; /* Increase device openings to maximum for the SIM. */ if (bus_softc->sim->max_tagged_dev_openings > bus_softc->sim->max_dev_openings) { cam_release_devq(periph->path, /*relsim_flags*/RELSIM_ADJUST_OPENINGS, /*openings*/bus_softc->sim->max_tagged_dev_openings, /*timeout*/0, /*getcount_only*/1); } xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); ccb.ccb_h.func_code = XPT_EN_LUN; ccb.cel.grp6_len = 0; ccb.cel.grp7_len = 0; ccb.cel.enable = 1; xpt_action(&ccb); status = (ccb.ccb_h.status & CAM_STATUS_MASK); if (status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n", __func__, ccb.ccb_h.status); return (status); } status = CAM_REQ_CMP; for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) { union ccb *new_ccb; union ctl_io *new_io; struct ctlfe_cmd_info *cmd_info; new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, M_ZERO|M_NOWAIT); if (new_ccb == NULL) { status = CAM_RESRC_UNAVAIL; break; } new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); if (new_io == NULL) { free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } cmd_info = malloc(sizeof(*cmd_info), M_CTLFE, M_ZERO | M_NOWAIT); if (cmd_info == NULL) { ctl_free_io(new_io); free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } PRIV_INFO(new_io) = cmd_info; softc->atios_alloced++; new_ccb->ccb_h.io_ptr = new_io; LIST_INSERT_HEAD(&softc->atio_list, &new_ccb->ccb_h, periph_links.le); xpt_setup_ccb(&new_ccb->ccb_h, periph->path, CAM_PRIORITY_NONE); new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; new_ccb->ccb_h.cbfcnp = ctlfedone; new_ccb->ccb_h.flags |= CAM_UNLOCKED; xpt_action(new_ccb); status = new_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { free(cmd_info, M_CTLFE); ctl_free_io(new_io); free(new_ccb, M_CTLFE); break; } } - status = cam_periph_acquire(periph); - if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { + acstatus = cam_periph_acquire(periph); + if (acstatus != 0) { xpt_print(periph->path, "%s: could not acquire reference " - "count, status = %#x\n", __func__, status); - return (status); + "count, status = %#x\n", __func__, acstatus); + return (CAM_REQ_CMP_ERR); } if (i == 0) { xpt_print(periph->path, "%s: could not allocate ATIO CCBs, " "status 0x%x\n", __func__, status); return (CAM_REQ_CMP_ERR); } for (i = 0; i < CTLFE_IN_PER_LUN; i++) { union ccb *new_ccb; union ctl_io *new_io; new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, M_ZERO|M_NOWAIT); if (new_ccb == NULL) { status = 
CAM_RESRC_UNAVAIL; break; } new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); if (new_io == NULL) { free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } softc->inots_alloced++; new_ccb->ccb_h.io_ptr = new_io; LIST_INSERT_HEAD(&softc->inot_list, &new_ccb->ccb_h, periph_links.le); xpt_setup_ccb(&new_ccb->ccb_h, periph->path, CAM_PRIORITY_NONE); new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; new_ccb->ccb_h.cbfcnp = ctlfedone; new_ccb->ccb_h.flags |= CAM_UNLOCKED; xpt_action(new_ccb); status = new_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { /* * Note that we don't free the CCB here. If the * status is not CAM_REQ_INPROG, then we're * probably talking to a SIM that says it is * target-capable but doesn't support the * XPT_IMMEDIATE_NOTIFY CCB. i.e. it supports the * older API. In that case, it'll call xpt_done() * on the CCB, and we need to free it in our done * routine as a result. */ break; } } if ((i == 0) || (status != CAM_REQ_INPROG)) { xpt_print(periph->path, "%s: could not allocate immediate " "notify CCBs, status 0x%x\n", __func__, status); return (CAM_REQ_CMP_ERR); } mtx_lock(&bus_softc->lun_softc_mtx); STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links); mtx_unlock(&bus_softc->lun_softc_mtx); return (CAM_REQ_CMP); } static void ctlfeoninvalidate(struct cam_periph *periph) { struct ctlfe_lun_softc *softc = (struct ctlfe_lun_softc *)periph->softc; struct ctlfe_softc *bus_softc; union ccb ccb; struct ccb_hdr *hdr; cam_status status; /* Abort all ATIOs and INOTs queued to SIM. */ xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); ccb.ccb_h.func_code = XPT_ABORT; LIST_FOREACH(hdr, &softc->atio_list, periph_links.le) { ccb.cab.abort_ccb = (union ccb *)hdr; xpt_action(&ccb); } LIST_FOREACH(hdr, &softc->inot_list, periph_links.le) { ccb.cab.abort_ccb = (union ccb *)hdr; xpt_action(&ccb); } /* Disable the LUN in SIM. */ ccb.ccb_h.func_code = XPT_EN_LUN; ccb.cel.grp6_len = 0; ccb.cel.grp7_len = 0; ccb.cel.enable = 0; xpt_action(&ccb); status = (ccb.ccb_h.status & CAM_STATUS_MASK); if (status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n", __func__, ccb.ccb_h.status); /* * XXX KDM what do we do now? */ } bus_softc = softc->parent_softc; mtx_lock(&bus_softc->lun_softc_mtx); STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links); mtx_unlock(&bus_softc->lun_softc_mtx); } static void ctlfecleanup(struct cam_periph *periph) { struct ctlfe_lun_softc *softc; softc = (struct ctlfe_lun_softc *)periph->softc; KASSERT(softc->ctios_sent == 0, ("%s: ctios_sent %d != 0", __func__, softc->ctios_sent)); KASSERT(softc->refcount == 0, ("%s: refcount %d != 0", __func__, softc->refcount)); KASSERT(softc->atios_alloced == 0, ("%s: atios_alloced %d != 0", __func__, softc->atios_alloced)); KASSERT(softc->inots_alloced == 0, ("%s: inots_alloced %d != 0", __func__, softc->inots_alloced)); free(softc, M_CTLFE); } static void ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io, ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len, u_int16_t *sglist_cnt) { struct ctlfe_softc *bus_softc; struct ctlfe_cmd_info *cmd_info; struct ctl_sg_entry *ctl_sglist; bus_dma_segment_t *cam_sglist; size_t off; int i, idx; cmd_info = PRIV_INFO(io); bus_softc = softc->parent_softc; /* * Set the direction, relative to the initiator. 
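 * CTL_FLAG_DATA_IN (device to initiator, i.e. a read) maps to
 * CAM_DIR_IN; everything else maps to CAM_DIR_OUT.  For a READ(10),
 * for example, the CTIO is built with CAM_DIR_IN and the data pointer
 * set up below is the buffer CTL wants shipped to the initiator.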
*/ *flags &= ~CAM_DIR_MASK; if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) *flags |= CAM_DIR_IN; else *flags |= CAM_DIR_OUT; *flags &= ~CAM_DATA_MASK; idx = cmd_info->cur_transfer_index; off = cmd_info->cur_transfer_off; cmd_info->flags &= ~CTLFE_CMD_PIECEWISE; if (io->scsiio.kern_sg_entries == 0) { /* No S/G list. */ /* One time shift for SRR offset. */ off += io->scsiio.ext_data_filled; io->scsiio.ext_data_filled = 0; *data_ptr = io->scsiio.kern_data_ptr + off; if (io->scsiio.kern_data_len - off <= bus_softc->maxio) { *dxfer_len = io->scsiio.kern_data_len - off; } else { *dxfer_len = bus_softc->maxio; cmd_info->cur_transfer_off += bus_softc->maxio; cmd_info->flags |= CTLFE_CMD_PIECEWISE; } *sglist_cnt = 0; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) *flags |= CAM_DATA_PADDR; else *flags |= CAM_DATA_VADDR; } else { /* S/G list with physical or virtual pointers. */ ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; /* One time shift for SRR offset. */ while (io->scsiio.ext_data_filled >= ctl_sglist[idx].len - off) { io->scsiio.ext_data_filled -= ctl_sglist[idx].len - off; idx++; off = 0; } off += io->scsiio.ext_data_filled; io->scsiio.ext_data_filled = 0; cam_sglist = cmd_info->cam_sglist; *dxfer_len = 0; for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) { cam_sglist[i].ds_addr = (bus_addr_t)(uintptr_t)ctl_sglist[i + idx].addr + off; if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) { cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off; *dxfer_len += cam_sglist[i].ds_len; } else { cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len; cmd_info->cur_transfer_index = idx + i; cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off; cmd_info->flags |= CTLFE_CMD_PIECEWISE; *dxfer_len += cam_sglist[i].ds_len; if (ctl_sglist[i].len != 0) i++; break; } if (i == (CTLFE_MAX_SEGS - 1) && idx + i < (io->scsiio.kern_sg_entries - 1)) { cmd_info->cur_transfer_index = idx + i + 1; cmd_info->cur_transfer_off = 0; cmd_info->flags |= CTLFE_CMD_PIECEWISE; i++; break; } off = 0; } *sglist_cnt = i; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) *flags |= CAM_DATA_SG_PADDR; else *flags |= CAM_DATA_SG; *data_ptr = (uint8_t *)cam_sglist; } } static void ctlfestart(struct cam_periph *periph, union ccb *start_ccb) { struct ctlfe_lun_softc *softc; struct ctlfe_cmd_info *cmd_info; struct ccb_hdr *ccb_h; struct ccb_accept_tio *atio; struct ccb_scsiio *csio; uint8_t *data_ptr; uint32_t dxfer_len; ccb_flags flags; union ctl_io *io; uint8_t scsi_status; softc = (struct ctlfe_lun_softc *)periph->softc; next: /* Take the ATIO off the work queue */ ccb_h = STAILQ_FIRST(&softc->work_queue); if (ccb_h == NULL) { xpt_release_ccb(start_ccb); return; } STAILQ_REMOVE_HEAD(&softc->work_queue, periph_links.stqe); atio = (struct ccb_accept_tio *)ccb_h; io = (union ctl_io *)ccb_h->io_ptr; csio = &start_ccb->csio; flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK); cmd_info = PRIV_INFO(io); cmd_info->cur_transfer_index = 0; cmd_info->cur_transfer_off = 0; cmd_info->flags = 0; if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) { /* * Datamove call, we need to setup the S/G list. */ ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len, &csio->sglist_cnt); } else { /* * We're done, send status back. 
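 * A CTIO that moves no data carries data_ptr == NULL and
 * dxfer_len == 0; CAM_SEND_STATUS (plus CAM_SEND_SENSE when sense
 * bytes are queued) is then added to the flags computed below.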
*/ if ((io->io_hdr.flags & CTL_FLAG_ABORT) && (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) { io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED; /* Tell the SIM that we've aborted this ATIO */ #ifdef CTLFEDEBUG printf("%s: tag %04x abort\n", __func__, atio->tag_id); #endif KASSERT(atio->ccb_h.func_code == XPT_ACCEPT_TARGET_IO, ("func_code %#x is not ATIO", atio->ccb_h.func_code)); start_ccb->ccb_h.func_code = XPT_ABORT; start_ccb->cab.abort_ccb = (union ccb *)atio; xpt_action(start_ccb); ctlfe_requeue_ccb(periph, (union ccb *)atio, /* unlock */0); /* XPT_ABORT is not queued, so we can take next I/O. */ goto next; } data_ptr = NULL; dxfer_len = 0; csio->sglist_cnt = 0; } scsi_status = 0; if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) && (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 && ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 || io->io_hdr.status == CTL_SUCCESS)) { flags |= CAM_SEND_STATUS; scsi_status = io->scsiio.scsi_status; csio->sense_len = io->scsiio.sense_len; #ifdef CTLFEDEBUG printf("%s: tag %04x status %x\n", __func__, atio->tag_id, io->io_hdr.status); #endif if (csio->sense_len != 0) { csio->sense_data = io->scsiio.sense_data; flags |= CAM_SEND_SENSE; } } #ifdef CTLFEDEBUG printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__, (flags & CAM_SEND_STATUS) ? "done" : "datamove", atio->tag_id, flags, data_ptr, dxfer_len); #endif /* * Valid combinations: * - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0, * sglist_cnt = 0 * - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0, * sglist_cnt = 0 * - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0, * sglist_cnt != 0 */ #ifdef CTLFEDEBUG if (((flags & CAM_SEND_STATUS) && (((flags & CAM_DATA_SG) != 0) || (dxfer_len != 0) || (csio->sglist_cnt != 0))) || (((flags & CAM_SEND_STATUS) == 0) && (dxfer_len == 0)) || ((flags & CAM_DATA_SG) && (csio->sglist_cnt == 0)) || (((flags & CAM_DATA_SG) == 0) && (csio->sglist_cnt != 0))) { printf("%s: tag %04x cdb %02x flags %#x dxfer_len " "%d sg %u\n", __func__, atio->tag_id, atio_cdb_ptr(atio)[0], flags, dxfer_len, csio->sglist_cnt); printf("%s: tag %04x io status %#x\n", __func__, atio->tag_id, io->io_hdr.status); } #endif cam_fill_ctio(csio, /*retries*/ 2, ctlfedone, flags, (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, atio->tag_id, atio->init_id, scsi_status, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*timeout*/ CTLFE_TIMEOUT * 1000); start_ccb->ccb_h.flags |= CAM_UNLOCKED; start_ccb->ccb_h.ccb_atio = atio; if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; io->io_hdr.flags &= ~(CTL_FLAG_DMA_QUEUED | CTL_FLAG_STATUS_QUEUED); softc->ctios_sent++; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); softc->refcount--; /* * If we still have work to do, ask for another CCB. 
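 * The refcount bumped around the unlocked xpt_action() call above is
 * what ctlfe_drain() below waits out before the final reference on
 * the periph is released.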
*/ if (!STAILQ_EMPTY(&softc->work_queue)) xpt_schedule(periph, CAM_PRIORITY_NORMAL); } static void ctlfe_drain(void *context, int pending) { struct cam_periph *periph = context; struct ctlfe_lun_softc *softc = periph->softc; cam_periph_lock(periph); while (softc->refcount != 0) { cam_periph_sleep(periph, &softc->refcount, PRIBIO, "ctlfe_drain", 1); } cam_periph_unlock(periph); cam_periph_release(periph); } static void ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb) { struct ctlfe_lun_softc *softc; union ctl_io *io; struct ctlfe_cmd_info *cmd_info; softc = (struct ctlfe_lun_softc *)periph->softc; io = ccb->ccb_h.io_ptr; switch (ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: softc->atios_alloced--; cmd_info = PRIV_INFO(io); free(cmd_info, M_CTLFE); break; case XPT_IMMEDIATE_NOTIFY: case XPT_NOTIFY_ACKNOWLEDGE: softc->inots_alloced--; break; default: break; } ctl_free_io(io); free(ccb, M_CTLFE); KASSERT(softc->atios_alloced >= 0, ("%s: atios_alloced %d < 0", __func__, softc->atios_alloced)); KASSERT(softc->inots_alloced >= 0, ("%s: inots_alloced %d < 0", __func__, softc->inots_alloced)); /* * If we have received all of our CCBs, we can release our * reference on the peripheral driver. It will probably go away * now. */ if (softc->atios_alloced == 0 && softc->inots_alloced == 0) { if (softc->refcount == 0) { cam_periph_release_locked(periph); } else { TASK_INIT(&softc->refdrain_task, 0, ctlfe_drain, periph); taskqueue_enqueue(taskqueue_thread, &softc->refdrain_task); } } } /* * Send the ATIO/INOT back to the SIM, or free it if periph was invalidated. */ static void ctlfe_requeue_ccb(struct cam_periph *periph, union ccb *ccb, int unlock) { struct ctlfe_lun_softc *softc; struct mtx *mtx; if (periph->flags & CAM_PERIPH_INVALID) { mtx = cam_periph_mtx(periph); ctlfe_free_ccb(periph, ccb); if (unlock) mtx_unlock(mtx); return; } softc = (struct ctlfe_lun_softc *)periph->softc; if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) LIST_INSERT_HEAD(&softc->atio_list, &ccb->ccb_h, periph_links.le); else LIST_INSERT_HEAD(&softc->inot_list, &ccb->ccb_h, periph_links.le); if (unlock) cam_periph_unlock(periph); /* * For a wildcard attachment, commands can come in with a specific * target/lun. Reset the target and LUN fields back to the wildcard * values before we send them back down to the SIM. 
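 * xpt_setup_ccb_flags() re-derives the target and LUN from
 * periph->path, which for a wildcard attachment is the wildcard path,
 * so no explicit rewrite of the CCB's target_id/target_lun is needed.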
*/ xpt_setup_ccb_flags(&ccb->ccb_h, periph->path, CAM_PRIORITY_NONE, ccb->ccb_h.flags); xpt_action(ccb); } static int ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset) { uint64_t lba; uint32_t num_blocks, nbc; uint8_t *cmdbyt = atio_cdb_ptr(atio); nbc = offset >> 9; /* ASSUMING 512 BYTE BLOCKS */ switch (cmdbyt[0]) { case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt; lba = scsi_3btoul(cdb->addr); lba &= 0x1fffff; num_blocks = cdb->length; if (num_blocks == 0) num_blocks = 256; lba += nbc; num_blocks -= nbc; scsi_ulto3b(lba, cdb->addr); cdb->length = num_blocks; break; } case READ_10: case WRITE_10: { struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_ulto4b(lba, cdb->addr); scsi_ulto2b(num_blocks, cdb->length); break; } case READ_12: case WRITE_12: { struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_ulto4b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); break; } case READ_16: case WRITE_16: { struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_u64to8b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); break; } default: return -1; } return (0); } static void ctlfedone(struct cam_periph *periph, union ccb *done_ccb) { struct ctlfe_lun_softc *softc; struct ctlfe_softc *bus_softc; struct ctlfe_cmd_info *cmd_info; struct ccb_accept_tio *atio = NULL; union ctl_io *io = NULL; struct mtx *mtx; cam_status status; KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0, ("CCB in ctlfedone() without CAM_UNLOCKED flag")); #ifdef CTLFE_DEBUG printf("%s: entered, func_code = %#x\n", __func__, done_ccb->ccb_h.func_code); #endif /* * At this point CTL has no known use case for device queue freezes. * In case some SIM think different -- drop its freeze right here. */ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(periph->path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; } softc = (struct ctlfe_lun_softc *)periph->softc; bus_softc = softc->parent_softc; mtx = cam_periph_mtx(periph); mtx_lock(mtx); switch (done_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: { LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); atio = &done_ccb->atio; status = atio->ccb_h.status & CAM_STATUS_MASK; if (status != CAM_CDB_RECVD) { ctlfe_free_ccb(periph, done_ccb); goto out; } resubmit: /* * Allocate a ctl_io, pass it to CTL, and wait for the * datamove or done. */ mtx_unlock(mtx); io = done_ccb->ccb_h.io_ptr; cmd_info = PRIV_INFO(io); ctl_zero_io(io); /* Save pointers on both sides */ PRIV_CCB(io) = done_ccb; PRIV_INFO(io) = cmd_info; done_ccb->ccb_h.io_ptr = io; /* * Only SCSI I/O comes down this path, resets, etc. come * down the immediate notify path below. 
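 * When the SIM sets PIM_EXTLUNS, target_lun carries an encoded 64-bit
 * LUN, so it is passed through CAM_EXTLUN_BYTE_SWIZZLE() and
 * ctl_decode_lun() below; otherwise the plain integer LUN is used
 * as-is.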
*/ io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid = atio->init_id; io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; if (bus_softc->hba_misc & PIM_EXTLUNS) { io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(atio->ccb_h.target_lun)); } else { io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun; } io->scsiio.tag_num = atio->tag_id; switch (atio->tag_action) { case CAM_TAG_ACTION_NONE: io->scsiio.tag_type = CTL_TAG_UNTAGGED; break; case MSG_SIMPLE_TASK: io->scsiio.tag_type = CTL_TAG_SIMPLE; break; case MSG_HEAD_OF_QUEUE_TASK: io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case MSG_ORDERED_TASK: io->scsiio.tag_type = CTL_TAG_ORDERED; break; case MSG_ACA_TASK: io->scsiio.tag_type = CTL_TAG_ACA; break; default: io->scsiio.tag_type = CTL_TAG_UNTAGGED; printf("%s: unhandled tag type %#x!!\n", __func__, atio->tag_action); break; } if (atio->cdb_len > sizeof(io->scsiio.cdb)) { printf("%s: WARNING: CDB len %d > ctl_io space %zd\n", __func__, atio->cdb_len, sizeof(io->scsiio.cdb)); } io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb)); bcopy(atio_cdb_ptr(atio), io->scsiio.cdb, io->scsiio.cdb_len); #ifdef CTLFEDEBUG printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__, io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun, io->scsiio.tag_num, io->scsiio.cdb[0]); #endif ctl_queue(io); return; } case XPT_CONT_TARGET_IO: { int srr = 0; uint32_t srr_off = 0; atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio; io = (union ctl_io *)atio->ccb_h.io_ptr; softc->ctios_sent--; #ifdef CTLFEDEBUG printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n", __func__, atio->tag_id, done_ccb->ccb_h.flags); #endif /* * Handle SRR case were the data pointer is pushed back hack */ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV && done_ccb->csio.msg_ptr != NULL && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED && done_ccb->csio.msg_ptr[1] == 5 && done_ccb->csio.msg_ptr[2] == 0) { srr = 1; srr_off = (done_ccb->csio.msg_ptr[3] << 24) | (done_ccb->csio.msg_ptr[4] << 16) | (done_ccb->csio.msg_ptr[5] << 8) | (done_ccb->csio.msg_ptr[6]); } /* * If we have an SRR and we're still sending data, we * should be able to adjust offsets and cycle again. * It is possible only if offset is from this datamove. */ if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) && srr_off >= io->scsiio.kern_rel_offset && srr_off < io->scsiio.kern_rel_offset + io->scsiio.kern_data_len) { io->scsiio.kern_data_resid = io->scsiio.kern_rel_offset + io->scsiio.kern_data_len - srr_off; io->scsiio.ext_data_filled = srr_off; io->scsiio.io_hdr.status = CTL_STATUS_NONE; io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; xpt_release_ccb(done_ccb); STAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, periph_links.stqe); xpt_schedule(periph, CAM_PRIORITY_NORMAL); break; } /* * If status was being sent, the back end data is now history. * Hack it up and resubmit a new command with the CDB adjusted. * If the SIM does the right thing, all of the resid math * should work. */ if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { xpt_release_ccb(done_ccb); if (ctlfe_adjust_cdb(atio, srr_off) == 0) { done_ccb = (union ccb *)atio; goto resubmit; } /* * Fall through to doom.... */ } if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) && (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; /* * If we were sending status back to the initiator, free up * resources. If we were doing a datamove, call the * datamove done routine. 
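 * For the SRR datamove case above the arithmetic works out as, e.g.
 * (hypothetical numbers): kern_rel_offset 0, kern_data_len 65536,
 * srr_off 8192 gives kern_data_resid = 0 + 65536 - 8192 = 57344 and
 * ext_data_filled = 8192, after which the same datamove is requeued
 * starting from that offset.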
*/ if ((io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { /* * If we asked to send sense data but it wasn't sent, * queue the I/O back to CTL for later REQUEST SENSE. */ if ((done_ccb->ccb_h.flags & CAM_SEND_SENSE) != 0 && (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (done_ccb->ccb_h.status & CAM_SENT_SENSE) == 0 && (io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref)) != NULL) { PRIV_INFO(io) = PRIV_INFO( (union ctl_io *)atio->ccb_h.io_ptr); ctl_queue_sense(atio->ccb_h.io_ptr); atio->ccb_h.io_ptr = io; } /* Abort ATIO if CTIO sending status has failed. */ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { done_ccb->ccb_h.func_code = XPT_ABORT; done_ccb->cab.abort_ccb = (union ccb *)atio; xpt_action(done_ccb); } xpt_release_ccb(done_ccb); ctlfe_requeue_ccb(periph, (union ccb *)atio, /* unlock */1); return; } else { struct ctlfe_cmd_info *cmd_info; struct ccb_scsiio *csio; csio = &done_ccb->csio; cmd_info = PRIV_INFO(io); io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; /* * Translate CAM status to CTL status. Success * does not change the overall, ctl_io status. In * that case we just set port_status to 0. If we * have a failure, though, set a data phase error * for the overall ctl_io. */ switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_REQ_CMP: io->scsiio.kern_data_resid -= csio->dxfer_len - csio->resid; io->io_hdr.port_status = 0; break; default: /* * XXX KDM we probably need to figure out a * standard set of errors that the SIM * drivers should return in the event of a * data transfer failure. A data phase * error will at least point the user to a * data transfer error of some sort. * Hopefully the SIM printed out some * additional information to give the user * a clue what happened. */ io->io_hdr.port_status = 0xbad1; ctl_set_data_phase_error(&io->scsiio); /* * XXX KDM figure out residual. */ break; } /* * If we had to break this S/G list into multiple * pieces, figure out where we are in the list, and * continue sending pieces if necessary. */ if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) && io->io_hdr.port_status == 0 && csio->resid == 0) { ccb_flags flags; uint8_t *data_ptr; uint32_t dxfer_len; flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT| CAM_TAG_ACTION_VALID); ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len, &csio->sglist_cnt); if (((flags & CAM_SEND_STATUS) == 0) && (dxfer_len == 0)) { printf("%s: tag %04x no status or " "len cdb = %02x\n", __func__, atio->tag_id, atio_cdb_ptr(atio)[0]); printf("%s: tag %04x io status %#x\n", __func__, atio->tag_id, io->io_hdr.status); } cam_fill_ctio(csio, /*retries*/ 2, ctlfedone, flags, (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, atio->tag_id, atio->init_id, 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, CTLFE_TIMEOUT * 1000); csio->ccb_h.flags |= CAM_UNLOCKED; csio->resid = 0; csio->ccb_h.ccb_atio = atio; io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; softc->ctios_sent++; mtx_unlock(mtx); xpt_action((union ccb *)csio); } else { /* * Release the CTIO. The ATIO will be sent back * down to the SIM once we send status. 
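 * The piecewise branch above, by contrast, refills and resends the
 * same CTIO: ctlfedata() resumes from the saved cur_transfer_index/
 * cur_transfer_off and ships another chunk of at most
 * bus_softc->maxio bytes before be_move_done() is finally called here.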
*/ xpt_release_ccb(done_ccb); mtx_unlock(mtx); /* Call the backend move done callback */ io->scsiio.be_move_done(io); } return; } break; } case XPT_IMMEDIATE_NOTIFY: { union ctl_io *io; struct ccb_immediate_notify *inot; int send_ctl_io; LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); inot = &done_ccb->cin1; io = done_ccb->ccb_h.io_ptr; ctl_zero_io(io); send_ctl_io = 1; io->io_hdr.io_type = CTL_IO_TASK; PRIV_CCB(io) = done_ccb; inot->ccb_h.io_ptr = io; io->io_hdr.nexus.initid = inot->initiator_id; io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; if (bus_softc->hba_misc & PIM_EXTLUNS) { io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(inot->ccb_h.target_lun)); } else { io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun; } /* XXX KDM should this be the tag_id? */ io->taskio.tag_num = inot->seq_id; status = inot->ccb_h.status & CAM_STATUS_MASK; switch (status) { case CAM_SCSI_BUS_RESET: io->taskio.task_action = CTL_TASK_BUS_RESET; break; case CAM_BDR_SENT: io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case CAM_MESSAGE_RECV: switch (inot->arg) { case MSG_ABORT_TASK_SET: io->taskio.task_action = CTL_TASK_ABORT_TASK_SET; break; case MSG_TARGET_RESET: io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case MSG_ABORT_TASK: io->taskio.task_action = CTL_TASK_ABORT_TASK; break; case MSG_LOGICAL_UNIT_RESET: io->taskio.task_action = CTL_TASK_LUN_RESET; break; case MSG_CLEAR_TASK_SET: io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET; break; case MSG_CLEAR_ACA: io->taskio.task_action = CTL_TASK_CLEAR_ACA; break; case MSG_QUERY_TASK: io->taskio.task_action = CTL_TASK_QUERY_TASK; break; case MSG_QUERY_TASK_SET: io->taskio.task_action = CTL_TASK_QUERY_TASK_SET; break; case MSG_QUERY_ASYNC_EVENT: io->taskio.task_action = CTL_TASK_QUERY_ASYNC_EVENT; break; case MSG_NOOP: send_ctl_io = 0; break; default: xpt_print(periph->path, "%s: unsupported INOT message 0x%x\n", __func__, inot->arg); send_ctl_io = 0; break; } break; default: xpt_print(periph->path, "%s: unsupported INOT status 0x%x\n", __func__, status); /* FALLTHROUGH */ case CAM_REQ_ABORTED: case CAM_REQ_INVALID: case CAM_DEV_NOT_THERE: case CAM_PROVIDE_FAIL: ctlfe_free_ccb(periph, done_ccb); goto out; } if (send_ctl_io != 0) { ctl_queue(io); } else { done_ccb->ccb_h.status = CAM_REQ_INPROG; done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; xpt_action(done_ccb); } break; } case XPT_NOTIFY_ACKNOWLEDGE: /* Queue this back down to the SIM as an immediate notify. */ done_ccb->ccb_h.status = CAM_REQ_INPROG; done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; ctlfe_requeue_ccb(periph, done_ccb, /* unlock */1); return; case XPT_SET_SIM_KNOB: case XPT_GET_SIM_KNOB: case XPT_GET_SIM_KNOB_OLD: break; default: panic("%s: unexpected CCB type %#x", __func__, done_ccb->ccb_h.func_code); break; } out: mtx_unlock(mtx); } static void ctlfe_onoffline(void *arg, int online) { struct ctlfe_softc *bus_softc = arg; union ccb *ccb; cam_status status; struct cam_path *path; int set_wwnn = 0; status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path!\n", __func__); return; } ccb = xpt_alloc_ccb(); xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); ccb->ccb_h.func_code = XPT_GET_SIM_KNOB; xpt_action(ccb); /* Check whether we should change WWNs. 
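 * Precedence: a nonzero wwnn/wwpn configured on the CTL port wins and
 * is pushed down to the SIM (set_wwnn), otherwise whatever the SIM
 * reported is recorded in CTL via ctl_port_set_wwns().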
*/ if (online != 0) { if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){ printf("%s: %s current WWNN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwnn); printf("%s: %s current WWPN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwpn); /* * If the user has specified a WWNN/WWPN, send them * down to the SIM. Otherwise, record what the SIM * has reported. */ if (bus_softc->port.wwnn != 0 && bus_softc->port.wwnn != ccb->knob.xport_specific.fc.wwnn) { ccb->knob.xport_specific.fc.wwnn = bus_softc->port.wwnn; set_wwnn = 1; } else { ctl_port_set_wwns(&bus_softc->port, true, ccb->knob.xport_specific.fc.wwnn, false, 0); } if (bus_softc->port.wwpn != 0 && bus_softc->port.wwpn != ccb->knob.xport_specific.fc.wwpn) { ccb->knob.xport_specific.fc.wwpn = bus_softc->port.wwpn; set_wwnn = 1; } else { ctl_port_set_wwns(&bus_softc->port, false, 0, true, ccb->knob.xport_specific.fc.wwpn); } } else { printf("%s: %s has no valid WWNN/WWPN\n", __func__, bus_softc->port_name); if (bus_softc->port.wwnn != 0) { ccb->knob.xport_specific.fc.wwnn = bus_softc->port.wwnn; set_wwnn = 1; } if (bus_softc->port.wwpn != 0) { ccb->knob.xport_specific.fc.wwpn = bus_softc->port.wwpn; set_wwnn = 1; } } } if (set_wwnn) { ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; ccb->knob.xport_specific.valid = KNOB_VALID_ADDRESS; xpt_action(ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { printf("%s: %s (path id %d) failed set WWNs: %#x\n", __func__, bus_softc->port_name, bus_softc->path_id, ccb->ccb_h.status); } else { printf("%s: %s new WWNN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwnn); printf("%s: %s new WWPN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwpn); } } /* Check whether we should change role. */ if ((ccb->knob.xport_specific.valid & KNOB_VALID_ROLE) == 0 || ((online != 0) ^ ((ccb->knob.xport_specific.fc.role & KNOB_ROLE_TARGET) != 0)) != 0) { ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; if (online) ccb->knob.xport_specific.fc.role |= KNOB_ROLE_TARGET; else ccb->knob.xport_specific.fc.role &= ~KNOB_ROLE_TARGET; xpt_action(ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { printf("%s: %s (path id %d) failed %s target role: %#x\n", __func__, bus_softc->port_name, bus_softc->path_id, online ? "enable" : "disable", ccb->ccb_h.status); } else { printf("%s: %s (path id %d) target role %s succeeded\n", __func__, bus_softc->port_name, bus_softc->path_id, online ? "enable" : "disable"); } } xpt_free_path(path); xpt_free_ccb(ccb); } static void ctlfe_online(void *arg) { struct ctlfe_softc *bus_softc; struct cam_path *path; cam_status status; struct ctlfe_lun_softc *lun_softc; struct cam_periph *periph; bus_softc = (struct ctlfe_softc *)arg; /* * Create the wildcard LUN before bringing the port online. */ status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path for wildcard periph\n", __func__); return; } lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, M_WAITOK | M_ZERO); xpt_path_lock(path); periph = cam_periph_find(path, "ctl"); if (periph != NULL) { /* We've already got a periph, no need to alloc a new one. 
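 * (Presumably the port was already brought online once and the
 * existing wildcard periph is simply reused, making the duplicate
 * request a no-op.)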
*/ xpt_path_unlock(path); xpt_free_path(path); free(lun_softc, M_CTLFE); return; } lun_softc->parent_softc = bus_softc; lun_softc->flags |= CTLFE_LUN_WILDCARD; status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate, ctlfecleanup, ctlfestart, "ctl", CAM_PERIPH_BIO, path, ctlfeasync, 0, lun_softc); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("%s: CAM error %s (%#x) returned from " "cam_periph_alloc()\n", __func__, (entry != NULL) ? entry->status_text : "Unknown", status); free(lun_softc, M_CTLFE); } xpt_path_unlock(path); ctlfe_onoffline(arg, /*online*/ 1); xpt_free_path(path); } static void ctlfe_offline(void *arg) { struct ctlfe_softc *bus_softc; struct cam_path *path; cam_status status; struct cam_periph *periph; bus_softc = (struct ctlfe_softc *)arg; ctlfe_onoffline(arg, /*online*/ 0); /* * Disable the wildcard LUN for this port now that we have taken * the port offline. */ status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path for wildcard periph\n", __func__); return; } xpt_path_lock(path); if ((periph = cam_periph_find(path, "ctl")) != NULL) cam_periph_invalidate(periph); xpt_path_unlock(path); xpt_free_path(path); } /* * This will get called to enable a LUN on every bus that is attached to * CTL. So we only need to create a path/periph for this particular bus. */ static int ctlfe_lun_enable(void *arg, int lun_id) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; struct cam_path *path; struct cam_periph *periph; cam_status status; bus_softc = (struct ctlfe_softc *)arg; if (bus_softc->hba_misc & PIM_EXTLUNS) lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id)); status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, bus_softc->target_id, lun_id); /* XXX KDM need some way to return status to CTL here? */ if (status != CAM_REQ_CMP) { printf("%s: could not create path, status %#x\n", __func__, status); return (1); } softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO); xpt_path_lock(path); periph = cam_periph_find(path, "ctl"); if (periph != NULL) { /* We've already got a periph, no need to alloc a new one. */ xpt_path_unlock(path); xpt_free_path(path); free(softc, M_CTLFE); return (0); } softc->parent_softc = bus_softc; status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate, ctlfecleanup, ctlfestart, "ctl", CAM_PERIPH_BIO, path, ctlfeasync, 0, softc); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("%s: CAM error %s (%#x) returned from " "cam_periph_alloc()\n", __func__, (entry != NULL) ? entry->status_text : "Unknown", status); free(softc, M_CTLFE); } xpt_path_unlock(path); xpt_free_path(path); return (0); } /* * This will get called when the user removes a LUN to disable that LUN * on every bus that is attached to CTL. 
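 * The teardown below takes a reference on the matching periph while
 * still holding lun_softc_mtx, so the periph cannot disappear between
 * dropping the list lock and invalidating it under the periph lock.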
*/ static int ctlfe_lun_disable(void *arg, int lun_id) { struct ctlfe_softc *softc; struct ctlfe_lun_softc *lun_softc; softc = (struct ctlfe_softc *)arg; if (softc->hba_misc & PIM_EXTLUNS) lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id)); mtx_lock(&softc->lun_softc_mtx); STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) { struct cam_path *path; path = lun_softc->periph->path; if ((xpt_path_target_id(path) == softc->target_id) && (xpt_path_lun_id(path) == lun_id)) { break; } } if (lun_softc == NULL) { mtx_unlock(&softc->lun_softc_mtx); printf("%s: can't find lun %d\n", __func__, lun_id); return (1); } cam_periph_acquire(lun_softc->periph); mtx_unlock(&softc->lun_softc_mtx); cam_periph_lock(lun_softc->periph); cam_periph_invalidate(lun_softc->periph); cam_periph_unlock(lun_softc->periph); cam_periph_release(lun_softc->periph); return (0); } static void ctlfe_dump_sim(struct cam_sim *sim) { printf("%s%d: max dev openings: %d, max tagged dev openings: %d\n", sim->sim_name, sim->unit_number, sim->max_dev_openings, sim->max_tagged_dev_openings); } /* * Assumes that the SIM lock is held. */ static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc) { struct cam_periph *periph = softc->periph; struct ccb_hdr *hdr; struct ccb_getdevstats cgds; int num_items; xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgds.ccb_h.func_code = XPT_GDEV_STATS; xpt_action((union ccb *)&cgds); if ((cgds.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { xpt_print(periph->path, "devq: openings %d, active %d, " "allocated %d, queued %d, held %d\n", cgds.dev_openings, cgds.dev_active, cgds.allocated, cgds.queued, cgds.held); } num_items = 0; STAILQ_FOREACH(hdr, &softc->work_queue, periph_links.stqe) { union ctl_io *io = hdr->io_ptr; num_items++; /* * Only regular SCSI I/O is put on the work * queue, so we can print sense here. There may be no * sense if it's no the queue for a DMA, but this serves to * print out the CCB as well. * * XXX KDM switch this over to scsi_sense_print() when * CTL is merged in with CAM. */ ctl_io_error_print(io, NULL); /* * Print DMA status if we are DMA_QUEUED. */ if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) { xpt_print(periph->path, "Total %u, Current %u, Resid %u\n", io->scsiio.kern_total_len, io->scsiio.kern_data_len, io->scsiio.kern_data_resid); } } xpt_print(periph->path, "%d requests waiting for CCBs\n", num_items); xpt_print(periph->path, "%d CTIOs outstanding\n", softc->ctios_sent); } /* * Datamove/done routine called by CTL. Put ourselves on the queue to * receive a CCB from CAM so we can queue the continue I/O request down * to the adapter. 
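 * CTL_FLAG_DMA_QUEUED is set here and, if the overall status is
 * already known, CTL_FLAG_STATUS_QUEUED as well, which lets
 * ctlfestart() set CAM_SEND_STATUS on the final data CTIO (when the
 * transfer is not piecewise and the status is success) instead of
 * sending a separate status-only CTIO.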
*/ static void ctlfe_datamove(union ctl_io *io) { union ccb *ccb; struct cam_periph *periph; struct ctlfe_lun_softc *softc; KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type)); io->scsiio.ext_data_filled = 0; ccb = PRIV_CCB(io); periph = xpt_path_periph(ccb->ccb_h.path); cam_periph_lock(periph); softc = (struct ctlfe_lun_softc *)periph->softc; io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; STAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, periph_links.stqe); xpt_schedule(periph, CAM_PRIORITY_NORMAL); cam_periph_unlock(periph); } static void ctlfe_done(union ctl_io *io) { union ccb *ccb; struct cam_periph *periph; struct ctlfe_lun_softc *softc; ccb = PRIV_CCB(io); periph = xpt_path_periph(ccb->ccb_h.path); cam_periph_lock(periph); softc = (struct ctlfe_lun_softc *)periph->softc; if (io->io_hdr.io_type == CTL_IO_TASK) { /* * Send the notify acknowledge down to the SIM, to let it * know we processed the task management command. */ ccb->ccb_h.status = CAM_REQ_INPROG; ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; switch (io->taskio.task_status) { case CTL_TASK_FUNCTION_COMPLETE: ccb->cna2.arg = CAM_RSP_TMF_COMPLETE; break; case CTL_TASK_FUNCTION_SUCCEEDED: ccb->cna2.arg = CAM_RSP_TMF_SUCCEEDED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_FUNCTION_REJECTED: ccb->cna2.arg = CAM_RSP_TMF_REJECTED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_LUN_DOES_NOT_EXIST: ccb->cna2.arg = CAM_RSP_TMF_INCORRECT_LUN; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_FUNCTION_NOT_SUPPORTED: ccb->cna2.arg = CAM_RSP_TMF_FAILED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; } ccb->cna2.arg |= scsi_3btoul(io->taskio.task_resp) << 8; xpt_action(ccb); } else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) { ctlfe_requeue_ccb(periph, ccb, /* unlock */1); return; } else { io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; STAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, periph_links.stqe); xpt_schedule(periph, CAM_PRIORITY_NORMAL); } cam_periph_unlock(periph); } static void ctlfe_dump(void) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *lun_softc; STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) { ctlfe_dump_sim(bus_softc->sim); STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) ctlfe_dump_queue(lun_softc); } } Index: head/sys/cam/mmc/mmc_da.c =================================================================== --- head/sys/cam/mmc/mmc_da.c (revision 328917) +++ head/sys/cam/mmc/mmc_da.c (revision 328918) @@ -1,1429 +1,1429 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Bernd Walter * Copyright (c) 2006 M. Warner Losh * Copyright (c) 2009 Alexander Motin * Copyright (c) 2015-2017 Ilya Bakulin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Some code derived from the sys/dev/mmc and sys/cam/ata * Thanks to Warner Losh , Alexander Motin * Bernd Walter , and other authors. */ #include __FBSDID("$FreeBSD$"); //#include "opt_sdda.h" #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for PRIu64 */ #endif /* _KERNEL */ #ifndef _KERNEL #include #include #endif /* _KERNEL */ #include #include #include #include #include #include #include #include #include #include #include #include /* geometry translation */ #ifdef _KERNEL typedef enum { SDDA_FLAG_OPEN = 0x0002, SDDA_FLAG_DIRTY = 0x0004 } sdda_flags; typedef enum { SDDA_STATE_INIT, SDDA_STATE_INVALID, SDDA_STATE_NORMAL } sdda_state; struct sdda_softc { struct bio_queue_head bio_queue; int outstanding_cmds; /* Number of active commands */ int refcount; /* Active xpt_action() calls */ sdda_state state; sdda_flags flags; struct mmc_data *mmcdata; // sdda_quirks quirks; struct task start_init_task; struct disk *disk; uint32_t raw_csd[4]; uint8_t raw_ext_csd[512]; /* MMC only? 
*/ struct mmc_csd csd; struct mmc_cid cid; struct mmc_scr scr; /* Calculated from CSD */ uint64_t sector_count; uint64_t mediasize; /* Calculated from CID */ char card_id_string[64];/* Formatted CID info (serial, MFG, etc) */ char card_sn_string[16];/* Formatted serial # for disk->d_ident */ /* Determined from CSD + is highspeed card*/ uint32_t card_f_max; }; #define ccb_bp ppriv_ptr1 static disk_strategy_t sddastrategy; static periph_init_t sddainit; static void sddaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static periph_ctor_t sddaregister; static periph_dtor_t sddacleanup; static periph_start_t sddastart; static periph_oninv_t sddaoninvalidate; static void sddadone(struct cam_periph *periph, union ccb *done_ccb); static int sddaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static uint16_t get_rca(struct cam_periph *periph); static cam_status sdda_hook_into_geom(struct cam_periph *periph); static void sdda_start_init(void *context, union ccb *start_ccb); static void sdda_start_init_task(void *context, int pending); static struct periph_driver sddadriver = { sddainit, "sdda", TAILQ_HEAD_INITIALIZER(sddadriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(sdda, sddadriver); static MALLOC_DEFINE(M_SDDA, "sd_da", "sd_da buffers"); static const int exp[8] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000 }; static const int mant[16] = { 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80 }; static const int cur_min[8] = { 500, 1000, 5000, 10000, 25000, 35000, 60000, 100000 }; static const int cur_max[8] = { 1000, 5000, 10000, 25000, 35000, 45000, 800000, 200000 }; static uint16_t get_rca(struct cam_periph *periph) { return periph->path->device->mmc_ident_data.card_rca; } static uint32_t mmc_get_bits(uint32_t *bits, int bit_len, int start, int size) { const int i = (bit_len / 32) - (start / 32) - 1; const int shift = start & 31; uint32_t retval = bits[i] >> shift; if (size + shift > 32) retval |= bits[i - 1] << (32 - shift); return (retval & ((1llu << size) - 1)); } static void mmc_decode_csd_sd(uint32_t *raw_csd, struct mmc_csd *csd) { int v; int m; int e; memset(csd, 0, sizeof(*csd)); csd->csd_structure = v = mmc_get_bits(raw_csd, 128, 126, 2); if (v == 0) { m = mmc_get_bits(raw_csd, 128, 115, 4); e = mmc_get_bits(raw_csd, 128, 112, 3); csd->tacc = (exp[e] * mant[m] + 9) / 10; csd->nsac = mmc_get_bits(raw_csd, 128, 104, 8) * 100; m = mmc_get_bits(raw_csd, 128, 99, 4); e = mmc_get_bits(raw_csd, 128, 96, 3); csd->tran_speed = exp[e] * 10000 * mant[m]; csd->ccc = mmc_get_bits(raw_csd, 128, 84, 12); csd->read_bl_len = 1 << mmc_get_bits(raw_csd, 128, 80, 4); csd->read_bl_partial = mmc_get_bits(raw_csd, 128, 79, 1); csd->write_blk_misalign = mmc_get_bits(raw_csd, 128, 78, 1); csd->read_blk_misalign = mmc_get_bits(raw_csd, 128, 77, 1); csd->dsr_imp = mmc_get_bits(raw_csd, 128, 76, 1); csd->vdd_r_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 59, 3)]; csd->vdd_r_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 56, 3)]; csd->vdd_w_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 53, 3)]; csd->vdd_w_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 50, 3)]; m = mmc_get_bits(raw_csd, 128, 62, 12); e = mmc_get_bits(raw_csd, 128, 47, 3); csd->capacity = ((1 + m) << (e + 2)) * csd->read_bl_len; csd->erase_blk_en = mmc_get_bits(raw_csd, 128, 46, 1); csd->erase_sector = mmc_get_bits(raw_csd, 128, 39, 7) + 1; csd->wp_grp_size = mmc_get_bits(raw_csd, 128, 32, 7); csd->wp_grp_enable = mmc_get_bits(raw_csd, 128, 31, 1); csd->r2w_factor = 1 << 
mmc_get_bits(raw_csd, 128, 26, 3); csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4); csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1); } else if (v == 1) { m = mmc_get_bits(raw_csd, 128, 115, 4); e = mmc_get_bits(raw_csd, 128, 112, 3); csd->tacc = (exp[e] * mant[m] + 9) / 10; csd->nsac = mmc_get_bits(raw_csd, 128, 104, 8) * 100; m = mmc_get_bits(raw_csd, 128, 99, 4); e = mmc_get_bits(raw_csd, 128, 96, 3); csd->tran_speed = exp[e] * 10000 * mant[m]; csd->ccc = mmc_get_bits(raw_csd, 128, 84, 12); csd->read_bl_len = 1 << mmc_get_bits(raw_csd, 128, 80, 4); csd->read_bl_partial = mmc_get_bits(raw_csd, 128, 79, 1); csd->write_blk_misalign = mmc_get_bits(raw_csd, 128, 78, 1); csd->read_blk_misalign = mmc_get_bits(raw_csd, 128, 77, 1); csd->dsr_imp = mmc_get_bits(raw_csd, 128, 76, 1); csd->capacity = ((uint64_t)mmc_get_bits(raw_csd, 128, 48, 22) + 1) * 512 * 1024; csd->erase_blk_en = mmc_get_bits(raw_csd, 128, 46, 1); csd->erase_sector = mmc_get_bits(raw_csd, 128, 39, 7) + 1; csd->wp_grp_size = mmc_get_bits(raw_csd, 128, 32, 7); csd->wp_grp_enable = mmc_get_bits(raw_csd, 128, 31, 1); csd->r2w_factor = 1 << mmc_get_bits(raw_csd, 128, 26, 3); csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4); csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1); } else panic("unknown SD CSD version"); } static void mmc_decode_csd_mmc(uint32_t *raw_csd, struct mmc_csd *csd) { int m; int e; memset(csd, 0, sizeof(*csd)); csd->csd_structure = mmc_get_bits(raw_csd, 128, 126, 2); csd->spec_vers = mmc_get_bits(raw_csd, 128, 122, 4); m = mmc_get_bits(raw_csd, 128, 115, 4); e = mmc_get_bits(raw_csd, 128, 112, 3); csd->tacc = exp[e] * mant[m] + 9 / 10; csd->nsac = mmc_get_bits(raw_csd, 128, 104, 8) * 100; m = mmc_get_bits(raw_csd, 128, 99, 4); e = mmc_get_bits(raw_csd, 128, 96, 3); csd->tran_speed = exp[e] * 10000 * mant[m]; csd->ccc = mmc_get_bits(raw_csd, 128, 84, 12); csd->read_bl_len = 1 << mmc_get_bits(raw_csd, 128, 80, 4); csd->read_bl_partial = mmc_get_bits(raw_csd, 128, 79, 1); csd->write_blk_misalign = mmc_get_bits(raw_csd, 128, 78, 1); csd->read_blk_misalign = mmc_get_bits(raw_csd, 128, 77, 1); csd->dsr_imp = mmc_get_bits(raw_csd, 128, 76, 1); csd->vdd_r_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 59, 3)]; csd->vdd_r_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 56, 3)]; csd->vdd_w_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 53, 3)]; csd->vdd_w_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 50, 3)]; m = mmc_get_bits(raw_csd, 128, 62, 12); e = mmc_get_bits(raw_csd, 128, 47, 3); csd->capacity = ((1 + m) << (e + 2)) * csd->read_bl_len; csd->erase_blk_en = 0; csd->erase_sector = (mmc_get_bits(raw_csd, 128, 42, 5) + 1) * (mmc_get_bits(raw_csd, 128, 37, 5) + 1); csd->wp_grp_size = mmc_get_bits(raw_csd, 128, 32, 5); csd->wp_grp_enable = mmc_get_bits(raw_csd, 128, 31, 1); csd->r2w_factor = 1 << mmc_get_bits(raw_csd, 128, 26, 3); csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4); csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1); } static void mmc_decode_cid_sd(uint32_t *raw_cid, struct mmc_cid *cid) { int i; /* There's no version info, so we take it on faith */ memset(cid, 0, sizeof(*cid)); cid->mid = mmc_get_bits(raw_cid, 128, 120, 8); cid->oid = mmc_get_bits(raw_cid, 128, 104, 16); for (i = 0; i < 5; i++) cid->pnm[i] = mmc_get_bits(raw_cid, 128, 96 - i * 8, 8); cid->pnm[5] = 0; cid->prv = mmc_get_bits(raw_cid, 128, 56, 8); cid->psn = mmc_get_bits(raw_cid, 128, 24, 32); cid->mdt_year = mmc_get_bits(raw_cid, 128, 12, 8) + 2000; cid->mdt_month = mmc_get_bits(raw_cid, 128, 
8, 4); } static void mmc_decode_cid_mmc(uint32_t *raw_cid, struct mmc_cid *cid) { int i; /* There's no version info, so we take it on faith */ memset(cid, 0, sizeof(*cid)); cid->mid = mmc_get_bits(raw_cid, 128, 120, 8); cid->oid = mmc_get_bits(raw_cid, 128, 104, 8); for (i = 0; i < 6; i++) cid->pnm[i] = mmc_get_bits(raw_cid, 128, 96 - i * 8, 8); cid->pnm[6] = 0; cid->prv = mmc_get_bits(raw_cid, 128, 48, 8); cid->psn = mmc_get_bits(raw_cid, 128, 16, 32); cid->mdt_month = mmc_get_bits(raw_cid, 128, 12, 4); cid->mdt_year = mmc_get_bits(raw_cid, 128, 8, 4) + 1997; } static void mmc_format_card_id_string(struct sdda_softc *sc, struct mmc_params *mmcp) { char oidstr[8]; uint8_t c1; uint8_t c2; /* * Format a card ID string for use by the mmcsd driver, it's what * appears between the <> in the following: * mmcsd0: 968MB at mmc0 * 22.5MHz/4bit/128-block * * Also format just the card serial number, which the mmcsd driver will * use as the disk->d_ident string. * * The card_id_string in mmc_ivars is currently allocated as 64 bytes, * and our max formatted length is currently 55 bytes if every field * contains the largest value. * * Sometimes the oid is two printable ascii chars; when it's not, * format it as 0xnnnn instead. */ c1 = (sc->cid.oid >> 8) & 0x0ff; c2 = sc->cid.oid & 0x0ff; if (c1 > 0x1f && c1 < 0x7f && c2 > 0x1f && c2 < 0x7f) snprintf(oidstr, sizeof(oidstr), "%c%c", c1, c2); else snprintf(oidstr, sizeof(oidstr), "0x%04x", sc->cid.oid); snprintf(sc->card_sn_string, sizeof(sc->card_sn_string), "%08X", sc->cid.psn); snprintf(sc->card_id_string, sizeof(sc->card_id_string), "%s%s %s %d.%d SN %08X MFG %02d/%04d by %d %s", mmcp->card_features & CARD_FEATURE_MMC ? "MMC" : "SD", mmcp->card_features & CARD_FEATURE_SDHC ? "HC" : "", sc->cid.pnm, sc->cid.prv >> 4, sc->cid.prv & 0x0f, sc->cid.psn, sc->cid.mdt_month, sc->cid.mdt_year, sc->cid.mid, oidstr); } static int sddaopen(struct disk *dp) { struct cam_periph *periph; struct sdda_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; - if (cam_periph_acquire(periph) != CAM_REQ_CMP) { + if (cam_periph_acquire(periph) != 0) { return(ENXIO); } cam_periph_lock(periph); if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddaopen\n")); softc = (struct sdda_softc *)periph->softc; softc->flags |= SDDA_FLAG_OPEN; cam_periph_unhold(periph); cam_periph_unlock(periph); return (0); } static int sddaclose(struct disk *dp) { struct cam_periph *periph; struct sdda_softc *softc; // union ccb *ccb; // int error; periph = (struct cam_periph *)dp->d_drv1; softc = (struct sdda_softc *)periph->softc; softc->flags &= ~SDDA_FLAG_OPEN; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddaclose\n")); while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "sddaclose", 1); cam_periph_unlock(periph); cam_periph_release(periph); return (0); } static void sddaschedule(struct cam_periph *periph) { struct sdda_softc *softc = (struct sdda_softc *)periph->softc; /* Check if we have more work to do. */ if (bioq_first(&softc->bio_queue)) { xpt_schedule(periph, CAM_PRIORITY_NORMAL); } } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. 
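 * If the periph has been invalidated the bio is failed immediately
 * with ENXIO; otherwise it is disksorted into bio_queue and
 * sddaschedule() asks CAM for a CCB to service the queue.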
*/ static void sddastrategy(struct bio *bp) { struct cam_periph *periph; struct sdda_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; softc = (struct sdda_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddastrategy(%p)\n", bp)); /* * If the device has been made invalid, error out */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } /* * Place it in the queue of disk activities for this disk */ bioq_disksort(&softc->bio_queue, bp); /* * Schedule ourselves for performing the work. */ sddaschedule(periph); cam_periph_unlock(periph); return; } static void sddainit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, sddaasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("sdda: Failed to attach master async callback " "due to status 0x%x!\n", status); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void sddadiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddadiskgonecb\n")); cam_periph_release(periph); } static void sddaoninvalidate(struct cam_periph *periph) { struct sdda_softc *softc; softc = (struct sdda_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddaoninvalidate\n")); /* * De-register any async callbacks. */ xpt_register_async(0, sddaasync, periph, periph->path); /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("bioq_flush start\n")); bioq_flush(&softc->bio_queue, NULL, ENXIO); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("bioq_flush end\n")); disk_gone(softc->disk); } static void sddacleanup(struct cam_periph *periph) { struct sdda_softc *softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddacleanup\n")); softc = (struct sdda_softc *)periph->softc; cam_periph_unlock(periph); disk_destroy(softc->disk); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void sddaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct ccb_getdev cgd; struct cam_periph *periph; struct sdda_softc *softc; periph = (struct cam_periph *)callback_arg; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("sddaasync(code=%d)\n", code)); switch (code) { case AC_FOUND_DEVICE: { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("=> AC_FOUND_DEVICE\n")); struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_MMCSD) break; if (!(path->device->mmc_ident_data.card_features & CARD_FEATURE_MEMORY)) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("No memory on the card!\n")); break; } /* * Allocate a peripheral instance for * this device and start the probe * process. 
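 * cam_periph_alloc() will invoke sddaregister() with the
 * ccb_getdev below as its constructor argument.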
*/ status = cam_periph_alloc(sddaregister, sddaoninvalidate, sddacleanup, sddastart, "sdda", CAM_PERIPH_BIO, path, sddaasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("sddaasync: Unable to attach to new device " "due to status 0x%x\n", status); break; } case AC_GETDEV_CHANGED: { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("=> AC_GETDEV_CHANGED\n")); softc = (struct sdda_softc *)periph->softc; xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); cam_periph_async(periph, code, path, arg); break; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("=> AC_ADVINFO_CHANGED\n")); buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct sdda_softc *softc; softc = periph->softc; disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); } break; } case AC_SENT_BDR: case AC_BUS_RESET: { CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("AC_BUS_RESET")); } default: CAM_DEBUG(path, CAM_DEBUG_TRACE, ("=> default?!\n")); cam_periph_async(periph, code, path, arg); break; } } static int sddagetattr(struct bio *bp) { int ret; struct cam_periph *periph; periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, periph->path); cam_periph_unlock(periph); if (ret == 0) bp->bio_completed = bp->bio_length; return ret; } static cam_status sddaregister(struct cam_periph *periph, void *arg) { struct sdda_softc *softc; // struct ccb_pathinq cpi; struct ccb_getdev *cgd; // char announce_buf[80], buf1[32]; // caddr_t match; union ccb *request_ccb; /* CCB representing the probe request */ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddaregister\n")); cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("sddaregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct sdda_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc == NULL) { printf("sddaregister: Unable to probe new device. 
" "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } bioq_init(&softc->bio_queue); softc->state = SDDA_STATE_INIT; softc->mmcdata = (struct mmc_data *) malloc(sizeof(struct mmc_data), M_DEVBUF, M_NOWAIT|M_ZERO); periph->softc = softc; request_ccb = (union ccb*) arg; xpt_schedule(periph, CAM_PRIORITY_XPT); TASK_INIT(&softc->start_init_task, 0, sdda_start_init_task, periph); taskqueue_enqueue(taskqueue_thread, &softc->start_init_task); return (CAM_REQ_CMP); } static cam_status sdda_hook_into_geom(struct cam_periph *periph) { struct sdda_softc *softc; struct ccb_pathinq cpi; struct ccb_getdev cgd; u_int maxio; softc = (struct sdda_softc*) periph->softc; xpt_path_inq(&cpi, periph->path); bzero(&cgd, sizeof(cgd)); xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); /* * Register this media as a disk */ (void)cam_periph_hold(periph, PRIBIO); cam_periph_unlock(periph); softc->disk = disk_alloc(); softc->disk->d_rotation_rate = 0; softc->disk->d_devstat = devstat_new_entry(periph->periph_name, periph->unit_number, 512, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_DISK); softc->disk->d_open = sddaopen; softc->disk->d_close = sddaclose; softc->disk->d_strategy = sddastrategy; softc->disk->d_getattr = sddagetattr; // softc->disk->d_dump = sddadump; softc->disk->d_gone = sddadiskgonecb; softc->disk->d_name = "sdda"; softc->disk->d_drv1 = periph; maxio = cpi.maxio; /* Honor max I/O size of SIM */ if (maxio == 0) maxio = DFLTPHYS; /* traditional default */ else if (maxio > MAXPHYS) maxio = MAXPHYS; /* for safety */ softc->disk->d_maxsize = maxio; softc->disk->d_unit = periph->unit_number; softc->disk->d_flags = DISKFLAG_CANDELETE; strlcpy(softc->disk->d_descr, softc->card_id_string, MIN(sizeof(softc->disk->d_descr), sizeof(softc->card_id_string))); strlcpy(softc->disk->d_ident, softc->card_sn_string, MIN(sizeof(softc->disk->d_ident), sizeof(softc->card_sn_string))); softc->disk->d_hba_vendor = cpi.hba_vendor; softc->disk->d_hba_device = cpi.hba_device; softc->disk->d_hba_subvendor = cpi.hba_subvendor; softc->disk->d_hba_subdevice = cpi.hba_subdevice; softc->disk->d_sectorsize = 512; softc->disk->d_mediasize = softc->mediasize; softc->disk->d_stripesize = 0; softc->disk->d_fwsectors = 0; softc->disk->d_fwheads = 0; /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * sddadiskgonecb()) telling us that our provider has been freed. */ - if (cam_periph_acquire(periph) != CAM_REQ_CMP) { + if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); cam_periph_unhold(periph); xpt_announce_periph(periph, softc->card_id_string); /* * Add async callbacks for bus reset and * bus device reset calls. I don't bother * checking if this fails as, in most cases, * the system will function just fine without * them and the only alternative would be to * not attach the device on failure. 
*/ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_GETDEV_CHANGED | AC_ADVINFO_CHANGED, sddaasync, periph, periph->path); return(CAM_REQ_CMP); } static int mmc_exec_app_cmd(struct cam_periph *periph, union ccb *ccb, struct mmc_command *cmd) { int err; /* Send APP_CMD first */ memset(&ccb->mmcio.cmd, 0, sizeof(struct mmc_command)); memset(&ccb->mmcio.stop, 0, sizeof(struct mmc_command)); cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ CAM_DIR_NONE, /*mmc_opcode*/ MMC_APP_CMD, /*mmc_arg*/ get_rca(periph) << 16, /*mmc_flags*/ MMC_RSP_R1 | MMC_CMD_AC, /*mmc_data*/ NULL, /*timeout*/ 0); err = cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); if (err != 0) return err; if (!(ccb->mmcio.cmd.resp[0] & R1_APP_CMD)) return MMC_ERR_FAILED; /* Now exec actual command */ int flags = 0; if (cmd->data != NULL) { ccb->mmcio.cmd.data = cmd->data; if (cmd->data->flags & MMC_DATA_READ) flags |= CAM_DIR_IN; if (cmd->data->flags & MMC_DATA_WRITE) flags |= CAM_DIR_OUT; } else flags = CAM_DIR_NONE; cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ flags, /*mmc_opcode*/ cmd->opcode, /*mmc_arg*/ cmd->arg, /*mmc_flags*/ cmd->flags, /*mmc_data*/ cmd->data, /*timeout*/ 0); err = cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); memcpy(cmd->resp, ccb->mmcio.cmd.resp, sizeof(cmd->resp)); cmd->error = ccb->mmcio.cmd.error; if (err != 0) return err; return 0; } static int mmc_app_get_scr(struct cam_periph *periph, union ccb *ccb, uint32_t *rawscr) { int err; struct mmc_command cmd; struct mmc_data d; memset(&cmd, 0, sizeof(cmd)); memset(rawscr, 0, 8); cmd.opcode = ACMD_SEND_SCR; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.arg = 0; d.data = rawscr; d.len = 8; d.flags = MMC_DATA_READ; cmd.data = &d; err = mmc_exec_app_cmd(periph, ccb, &cmd); rawscr[0] = be32toh(rawscr[0]); rawscr[1] = be32toh(rawscr[1]); return (err); } static int mmc_send_ext_csd(struct cam_periph *periph, union ccb *ccb, uint8_t *rawextcsd, size_t buf_len) { int err; struct mmc_data d; KASSERT(buf_len == 512, ("Buffer for ext csd must be 512 bytes")); d.data = rawextcsd; d.len = buf_len; d.flags = MMC_DATA_READ; memset(d.data, 0, d.len); cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ CAM_DIR_IN, /*mmc_opcode*/ MMC_SEND_EXT_CSD, /*mmc_arg*/ 0, /*mmc_flags*/ MMC_RSP_R1 | MMC_CMD_ADTC, /*mmc_data*/ &d, /*timeout*/ 0); err = cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); if (err != 0) return err; if (!(ccb->mmcio.cmd.resp[0] & R1_APP_CMD)) return MMC_ERR_FAILED; return MMC_ERR_NONE; } static void mmc_app_decode_scr(uint32_t *raw_scr, struct mmc_scr *scr) { unsigned int scr_struct; memset(scr, 0, sizeof(*scr)); scr_struct = mmc_get_bits(raw_scr, 64, 60, 4); if (scr_struct != 0) { printf("Unrecognised SCR structure version %d\n", scr_struct); return; } scr->sda_vsn = mmc_get_bits(raw_scr, 64, 56, 4); scr->bus_widths = mmc_get_bits(raw_scr, 64, 48, 4); } static int mmc_switch(struct cam_periph *periph, union ccb *ccb, uint8_t set, uint8_t index, uint8_t value) { int arg = (MMC_SWITCH_FUNC_WR << 24) | (index << 16) | (value << 8) | set; cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ CAM_DIR_NONE, /*mmc_opcode*/ MMC_SWITCH_FUNC, /*mmc_arg*/ arg, /*mmc_flags*/ MMC_RSP_R1B | MMC_CMD_AC, /*mmc_data*/ NULL, /*timeout*/ 0); cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)) { if (ccb->mmcio.cmd.error != 0) { 
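		/*
		 * CAM completed the CCB, but the CMD6 itself failed;
		 * cmd.error carries an MMC_ERR_* code from the
		 * controller.
		 */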
CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_PERIPH, ("%s: MMC command failed", __func__)); return EIO; } return 0; /* Normal return */ } else { CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_PERIPH, ("%s: CAM request failed\n", __func__)); return EIO; } } static int mmc_sd_switch(struct cam_periph *periph, union ccb *ccb, uint8_t mode, uint8_t grp, uint8_t value, uint8_t *res) { struct mmc_data mmc_d; memset(res, 0, 64); mmc_d.len = 64; mmc_d.data = res; mmc_d.flags = MMC_DATA_READ; cam_fill_mmcio(&ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ NULL, /*flags*/ CAM_DIR_IN, /*mmc_opcode*/ SD_SWITCH_FUNC, /*mmc_arg*/ mode << 31, /*mmc_flags*/ MMC_RSP_R1 | MMC_CMD_ADTC, /*mmc_data*/ &mmc_d, /*timeout*/ 0); cam_periph_runccb(ccb, sddaerror, CAM_FLAG_NONE, /*sense_flags*/0, NULL); if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)) { if (ccb->mmcio.cmd.error != 0) { CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_PERIPH, ("%s: MMC command failed", __func__)); return EIO; } return 0; /* Normal return */ } else { CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_PERIPH, ("%s: CAM request failed\n", __func__)); return EIO; } } static int mmc_set_timing(struct cam_periph *periph, union ccb *ccb, enum mmc_bus_timing timing) { u_char switch_res[64]; int err; uint8_t value; struct mmc_params *mmcp = &periph->path->device->mmc_ident_data; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mmc_set_timing(timing=%d)", timing)); switch (timing) { case bus_timing_normal: value = 0; break; case bus_timing_hs: value = 1; break; default: return (MMC_ERR_INVALID); } if (mmcp->card_features & CARD_FEATURE_MMC) { err = mmc_switch(periph, ccb, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, value); } else { err = mmc_sd_switch(periph, ccb, SD_SWITCH_MODE_SET, SD_SWITCH_GROUP1, value, switch_res); } /* Set high-speed timing on the host */ struct ccb_trans_settings_mmc *cts; cts = &ccb->cts.proto_specific.mmc; ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; ccb->ccb_h.flags = CAM_DIR_NONE; ccb->ccb_h.retry_count = 0; ccb->ccb_h.timeout = 100; ccb->ccb_h.cbfcnp = NULL; cts->ios.timing = timing; cts->ios_valid = MMC_BT; xpt_action(ccb); return (err); } static void sdda_start_init_task(void *context, int pending) { union ccb *new_ccb; struct cam_periph *periph; periph = (struct cam_periph *)context; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sdda_start_init_task\n")); new_ccb = xpt_alloc_ccb(); xpt_setup_ccb(&new_ccb->ccb_h, periph->path, CAM_PRIORITY_NONE); cam_periph_lock(periph); sdda_start_init(context, new_ccb); cam_periph_unlock(periph); xpt_free_ccb(new_ccb); } static void sdda_set_bus_width(struct cam_periph *periph, union ccb *ccb, int width) { struct mmc_params *mmcp = &periph->path->device->mmc_ident_data; int err; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sdda_set_bus_width\n")); /* First set for the card, then for the host */ if (mmcp->card_features & CARD_FEATURE_MMC) { uint8_t value; switch (width) { case bus_width_1: value = EXT_CSD_BUS_WIDTH_1; break; case bus_width_4: value = EXT_CSD_BUS_WIDTH_4; break; case bus_width_8: value = EXT_CSD_BUS_WIDTH_8; break; default: panic("Invalid bus width %d", width); } err = mmc_switch(periph, ccb, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, value); } else { /* For SD cards we send ACMD6 with the required bus width in arg */ struct mmc_command cmd; memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = ACMD_SET_BUS_WIDTH; cmd.arg = width; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; err = mmc_exec_app_cmd(periph, ccb, &cmd); } if (err != MMC_ERR_NONE) { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Error %d when setting bus 
width on the card\n", err)); return; } /* Now card is done, set the host to the same width */ struct ccb_trans_settings_mmc *cts; cts = &ccb->cts.proto_specific.mmc; ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; ccb->ccb_h.flags = CAM_DIR_NONE; ccb->ccb_h.retry_count = 0; ccb->ccb_h.timeout = 100; ccb->ccb_h.cbfcnp = NULL; cts->ios.bus_width = width; cts->ios_valid = MMC_BW; xpt_action(ccb); } static inline const char *bus_width_str(enum mmc_bus_width w) { switch (w) { case bus_width_1: return "1-bit"; case bus_width_4: return "4-bit"; case bus_width_8: return "8-bit"; } } static void sdda_start_init(void *context, union ccb *start_ccb) { struct cam_periph *periph; periph = (struct cam_periph *)context; int err; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sdda_start_init\n")); /* periph was held for us when this task was enqueued */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_release(periph); return; } struct sdda_softc *softc = (struct sdda_softc *)periph->softc; //struct ccb_mmcio *mmcio = &start_ccb->mmcio; struct mmc_params *mmcp = &periph->path->device->mmc_ident_data; struct cam_ed *device = periph->path->device; if (mmcp->card_features & CARD_FEATURE_MMC) { mmc_decode_csd_mmc(mmcp->card_csd, &softc->csd); mmc_decode_cid_mmc(mmcp->card_cid, &softc->cid); if (softc->csd.spec_vers >= 4) err = mmc_send_ext_csd(periph, start_ccb, (uint8_t *)&softc->raw_ext_csd, sizeof(softc->raw_ext_csd)); } else { mmc_decode_csd_sd(mmcp->card_csd, &softc->csd); mmc_decode_cid_sd(mmcp->card_cid, &softc->cid); } softc->sector_count = softc->csd.capacity / 512; softc->mediasize = softc->csd.capacity; /* MMC >= 4.x have EXT_CSD that has its own opinion about capacity */ if (softc->csd.spec_vers >= 4) { uint32_t sec_count = softc->raw_ext_csd[EXT_CSD_SEC_CNT] + (softc->raw_ext_csd[EXT_CSD_SEC_CNT + 1] << 8) + (softc->raw_ext_csd[EXT_CSD_SEC_CNT + 2] << 16) + (softc->raw_ext_csd[EXT_CSD_SEC_CNT + 3] << 24); if (sec_count != 0) { softc->sector_count = sec_count; softc->mediasize = softc->sector_count * 512; /* FIXME: there should be a better name for this option...*/ mmcp->card_features |= CARD_FEATURE_SDHC; } } CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Capacity: %"PRIu64", sectors: %"PRIu64"\n", softc->mediasize, softc->sector_count)); mmc_format_card_id_string(softc, mmcp); /* Update info for CAM */ device->serial_num_len = strlen(softc->card_sn_string); device->serial_num = (u_int8_t *)malloc((device->serial_num_len + 1), M_CAMXPT, M_NOWAIT); strlcpy(device->serial_num, softc->card_sn_string, device->serial_num_len); device->device_id_len = strlen(softc->card_id_string); device->device_id = (u_int8_t *)malloc((device->device_id_len + 1), M_CAMXPT, M_NOWAIT); strlcpy(device->device_id, softc->card_id_string, device->device_id_len); strlcpy(mmcp->model, softc->card_id_string, sizeof(mmcp->model)); /* Set the clock frequency that the card can handle */ struct ccb_trans_settings_mmc *cts; cts = &start_ccb->cts.proto_specific.mmc; /* First, get the host's max freq */ start_ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; start_ccb->ccb_h.flags = CAM_DIR_NONE; start_ccb->ccb_h.retry_count = 0; start_ccb->ccb_h.timeout = 100; start_ccb->ccb_h.cbfcnp = NULL; xpt_action(start_ccb); if (start_ccb->ccb_h.status != CAM_REQ_CMP) panic("Cannot get max host freq"); int host_f_max = cts->host_f_max; uint32_t host_caps = cts->host_caps; if (cts->ios.bus_width != bus_width_1) panic("Bus width in ios is not 1-bit"); /* Now check if the card supports High-speed */ softc->card_f_max = softc->csd.tran_speed; if 
(host_caps & MMC_CAP_HSPEED) { /* Find out if the card supports High speed timing */ if (mmcp->card_features & CARD_FEATURE_SD20) { /* Get and decode SCR */ uint32_t rawscr; uint8_t res[64]; if (mmc_app_get_scr(periph, start_ccb, &rawscr)) { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Cannot get SCR\n")); goto finish_hs_tests; } mmc_app_decode_scr(&rawscr, &softc->scr); if ((softc->scr.sda_vsn >= 1) && (softc->csd.ccc & (1<<10))) { mmc_sd_switch(periph, start_ccb, SD_SWITCH_MODE_CHECK, SD_SWITCH_GROUP1, SD_SWITCH_NOCHANGE, res); if (res[13] & 2) { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Card supports HS\n")); softc->card_f_max = SD_HS_MAX; } } else { CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Not trying the switch\n")); goto finish_hs_tests; } } if (mmcp->card_features & CARD_FEATURE_MMC && softc->csd.spec_vers >= 4) { if (softc->raw_ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_HS_52) softc->card_f_max = MMC_TYPE_HS_52_MAX; else if (softc->raw_ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_HS_26) softc->card_f_max = MMC_TYPE_HS_26_MAX; } } int f_max; finish_hs_tests: f_max = min(host_f_max, softc->card_f_max); CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Set SD freq to %d MHz (min out of host f=%d MHz and card f=%d MHz)\n", f_max / 1000000, host_f_max / 1000000, softc->card_f_max / 1000000)); start_ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; start_ccb->ccb_h.flags = CAM_DIR_NONE; start_ccb->ccb_h.retry_count = 0; start_ccb->ccb_h.timeout = 100; start_ccb->ccb_h.cbfcnp = NULL; cts->ios.clock = f_max; cts->ios_valid = MMC_CLK; xpt_action(start_ccb); /* Set bus width */ enum mmc_bus_width desired_bus_width = bus_width_1; enum mmc_bus_width max_host_bus_width = (host_caps & MMC_CAP_8_BIT_DATA ? bus_width_8 : host_caps & MMC_CAP_4_BIT_DATA ? bus_width_4 : bus_width_1); enum mmc_bus_width max_card_bus_width = bus_width_1; if (mmcp->card_features & CARD_FEATURE_SD20 && softc->scr.bus_widths & SD_SCR_BUS_WIDTH_4) max_card_bus_width = bus_width_4; /* * Unlike SD, MMC cards don't have any information about supported bus width... * So we need to perform read/write test to find out the width. */ /* TODO: figure out bus width for MMC; use 8-bit for now (to test on BBB) */ if (mmcp->card_features & CARD_FEATURE_MMC) max_card_bus_width = bus_width_8; desired_bus_width = min(max_host_bus_width, max_card_bus_width); CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("Set bus width to %s (min of host %s and card %s)\n", bus_width_str(desired_bus_width), bus_width_str(max_host_bus_width), bus_width_str(max_card_bus_width))); sdda_set_bus_width(periph, start_ccb, desired_bus_width); if (f_max > 25000000) { err = mmc_set_timing(periph, start_ccb, bus_timing_hs); if (err != MMC_ERR_NONE) CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("Cannot switch card to high-speed mode")); } softc->state = SDDA_STATE_NORMAL; sdda_hook_into_geom(periph); } /* Called with periph lock held! */ static void sddastart(struct cam_periph *periph, union ccb *start_ccb) { struct sdda_softc *softc = (struct sdda_softc *)periph->softc; struct mmc_params *mmcp = &periph->path->device->mmc_ident_data; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sddastart\n")); if (softc->state != SDDA_STATE_NORMAL) { CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("device is not in SDDA_STATE_NORMAL yet")); xpt_release_ccb(start_ccb); return; } struct bio *bp; /* Run regular command. 
*/ bp = bioq_first(&softc->bio_queue); if (bp == NULL) { xpt_release_ccb(start_ccb); return; } bioq_remove(&softc->bio_queue, bp); switch (bp->bio_cmd) { case BIO_WRITE: CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("BIO_WRITE\n")); softc->flags |= SDDA_FLAG_DIRTY; /* FALLTHROUGH */ case BIO_READ: { CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("BIO_READ\n")); uint64_t blockno = bp->bio_pblkno; uint16_t count = bp->bio_bcount / 512; uint16_t opcode; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("Block %"PRIu64" cnt %u\n", blockno, count)); /* Construct new MMC command */ if (bp->bio_cmd == BIO_READ) { if (count > 1) opcode = MMC_READ_MULTIPLE_BLOCK; else opcode = MMC_READ_SINGLE_BLOCK; } else { if (count > 1) opcode = MMC_WRITE_MULTIPLE_BLOCK; else opcode = MMC_WRITE_BLOCK; } start_ccb->ccb_h.func_code = XPT_MMC_IO; start_ccb->ccb_h.flags = (bp->bio_cmd == BIO_READ ? CAM_DIR_IN : CAM_DIR_OUT); start_ccb->ccb_h.retry_count = 0; start_ccb->ccb_h.timeout = 15 * 1000; start_ccb->ccb_h.cbfcnp = sddadone; struct ccb_mmcio *mmcio; mmcio = &start_ccb->mmcio; mmcio->cmd.opcode = opcode; mmcio->cmd.arg = blockno; if (!(mmcp->card_features & CARD_FEATURE_SDHC)) mmcio->cmd.arg <<= 9; mmcio->cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; mmcio->cmd.data = softc->mmcdata; mmcio->cmd.data->data = bp->bio_data; mmcio->cmd.data->len = 512 * count; mmcio->cmd.data->flags = (bp->bio_cmd == BIO_READ ? MMC_DATA_READ : MMC_DATA_WRITE); /* Direct h/w to issue CMD12 upon completion */ if (count > 1) { mmcio->stop.opcode = MMC_STOP_TRANSMISSION; mmcio->stop.flags = MMC_RSP_R1B | MMC_CMD_AC; mmcio->stop.arg = 0; } break; } case BIO_FLUSH: CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("BIO_FLUSH\n")); sddaschedule(periph); break; case BIO_DELETE: CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("BIO_DELETE\n")); sddaschedule(periph); break; } start_ccb->ccb_h.ccb_bp = bp; softc->outstanding_cmds++; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); softc->refcount--; /* May have more work to do, so ensure we stay scheduled */ sddaschedule(periph); } static void sddadone(struct cam_periph *periph, union ccb *done_ccb) { struct sdda_softc *softc; struct ccb_mmcio *mmcio; // struct ccb_getdev *cgd; struct cam_path *path; // int state; softc = (struct sdda_softc *)periph->softc; mmcio = &done_ccb->mmcio; path = done_ccb->ccb_h.path; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("sddadone\n")); struct bio *bp; int error = 0; // cam_periph_lock(periph); if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Error!!!\n")); if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); error = 5; /* EIO */ } else { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) panic("REQ_CMP with QFRZN"); error = 0; } bp = (struct bio *)done_ccb->ccb_h.ccb_bp; bp->bio_error = error; if (error != 0) { bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } else { /* XXX: How many bytes remaining? 
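		 * The MMC stack reports no partial-transfer count, so a
		 * command that completed is assumed to have moved all of
		 * its data.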
*/ bp->bio_resid = 0; if (bp->bio_resid > 0) bp->bio_flags |= BIO_ERROR; } uint32_t card_status = mmcio->cmd.resp[0]; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Card status: %08x\n", R1_STATUS(card_status))); CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Current state: %d\n", R1_CURRENT_STATE(card_status))); softc->outstanding_cmds--; xpt_release_ccb(done_ccb); biodone(bp); } static int sddaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { return(cam_periph_error(ccb, cam_flags, sense_flags)); } #endif /* _KERNEL */ Index: head/sys/cam/mmc/mmc_xpt.c =================================================================== --- head/sys/cam/mmc/mmc_xpt.c (revision 328917) +++ head/sys/cam/mmc/mmc_xpt.c (revision 328918) @@ -1,1080 +1,1080 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013,2014 Ilya Bakulin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for xpt_print below */ #include /* for PRIu64 */ #include "opt_cam.h" FEATURE(mmccam, "CAM-based MMC/SD/SDIO stack"); static struct cam_ed * mmc_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); static void mmc_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg); static void mmc_action(union ccb *start_ccb); static void mmc_dev_advinfo(union ccb *start_ccb); static void mmc_announce_periph(struct cam_periph *periph); static void mmc_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *ccb); /* mmcprobe methods */ static cam_status mmcprobe_register(struct cam_periph *periph, void *arg); static void mmcprobe_start(struct cam_periph *periph, union ccb *start_ccb); static void mmcprobe_cleanup(struct cam_periph *periph); static void mmcprobe_done(struct cam_periph *periph, union ccb *done_ccb); static void mmc_proto_announce(struct cam_ed *device); static void mmc_proto_denounce(struct cam_ed *device); static void mmc_proto_debug_out(union ccb *ccb); typedef enum { PROBE_RESET, PROBE_IDENTIFY, PROBE_SDIO_RESET, PROBE_SEND_IF_COND, PROBE_SDIO_INIT, PROBE_MMC_INIT, PROBE_SEND_APP_OP_COND, PROBE_GET_CID, PROBE_GET_CSD, PROBE_SEND_RELATIVE_ADDR, PROBE_SELECT_CARD, PROBE_DONE, PROBE_INVALID } probe_action; static char *probe_action_text[] = { "PROBE_RESET", "PROBE_IDENTIFY", "PROBE_SDIO_RESET", "PROBE_SEND_IF_COND", "PROBE_SDIO_INIT", "PROBE_MMC_INIT", "PROBE_SEND_APP_OP_COND", "PROBE_GET_CID", "PROBE_GET_CSD", "PROBE_SEND_RELATIVE_ADDR", "PROBE_SELECT_CARD", "PROBE_DONE", "PROBE_INVALID" }; #define PROBE_SET_ACTION(softc, newaction) \ do { \ char **text; \ text = probe_action_text; \ CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE, \ ("Probe %s to %s\n", text[(softc)->action], \ text[(newaction)])); \ (softc)->action = (newaction); \ } while(0) static struct xpt_xport_ops mmc_xport_ops = { .alloc_device = mmc_alloc_device, .action = mmc_action, .async = mmc_dev_async, .announce = mmc_announce_periph, }; #define MMC_XPT_XPORT(x, X) \ static struct xpt_xport mmc_xport_ ## x = { \ .xport = XPORT_ ## X, \ .name = #x, \ .ops = &mmc_xport_ops, \ }; \ CAM_XPT_XPORT(mmc_xport_ ## x); MMC_XPT_XPORT(mmc, MMCSD); static struct xpt_proto_ops mmc_proto_ops = { .announce = mmc_proto_announce, .denounce = mmc_proto_denounce, .debug_out = mmc_proto_debug_out, }; static struct xpt_proto mmc_proto = { .proto = PROTO_MMCSD, .name = "mmcsd", .ops = &mmc_proto_ops, }; CAM_XPT_PROTO(mmc_proto); typedef struct { probe_action action; int restart; union ccb saved_ccb; uint32_t flags; #define PROBE_FLAG_ACMD_SENT 0x1 /* CMD55 is sent, card expects ACMD */ uint8_t acmd41_count; /* how many times ACMD41 has been issued */ struct cam_periph *periph; } mmcprobe_softc; /* XPort functions -- an interface to CAM at periph side */ static struct cam_ed * mmc_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct cam_ed *device; printf("mmc_alloc_device()\n"); device = xpt_alloc_device(bus, target, lun_id); if (device == NULL) return (NULL); device->quirk = NULL; device->mintags = 0; device->maxtags = 0; bzero(&device->inq_data, sizeof(device->inq_data)); device->inq_flags = 0; 
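	/* MMC devices have no SCSI inquiry data and no tagged queueing. */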
device->queue_flags = 0; device->serial_num = NULL; device->serial_num_len = 0; return (device); } static void mmc_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg) { printf("mmc_dev_async(async_code=0x%x, path_id=%d, target_id=%x, lun_id=%" SCNx64 "\n", async_code, bus->path_id, target->target_id, device->lun_id); /* * We only need to handle events for real devices. */ if (target->target_id == CAM_TARGET_WILDCARD || device->lun_id == CAM_LUN_WILDCARD) return; if (async_code == AC_LOST_DEVICE) { if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) { printf("AC_LOST_DEVICE -> set to unconfigured\n"); device->flags |= CAM_DEV_UNCONFIGURED; xpt_release_device(device); } else { printf("AC_LOST_DEVICE on unconfigured device\n"); } } else if (async_code == AC_FOUND_DEVICE) { printf("Got AC_FOUND_DEVICE -- whatever...\n"); } else if (async_code == AC_PATH_REGISTERED) { printf("Got AC_PATH_REGISTERED -- whatever...\n"); } else if (async_code == AC_PATH_DEREGISTERED ) { printf("Got AC_PATH_DEREGISTERED -- whatever...\n"); } else if (async_code == AC_UNIT_ATTENTION) { printf("Got interrupt generated by the card and ignored it\n"); } else panic("Unknown async code\n"); } /* Taken from nvme_scan_lun, thanks to bsdimp@ */ static void mmc_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *request_ccb) { struct ccb_pathinq cpi; cam_status status; struct cam_periph *old_periph; int lock; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("mmc_scan_lun\n")); xpt_path_inq(&cpi, periph->path); if (cpi.ccb_h.status != CAM_REQ_CMP) { if (request_ccb != NULL) { request_ccb->ccb_h.status = cpi.ccb_h.status; xpt_done(request_ccb); } return; } if (xpt_path_lun_id(path) == CAM_LUN_WILDCARD) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("mmd_scan_lun ignoring bus\n")); request_ccb->ccb_h.status = CAM_REQ_CMP; /* XXX signal error ? */ xpt_done(request_ccb); return; } lock = (xpt_path_owned(path) == 0); if (lock) xpt_path_lock(path); if ((old_periph = cam_periph_find(path, "mmcprobe")) != NULL) { if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) { // mmcprobe_softc *softc; // softc = (mmcprobe_softc *)old_periph->softc; // Not sure if we need request ccb queue for mmc // TAILQ_INSERT_TAIL(&softc->request_ccbs, // &request_ccb->ccb_h, periph_links.tqe); // softc->restart = 1; CAM_DEBUG(path, CAM_DEBUG_INFO, ("Got scan request, but mmcprobe already exists\n")); request_ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(request_ccb); } else { request_ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(request_ccb); } } else { xpt_print(path, " Set up the mmcprobe device...\n"); status = cam_periph_alloc(mmcprobe_register, NULL, mmcprobe_cleanup, mmcprobe_start, "mmcprobe", CAM_PERIPH_BIO, path, NULL, 0, request_ccb); if (status != CAM_REQ_CMP) { xpt_print(path, "xpt_scan_lun: cam_alloc_periph " "returned an error, can't continue probe\n"); } request_ccb->ccb_h.status = status; xpt_done(request_ccb); } if (lock) xpt_path_unlock(path); } static void mmc_action(union ccb *start_ccb) { CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mmc_action! 
func_code=%x, action %s\n", start_ccb->ccb_h.func_code,
xpt_action_name(start_ccb->ccb_h.func_code))); switch (start_ccb->ccb_h.func_code) {
case XPT_SCAN_BUS: /* FALLTHROUGH */ case XPT_SCAN_TGT: /* FALLTHROUGH */ case XPT_SCAN_LUN:
CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_INFO, ("XPT_SCAN_{BUS,TGT,LUN}\n"));
mmc_scan_lun(start_ccb->ccb_h.path->periph, start_ccb->ccb_h.path, start_ccb->crcn.flags,
start_ccb); break; case XPT_DEV_ADVINFO: { mmc_dev_advinfo(start_ccb); break; }
default: xpt_action_default(start_ccb); break; } }
static void mmc_dev_advinfo(union ccb *start_ccb) { struct cam_ed *device;
struct ccb_dev_advinfo *cdai; off_t amt; start_ccb->ccb_h.status = CAM_REQ_INVALID;
device = start_ccb->ccb_h.path->device; cdai = &start_ccb->cdai;
CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("%s: request %x\n", __func__, cdai->buftype));
/* We don't support writing any data */
if (cdai->flags & CDAI_FLAG_STORE) panic("Attempt to store data?!");
switch(cdai->buftype) { case CDAI_TYPE_SCSI_DEVID: cdai->provsiz = device->device_id_len;
if (device->device_id_len == 0) break; amt = MIN(cdai->provsiz, cdai->bufsiz);
memcpy(cdai->buf, device->device_id, amt); break;
case CDAI_TYPE_SERIAL_NUM: cdai->provsiz = device->serial_num_len;
if (device->serial_num_len == 0) break; amt = MIN(cdai->provsiz, cdai->bufsiz);
memcpy(cdai->buf, device->serial_num, amt); break;
case CDAI_TYPE_PHYS_PATH: /* pass(4) wants this */ cdai->provsiz = 0; break;
case CDAI_TYPE_MMC_PARAMS: cdai->provsiz = sizeof(struct mmc_params);
amt = MIN(cdai->provsiz, cdai->bufsiz); memcpy(cdai->buf, &device->mmc_ident_data, amt); break;
default: panic("Unknown buftype"); return; } start_ccb->ccb_h.status = CAM_REQ_CMP; }
static void mmc_announce_periph(struct cam_periph *periph) { struct ccb_pathinq cpi;
struct ccb_trans_settings cts; struct cam_path *path = periph->path;
cam_periph_assert(periph, MA_OWNED);
CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("mmc_announce_periph: called\n"));
xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS;
xpt_action((union ccb*)&cts);
if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) return;
xpt_path_inq(&cpi, periph->path);
printf("XPT info: CLK %04X, ...\n", cts.proto_specific.mmc.ios.clock); }
/* This func is called per attached device :-( */
void mmc_print_ident(struct mmc_params *ident_data) {
printf("Relative addr: %08x\n", ident_data->card_rca); printf("Card features: <");
if (ident_data->card_features & CARD_FEATURE_MMC) printf("MMC ");
if (ident_data->card_features & CARD_FEATURE_MEMORY) printf("Memory ");
if (ident_data->card_features & CARD_FEATURE_SDHC) printf("High-Capacity ");
if (ident_data->card_features & CARD_FEATURE_SD20) printf("SD2.0-Conditions ");
if (ident_data->card_features & CARD_FEATURE_SDIO) printf("SDIO ");
printf(">\n");
if (ident_data->card_features & CARD_FEATURE_MEMORY)
printf("Card memory OCR: %08x\n", ident_data->card_ocr);
if (ident_data->card_features & CARD_FEATURE_SDIO) {
printf("Card IO OCR: %08x\n", ident_data->io_ocr);
printf("Number of functions: %u\n", ident_data->sdio_func_count); } }
static void mmc_proto_announce(struct cam_ed *device) { mmc_print_ident(&device->mmc_ident_data); }
static void mmc_proto_denounce(struct cam_ed *device) { mmc_print_ident(&device->mmc_ident_data); }
static void mmc_proto_debug_out(union ccb *ccb) {
if (ccb->ccb_h.func_code != XPT_MMC_IO) return;
CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_CDB,("mmc_proto_debug_out\n")); } static 
periph_init_t probe_periph_init; static struct periph_driver probe_driver = { probe_periph_init, "mmcprobe", TAILQ_HEAD_INITIALIZER(probe_driver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(mmcprobe, probe_driver); #define CARD_ID_FREQUENCY 400000 /* Spec requires 400kHz max during ID phase. */ static void probe_periph_init() { } static cam_status mmcprobe_register(struct cam_periph *periph, void *arg) { - union ccb *request_ccb; /* CCB representing the probe request */ - cam_status status; mmcprobe_softc *softc; + union ccb *request_ccb; /* CCB representing the probe request */ + int status; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("mmcprobe_register\n")); request_ccb = (union ccb *)arg; if (request_ccb == NULL) { printf("mmcprobe_register: no probe CCB, " "can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (mmcprobe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_NOWAIT); if (softc == NULL) { printf("proberegister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } softc->flags = 0; softc->acmd41_count = 0; periph->softc = softc; softc->periph = periph; softc->action = PROBE_INVALID; softc->restart = 0; status = cam_periph_acquire(periph); memset(&periph->path->device->mmc_ident_data, 0, sizeof(struct mmc_params)); - if (status != CAM_REQ_CMP) { + if (status != 0) { printf("proberegister: cam_periph_acquire failed (status=%d)\n", status); - return (status); + return (CAM_REQ_CMP_ERR); } CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n")); if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) PROBE_SET_ACTION(softc, PROBE_RESET); else PROBE_SET_ACTION(softc, PROBE_IDENTIFY); /* This will kick the ball */ xpt_schedule(periph, CAM_PRIORITY_XPT); return(CAM_REQ_CMP); } static int mmc_highest_voltage(uint32_t ocr) { int i; for (i = MMC_OCR_MAX_VOLTAGE_SHIFT; i >= MMC_OCR_MIN_VOLTAGE_SHIFT; i--) if (ocr & (1 << i)) return (i); return (-1); } static inline void init_standard_ccb(union ccb *ccb, uint32_t cmd) { ccb->ccb_h.func_code = cmd; ccb->ccb_h.flags = CAM_DIR_OUT; ccb->ccb_h.retry_count = 0; ccb->ccb_h.timeout = 15 * 1000; ccb->ccb_h.cbfcnp = mmcprobe_done; } static void mmcprobe_start(struct cam_periph *periph, union ccb *start_ccb) { mmcprobe_softc *softc; struct cam_path *path; struct ccb_mmcio *mmcio; struct mtx *p_mtx = cam_periph_mtx(periph); struct ccb_trans_settings_mmc *cts; CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("mmcprobe_start\n")); softc = (mmcprobe_softc *)periph->softc; path = start_ccb->ccb_h.path; mmcio = &start_ccb->mmcio; cts = &start_ccb->cts.proto_specific.mmc; struct mmc_params *mmcp = &path->device->mmc_ident_data; memset(&mmcio->cmd, 0, sizeof(struct mmc_command)); if (softc->restart) { softc->restart = 0; if (path->device->flags & CAM_DEV_UNCONFIGURED) softc->action = PROBE_RESET; else softc->action = PROBE_IDENTIFY; } /* Here is the place where the identify fun begins */ switch (softc->action) { case PROBE_RESET: /* FALLTHROUGH */ case PROBE_IDENTIFY: xpt_path_inq(&start_ccb->cpi, periph->path); CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("Start with PROBE_RESET\n")); init_standard_ccb(start_ccb, XPT_SET_TRAN_SETTINGS); cts->ios.power_mode = power_off; cts->ios_valid = MMC_PM; xpt_action(start_ccb); mtx_sleep(periph, p_mtx, 0, "mmcios", 100); /* mmc_power_up */ /* Get the host OCR */ init_standard_ccb(start_ccb, XPT_GET_TRAN_SETTINGS); xpt_action(start_ccb); uint32_t hv = mmc_highest_voltage(cts->host_ocr); init_standard_ccb(start_ccb, XPT_SET_TRAN_SETTINGS); 
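	/*
	 * Power-up: select the highest voltage advertised in the host
	 * OCR, open-drain bus, clock still gated; the clock is only
	 * started (at CARD_ID_FREQUENCY) in the power_on step below.
	 */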
cts->ios.vdd = hv; cts->ios.bus_mode = opendrain; cts->ios.chip_select = cs_dontcare; cts->ios.power_mode = power_up; cts->ios.bus_width = bus_width_1; cts->ios.clock = 0; cts->ios_valid = MMC_VDD | MMC_PM | MMC_BM | MMC_CS | MMC_BW | MMC_CLK; xpt_action(start_ccb); mtx_sleep(periph, p_mtx, 0, "mmcios", 100); init_standard_ccb(start_ccb, XPT_SET_TRAN_SETTINGS); cts->ios.power_mode = power_on; cts->ios.clock = CARD_ID_FREQUENCY; cts->ios.timing = bus_timing_normal; cts->ios_valid = MMC_PM | MMC_CLK | MMC_BT; xpt_action(start_ccb); mtx_sleep(periph, p_mtx, 0, "mmcios", 100); /* End for mmc_power_on */ /* Begin mmc_idle_cards() */ init_standard_ccb(start_ccb, XPT_SET_TRAN_SETTINGS); cts->ios.chip_select = cs_high; cts->ios_valid = MMC_CS; xpt_action(start_ccb); mtx_sleep(periph, p_mtx, 0, "mmcios", 1); CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("Send first XPT_MMC_IO\n")); init_standard_ccb(start_ccb, XPT_MMC_IO); mmcio->cmd.opcode = MMC_GO_IDLE_STATE; /* CMD 0 */ mmcio->cmd.arg = 0; mmcio->cmd.flags = MMC_RSP_NONE | MMC_CMD_BC; mmcio->cmd.data = NULL; mmcio->stop.opcode = 0; /* XXX Reset I/O portion as well */ break; case PROBE_SDIO_RESET: CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("Start with PROBE_SDIO_RESET\n")); uint32_t mmc_arg = SD_IO_RW_ADR(SD_IO_CCCR_CTL) | SD_IO_RW_DAT(CCCR_CTL_RES) | SD_IO_RW_WR | SD_IO_RW_RAW; cam_fill_mmcio(&start_ccb->mmcio, /*retries*/ 0, /*cbfcnp*/ mmcprobe_done, /*flags*/ CAM_DIR_NONE, /*mmc_opcode*/ SD_IO_RW_DIRECT, /*mmc_arg*/ mmc_arg, /*mmc_flags*/ MMC_RSP_R5 | MMC_CMD_AC, /*mmc_data*/ NULL, /*timeout*/ 1000); break; case PROBE_SEND_IF_COND: CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("Start with PROBE_SEND_IF_COND\n")); init_standard_ccb(start_ccb, XPT_MMC_IO); mmcio->cmd.opcode = SD_SEND_IF_COND; /* CMD 8 */ mmcio->cmd.arg = (1 << 8) + 0xAA; mmcio->cmd.flags = MMC_RSP_R7 | MMC_CMD_BCR; mmcio->stop.opcode = 0; break; case PROBE_SDIO_INIT: CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("Start with PROBE_SDIO_INIT\n")); init_standard_ccb(start_ccb, XPT_MMC_IO); mmcio->cmd.opcode = IO_SEND_OP_COND; /* CMD 5 */ mmcio->cmd.arg = mmcp->io_ocr; mmcio->cmd.flags = MMC_RSP_R4; mmcio->stop.opcode = 0; break; case PROBE_MMC_INIT: CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("Start with PROBE_MMC_INIT\n")); init_standard_ccb(start_ccb, XPT_MMC_IO); mmcio->cmd.opcode = MMC_SEND_OP_COND; /* CMD 1 */ mmcio->cmd.arg = MMC_OCR_CCS | mmcp->card_ocr; /* CCS + ocr */; mmcio->cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR; mmcio->stop.opcode = 0; break; case PROBE_SEND_APP_OP_COND: init_standard_ccb(start_ccb, XPT_MMC_IO); if (softc->flags & PROBE_FLAG_ACMD_SENT) { mmcio->cmd.opcode = ACMD_SD_SEND_OP_COND; /* CMD 41 */ /* * We set CCS bit because we do support SDHC cards. * XXX: Don't set CCS if no response to CMD8. 
*/ uint32_t cmd_arg = MMC_OCR_CCS | mmcp->card_ocr; /* CCS + ocr */ if (softc->acmd41_count < 10 && mmcp->card_ocr != 0 ) cmd_arg |= MMC_OCR_S18R; mmcio->cmd.arg = cmd_arg; mmcio->cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR; softc->acmd41_count++; } else { mmcio->cmd.opcode = MMC_APP_CMD; /* CMD 55 */ mmcio->cmd.arg = 0; /* rca << 16 */ mmcio->cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; } mmcio->stop.opcode = 0; break; case PROBE_GET_CID: /* XXX move to mmc_da */ init_standard_ccb(start_ccb, XPT_MMC_IO); mmcio->cmd.opcode = MMC_ALL_SEND_CID; mmcio->cmd.arg = 0; mmcio->cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; mmcio->stop.opcode = 0; break; case PROBE_SEND_RELATIVE_ADDR: init_standard_ccb(start_ccb, XPT_MMC_IO); mmcio->cmd.opcode = SD_SEND_RELATIVE_ADDR; mmcio->cmd.arg = 0; mmcio->cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; mmcio->stop.opcode = 0; break; case PROBE_SELECT_CARD: init_standard_ccb(start_ccb, XPT_MMC_IO); mmcio->cmd.opcode = MMC_SELECT_CARD; mmcio->cmd.arg = (uint32_t)path->device->mmc_ident_data.card_rca << 16; mmcio->cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; mmcio->stop.opcode = 0; break; case PROBE_GET_CSD: /* XXX move to mmc_da */ init_standard_ccb(start_ccb, XPT_MMC_IO); mmcio->cmd.opcode = MMC_SEND_CSD; mmcio->cmd.arg = (uint32_t)path->device->mmc_ident_data.card_rca << 16; mmcio->cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; mmcio->stop.opcode = 0; break; case PROBE_DONE: CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("Start with PROBE_DONE\n")); init_standard_ccb(start_ccb, XPT_SET_TRAN_SETTINGS); cts->ios.bus_mode = pushpull; cts->ios_valid = MMC_BM; xpt_action(start_ccb); return; /* NOTREACHED */ break; case PROBE_INVALID: break; default: CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("probestart: invalid action state 0x%x\n", softc->action)); panic("default: case in mmc_probe_start()"); } start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; xpt_action(start_ccb); } static void mmcprobe_cleanup(struct cam_periph *periph) { free(periph->softc, M_CAMXPT); } static void mmcprobe_done(struct cam_periph *periph, union ccb *done_ccb) { mmcprobe_softc *softc; struct cam_path *path; int err; struct ccb_mmcio *mmcio; u_int32_t priority; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("mmcprobe_done\n")); softc = (mmcprobe_softc *)periph->softc; path = done_ccb->ccb_h.path; priority = done_ccb->ccb_h.pinfo.priority; switch (softc->action) { case PROBE_RESET: /* FALLTHROUGH */ case PROBE_IDENTIFY: { printf("Starting completion of PROBE_RESET\n"); CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("done with PROBE_RESET\n")); mmcio = &done_ccb->mmcio; err = mmcio->cmd.error; if (err != MMC_ERR_NONE) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("GO_IDLE_STATE failed with error %d\n", err)); /* There was a device there, but now it's gone... 
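		 * CMD0 drew no response, so report AC_LOST_DEVICE if the
		 * device had been configured.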
*/ if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) { xpt_async(AC_LOST_DEVICE, path, NULL); } PROBE_SET_ACTION(softc, PROBE_INVALID); break; } path->device->protocol = PROTO_MMCSD; PROBE_SET_ACTION(softc, PROBE_SEND_IF_COND); break; } case PROBE_SEND_IF_COND: { mmcio = &done_ccb->mmcio; err = mmcio->cmd.error; struct mmc_params *mmcp = &path->device->mmc_ident_data; if (err != MMC_ERR_NONE || mmcio->cmd.resp[0] != 0x1AA) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("IF_COND: error %d, pattern %08x\n", err, mmcio->cmd.resp[0])); } else { mmcp->card_features |= CARD_FEATURE_SD20; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("SD 2.0 interface conditions: OK\n")); } PROBE_SET_ACTION(softc, PROBE_SDIO_RESET); break; } case PROBE_SDIO_RESET: { mmcio = &done_ccb->mmcio; err = mmcio->cmd.error; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("SDIO_RESET: error %d, CCCR CTL register: %08x\n", err, mmcio->cmd.resp[0])); PROBE_SET_ACTION(softc, PROBE_SDIO_INIT); break; } case PROBE_SDIO_INIT: { mmcio = &done_ccb->mmcio; err = mmcio->cmd.error; struct mmc_params *mmcp = &path->device->mmc_ident_data; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("SDIO_INIT: error %d, %08x %08x %08x %08x\n", err, mmcio->cmd.resp[0], mmcio->cmd.resp[1], mmcio->cmd.resp[2], mmcio->cmd.resp[3])); /* * Error here means that this card is not SDIO, * so proceed with memory init as if nothing has happened */ if (err != MMC_ERR_NONE) { PROBE_SET_ACTION(softc, PROBE_SEND_APP_OP_COND); break; } mmcp->card_features |= CARD_FEATURE_SDIO; uint32_t ioifcond = mmcio->cmd.resp[0]; uint32_t io_ocr = ioifcond & R4_IO_OCR_MASK; mmcp->sdio_func_count = R4_IO_NUM_FUNCTIONS(ioifcond); CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("SDIO card: %d functions\n", mmcp->sdio_func_count)); if (io_ocr == 0) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("SDIO OCR invalid?!\n")); break; /* Retry */ } if (io_ocr != 0 && mmcp->io_ocr == 0) { mmcp->io_ocr = io_ocr; break; /* Retry, this time with non-0 OCR */ } CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("SDIO OCR: %08x\n", mmcp->io_ocr)); if (ioifcond & R4_IO_MEM_PRESENT) { /* Combo card -- proceed to memory initialization */ PROBE_SET_ACTION(softc, PROBE_SEND_APP_OP_COND); } else { /* No memory portion -- get RCA and select card */ PROBE_SET_ACTION(softc, PROBE_SEND_RELATIVE_ADDR); } break; } case PROBE_MMC_INIT: { mmcio = &done_ccb->mmcio; err = mmcio->cmd.error; struct mmc_params *mmcp = &path->device->mmc_ident_data; if (err != MMC_ERR_NONE) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("MMC_INIT: error %d, resp %08x\n", err, mmcio->cmd.resp[0])); PROBE_SET_ACTION(softc, PROBE_INVALID); break; } CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("MMC card, OCR %08x\n", mmcio->cmd.resp[0])); if (mmcp->card_ocr == 0) { /* We haven't sent the OCR to the card yet -- do it */ mmcp->card_ocr = mmcio->cmd.resp[0]; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("-> sending OCR to card\n")); break; } if (!(mmcio->cmd.resp[0] & MMC_OCR_CARD_BUSY)) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("Card is still powering up\n")); break; } mmcp->card_features |= CARD_FEATURE_MMC | CARD_FEATURE_MEMORY; PROBE_SET_ACTION(softc, PROBE_GET_CID); break; } case PROBE_SEND_APP_OP_COND: { mmcio = &done_ccb->mmcio; err = mmcio->cmd.error; if (err != MMC_ERR_NONE) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("APP_OP_COND: error %d, resp %08x\n", err, mmcio->cmd.resp[0])); PROBE_SET_ACTION(softc, PROBE_MMC_INIT); break; } if (!(softc->flags & PROBE_FLAG_ACMD_SENT)) 
{ /* Don't change the state */ softc->flags |= PROBE_FLAG_ACMD_SENT; break; } softc->flags &= ~PROBE_FLAG_ACMD_SENT; if ((mmcio->cmd.resp[0] & MMC_OCR_CARD_BUSY) || (mmcio->cmd.arg & MMC_OCR_VOLTAGE) == 0) { struct mmc_params *mmcp = &path->device->mmc_ident_data; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("Card OCR: %08x\n", mmcio->cmd.resp[0])); if (mmcp->card_ocr == 0) { mmcp->card_ocr = mmcio->cmd.resp[0]; /* Now when we know OCR that we want -- send it to card */ CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("-> sending OCR to card\n")); } else { /* We already know the OCR and despite of that we * are processing the answer to ACMD41 -> move on */ PROBE_SET_ACTION(softc, PROBE_GET_CID); } /* Getting an answer to ACMD41 means the card has memory */ mmcp->card_features |= CARD_FEATURE_MEMORY; /* Standard capacity vs High Capacity memory card */ if (mmcio->cmd.resp[0] & MMC_OCR_CCS) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("Card is SDHC\n")); mmcp->card_features |= CARD_FEATURE_SDHC; } /* Whether the card supports 1.8V signaling */ if (mmcio->cmd.resp[0] & MMC_OCR_S18A) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("Card supports 1.8V signaling\n")); mmcp->card_features |= CARD_FEATURE_18V; } } else { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("Card not ready: %08x\n", mmcio->cmd.resp[0])); /* Send CMD55+ACMD41 once again */ PROBE_SET_ACTION(softc, PROBE_SEND_APP_OP_COND); } break; } case PROBE_GET_CID: /* XXX move to mmc_da */ { mmcio = &done_ccb->mmcio; err = mmcio->cmd.error; if (err != MMC_ERR_NONE) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("PROBE_GET_CID: error %d\n", err)); PROBE_SET_ACTION(softc, PROBE_INVALID); break; } struct mmc_params *mmcp = &path->device->mmc_ident_data; memcpy(mmcp->card_cid, mmcio->cmd.resp, 4 * sizeof(uint32_t)); CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("CID %08x%08x%08x%08x\n", mmcp->card_cid[0], mmcp->card_cid[1], mmcp->card_cid[2], mmcp->card_cid[3])); PROBE_SET_ACTION(softc, PROBE_SEND_RELATIVE_ADDR); break; } case PROBE_SEND_RELATIVE_ADDR: { mmcio = &done_ccb->mmcio; err = mmcio->cmd.error; struct mmc_params *mmcp = &path->device->mmc_ident_data; uint16_t rca = mmcio->cmd.resp[0] >> 16; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("Card published RCA: %u\n", rca)); path->device->mmc_ident_data.card_rca = rca; if (err != MMC_ERR_NONE) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("PROBE_SEND_RELATIVE_ADDR: error %d\n", err)); PROBE_SET_ACTION(softc, PROBE_INVALID); break; } /* If memory is present, get CSD, otherwise select card */ if (mmcp->card_features & CARD_FEATURE_MEMORY) PROBE_SET_ACTION(softc, PROBE_GET_CSD); else PROBE_SET_ACTION(softc, PROBE_SELECT_CARD); break; } case PROBE_GET_CSD: { mmcio = &done_ccb->mmcio; err = mmcio->cmd.error; if (err != MMC_ERR_NONE) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("PROBE_GET_CSD: error %d\n", err)); PROBE_SET_ACTION(softc, PROBE_INVALID); break; } struct mmc_params *mmcp = &path->device->mmc_ident_data; memcpy(mmcp->card_csd, mmcio->cmd.resp, 4 * sizeof(uint32_t)); CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("CSD %08x%08x%08x%08x\n", mmcp->card_csd[0], mmcp->card_csd[1], mmcp->card_csd[2], mmcp->card_csd[3])); PROBE_SET_ACTION(softc, PROBE_SELECT_CARD); break; } case PROBE_SELECT_CARD: { mmcio = &done_ccb->mmcio; err = mmcio->cmd.error; if (err != MMC_ERR_NONE) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("PROBE_SEND_RELATIVE_ADDR: error %d\n", err)); PROBE_SET_ACTION(softc, PROBE_INVALID); break; } 
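		/* CMD7 moved the card to transfer state; probing is complete. */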
PROBE_SET_ACTION(softc, PROBE_DONE); break; } default: CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("mmc_probedone: invalid action state 0x%x\n", softc->action)); panic("default: case in mmc_probe_done()"); } if (softc->action == PROBE_INVALID && (path->device->flags & CAM_DEV_UNCONFIGURED) == 0) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("mmc_probedone: Should send AC_LOST_DEVICE but won't for now\n")); //xpt_async(AC_LOST_DEVICE, path, NULL); } xpt_release_ccb(done_ccb); if (softc->action != PROBE_INVALID) xpt_schedule(periph, priority); /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */ int frozen = cam_release_devq(path, 0, 0, 0, FALSE); printf("mmc_probedone: remaining freezecnt %d\n", frozen); if (softc->action == PROBE_DONE) { /* Notify the system that the device is found! */ if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, path, done_ccb); } } if (softc->action == PROBE_DONE || softc->action == PROBE_INVALID) { cam_periph_invalidate(periph); cam_periph_release_locked(periph); } } Index: head/sys/cam/nvme/nvme_da.c =================================================================== --- head/sys/cam/nvme/nvme_da.c (revision 328917) +++ head/sys/cam/nvme/nvme_da.c (revision 328918) @@ -1,1139 +1,1139 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2015 Netflix, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * Derived from ata_da.c: * Copyright (c) 2009 Alexander Motin */ #include __FBSDID("$FreeBSD$"); #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif /* _KERNEL */ #ifndef _KERNEL #include #include #endif /* _KERNEL */ #include #include #include #include #include #include #include typedef enum { NDA_STATE_NORMAL } nda_state; typedef enum { NDA_FLAG_OPEN = 0x0001, NDA_FLAG_DIRTY = 0x0002, NDA_FLAG_SCTX_INIT = 0x0004, } nda_flags; typedef enum { NDA_Q_4K = 0x01, NDA_Q_NONE = 0x00, } nda_quirks; #define NDA_Q_BIT_STRING \ "\020" \ "\001Bit 0" typedef enum { NDA_CCB_BUFFER_IO = 0x01, NDA_CCB_DUMP = 0x02, NDA_CCB_TRIM = 0x03, NDA_CCB_TYPE_MASK = 0x0F, } nda_ccb_state; /* Offsets into our private area for storing information */ #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct trim_request { TAILQ_HEAD(, bio) bps; }; struct nda_softc { struct cam_iosched_softc *cam_iosched; int outstanding_cmds; /* Number of active commands */ int refcount; /* Active xpt_action() calls */ nda_state state; nda_flags flags; nda_quirks quirks; int unmappedio; uint32_t nsid; /* Namespace ID for this nda device */ struct disk *disk; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; struct trim_request trim_req; #ifdef CAM_IO_STATS struct sysctl_ctx_list sysctl_stats_ctx; struct sysctl_oid *sysctl_stats_tree; u_int timeouts; u_int errors; u_int invalidations; #endif }; /* Need quirk table */ static disk_strategy_t ndastrategy; static dumper_t ndadump; static periph_init_t ndainit; static void ndaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void ndasysctlinit(void *context, int pending); static periph_ctor_t ndaregister; static periph_dtor_t ndacleanup; static periph_start_t ndastart; static periph_oninv_t ndaoninvalidate; static void ndadone(struct cam_periph *periph, union ccb *done_ccb); static int ndaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static void ndashutdown(void *arg, int howto); static void ndasuspend(void *arg); #ifndef NDA_DEFAULT_SEND_ORDERED #define NDA_DEFAULT_SEND_ORDERED 1 #endif #ifndef NDA_DEFAULT_TIMEOUT #define NDA_DEFAULT_TIMEOUT 30 /* Timeout in seconds */ #endif #ifndef NDA_DEFAULT_RETRY #define NDA_DEFAULT_RETRY 4 #endif //static int nda_retry_count = NDA_DEFAULT_RETRY; static int nda_send_ordered = NDA_DEFAULT_SEND_ORDERED; static int nda_default_timeout = NDA_DEFAULT_TIMEOUT; /* * All NVMe media is non-rotational, so all nvme device instances * share this to implement the sysctl. */ static int nda_rotating_media = 0; static SYSCTL_NODE(_kern_cam, OID_AUTO, nda, CTLFLAG_RD, 0, "CAM Direct Access Disk driver"); static struct periph_driver ndadriver = { ndainit, "nda", TAILQ_HEAD_INITIALIZER(ndadriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(nda, ndadriver); static MALLOC_DEFINE(M_NVMEDA, "nvme_da", "nvme_da buffers"); /* * nice wrappers. Maybe these belong in nvme_all.c instead of * here, but this is the only place that uses these. Should * we ever grow another NVME periph, we should move them * all there wholesale. 
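 *
 * Each wrapper fills in the common CCB header via cam_fill_nvmeio()
 * and then builds the NVMe command itself with the matching
 * nvme_ns_*_cmd() helper.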
*/ static void nda_nvme_flush(struct nda_softc *softc, struct ccb_nvmeio *nvmeio) { cam_fill_nvmeio(nvmeio, 0, /* retries */ ndadone, /* cbfcnp */ CAM_DIR_NONE, /* flags */ NULL, /* data_ptr */ 0, /* dxfer_len */ nda_default_timeout * 1000); /* timeout 30s */ nvme_ns_flush_cmd(&nvmeio->cmd, softc->nsid); } static void nda_nvme_trim(struct nda_softc *softc, struct ccb_nvmeio *nvmeio, void *payload, uint32_t num_ranges) { cam_fill_nvmeio(nvmeio, 0, /* retries */ ndadone, /* cbfcnp */ CAM_DIR_OUT, /* flags */ payload, /* data_ptr */ num_ranges * sizeof(struct nvme_dsm_range), /* dxfer_len */ nda_default_timeout * 1000); /* timeout 30s */ nvme_ns_trim_cmd(&nvmeio->cmd, softc->nsid, num_ranges); } static void nda_nvme_write(struct nda_softc *softc, struct ccb_nvmeio *nvmeio, void *payload, uint64_t lba, uint32_t len, uint32_t count) { cam_fill_nvmeio(nvmeio, 0, /* retries */ ndadone, /* cbfcnp */ CAM_DIR_OUT, /* flags */ payload, /* data_ptr */ len, /* dxfer_len */ nda_default_timeout * 1000); /* timeout 30s */ nvme_ns_write_cmd(&nvmeio->cmd, softc->nsid, lba, count); } static void nda_nvme_rw_bio(struct nda_softc *softc, struct ccb_nvmeio *nvmeio, struct bio *bp, uint32_t rwcmd) { int flags = rwcmd == NVME_OPC_READ ? CAM_DIR_IN : CAM_DIR_OUT; void *payload; uint64_t lba; uint32_t count; if (bp->bio_flags & BIO_UNMAPPED) { flags |= CAM_DATA_BIO; payload = bp; } else { payload = bp->bio_data; } lba = bp->bio_pblkno; count = bp->bio_bcount / softc->disk->d_sectorsize; cam_fill_nvmeio(nvmeio, 0, /* retries */ ndadone, /* cbfcnp */ flags, /* flags */ payload, /* data_ptr */ bp->bio_bcount, /* dxfer_len */ nda_default_timeout * 1000); /* timeout 30s */ nvme_ns_rw_cmd(&nvmeio->cmd, rwcmd, softc->nsid, lba, count); } static int ndaopen(struct disk *dp) { struct cam_periph *periph; struct nda_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; - if (cam_periph_acquire(periph) != CAM_REQ_CMP) { + if (cam_periph_acquire(periph) != 0) { return(ENXIO); } cam_periph_lock(periph); if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("ndaopen\n")); softc = (struct nda_softc *)periph->softc; softc->flags |= NDA_FLAG_OPEN; cam_periph_unhold(periph); cam_periph_unlock(periph); return (0); } static int ndaclose(struct disk *dp) { struct cam_periph *periph; struct nda_softc *softc; union ccb *ccb; int error; periph = (struct cam_periph *)dp->d_drv1; softc = (struct nda_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("ndaclose\n")); if ((softc->flags & NDA_FLAG_DIRTY) != 0 && (periph->flags & CAM_PERIPH_INVALID) == 0 && cam_periph_hold(periph, PRIBIO) == 0) { ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); nda_nvme_flush(softc, &ccb->nvmeio); error = cam_periph_runccb(ccb, ndaerror, /*cam_flags*/0, /*sense_flags*/0, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); else softc->flags &= ~NDA_FLAG_DIRTY; xpt_release_ccb(ccb); cam_periph_unhold(periph); } softc->flags &= ~NDA_FLAG_OPEN; while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "ndaclose", 1); cam_periph_unlock(periph); cam_periph_release(periph); return (0); } static void ndaschedule(struct cam_periph *periph) { struct nda_softc *softc = (struct nda_softc *)periph->softc; if (softc->state != NDA_STATE_NORMAL) return; cam_iosched_schedule(softc->cam_iosched, 
periph); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void ndastrategy(struct bio *bp) { struct cam_periph *periph; struct nda_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; softc = (struct nda_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ndastrategy(%p)\n", bp)); /* * If the device has been made invalid, error out */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } /* * Place it in the queue of disk activities for this disk */ cam_iosched_queue_work(softc->cam_iosched, bp); /* * Schedule ourselves for performing the work. */ ndaschedule(periph); cam_periph_unlock(periph); return; } static int ndadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct cam_periph *periph; struct nda_softc *softc; u_int secsize; struct ccb_nvmeio nvmeio; struct disk *dp; uint64_t lba; uint32_t count; int error = 0; dp = arg; periph = dp->d_drv1; softc = (struct nda_softc *)periph->softc; secsize = softc->disk->d_sectorsize; lba = offset / secsize; count = length / secsize; if ((periph->flags & CAM_PERIPH_INVALID) != 0) return (ENXIO); /* xpt_get_ccb returns a zero'd allocation for the ccb, mimic that here */ memset(&nvmeio, 0, sizeof(nvmeio)); if (length > 0) { xpt_setup_ccb(&nvmeio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); nvmeio.ccb_h.ccb_state = NDA_CCB_DUMP; nda_nvme_write(softc, &nvmeio, virtual, lba, length, count); error = cam_periph_runccb((union ccb *)&nvmeio, cam_periph_error, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if (error != 0) printf("Aborting dump due to I/O error %d.\n", error); return (error); } /* Flush */ xpt_setup_ccb(&nvmeio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); nvmeio.ccb_h.ccb_state = NDA_CCB_DUMP; nda_nvme_flush(softc, &nvmeio); error = cam_periph_runccb((union ccb *)&nvmeio, cam_periph_error, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if (error != 0) xpt_print(periph->path, "flush cmd failed\n"); return (error); } static void ndainit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, ndaasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("nda: Failed to attach master async callback " "due to status 0x%x!\n", status); } else if (nda_send_ordered) { /* Register our event handlers */ if ((EVENTHANDLER_REGISTER(power_suspend, ndasuspend, NULL, EVENTHANDLER_PRI_LAST)) == NULL) printf("ndainit: power event registration failed!\n"); if ((EVENTHANDLER_REGISTER(shutdown_post_sync, ndashutdown, NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) printf("ndainit: shutdown event registration failed!\n"); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void ndadiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; cam_periph_release(periph); } static void ndaoninvalidate(struct cam_periph *periph) { struct nda_softc *softc; softc = (struct nda_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, ndaasync, periph, periph->path); #ifdef CAM_IO_STATS softc->invalidations++; #endif /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. 
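 *
 * cam_iosched_flush() below fails every bio still queued in the I/O
 * scheduler with ENXIO, and disk_gone() then asks GEOM to withdraw the
 * provider; GEOM answers via ndadiskgonecb() once that is complete.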
*/ cam_iosched_flush(softc->cam_iosched, NULL, ENXIO); disk_gone(softc->disk); } static void ndacleanup(struct cam_periph *periph) { struct nda_softc *softc; softc = (struct nda_softc *)periph->softc; cam_periph_unlock(periph); cam_iosched_fini(softc->cam_iosched); /* * If we can't free the sysctl tree, oh well... */ if ((softc->flags & NDA_FLAG_SCTX_INIT) != 0) { #ifdef CAM_IO_STATS if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0) xpt_print(periph->path, "can't remove sysctl stats context\n"); #endif if (sysctl_ctx_free(&softc->sysctl_ctx) != 0) xpt_print(periph->path, "can't remove sysctl context\n"); } disk_destroy(softc->disk); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void ndaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_NVME) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(ndaregister, ndaoninvalidate, ndacleanup, ndastart, "nda", CAM_PERIPH_BIO, path, ndaasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("ndaasync: Unable to attach to new device " "due to status 0x%x\n", status); break; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct nda_softc *softc; softc = periph->softc; disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); } break; } case AC_LOST_DEVICE: default: cam_periph_async(periph, code, path, arg); break; } } static void ndasysctlinit(void *context, int pending) { struct cam_periph *periph; struct nda_softc *softc; char tmpstr[32], tmpstr2[16]; periph = (struct cam_periph *)context; /* periph was held for us when this task was enqueued */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_release(periph); return; } softc = (struct nda_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM NDA unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= NDA_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_nda), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) { printf("ndasysctlinit: unable to allocate sysctl tree\n"); cam_periph_release(periph); return; } SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "unmapped_io", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->unmappedio, 0, "Unmapped I/O leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "rotating", CTLFLAG_RD | CTLFLAG_MPSAFE, &nda_rotating_media, 0, "Rotating media"); #ifdef CAM_IO_STATS softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats", CTLFLAG_RD, 0, "Statistics"); if (softc->sysctl_stats_tree == NULL) { printf("ndasysctlinit: unable to allocate sysctl tree for stats\n"); cam_periph_release(periph); return; } SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "timeouts", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->timeouts, 0, "Device timeouts reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "errors", 
CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->errors, 0, "Transport errors reported by the SIM."); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "pack_invalidations", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->invalidations, 0, "Device pack invalidations."); #endif cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx, softc->sysctl_tree); cam_periph_release(periph); } static int ndagetattr(struct bio *bp) { int ret; struct cam_periph *periph; periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, periph->path); cam_periph_unlock(periph); if (ret == 0) bp->bio_completed = bp->bio_length; return ret; } static cam_status ndaregister(struct cam_periph *periph, void *arg) { struct nda_softc *softc; struct disk *disk; struct ccb_pathinq cpi; const struct nvme_namespace_data *nsd; const struct nvme_controller_data *cd; char announce_buf[80]; u_int maxio; int quirks; nsd = nvme_get_identify_ns(periph); cd = nvme_get_identify_cntrl(periph); softc = (struct nda_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT | M_ZERO); if (softc == NULL) { printf("ndaregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } if (cam_iosched_init(&softc->cam_iosched, periph) != 0) { printf("ndaregister: Unable to probe new device. " "Unable to allocate iosched memory\n"); return(CAM_REQ_CMP_ERR); } /* ident_data parsing */ periph->softc = softc; softc->quirks = NDA_Q_NONE; xpt_path_inq(&cpi, periph->path); TASK_INIT(&softc->sysctl_task, 0, ndasysctlinit, periph); /* * The name space ID is the lun, save it for later I/O */ softc->nsid = (uint32_t)xpt_path_lun_id(periph->path); /* * Register this media as a disk */ (void)cam_periph_hold(periph, PRIBIO); cam_periph_unlock(periph); snprintf(announce_buf, sizeof(announce_buf), "kern.cam.nda.%d.quirks", periph->unit_number); quirks = softc->quirks; TUNABLE_INT_FETCH(announce_buf, &quirks); softc->quirks = quirks; cam_iosched_set_sort_queue(softc->cam_iosched, 0); softc->disk = disk = disk_alloc(); strlcpy(softc->disk->d_descr, cd->mn, MIN(sizeof(softc->disk->d_descr), sizeof(cd->mn))); strlcpy(softc->disk->d_ident, cd->sn, MIN(sizeof(softc->disk->d_ident), sizeof(cd->sn))); disk->d_rotation_rate = DISK_RR_NON_ROTATING; disk->d_open = ndaopen; disk->d_close = ndaclose; disk->d_strategy = ndastrategy; disk->d_getattr = ndagetattr; disk->d_dump = ndadump; disk->d_gone = ndadiskgonecb; disk->d_name = "nda"; disk->d_drv1 = periph; disk->d_unit = periph->unit_number; maxio = cpi.maxio; /* Honor max I/O size of SIM */ if (maxio == 0) maxio = DFLTPHYS; /* traditional default */ else if (maxio > MAXPHYS) maxio = MAXPHYS; /* for safety */ disk->d_maxsize = maxio; disk->d_sectorsize = 1 << nsd->lbaf[nsd->flbas.format].lbads; disk->d_mediasize = (off_t)(disk->d_sectorsize * nsd->nsze); disk->d_delmaxsize = disk->d_mediasize; disk->d_flags = DISKFLAG_DIRECT_COMPLETION; // if (cd->oncs.dsm) // XXX broken? disk->d_flags |= DISKFLAG_CANDELETE; if (cd->vwc.present) disk->d_flags |= DISKFLAG_CANFLUSHCACHE; if ((cpi.hba_misc & PIM_UNMAPPED) != 0) { disk->d_flags |= DISKFLAG_UNMAPPED_BIO; softc->unmappedio = 1; } /* * d_ident and d_descr are both far bigger than the length of either * the serial or model number strings. 
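 * (In the NVMe identify data the model number is 40 bytes and the
 * serial number 20 bytes, well under the size of those disk(9) fields,
 * so the copies below cannot truncate.)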
*/ nvme_strvis(disk->d_descr, cd->mn, sizeof(disk->d_descr), NVME_MODEL_NUMBER_LENGTH); nvme_strvis(disk->d_ident, cd->sn, sizeof(disk->d_ident), NVME_SERIAL_NUMBER_LENGTH); disk->d_hba_vendor = cpi.hba_vendor; disk->d_hba_device = cpi.hba_device; disk->d_hba_subvendor = cpi.hba_subvendor; disk->d_hba_subdevice = cpi.hba_subdevice; disk->d_stripesize = disk->d_sectorsize; disk->d_stripeoffset = 0; disk->d_devstat = devstat_new_entry(periph->periph_name, periph->unit_number, disk->d_sectorsize, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_DISK); /* * Add alias for older nvd drives to ease transition. */ /* disk_add_alias(disk, "nvd"); Have reports of this causing problems */ /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * ndadiskgonecb()) telling us that our provider has been freed. */ - if (cam_periph_acquire(periph) != CAM_REQ_CMP) { + if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); cam_periph_unhold(periph); snprintf(announce_buf, sizeof(announce_buf), "%juMB (%ju %u byte sectors)", (uintmax_t)((uintmax_t)disk->d_mediasize / (1024*1024)), (uintmax_t)disk->d_mediasize / disk->d_sectorsize, disk->d_sectorsize); xpt_announce_periph(periph, announce_buf); xpt_announce_quirks(periph, softc->quirks, NDA_Q_BIT_STRING); /* * Create our sysctl variables, now that we know * we have successfully attached. */ - if (cam_periph_acquire(periph) == CAM_REQ_CMP) + if (cam_periph_acquire(periph) == 0) taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); /* * Register for device going away and info about the drive * changing (though with NVMe, it can't) */ xpt_register_async(AC_LOST_DEVICE | AC_ADVINFO_CHANGED, ndaasync, periph, periph->path); softc->state = NDA_STATE_NORMAL; return(CAM_REQ_CMP); } static void ndastart(struct cam_periph *periph, union ccb *start_ccb) { struct nda_softc *softc = (struct nda_softc *)periph->softc; struct ccb_nvmeio *nvmeio = &start_ccb->nvmeio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ndastart\n")); switch (softc->state) { case NDA_STATE_NORMAL: { struct bio *bp; bp = cam_iosched_next_bio(softc->cam_iosched); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ndastart: bio %p\n", bp)); if (bp == NULL) { xpt_release_ccb(start_ccb); break; } switch (bp->bio_cmd) { case BIO_WRITE: softc->flags |= NDA_FLAG_DIRTY; /* FALLTHROUGH */ case BIO_READ: { #ifdef NDA_TEST_FAILURE int fail = 0; /* * Support the failure ioctls. If the command is a * read, and there are pending forced read errors, or * if a write and pending write errors, then fail this * operation with EIO. This is useful for testing * purposes. Also, support having every Nth read fail. * * This is a rather blunt tool. 
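 *
 * For example, with periodic_read_error set to 3, every third read
 * completes with EIO while writes keep succeeding.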
*/ if (bp->bio_cmd == BIO_READ) { if (softc->force_read_error) { softc->force_read_error--; fail = 1; } if (softc->periodic_read_error > 0) { if (++softc->periodic_read_count >= softc->periodic_read_error) { softc->periodic_read_count = 0; fail = 1; } } } else { if (softc->force_write_error) { softc->force_write_error--; fail = 1; } } if (fail) { biofinish(bp, NULL, EIO); xpt_release_ccb(start_ccb); ndaschedule(periph); return; } #endif KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 || round_page(bp->bio_bcount + bp->bio_ma_offset) / PAGE_SIZE == bp->bio_ma_n, ("Short bio %p", bp)); nda_nvme_rw_bio(softc, &start_ccb->nvmeio, bp, bp->bio_cmd == BIO_READ ? NVME_OPC_READ : NVME_OPC_WRITE); break; } case BIO_DELETE: { struct nvme_dsm_range *dsm_range; dsm_range = malloc(sizeof(*dsm_range), M_NVMEDA, M_ZERO | M_NOWAIT); if (dsm_range == NULL) { biofinish(bp, NULL, ENOMEM); xpt_release_ccb(start_ccb); ndaschedule(periph); return; } dsm_range->length = bp->bio_bcount / softc->disk->d_sectorsize; dsm_range->starting_lba = bp->bio_offset / softc->disk->d_sectorsize; bp->bio_driver2 = dsm_range; nda_nvme_trim(softc, &start_ccb->nvmeio, dsm_range, 1); start_ccb->ccb_h.ccb_state = NDA_CCB_TRIM; start_ccb->ccb_h.flags |= CAM_UNLOCKED; /* * Note: We can have multiple TRIMs in flight, so we don't call * cam_iosched_submit_trim(softc->cam_iosched); * since that forces the I/O scheduler to only schedule one at a time. * On NVMe drives, this is a performance disaster. */ goto out; } case BIO_FLUSH: nda_nvme_flush(softc, nvmeio); break; } start_ccb->ccb_h.ccb_state = NDA_CCB_BUFFER_IO; start_ccb->ccb_h.flags |= CAM_UNLOCKED; out: start_ccb->ccb_h.ccb_bp = bp; softc->outstanding_cmds++; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); softc->refcount--; /* May have more work to do, so ensure we stay scheduled */ ndaschedule(periph); break; } } } static void ndadone(struct cam_periph *periph, union ccb *done_ccb) { struct nda_softc *softc; struct ccb_nvmeio *nvmeio = &done_ccb->nvmeio; struct cam_path *path; int state; softc = (struct nda_softc *)periph->softc; path = done_ccb->ccb_h.path; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("ndadone\n")); state = nvmeio->ccb_h.ccb_state & NDA_CCB_TYPE_MASK; switch (state) { case NDA_CCB_BUFFER_IO: case NDA_CCB_TRIM: { struct bio *bp; int error; cam_periph_lock(periph); bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = ndaerror(done_ccb, 0, 0); if (error == ERESTART) { /* A retry was scheduled, so just return. */ cam_periph_unlock(periph); return; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } else { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) panic("REQ_CMP with QFRZN"); error = 0; } bp->bio_error = error; if (error != 0) { bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } else { bp->bio_resid = 0; } if (state == NDA_CCB_TRIM) free(bp->bio_driver2, M_NVMEDA); softc->outstanding_cmds--; /* * We need to call cam_iosched before we call biodone so that we * don't measure any activity that happens in the completion * routine, which in the case of sendfile can be quite * extensive. 
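 * Hence the strict order below: cam_iosched_bio_complete(), then
 * xpt_release_ccb(), and only then biodone().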
*/ cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb); xpt_release_ccb(done_ccb); if (state == NDA_CCB_TRIM) { #ifdef notyet TAILQ_HEAD(, bio) queue; struct bio *bp1; TAILQ_INIT(&queue); TAILQ_CONCAT(&queue, &softc->trim_req.bps, bio_queue); #endif /* * Since we can have multiple trims in flight, we don't * need to call this here. * cam_iosched_trim_done(softc->cam_iosched); */ ndaschedule(periph); cam_periph_unlock(periph); #ifdef notyet /* Not yet collapsing several BIO_DELETE requests into one TRIM */ while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, bp1, bio_queue); bp1->bio_error = error; if (error != 0) { bp1->bio_flags |= BIO_ERROR; bp1->bio_resid = bp1->bio_bcount; } else bp1->bio_resid = 0; biodone(bp1); } #else biodone(bp); #endif } else { ndaschedule(periph); cam_periph_unlock(periph); biodone(bp); } return; } case NDA_CCB_DUMP: /* No-op. We're polling */ return; default: break; } xpt_release_ccb(done_ccb); } static int ndaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct nda_softc *softc; struct cam_periph *periph; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct nda_softc *)periph->softc; switch (ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_CMD_TIMEOUT: #ifdef CAM_IO_STATS softc->timeouts++; #endif break; case CAM_REQ_ABORTED: case CAM_REQ_CMP_ERR: case CAM_REQ_TERMIO: case CAM_UNREC_HBA_ERROR: case CAM_DATA_RUN_ERR: case CAM_ATA_STATUS_ERROR: #ifdef CAM_IO_STATS softc->errors++; #endif break; default: break; } return(cam_periph_error(ccb, cam_flags, sense_flags)); } /* * Step through all NDA peripheral drivers, and if the device is still open, * sync the disk cache to physical media. */ static void ndaflush(void) { struct cam_periph *periph; struct nda_softc *softc; union ccb *ccb; int error; CAM_PERIPH_FOREACH(periph, &ndadriver) { softc = (struct nda_softc *)periph->softc; if (SCHEDULER_STOPPED()) { /* If we panicked with the lock held, do not recurse. */ if (!cam_periph_owned(periph) && (softc->flags & NDA_FLAG_OPEN)) { ndadump(softc->disk, NULL, 0, 0, 0); } continue; } cam_periph_lock(periph); /* * We only sync the cache if the drive is still open, and * if the drive is capable of it. */ if ((softc->flags & NDA_FLAG_OPEN) == 0) { cam_periph_unlock(periph); continue; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); nda_nvme_flush(softc, &ccb->nvmeio); error = cam_periph_runccb(ccb, ndaerror, /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); xpt_release_ccb(ccb); cam_periph_unlock(periph); } } static void ndashutdown(void *arg, int howto) { ndaflush(); } static void ndasuspend(void *arg) { ndaflush(); } Index: head/sys/cam/nvme/nvme_xpt.c =================================================================== --- head/sys/cam/nvme/nvme_xpt.c (revision 328917) +++ head/sys/cam/nvme/nvme_xpt.c (revision 328918) @@ -1,674 +1,672 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2015 Netflix, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * derived from ata_xpt.c: Copyright (c) 2009 Alexander Motin */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for xpt_print below */ #include "opt_cam.h" struct nvme_quirk_entry { u_int quirks; #define CAM_QUIRK_MAXTAGS 1 u_int mintags; u_int maxtags; }; /* Not even sure why we need this */ static periph_init_t nvme_probe_periph_init; static struct periph_driver nvme_probe_driver = { nvme_probe_periph_init, "nvme_probe", TAILQ_HEAD_INITIALIZER(nvme_probe_driver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(nvme_probe, nvme_probe_driver); typedef enum { NVME_PROBE_IDENTIFY, NVME_PROBE_DONE, NVME_PROBE_INVALID, NVME_PROBE_RESET } nvme_probe_action; static char *nvme_probe_action_text[] = { "NVME_PROBE_IDENTIFY", "NVME_PROBE_DONE", "NVME_PROBE_INVALID", "NVME_PROBE_RESET", }; #define NVME_PROBE_SET_ACTION(softc, newaction) \ do { \ char **text; \ text = nvme_probe_action_text; \ CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE, \ ("Probe %s to %s\n", text[(softc)->action], \ text[(newaction)])); \ (softc)->action = (newaction); \ } while(0) typedef enum { NVME_PROBE_NO_ANNOUNCE = 0x04 } nvme_probe_flags; typedef struct { TAILQ_HEAD(, ccb_hdr) request_ccbs; nvme_probe_action action; nvme_probe_flags flags; int restart; struct cam_periph *periph; } nvme_probe_softc; static struct nvme_quirk_entry nvme_quirk_table[] = { { // { // T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, // /*vendor*/"*", /*product*/"*", /*revision*/"*" // }, .quirks = 0, .mintags = 0, .maxtags = 0 }, }; static const int nvme_quirk_table_size = sizeof(nvme_quirk_table) / sizeof(*nvme_quirk_table); static cam_status nvme_probe_register(struct cam_periph *periph, void *arg); static void nvme_probe_schedule(struct cam_periph *nvme_probe_periph); static void nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb); static void nvme_probe_cleanup(struct cam_periph *periph); //static void nvme_find_quirk(struct cam_ed *device); static void nvme_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *ccb); static struct cam_ed * nvme_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); static void nvme_device_transport(struct cam_path *path); static void nvme_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg); static void 
nvme_action(union ccb *start_ccb); static void nvme_announce_periph(struct cam_periph *periph); static void nvme_proto_announce(struct cam_ed *device); static void nvme_proto_denounce(struct cam_ed *device); static void nvme_proto_debug_out(union ccb *ccb); static struct xpt_xport_ops nvme_xport_ops = { .alloc_device = nvme_alloc_device, .action = nvme_action, .async = nvme_dev_async, .announce = nvme_announce_periph, }; #define NVME_XPT_XPORT(x, X) \ static struct xpt_xport nvme_xport_ ## x = { \ .xport = XPORT_ ## X, \ .name = #x, \ .ops = &nvme_xport_ops, \ }; \ CAM_XPT_XPORT(nvme_xport_ ## x); NVME_XPT_XPORT(nvme, NVME); #undef NVME_XPT_XPORT static struct xpt_proto_ops nvme_proto_ops = { .announce = nvme_proto_announce, .denounce = nvme_proto_denounce, .debug_out = nvme_proto_debug_out, }; static struct xpt_proto nvme_proto = { .proto = PROTO_NVME, .name = "nvme", .ops = &nvme_proto_ops, }; CAM_XPT_PROTO(nvme_proto); static void nvme_probe_periph_init() { } static cam_status nvme_probe_register(struct cam_periph *periph, void *arg) { union ccb *request_ccb; /* CCB representing the probe request */ - cam_status status; nvme_probe_softc *softc; request_ccb = (union ccb *)arg; if (request_ccb == NULL) { printf("nvme_probe_register: no probe CCB, " "can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (nvme_probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_ZERO | M_NOWAIT); if (softc == NULL) { printf("nvme_probe_register: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } TAILQ_INIT(&softc->request_ccbs); TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); softc->flags = 0; periph->softc = softc; softc->periph = periph; softc->action = NVME_PROBE_INVALID; - status = cam_periph_acquire(periph); - if (status != CAM_REQ_CMP) { - return (status); - } + if (cam_periph_acquire(periph) != 0) + return (CAM_REQ_CMP_ERR); + CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n")); // nvme_device_transport(periph->path); nvme_probe_schedule(periph); return(CAM_REQ_CMP); } static void nvme_probe_schedule(struct cam_periph *periph) { union ccb *ccb; nvme_probe_softc *softc; softc = (nvme_probe_softc *)periph->softc; ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY); if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE) softc->flags |= NVME_PROBE_NO_ANNOUNCE; else softc->flags &= ~NVME_PROBE_NO_ANNOUNCE; xpt_schedule(periph, CAM_PRIORITY_XPT); } static void nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb) { struct ccb_nvmeio *nvmeio; struct ccb_scsiio *csio; nvme_probe_softc *softc; struct cam_path *path; const struct nvme_namespace_data *nvme_data; lun_id_t lun; CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_start\n")); softc = (nvme_probe_softc *)periph->softc; path = start_ccb->ccb_h.path; nvmeio = &start_ccb->nvmeio; csio = &start_ccb->csio; nvme_data = periph->path->device->nvme_data; if (softc->restart) { softc->restart = 0; if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) NVME_PROBE_SET_ACTION(softc, NVME_PROBE_RESET); else NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY); } /* * Other transports have to ask their SIM to do a lot of action. * NVMe doesn't, so don't do the dance. Just do things * directly. */ switch (softc->action) { case NVME_PROBE_RESET: /* FALLTHROUGH */ case NVME_PROBE_IDENTIFY: nvme_device_transport(path); /* * Test for lun == CAM_LUN_WILDCARD is lame, but * appears to be necessary here. 
XXX */ lun = xpt_path_lun_id(periph->path); if (lun == CAM_LUN_WILDCARD || periph->path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); start_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(start_ccb); xpt_async(AC_FOUND_DEVICE, path, start_ccb); } NVME_PROBE_SET_ACTION(softc, NVME_PROBE_DONE); break; default: panic("nvme_probe_start: invalid action state 0x%x\n", softc->action); } /* * Probing is now done. We need to complete any lingering items * in the queue, though there shouldn't be any. */ xpt_release_ccb(start_ccb); CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n")); while ((start_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs))) { TAILQ_REMOVE(&softc->request_ccbs, &start_ccb->ccb_h, periph_links.tqe); start_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(start_ccb); } cam_periph_invalidate(periph); cam_periph_release_locked(periph); } static void nvme_probe_cleanup(struct cam_periph *periph) { free(periph->softc, M_CAMXPT); } #if 0 /* XXX should be used, don't delete */ static void nvme_find_quirk(struct cam_ed *device) { struct nvme_quirk_entry *quirk; caddr_t match; match = cam_quirkmatch((caddr_t)&device->nvme_data, (caddr_t)nvme_quirk_table, nvme_quirk_table_size, sizeof(*nvme_quirk_table), nvme_identify_match); if (match == NULL) panic("xpt_find_quirk: device didn't match wildcard entry!!"); quirk = (struct nvme_quirk_entry *)match; device->quirk = quirk; if (quirk->quirks & CAM_QUIRK_MAXTAGS) { device->mintags = quirk->mintags; device->maxtags = quirk->maxtags; } } #endif static void nvme_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *request_ccb) { struct ccb_pathinq cpi; cam_status status; struct cam_periph *old_periph; int lock; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun\n")); xpt_path_inq(&cpi, path); if (cpi.ccb_h.status != CAM_REQ_CMP) { if (request_ccb != NULL) { request_ccb->ccb_h.status = cpi.ccb_h.status; xpt_done(request_ccb); } return; } if (xpt_path_lun_id(path) == CAM_LUN_WILDCARD) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun ignoring bus\n")); request_ccb->ccb_h.status = CAM_REQ_CMP; /* XXX signal error ? 
*/ xpt_done(request_ccb); return; } lock = (xpt_path_owned(path) == 0); if (lock) xpt_path_lock(path); if ((old_periph = cam_periph_find(path, "nvme_probe")) != NULL) { if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) { nvme_probe_softc *softc; softc = (nvme_probe_softc *)old_periph->softc; TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); softc->restart = 1; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("restarting nvme_probe device\n")); } else { request_ccb->ccb_h.status = CAM_REQ_CMP_ERR; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Failing to restart nvme_probe device\n")); xpt_done(request_ccb); } } else { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Adding nvme_probe device\n")); status = cam_periph_alloc(nvme_probe_register, NULL, nvme_probe_cleanup, nvme_probe_start, "nvme_probe", CAM_PERIPH_BIO, request_ccb->ccb_h.path, NULL, 0, request_ccb); if (status != CAM_REQ_CMP) { xpt_print(path, "xpt_scan_lun: cam_alloc_periph " "returned an error, can't continue probe\n"); request_ccb->ccb_h.status = status; xpt_done(request_ccb); } } if (lock) xpt_path_unlock(path); } static struct cam_ed * nvme_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct nvme_quirk_entry *quirk; struct cam_ed *device; device = xpt_alloc_device(bus, target, lun_id); if (device == NULL) return (NULL); /* * Take the default quirk entry until we have inquiry * data from nvme and can determine a better quirk to use. */ quirk = &nvme_quirk_table[nvme_quirk_table_size - 1]; device->quirk = (void *)quirk; device->mintags = 0; device->maxtags = 0; device->inq_flags = 0; device->queue_flags = 0; device->device_id = NULL; /* XXX Need to set this somewhere */ device->device_id_len = 0; device->serial_num = NULL; /* XXX Need to set this somewhere */ device->serial_num_len = 0; return (device); } static void nvme_device_transport(struct cam_path *path) { struct ccb_pathinq cpi; struct ccb_trans_settings cts; /* XXX get data from nvme namespace and other info ??? 
*/ /* Get transport information from the SIM */ xpt_path_inq(&cpi, path); path->device->transport = cpi.transport; path->device->transport_version = cpi.transport_version; path->device->protocol = cpi.protocol; path->device->protocol_version = cpi.protocol_version; /* Tell the controller what we think */ xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.transport = path->device->transport; cts.transport_version = path->device->transport_version; cts.protocol = path->device->protocol; cts.protocol_version = path->device->protocol_version; cts.proto_specific.valid = 0; cts.xport_specific.valid = 0; xpt_action((union ccb *)&cts); } static void nvme_dev_advinfo(union ccb *start_ccb) { struct cam_ed *device; struct ccb_dev_advinfo *cdai; off_t amt; start_ccb->ccb_h.status = CAM_REQ_INVALID; device = start_ccb->ccb_h.path->device; cdai = &start_ccb->cdai; switch(cdai->buftype) { case CDAI_TYPE_SCSI_DEVID: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->device_id_len; if (device->device_id_len == 0) break; amt = device->device_id_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->device_id, amt); break; case CDAI_TYPE_SERIAL_NUM: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->serial_num_len; if (device->serial_num_len == 0) break; amt = device->serial_num_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->serial_num, amt); break; case CDAI_TYPE_PHYS_PATH: if (cdai->flags & CDAI_FLAG_STORE) { if (device->physpath != NULL) free(device->physpath, M_CAMXPT); device->physpath_len = cdai->bufsiz; /* Clear existing buffer if zero length */ if (cdai->bufsiz == 0) break; device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT); if (device->physpath == NULL) { start_ccb->ccb_h.status = CAM_REQ_ABORTED; return; } memcpy(device->physpath, cdai->buf, cdai->bufsiz); } else { cdai->provsiz = device->physpath_len; if (device->physpath_len == 0) break; amt = device->physpath_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->physpath, amt); } break; case CDAI_TYPE_NVME_CNTRL: if (cdai->flags & CDAI_FLAG_STORE) return; amt = sizeof(struct nvme_controller_data); cdai->provsiz = amt; if (amt > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->nvme_cdata, amt); break; case CDAI_TYPE_NVME_NS: if (cdai->flags & CDAI_FLAG_STORE) return; amt = sizeof(struct nvme_namespace_data); cdai->provsiz = amt; if (amt > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->nvme_data, amt); break; default: return; } start_ccb->ccb_h.status = CAM_REQ_CMP; if (cdai->flags & CDAI_FLAG_STORE) { xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path, (void *)(uintptr_t)cdai->buftype); } } static void nvme_action(union ccb *start_ccb) { CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_action: func= %#x\n", start_ccb->ccb_h.func_code)); switch (start_ccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_SCAN_TGT: case XPT_SCAN_LUN: nvme_scan_lun(start_ccb->ccb_h.path->periph, start_ccb->ccb_h.path, start_ccb->crcn.flags, start_ccb); break; case XPT_DEV_ADVINFO: nvme_dev_advinfo(start_ccb); break; default: xpt_action_default(start_ccb); break; } } /* * Handle any per-device event notifications that require action by the XPT. 
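 *
 * Today that is only AC_LOST_DEVICE: a configured, non-wildcard device
 * is marked CAM_DEV_UNCONFIGURED and the reference taken by
 * xpt_acquire_device() during probe is dropped.
 */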
*/ static void nvme_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg) { /* * We only need to handle events for real devices. */ if (target->target_id == CAM_TARGET_WILDCARD || device->lun_id == CAM_LUN_WILDCARD) return; if (async_code == AC_LOST_DEVICE && (device->flags & CAM_DEV_UNCONFIGURED) == 0) { device->flags |= CAM_DEV_UNCONFIGURED; xpt_release_device(device); } } static void nvme_announce_periph(struct cam_periph *periph) { struct ccb_pathinq cpi; struct ccb_trans_settings cts; struct cam_path *path = periph->path; struct ccb_trans_settings_nvme *nvmex; cam_periph_assert(periph, MA_OWNED); /* Ask the SIM for connection details */ xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb*)&cts); if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) return; nvmex = &cts.xport_specific.nvme; /* Ask the SIM for its base transfer speed */ xpt_path_inq(&cpi, periph->path); printf("%s%d: nvme version %d.%d x%d (max x%d) lanes PCIe Gen%d (max Gen%d) link", periph->periph_name, periph->unit_number, NVME_MAJOR(nvmex->spec), NVME_MINOR(nvmex->spec), nvmex->lanes, nvmex->max_lanes, nvmex->speed, nvmex->max_speed); printf("\n"); } static void nvme_proto_announce(struct cam_ed *device) { struct sbuf sb; char buffer[120]; sbuf_new(&sb, buffer, sizeof(buffer), SBUF_FIXEDLEN); nvme_print_ident(device->nvme_cdata, device->nvme_data, &sb); sbuf_finish(&sb); sbuf_putbuf(&sb); } static void nvme_proto_denounce(struct cam_ed *device) { nvme_proto_announce(device); } static void nvme_proto_debug_out(union ccb *ccb) { char cdb_str[(sizeof(struct nvme_command) * 3) + 1]; if (ccb->ccb_h.func_code != XPT_NVME_IO) return; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_CDB,("%s. NCB: %s\n", nvme_op_string(&ccb->nvmeio.cmd), nvme_cmd_string(&ccb->nvmeio.cmd, cdb_str, sizeof(cdb_str)))); } Index: head/sys/cam/scsi/scsi_cd.c =================================================================== --- head/sys/cam/scsi/scsi_cd.c (revision 328917) +++ head/sys/cam/scsi/scsi_cd.c (revision 328918) @@ -1,3713 +1,3713 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Portions of this driver taken from the original FreeBSD cd driver. * Written by Julian Elischer (julian@tfs.com) * for TRW Financial Systems for use under the MACH(2.5) operating system. * * TRW Financial Systems, in accordance with their agreement with Carnegie * Mellon University, makes this software available to CMU to distribute * or use in any manner that they see fit as long as this message is kept with * the software. For this reason TFS also grants any other persons or * organisations permission to use or modify this software. * * TFS supplies this software to be publicly redistributed * on the understanding that TFS is not responsible for the correct * functioning of this software in any circumstances. * * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992 * * from: cd.c,v 1.83 1997/05/04 15:24:22 joerg Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_cd.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define LEADOUT 0xaa /* leadout toc entry */ struct cd_params { u_int32_t blksize; u_long disksize; }; typedef enum { CD_Q_NONE = 0x00, CD_Q_NO_TOUCH = 0x01, CD_Q_BCD_TRACKS = 0x02, CD_Q_10_BYTE_ONLY = 0x10, CD_Q_RETRY_BUSY = 0x40 } cd_quirks; #define CD_Q_BIT_STRING \ "\020" \ "\001NO_TOUCH" \ "\002BCD_TRACKS" \ "\00510_BYTE_ONLY" \ "\007RETRY_BUSY" typedef enum { CD_FLAG_INVALID = 0x0001, CD_FLAG_NEW_DISC = 0x0002, CD_FLAG_DISC_LOCKED = 0x0004, CD_FLAG_DISC_REMOVABLE = 0x0008, CD_FLAG_SAW_MEDIA = 0x0010, CD_FLAG_ACTIVE = 0x0080, CD_FLAG_SCHED_ON_COMP = 0x0100, CD_FLAG_RETRY_UA = 0x0200, CD_FLAG_VALID_MEDIA = 0x0400, CD_FLAG_VALID_TOC = 0x0800, CD_FLAG_SCTX_INIT = 0x1000 } cd_flags; typedef enum { CD_CCB_PROBE = 0x01, CD_CCB_BUFFER_IO = 0x02, CD_CCB_TUR = 0x04, CD_CCB_TYPE_MASK = 0x0F, CD_CCB_RETRY_UA = 0x10 } cd_ccb_state; #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct cd_tocdata { struct ioc_toc_header header; struct cd_toc_entry entries[100]; }; struct cd_toc_single { struct ioc_toc_header header; struct cd_toc_entry entry; }; typedef enum { CD_STATE_PROBE, CD_STATE_NORMAL } cd_state; struct cd_softc { cam_pinfo pinfo; cd_state state; volatile cd_flags flags; struct bio_queue_head bio_queue; LIST_HEAD(, ccb_hdr) pending_ccbs; struct cd_params params; union ccb saved_ccb; cd_quirks quirks; struct cam_periph *periph; int minimum_command_size; int outstanding_cmds; int tur; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; STAILQ_HEAD(, cd_mode_params) mode_queue; struct cd_tocdata toc; struct disk *disk; struct callout mediapoll_c; #define CD_ANNOUNCETMP_SZ 120 char announce_temp[CD_ANNOUNCETMP_SZ]; #define CD_ANNOUNCE_SZ 400 char announce_buf[CD_ANNOUNCE_SZ]; }; struct cd_page_sizes { int page; int page_size; }; static struct cd_page_sizes cd_page_size_table[] = { { AUDIO_PAGE, sizeof(struct cd_audio_page)} }; 
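/*
 * cdgetpagesize() consults this table to map a mode page number to its
 * in-core size. A sketch of that lookup (the real function is defined
 * later in this file):
 *
 *	for (i = 0; i < nitems(cd_page_size_table); i++)
 *		if (cd_page_size_table[i].page == page_num)
 *			return (cd_page_size_table[i].page_size);
 *	return (-1);
 */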
struct cd_quirk_entry { struct scsi_inquiry_pattern inq_pat; cd_quirks quirks; }; /* * NOTE ON 10_BYTE_ONLY quirks: Any 10_BYTE_ONLY quirks MUST be because * your device hangs when it gets a 10 byte command. Adding a quirk just * to get rid of the informative diagnostic message is not acceptable. All * 10_BYTE_ONLY quirks must be documented in full in a PR (which should be * referenced in a comment along with the quirk) , and must be approved by * ken@FreeBSD.org. Any quirks added that don't adhere to this policy may * be removed until the submitter can explain why they are needed. * 10_BYTE_ONLY quirks will be removed (as they will no longer be necessary) * when the CAM_NEW_TRAN_CODE work is done. */ static struct cd_quirk_entry cd_quirk_table[] = { { { T_CDROM, SIP_MEDIA_REMOVABLE, "CHINON", "CD-ROM CDS-535","*"}, /* quirks */ CD_Q_BCD_TRACKS }, { /* * VMware returns BUSY status when storage has transient * connectivity problems, so better wait. */ {T_CDROM, SIP_MEDIA_REMOVABLE, "NECVMWar", "VMware IDE CDR10", "*"}, /*quirks*/ CD_Q_RETRY_BUSY } }; static disk_open_t cdopen; static disk_close_t cdclose; static disk_ioctl_t cdioctl; static disk_strategy_t cdstrategy; static periph_init_t cdinit; static periph_ctor_t cdregister; static periph_dtor_t cdcleanup; static periph_start_t cdstart; static periph_oninv_t cdoninvalidate; static void cdasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static int cdcmdsizesysctl(SYSCTL_HANDLER_ARGS); static int cdrunccb(union ccb *ccb, int (*error_routine)(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags), u_int32_t cam_flags, u_int32_t sense_flags); static void cddone(struct cam_periph *periph, union ccb *start_ccb); static union cd_pages *cdgetpage(struct cd_mode_params *mode_params); static int cdgetpagesize(int page_num); static void cdprevent(struct cam_periph *periph, int action); static int cdcheckmedia(struct cam_periph *periph); static int cdsize(struct cam_periph *periph, u_int32_t *size); static int cd6byteworkaround(union ccb *ccb); static int cderror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int cdreadtoc(struct cam_periph *periph, u_int32_t mode, u_int32_t start, u_int8_t *data, u_int32_t len, u_int32_t sense_flags); static int cdgetmode(struct cam_periph *periph, struct cd_mode_params *data, u_int32_t page); static int cdsetmode(struct cam_periph *periph, struct cd_mode_params *data); static int cdplay(struct cam_periph *periph, u_int32_t blk, u_int32_t len); static int cdreadsubchannel(struct cam_periph *periph, u_int32_t mode, u_int32_t format, int track, struct cd_sub_channel_info *data, u_int32_t len); static int cdplaymsf(struct cam_periph *periph, u_int32_t startm, u_int32_t starts, u_int32_t startf, u_int32_t endm, u_int32_t ends, u_int32_t endf); static int cdplaytracks(struct cam_periph *periph, u_int32_t strack, u_int32_t sindex, u_int32_t etrack, u_int32_t eindex); static int cdpause(struct cam_periph *periph, u_int32_t go); static int cdstopunit(struct cam_periph *periph, u_int32_t eject); static int cdstartunit(struct cam_periph *periph, int load); static int cdsetspeed(struct cam_periph *periph, u_int32_t rdspeed, u_int32_t wrspeed); static int cdreportkey(struct cam_periph *periph, struct dvd_authinfo *authinfo); static int cdsendkey(struct cam_periph *periph, struct dvd_authinfo *authinfo); static int cdreaddvdstructure(struct cam_periph *periph, struct dvd_struct *dvdstruct); static timeout_t cdmediapoll; static struct periph_driver cddriver = { cdinit, "cd", 
TAILQ_HEAD_INITIALIZER(cddriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(cd, cddriver); #ifndef CD_DEFAULT_POLL_PERIOD #define CD_DEFAULT_POLL_PERIOD 3 #endif #ifndef CD_DEFAULT_RETRY #define CD_DEFAULT_RETRY 4 #endif #ifndef CD_DEFAULT_TIMEOUT #define CD_DEFAULT_TIMEOUT 30000 #endif static int cd_poll_period = CD_DEFAULT_POLL_PERIOD; static int cd_retry_count = CD_DEFAULT_RETRY; static int cd_timeout = CD_DEFAULT_TIMEOUT; static SYSCTL_NODE(_kern_cam, OID_AUTO, cd, CTLFLAG_RD, 0, "CAM CDROM driver"); SYSCTL_INT(_kern_cam_cd, OID_AUTO, poll_period, CTLFLAG_RWTUN, &cd_poll_period, 0, "Media polling period in seconds"); SYSCTL_INT(_kern_cam_cd, OID_AUTO, retry_count, CTLFLAG_RWTUN, &cd_retry_count, 0, "Normal I/O retry count"); SYSCTL_INT(_kern_cam_cd, OID_AUTO, timeout, CTLFLAG_RWTUN, &cd_timeout, 0, "Timeout, in us, for read operations"); static MALLOC_DEFINE(M_SCSICD, "scsi_cd", "scsi_cd buffers"); static void cdinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, cdasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("cd: Failed to attach master async callback " "due to status 0x%x!\n", status); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void cddiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; cam_periph_release(periph); } static void cdoninvalidate(struct cam_periph *periph) { struct cd_softc *softc; softc = (struct cd_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, cdasync, periph, periph->path); softc->flags |= CD_FLAG_INVALID; /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ bioq_flush(&softc->bio_queue, NULL, ENXIO); disk_gone(softc->disk); } static void cdcleanup(struct cam_periph *periph) { struct cd_softc *softc; softc = (struct cd_softc *)periph->softc; cam_periph_unlock(periph); if ((softc->flags & CD_FLAG_SCTX_INIT) != 0 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) { xpt_print(periph->path, "can't remove sysctl context\n"); } callout_drain(&softc->mediapoll_c); disk_destroy(softc->disk); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void cdasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; struct cd_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data) != T_CDROM && SID_TYPE(&cgd->inq_data) != T_WORM) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(cdregister, cdoninvalidate, cdcleanup, cdstart, "cd", CAM_PERIPH_BIO, path, cdasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("cdasync: Unable to attach new device " "due to status 0x%x\n", status); break; } case AC_UNIT_ATTENTION: { union ccb *ccb; int error_code, sense_key, asc, ascq; softc = (struct cd_softc *)periph->softc; ccb = (union ccb *)arg; /* * Handle all media change UNIT ATTENTIONs except * our own, as they will be handled by cderror(). 
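 * The asc/ascq pair tested below, 0x28/0x00, is the standard SCSI
 * "NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED" sense code.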
*/ if (xpt_path_periph(ccb->ccb_h.path) != periph && scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (asc == 0x28 && ascq == 0x00) disk_media_changed(softc->disk, M_NOWAIT); } cam_periph_async(periph, code, path, arg); break; } case AC_SCSI_AEN: softc = (struct cd_softc *)periph->softc; if (softc->state == CD_STATE_NORMAL && !softc->tur) { - if (cam_periph_acquire(periph) == CAM_REQ_CMP) { + if (cam_periph_acquire(periph) == 0) { softc->tur = 1; xpt_schedule(periph, CAM_PRIORITY_NORMAL); } } /* FALLTHROUGH */ case AC_SENT_BDR: case AC_BUS_RESET: { struct ccb_hdr *ccbh; softc = (struct cd_softc *)periph->softc; /* * Don't fail on the expected unit attention * that will occur. */ softc->flags |= CD_FLAG_RETRY_UA; LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) ccbh->ccb_state |= CD_CCB_RETRY_UA; /* FALLTHROUGH */ } default: cam_periph_async(periph, code, path, arg); break; } } static void cdsysctlinit(void *context, int pending) { struct cam_periph *periph; struct cd_softc *softc; char tmpstr[32], tmpstr2[16]; periph = (struct cam_periph *)context; - if (cam_periph_acquire(periph) != CAM_REQ_CMP) + if (cam_periph_acquire(periph) != 0) return; softc = (struct cd_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM CD unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= CD_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_cd), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) { printf("cdsysctlinit: unable to allocate sysctl tree\n"); cam_periph_release(periph); return; } /* * Now register the sysctl handler, so the user can set the value on * the fly. */ SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW, &softc->minimum_command_size, 0, cdcmdsizesysctl, "I", "Minimum CDB size"); cam_periph_release(periph); } /* * We have a handler function for this so we can check the values when the * user sets them, instead of every time we look at them. */ static int cdcmdsizesysctl(SYSCTL_HANDLER_ARGS) { int error, value; value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* * The only real values we can have here are 6 or 10. I don't * really foresee having 12 be an option at any time in the future. * So if the user sets something less than or equal to 6, we'll set * it to 6. If he sets something greater than 6, we'll set it to 10. * * I suppose we could just return an error here for the wrong values, * but I don't think it's necessary to do so, as long as we can * determine the user's intent without too much trouble. */ if (value < 6) value = 6; else if (value > 6) value = 10; *(int *)arg1 = value; return (0); } static cam_status cdregister(struct cam_periph *periph, void *arg) { struct cd_softc *softc; struct ccb_pathinq cpi; struct ccb_getdev *cgd; char tmpstr[80]; caddr_t match; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("cdregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct cd_softc *)malloc(sizeof(*softc),M_DEVBUF, M_NOWAIT | M_ZERO); if (softc == NULL) { printf("cdregister: Unable to probe new device.
" "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } LIST_INIT(&softc->pending_ccbs); STAILQ_INIT(&softc->mode_queue); softc->state = CD_STATE_PROBE; bioq_init(&softc->bio_queue); if (SID_IS_REMOVABLE(&cgd->inq_data)) softc->flags |= CD_FLAG_DISC_REMOVABLE; periph->softc = softc; softc->periph = periph; /* * See if this device has any quirks. */ match = cam_quirkmatch((caddr_t)&cgd->inq_data, (caddr_t)cd_quirk_table, nitems(cd_quirk_table), sizeof(*cd_quirk_table), scsi_inquiry_match); if (match != NULL) softc->quirks = ((struct cd_quirk_entry *)match)->quirks; else softc->quirks = CD_Q_NONE; /* Check if the SIM does not want 6 byte commands */ xpt_path_inq(&cpi, periph->path); if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE)) softc->quirks |= CD_Q_10_BYTE_ONLY; TASK_INIT(&softc->sysctl_task, 0, cdsysctlinit, periph); /* The default is 6 byte commands, unless quirked otherwise */ if (softc->quirks & CD_Q_10_BYTE_ONLY) softc->minimum_command_size = 10; else softc->minimum_command_size = 6; /* * Refcount and block open attempts until we are setup * Can't block */ (void)cam_periph_hold(periph, PRIBIO); cam_periph_unlock(periph); /* * Load the user's default, if any. */ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.cd.%d.minimum_cmd_size", periph->unit_number); TUNABLE_INT_FETCH(tmpstr, &softc->minimum_command_size); /* 6 and 10 are the only permissible values here. */ if (softc->minimum_command_size < 6) softc->minimum_command_size = 6; else if (softc->minimum_command_size > 6) softc->minimum_command_size = 10; /* * We need to register the statistics structure for this device, * but we don't have the blocksize yet for it. So, we register * the structure and indicate that we don't have the blocksize * yet. Unlike other SCSI peripheral drivers, we explicitly set * the device type here to be CDROM, rather than just ORing in * the device type. This is because this driver can attach to either * CDROM or WORM devices, and we want this peripheral driver to * show up in the devstat list as a CD peripheral driver, not a * WORM peripheral driver. WORM drives will also have the WORM * driver attached to them. */ softc->disk = disk_alloc(); softc->disk->d_devstat = devstat_new_entry("cd", periph->unit_number, 0, DEVSTAT_BS_UNAVAILABLE, DEVSTAT_TYPE_CDROM | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_CD); softc->disk->d_open = cdopen; softc->disk->d_close = cdclose; softc->disk->d_strategy = cdstrategy; softc->disk->d_gone = cddiskgonecb; softc->disk->d_ioctl = cdioctl; softc->disk->d_name = "cd"; cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor, sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr)); strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr)); cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)], cgd->inq_data.product, sizeof(cgd->inq_data.product), sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr)); softc->disk->d_unit = periph->unit_number; softc->disk->d_drv1 = periph; if (cpi.maxio == 0) softc->disk->d_maxsize = DFLTPHYS; /* traditional default */ else if (cpi.maxio > MAXPHYS) softc->disk->d_maxsize = MAXPHYS; /* for safety */ else softc->disk->d_maxsize = cpi.maxio; softc->disk->d_flags = 0; softc->disk->d_hba_vendor = cpi.hba_vendor; softc->disk->d_hba_device = cpi.hba_device; softc->disk->d_hba_subvendor = cpi.hba_subvendor; softc->disk->d_hba_subdevice = cpi.hba_subdevice; /* * Acquire a reference to the periph before we register with GEOM. 
* We'll release this reference once GEOM calls us back (via * cddiskgonecb()) telling us that our provider has been freed. */ - if (cam_periph_acquire(periph) != CAM_REQ_CMP) { + if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); /* * Add an async callback so that we get * notified if this device goes away. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_SCSI_AEN | AC_UNIT_ATTENTION, cdasync, periph, periph->path); /* * Schedule periodic media polling events. */ callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); if ((softc->flags & CD_FLAG_DISC_REMOVABLE) && (cgd->inq_flags & SID_AEN) == 0 && cd_poll_period != 0) callout_reset(&softc->mediapoll_c, cd_poll_period * hz, cdmediapoll, periph); xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static int cdopen(struct disk *dp) { struct cam_periph *periph; struct cd_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; softc = (struct cd_softc *)periph->softc; - if (cam_periph_acquire(periph) != CAM_REQ_CMP) + if (cam_periph_acquire(periph) != 0) return(ENXIO); cam_periph_lock(periph); if (softc->flags & CD_FLAG_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(ENXIO); } if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("cdopen\n")); /* * Check for media, and set the appropriate flags. We don't bail * if we don't have media, but then we don't allow anything but the * CDIOCEJECT/CDIOCCLOSE ioctls if there is no media. */ cdcheckmedia(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("leaving cdopen\n")); cam_periph_unhold(periph); cam_periph_unlock(periph); return (0); } static int cdclose(struct disk *dp) { struct cam_periph *periph; struct cd_softc *softc; periph = (struct cam_periph *)dp->d_drv1; softc = (struct cd_softc *)periph->softc; cam_periph_lock(periph); if (cam_periph_hold(periph, PRIBIO) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (0); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("cdclose\n")); if ((softc->flags & CD_FLAG_DISC_REMOVABLE) != 0) cdprevent(periph, PR_ALLOW); /* * Since we're closing this CD, mark the blocksize as unavailable. * It will be marked as available when the CD is opened again. */ softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE; /* * We'll check the media and toc again at the next open(). */ softc->flags &= ~(CD_FLAG_VALID_MEDIA|CD_FLAG_VALID_TOC); cam_periph_unhold(periph); cam_periph_release_locked(periph); cam_periph_unlock(periph); return (0); } static int cdrunccb(union ccb *ccb, int (*error_routine)(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags), u_int32_t cam_flags, u_int32_t sense_flags) { struct cd_softc *softc; struct cam_periph *periph; int error; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct cd_softc *)periph->softc; error = cam_periph_runccb(ccb, error_routine, cam_flags, sense_flags, softc->disk->d_devstat); return(error); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a bio and will include * only one physical transfer. 
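* For example, assuming a valid 2048-byte blksize: cdstart() computes the * starting LBA as bio_offset / blksize and the block count as * bio_bcount / blksize, so a 64 KiB read at byte offset 1 MiB becomes * LBA 512 for 32 blocks. 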
*/ static void cdstrategy(struct bio *bp) { struct cam_periph *periph; struct cd_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cdstrategy(%p)\n", bp)); softc = (struct cd_softc *)periph->softc; /* * If the device has been made invalid, error out */ if ((softc->flags & CD_FLAG_INVALID)) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } /* * If we don't have valid media, look for it before trying to * schedule the I/O. */ if ((softc->flags & CD_FLAG_VALID_MEDIA) == 0) { int error; error = cdcheckmedia(periph); if (error != 0) { cam_periph_unlock(periph); biofinish(bp, NULL, error); return; } } /* * Place it in the queue of disk activities for this disk */ bioq_disksort(&softc->bio_queue, bp); xpt_schedule(periph, CAM_PRIORITY_NORMAL); cam_periph_unlock(periph); return; } static void cdstart(struct cam_periph *periph, union ccb *start_ccb) { struct cd_softc *softc; struct bio *bp; struct ccb_scsiio *csio; struct scsi_read_capacity_data *rcap; softc = (struct cd_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdstart\n")); switch (softc->state) { case CD_STATE_NORMAL: { bp = bioq_first(&softc->bio_queue); if (bp == NULL) { if (softc->tur) { softc->tur = 0; csio = &start_ccb->csio; scsi_test_unit_ready(csio, /*retries*/ cd_retry_count, cddone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, cd_timeout); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = CD_CCB_TUR; xpt_action(start_ccb); } else xpt_release_ccb(start_ccb); } else { if (softc->tur) { softc->tur = 0; cam_periph_release_locked(periph); } bioq_remove(&softc->bio_queue, bp); scsi_read_write(&start_ccb->csio, /*retries*/ cd_retry_count, /* cbfcnp */ cddone, MSG_SIMPLE_Q_TAG, /* read */bp->bio_cmd == BIO_READ ? SCSI_RW_READ : SCSI_RW_WRITE, /* byte2 */ 0, /* minimum_cmd_size */ 10, /* lba */ bp->bio_offset / softc->params.blksize, bp->bio_bcount / softc->params.blksize, /* data_ptr */ bp->bio_data, /* dxfer_len */ bp->bio_bcount, /* sense_len */ cd_retry_count ? SSD_FULL_SIZE : SF_NO_PRINT, /* timeout */ cd_timeout); /* Use READ CD command for audio tracks. */ if (softc->params.blksize == 2352) { start_ccb->csio.cdb_io.cdb_bytes[0] = READ_CD; start_ccb->csio.cdb_io.cdb_bytes[9] = 0xf8; start_ccb->csio.cdb_io.cdb_bytes[10] = 0; start_ccb->csio.cdb_io.cdb_bytes[11] = 0; start_ccb->csio.cdb_len = 12; } start_ccb->ccb_h.ccb_state = CD_CCB_BUFFER_IO; LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h, periph_links.le); softc->outstanding_cmds++; /* We expect a unit attention from this device */ if ((softc->flags & CD_FLAG_RETRY_UA) != 0) { start_ccb->ccb_h.ccb_state |= CD_CCB_RETRY_UA; softc->flags &= ~CD_FLAG_RETRY_UA; } start_ccb->ccb_h.ccb_bp = bp; bp = bioq_first(&softc->bio_queue); xpt_action(start_ccb); } if (bp != NULL || softc->tur) { /* Have more work to do, so ensure we stay scheduled */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); } break; } case CD_STATE_PROBE: { rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap), M_SCSICD, M_NOWAIT | M_ZERO); if (rcap == NULL) { xpt_print(periph->path, "cdstart: Couldn't malloc read_capacity data\n"); /* cd_free_periph??? 
*/ break; } csio = &start_ccb->csio; scsi_read_capacity(csio, /*retries*/ cd_retry_count, cddone, MSG_SIMPLE_Q_TAG, rcap, SSD_FULL_SIZE, /*timeout*/20000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = CD_CCB_PROBE; xpt_action(start_ccb); break; } } } static void cddone(struct cam_periph *periph, union ccb *done_ccb) { struct cd_softc *softc; struct ccb_scsiio *csio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cddone\n")); softc = (struct cd_softc *)periph->softc; csio = &done_ccb->csio; switch (csio->ccb_h.ccb_state & CD_CCB_TYPE_MASK) { case CD_CCB_BUFFER_IO: { struct bio *bp; int error; bp = (struct bio *)done_ccb->ccb_h.ccb_bp; error = 0; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { int sf; if ((done_ccb->ccb_h.ccb_state & CD_CCB_RETRY_UA) != 0) sf = SF_RETRY_UA; else sf = 0; error = cderror(done_ccb, CAM_RETRY_SELTO, sf); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } } if (error != 0) { xpt_print(periph->path, "cddone: got error %#x back\n", error); bioq_flush(&softc->bio_queue, NULL, EIO); bp->bio_resid = bp->bio_bcount; bp->bio_error = error; bp->bio_flags |= BIO_ERROR; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } else { bp->bio_resid = csio->resid; bp->bio_error = 0; if (bp->bio_resid != 0) { /* * Short transfer ??? * XXX: not sure this is correct for partial * transfers at EOM */ bp->bio_flags |= BIO_ERROR; } } LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); softc->outstanding_cmds--; biofinish(bp, NULL, 0); break; } case CD_CCB_PROBE: { struct scsi_read_capacity_data *rdcap; char *announce_buf; struct cd_params *cdp; int error; cdp = &softc->params; announce_buf = softc->announce_temp; rdcap = (struct scsi_read_capacity_data *)csio->data_ptr; cdp->disksize = scsi_4btoul (rdcap->addr) + 1; cdp->blksize = scsi_4btoul (rdcap->length); /* * Retry any UNIT ATTENTION type errors. They * are expected at boot. */ if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP || (error = cderror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT)) == 0) { snprintf(announce_buf, CD_ANNOUNCETMP_SZ, "%juMB (%ju %u byte sectors)", ((uintmax_t)cdp->disksize * cdp->blksize) / (1024 * 1024), (uintmax_t)cdp->disksize, cdp->blksize); } else { if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } else { int asc, ascq; int sense_key, error_code; int have_sense; cam_status status; struct ccb_getdev cgd; /* Don't wedge this device's queue */ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); status = done_ccb->ccb_h.status; xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if (scsi_extract_sense_ccb(done_ccb, &error_code, &sense_key, &asc, &ascq)) have_sense = TRUE; else have_sense = FALSE; /* * Attach to anything that claims to be a * CDROM or WORM device, as long as it * doesn't return a "Logical unit not * supported" (0x25) error. 
*/ if ((have_sense) && (asc != 0x25) && (error_code == SSD_CURRENT_ERROR)) { const char *sense_key_desc; const char *asc_desc; scsi_sense_desc(sense_key, asc, ascq, &cgd.inq_data, &sense_key_desc, &asc_desc); snprintf(announce_buf, sizeof(announce_buf), "Attempt to query device " "size failed: %s, %s", sense_key_desc, asc_desc); } else if ((have_sense == 0) && ((status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) && (csio->scsi_status == SCSI_STATUS_BUSY)) { snprintf(announce_buf, sizeof(announce_buf), "Attempt to query device " "size failed: SCSI Status: %s", scsi_status_string(csio)); } else if (SID_TYPE(&cgd.inq_data) == T_CDROM) { /* * We only print out an error for * CDROM type devices. For WORM * devices, we don't print out an * error since a few WORM devices * don't support CDROM commands. * If we have sense information, go * ahead and print it out. * Otherwise, just say that we * couldn't attach. */ /* * Just print out the error, not * the full probe message, when we * don't attach. */ if (have_sense) scsi_sense_print( &done_ccb->csio); else { xpt_print(periph->path, "got CAM status %#x\n", done_ccb->ccb_h.status); } xpt_print(periph->path, "fatal error, " "failed to attach to device\n"); /* * Invalidate this peripheral. */ cam_periph_invalidate(periph); announce_buf = NULL; } else { /* * Invalidate this peripheral. */ cam_periph_invalidate(periph); announce_buf = NULL; } } } free(rdcap, M_SCSICD); if (announce_buf != NULL) { struct sbuf sb; sbuf_new(&sb, softc->announce_buf, CD_ANNOUNCE_SZ, SBUF_FIXEDLEN); xpt_announce_periph_sbuf(periph, &sb, announce_buf); xpt_announce_quirks_sbuf(periph, &sb, softc->quirks, CD_Q_BIT_STRING); sbuf_finish(&sb); sbuf_putbuf(&sb); /* * Create our sysctl variables, now that we know * we have successfully attached. */ taskqueue_enqueue(taskqueue_thread,&softc->sysctl_task); } softc->state = CD_STATE_NORMAL; /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the probe lock on the peripheral. * The peripheral will only go away once the last lock * is removed, and we need it around for the CCB release * operation. 
*/ xpt_release_ccb(done_ccb); cam_periph_unhold(periph); return; } case CD_CCB_TUR: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (cderror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART) return; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } xpt_release_ccb(done_ccb); cam_periph_release_locked(periph); return; } default: break; } xpt_release_ccb(done_ccb); } static union cd_pages * cdgetpage(struct cd_mode_params *mode_params) { union cd_pages *page; if (mode_params->cdb_size == 10) page = (union cd_pages *)find_mode_page_10( (struct scsi_mode_header_10 *)mode_params->mode_buf); else page = (union cd_pages *)find_mode_page_6( (struct scsi_mode_header_6 *)mode_params->mode_buf); return (page); } static int cdgetpagesize(int page_num) { u_int i; for (i = 0; i < nitems(cd_page_size_table); i++) { if (cd_page_size_table[i].page == page_num) return (cd_page_size_table[i].page_size); } return (-1); } static int cdioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td) { struct cam_periph *periph; struct cd_softc *softc; int nocopyout, error = 0; periph = (struct cam_periph *)dp->d_drv1; cam_periph_lock(periph); softc = (struct cd_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cdioctl(%#lx)\n", cmd)); if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } /* * If we don't have media loaded, check for it. If we still don't * have media loaded, we can only do a load or eject. * * We only care whether media is loaded if this is a cd-specific ioctl * (thus the IOCGROUP check below). Note that this will break if * anyone adds any ioctls into the switch statement below that don't * have their ioctl group set to 'c'. */ if (((softc->flags & CD_FLAG_VALID_MEDIA) == 0) && ((cmd != CDIOCCLOSE) && (cmd != CDIOCEJECT)) && (IOCGROUP(cmd) == 'c')) { error = cdcheckmedia(periph); if (error != 0) { cam_periph_unhold(periph); cam_periph_unlock(periph); return (error); } } /* * Drop the lock here so later mallocs can use WAITOK. The periph * is essentially locked still with the cam_periph_hold call above. */ cam_periph_unlock(periph); nocopyout = 0; switch (cmd) { case CDIOCPLAYTRACKS: { struct ioc_play_track *args = (struct ioc_play_track *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCPLAYTRACKS\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.flags &= ~CD_PA_SOTC; page->audio.flags |= CD_PA_IMMED; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); if (error) { cam_periph_unlock(periph); break; } /* * This was originally implemented with the PLAY * AUDIO TRACK INDEX command, but that command was * deprecated after SCSI-2. Most (all?) SCSI CDROM * drives support it but ATAPI and ATAPI-derivative * drives don't seem to support it. So we keep a * cache of the table of contents and translate * track numbers to MSF format. 
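* (For reference: an MSF address is minute/second/frame with 75 frames * per second, so a cached TOC entry corresponds roughly to * LBA = ((minute * 60) + second) * 75 + frame - 150.) 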
*/ if (softc->flags & CD_FLAG_VALID_TOC) { union msf_lba *sentry, *eentry; int st, et; if (args->end_track < softc->toc.header.ending_track + 1) args->end_track++; if (args->end_track > softc->toc.header.ending_track + 1) args->end_track = softc->toc.header.ending_track + 1; st = args->start_track - softc->toc.header.starting_track; et = args->end_track - softc->toc.header.starting_track; if ((st < 0) || (et < 0) || (st > (softc->toc.header.ending_track - softc->toc.header.starting_track))) { error = EINVAL; cam_periph_unlock(periph); break; } sentry = &softc->toc.entries[st].addr; eentry = &softc->toc.entries[et].addr; error = cdplaymsf(periph, sentry->msf.minute, sentry->msf.second, sentry->msf.frame, eentry->msf.minute, eentry->msf.second, eentry->msf.frame); } else { /* * If we don't have a valid TOC, try the * play track index command. It is part of * the SCSI-2 spec, but was removed in the * MMC specs. ATAPI and ATAPI-derived * drives don't support it. */ if (softc->quirks & CD_Q_BCD_TRACKS) { args->start_track = bin2bcd(args->start_track); args->end_track = bin2bcd(args->end_track); } error = cdplaytracks(periph, args->start_track, args->start_index, args->end_track, args->end_index); } cam_periph_unlock(periph); } break; case CDIOCPLAYMSF: { struct ioc_play_msf *args = (struct ioc_play_msf *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCPLAYMSF\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.flags &= ~CD_PA_SOTC; page->audio.flags |= CD_PA_IMMED; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); if (error) { cam_periph_unlock(periph); break; } error = cdplaymsf(periph, args->start_m, args->start_s, args->start_f, args->end_m, args->end_s, args->end_f); cam_periph_unlock(periph); } break; case CDIOCPLAYBLOCKS: { struct ioc_play_blocks *args = (struct ioc_play_blocks *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCPLAYBLOCKS\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.flags &= ~CD_PA_SOTC; page->audio.flags |= CD_PA_IMMED; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); if (error) { cam_periph_unlock(periph); break; } error = cdplay(periph, args->blk, args->len); cam_periph_unlock(periph); } break; case CDIOCREADSUBCHANNEL_SYSSPACE: nocopyout = 1; /* Fallthrough */ case CDIOCREADSUBCHANNEL: { struct ioc_read_subchannel *args = (struct ioc_read_subchannel *) addr; struct cd_sub_channel_info *data; u_int32_t len = args->data_len; data = malloc(sizeof(struct cd_sub_channel_info), M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCREADSUBCHANNEL\n")); if ((len > sizeof(struct cd_sub_channel_info)) || (len < sizeof(struct cd_sub_channel_header))) { printf( "scsi_cd: cdioctl: " "cdioreadsubchannel: error, len=%d\n", len); error = EINVAL; free(data, M_SCSICD); cam_periph_unlock(periph); break; } if 
(softc->quirks & CD_Q_BCD_TRACKS) args->track = bin2bcd(args->track); error = cdreadsubchannel(periph, args->address_format, args->data_format, args->track, data, len); if (error) { free(data, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) data->what.track_info.track_number = bcd2bin(data->what.track_info.track_number); len = min(len, ((data->header.data_len[0] << 8) + data->header.data_len[1] + sizeof(struct cd_sub_channel_header))); cam_periph_unlock(periph); if (nocopyout == 0) { if (copyout(data, args->data, len) != 0) { error = EFAULT; } } else { bcopy(data, args->data, len); } free(data, M_SCSICD); } break; case CDIOREADTOCHEADER: { struct ioc_toc_header *th; th = malloc(sizeof(struct ioc_toc_header), M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOREADTOCHEADER\n")); error = cdreadtoc(periph, 0, 0, (u_int8_t *)th, sizeof (*th), /*sense_flags*/SF_NO_PRINT); if (error) { free(th, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) { /* we are going to have to convert the BCD * encoding on the cd to what is expected */ th->starting_track = bcd2bin(th->starting_track); th->ending_track = bcd2bin(th->ending_track); } th->len = ntohs(th->len); bcopy(th, addr, sizeof(*th)); free(th, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOREADTOCENTRYS: { struct cd_tocdata *data; struct cd_toc_single *lead; struct ioc_read_toc_entry *te = (struct ioc_read_toc_entry *) addr; struct ioc_toc_header *th; u_int32_t len, readlen, idx, num; u_int32_t starting_track = te->starting_track; data = malloc(sizeof(*data), M_SCSICD, M_WAITOK | M_ZERO); lead = malloc(sizeof(*lead), M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOREADTOCENTRYS\n")); if (te->data_len < sizeof(struct cd_toc_entry) || (te->data_len % sizeof(struct cd_toc_entry)) != 0 || (te->address_format != CD_MSF_FORMAT && te->address_format != CD_LBA_FORMAT)) { error = EINVAL; printf("scsi_cd: error in readtocentries, " "returning EINVAL\n"); free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } th = &data->header; error = cdreadtoc(periph, 0, 0, (u_int8_t *)th, sizeof (*th), /*sense_flags*/0); if (error) { free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) { /* we are going to have to convert the BCD * encoding on the cd to what is expected */ th->starting_track = bcd2bin(th->starting_track); th->ending_track = bcd2bin(th->ending_track); } if (starting_track == 0) starting_track = th->starting_track; else if (starting_track == LEADOUT) starting_track = th->ending_track + 1; else if (starting_track < th->starting_track || starting_track > th->ending_track + 1) { printf("scsi_cd: error in readtocentries, " "returning EINVAL\n"); free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); error = EINVAL; break; } /* calculate reading length without leadout entry */ readlen = (th->ending_track - starting_track + 1) * sizeof(struct cd_toc_entry); /* and with leadout entry */ len = readlen + sizeof(struct cd_toc_entry); if (te->data_len < len) { len = te->data_len; if (readlen > len) readlen = len; } if (len > sizeof(data->entries)) { printf("scsi_cd: error in readtocentries, " "returning EINVAL\n"); error = EINVAL; free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } num = len / sizeof(struct cd_toc_entry); if 
(readlen > 0) { error = cdreadtoc(periph, te->address_format, starting_track, (u_int8_t *)data, readlen + sizeof (*th), /*sense_flags*/0); if (error) { free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } } /* make leadout entry if needed */ idx = starting_track + num - 1; if (softc->quirks & CD_Q_BCD_TRACKS) th->ending_track = bcd2bin(th->ending_track); if (idx == th->ending_track + 1) { error = cdreadtoc(periph, te->address_format, LEADOUT, (u_int8_t *)lead, sizeof(*lead), /*sense_flags*/0); if (error) { free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } data->entries[idx - starting_track] = lead->entry; } if (softc->quirks & CD_Q_BCD_TRACKS) { for (idx = 0; idx < num - 1; idx++) { data->entries[idx].track = bcd2bin(data->entries[idx].track); } } cam_periph_unlock(periph); error = copyout(data->entries, te->data, len); free(data, M_SCSICD); free(lead, M_SCSICD); } break; case CDIOREADTOCENTRY: { struct cd_toc_single *data; struct ioc_read_toc_single_entry *te = (struct ioc_read_toc_single_entry *) addr; struct ioc_toc_header *th; u_int32_t track; data = malloc(sizeof(*data), M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOREADTOCENTRY\n")); if (te->address_format != CD_MSF_FORMAT && te->address_format != CD_LBA_FORMAT) { printf("error in readtocentry, " " returning EINVAL\n"); free(data, M_SCSICD); error = EINVAL; cam_periph_unlock(periph); break; } th = &data->header; error = cdreadtoc(periph, 0, 0, (u_int8_t *)th, sizeof (*th), /*sense_flags*/0); if (error) { free(data, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) { /* we are going to have to convert the BCD * encoding on the cd to what is expected */ th->starting_track = bcd2bin(th->starting_track); th->ending_track = bcd2bin(th->ending_track); } track = te->track; if (track == 0) track = th->starting_track; else if (track == LEADOUT) /* OK */; else if (track < th->starting_track || track > th->ending_track + 1) { printf("error in readtocentry, " " returning EINVAL\n"); free(data, M_SCSICD); error = EINVAL; cam_periph_unlock(periph); break; } error = cdreadtoc(periph, te->address_format, track, (u_int8_t *)data, sizeof(*data), /*sense_flags*/0); if (error) { free(data, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) data->entry.track = bcd2bin(data->entry.track); bcopy(&data->entry, &te->entry, sizeof(struct cd_toc_entry)); free(data, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETPATCH: { struct ioc_patch *arg = (struct ioc_patch *)addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETPATCH\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.port[LEFT_PORT].channels = arg->patch[0]; page->audio.port[RIGHT_PORT].channels = arg->patch[1]; page->audio.port[2].channels = arg->patch[2]; page->audio.port[3].channels = arg->patch[3]; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCGETVOL: { struct ioc_vol *arg = (struct ioc_vol *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); 
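/* * union cd_mode_data_6_10 is sized for the larger of the 6- and * 10-byte mode data formats, so the buffer is big enough no matter * which CDB size cdgetmode() ends up using. */ 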
params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCGETVOL\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); arg->vol[LEFT_PORT] = page->audio.port[LEFT_PORT].volume; arg->vol[RIGHT_PORT] = page->audio.port[RIGHT_PORT].volume; arg->vol[2] = page->audio.port[2].volume; arg->vol[3] = page->audio.port[3].volume; free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETVOL: { struct ioc_vol *arg = (struct ioc_vol *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETVOL\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.port[LEFT_PORT].channels = CHANNEL_0; page->audio.port[LEFT_PORT].volume = arg->vol[LEFT_PORT]; page->audio.port[RIGHT_PORT].channels = CHANNEL_1; page->audio.port[RIGHT_PORT].volume = arg->vol[RIGHT_PORT]; page->audio.port[2].volume = arg->vol[2]; page->audio.port[3].volume = arg->vol[3]; error = cdsetmode(periph, &params); cam_periph_unlock(periph); free(params.mode_buf, M_SCSICD); } break; case CDIOCSETMONO: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETMONO\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.port[LEFT_PORT].channels = LEFT_CHANNEL | RIGHT_CHANNEL; page->audio.port[RIGHT_PORT].channels = LEFT_CHANNEL | RIGHT_CHANNEL; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, &params); cam_periph_unlock(periph); free(params.mode_buf, M_SCSICD); } break; case CDIOCSETSTEREO: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETSTEREO\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.port[LEFT_PORT].channels = LEFT_CHANNEL; page->audio.port[RIGHT_PORT].channels = RIGHT_CHANNEL; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETMUTE: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETMUTE\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.port[LEFT_PORT].channels = 0; page->audio.port[RIGHT_PORT].channels = 0; page->audio.port[2].channels = 0; 
page->audio.port[3].channels = 0; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETLEFT: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETLEFT\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.port[LEFT_PORT].channels = LEFT_CHANNEL; page->audio.port[RIGHT_PORT].channels = LEFT_CHANNEL; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETRIGHT: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETRIGHT\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.port[LEFT_PORT].channels = RIGHT_CHANNEL; page->audio.port[RIGHT_PORT].channels = RIGHT_CHANNEL; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCRESUME: cam_periph_lock(periph); error = cdpause(periph, 1); cam_periph_unlock(periph); break; case CDIOCPAUSE: cam_periph_lock(periph); error = cdpause(periph, 0); cam_periph_unlock(periph); break; case CDIOCSTART: cam_periph_lock(periph); error = cdstartunit(periph, 0); cam_periph_unlock(periph); break; case CDIOCCLOSE: cam_periph_lock(periph); error = cdstartunit(periph, 1); cam_periph_unlock(periph); break; case CDIOCSTOP: cam_periph_lock(periph); error = cdstopunit(periph, 0); cam_periph_unlock(periph); break; case CDIOCEJECT: cam_periph_lock(periph); error = cdstopunit(periph, 1); cam_periph_unlock(periph); break; case CDIOCALLOW: cam_periph_lock(periph); cdprevent(periph, PR_ALLOW); cam_periph_unlock(periph); break; case CDIOCPREVENT: cam_periph_lock(periph); cdprevent(periph, PR_PREVENT); cam_periph_unlock(periph); break; case CDIOCSETDEBUG: /* sc_link->flags |= (SDEV_DB1 | SDEV_DB2); */ error = ENOTTY; break; case CDIOCCLRDEBUG: /* sc_link->flags &= ~(SDEV_DB1 | SDEV_DB2); */ error = ENOTTY; break; case CDIOCRESET: /* return (cd_reset(periph)); */ error = ENOTTY; break; case CDRIOCREADSPEED: cam_periph_lock(periph); error = cdsetspeed(periph, *(u_int32_t *)addr, CDR_MAX_SPEED); cam_periph_unlock(periph); break; case CDRIOCWRITESPEED: cam_periph_lock(periph); error = cdsetspeed(periph, CDR_MAX_SPEED, *(u_int32_t *)addr); cam_periph_unlock(periph); break; case CDRIOCGETBLOCKSIZE: *(int *)addr = softc->params.blksize; break; case CDRIOCSETBLOCKSIZE: if (*(int *)addr <= 0) { error = EINVAL; break; } softc->disk->d_sectorsize = softc->params.blksize = *(int *)addr; break; case DVDIOCSENDKEY: case DVDIOCREPORTKEY: { struct dvd_authinfo *authinfo; authinfo = (struct dvd_authinfo *)addr; if (cmd == DVDIOCREPORTKEY) error = cdreportkey(periph, authinfo); else error = cdsendkey(periph, authinfo); break; } case DVDIOCREADSTRUCTURE: { struct dvd_struct *dvdstruct; dvdstruct = (struct dvd_struct *)addr; error = 
cdreaddvdstructure(periph, dvdstruct); break; } default: cam_periph_lock(periph); error = cam_periph_ioctl(periph, cmd, addr, cderror); cam_periph_unlock(periph); break; } cam_periph_lock(periph); cam_periph_unhold(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("leaving cdioctl\n")); if (error && bootverbose) { printf("scsi_cd.c::ioctl cmd=%08lx error=%d\n", cmd, error); } cam_periph_unlock(periph); return (error); } static void cdprevent(struct cam_periph *periph, int action) { union ccb *ccb; struct cd_softc *softc; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdprevent\n")); softc = (struct cd_softc *)periph->softc; if (((action == PR_ALLOW) && (softc->flags & CD_FLAG_DISC_LOCKED) == 0) || ((action == PR_PREVENT) && (softc->flags & CD_FLAG_DISC_LOCKED) != 0)) { return; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_prevent(&ccb->csio, /*retries*/ cd_retry_count, cddone, MSG_SIMPLE_Q_TAG, action, SSD_FULL_SIZE, /* timeout */60000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA|SF_NO_PRINT); xpt_release_ccb(ccb); if (error == 0) { if (action == PR_ALLOW) softc->flags &= ~CD_FLAG_DISC_LOCKED; else softc->flags |= CD_FLAG_DISC_LOCKED; } } /* * XXX: the disk media and sector size are only really able to change * XXX: while the device is closed. */ static int cdcheckmedia(struct cam_periph *periph) { struct cd_softc *softc; struct ioc_toc_header *toch; struct cd_toc_single leadout; u_int32_t size, toclen; int error, num_entries, cdindex; softc = (struct cd_softc *)periph->softc; cdprevent(periph, PR_PREVENT); softc->disk->d_sectorsize = 2048; softc->disk->d_mediasize = 0; /* * Get the disc size and block size. If we can't get it, we don't * have media, most likely. */ if ((error = cdsize(periph, &size)) != 0) { softc->flags &= ~(CD_FLAG_VALID_MEDIA|CD_FLAG_VALID_TOC); cdprevent(periph, PR_ALLOW); return (error); } else { softc->flags |= CD_FLAG_SAW_MEDIA | CD_FLAG_VALID_MEDIA; softc->disk->d_sectorsize = softc->params.blksize; softc->disk->d_mediasize = (off_t)softc->params.blksize * softc->params.disksize; } /* * Now we check the table of contents. This (currently) is only * used for the CDIOCPLAYTRACKS ioctl. It may be used later to do * things like present a separate entry in /dev for each track, * like the acd(4) driver does. */ bzero(&softc->toc, sizeof(softc->toc)); toch = &softc->toc.header; /* * We will get errors here for media that doesn't have a table of * contents. According to the MMC-3 spec: "When a Read TOC/PMA/ATIP * command is presented for a DDCD/CD-R/RW media, where the first TOC * has not been recorded (no complete session) and the Format codes * 0000b, 0001b, or 0010b are specified, this command shall be rejected * with an INVALID FIELD IN CDB. Devices that are not capable of * reading an incomplete session on DDCD/CD-R/RW media shall report * CANNOT READ MEDIUM - INCOMPATIBLE FORMAT." * * So this isn't fatal if we can't read the table of contents; it * just means that the user won't be able to issue the play tracks * ioctl, and likely lots of other stuff won't work either. They * need to burn the CD before we can do a whole lot with it. So * we don't print anything here if we get an error back. */ error = cdreadtoc(periph, 0, 0, (u_int8_t *)toch, sizeof(*toch), SF_NO_PRINT); /* * Errors in reading the table of contents aren't fatal, we just * won't have a valid table of contents cached. 
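* (As a worked example of the entry math below: a disc with tracks 1 * through 12 gives num_entries = (12 - 1) + 2 = 13, i.e. the twelve * tracks plus one leadout entry.) 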
*/ if (error != 0) { error = 0; bzero(&softc->toc, sizeof(softc->toc)); goto bailout; } if (softc->quirks & CD_Q_BCD_TRACKS) { toch->starting_track = bcd2bin(toch->starting_track); toch->ending_track = bcd2bin(toch->ending_track); } /* Number of TOC entries, plus leadout */ num_entries = (toch->ending_track - toch->starting_track) + 2; if (num_entries <= 0) goto bailout; toclen = num_entries * sizeof(struct cd_toc_entry); error = cdreadtoc(periph, CD_MSF_FORMAT, toch->starting_track, (u_int8_t *)&softc->toc, toclen + sizeof(*toch), SF_NO_PRINT); if (error != 0) { error = 0; bzero(&softc->toc, sizeof(softc->toc)); goto bailout; } if (softc->quirks & CD_Q_BCD_TRACKS) { toch->starting_track = bcd2bin(toch->starting_track); toch->ending_track = bcd2bin(toch->ending_track); } /* * XXX KDM is this necessary? Probably only if the drive doesn't * return leadout information with the table of contents. */ cdindex = toch->starting_track + num_entries -1; if (cdindex == toch->ending_track + 1) { error = cdreadtoc(periph, CD_MSF_FORMAT, LEADOUT, (u_int8_t *)&leadout, sizeof(leadout), SF_NO_PRINT); if (error != 0) { error = 0; goto bailout; } softc->toc.entries[cdindex - toch->starting_track] = leadout.entry; } if (softc->quirks & CD_Q_BCD_TRACKS) { for (cdindex = 0; cdindex < num_entries - 1; cdindex++) { softc->toc.entries[cdindex].track = bcd2bin(softc->toc.entries[cdindex].track); } } softc->flags |= CD_FLAG_VALID_TOC; /* If the first track is audio, correct sector size. */ if ((softc->toc.entries[0].control & 4) == 0) { softc->disk->d_sectorsize = softc->params.blksize = 2352; softc->disk->d_mediasize = (off_t)softc->params.blksize * softc->params.disksize; } bailout: /* * We unconditionally (re)set the blocksize each time the * CD device is opened. This is because the CD can change, * and therefore the blocksize might change. * XXX problems here if some slice or partition is still * open with the old size? */ if ((softc->disk->d_devstat->flags & DEVSTAT_BS_UNAVAILABLE) != 0) softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE; softc->disk->d_devstat->block_size = softc->params.blksize; return (error); } static int cdsize(struct cam_periph *periph, u_int32_t *size) { struct cd_softc *softc; union ccb *ccb; struct scsi_read_capacity_data *rcap_buf; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdsize\n")); softc = (struct cd_softc *)periph->softc; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); /* XXX Should be M_WAITOK */ rcap_buf = malloc(sizeof(struct scsi_read_capacity_data), M_SCSICD, M_NOWAIT | M_ZERO); if (rcap_buf == NULL) return (ENOMEM); scsi_read_capacity(&ccb->csio, /*retries*/ cd_retry_count, cddone, MSG_SIMPLE_Q_TAG, rcap_buf, SSD_FULL_SIZE, /* timeout */20000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA|SF_NO_PRINT); xpt_release_ccb(ccb); softc->params.disksize = scsi_4btoul(rcap_buf->addr) + 1; softc->params.blksize = scsi_4btoul(rcap_buf->length); /* Make sure we got at least some block size. */ if (error == 0 && softc->params.blksize == 0) error = EIO; /* * SCSI-3 mandates that the reported blocksize shall be 2048. * Older drives sometimes report funny values, trim it down to * 2048, or other parts of the kernel will get confused. * * XXX we leave drives alone that might report 512 bytes, as * well as drives reporting more weird sizes like perhaps 4K. 
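* (Concretely, with the check below: a drive reporting 2336- or * 2352-byte sectors is trimmed to 2048, while reports of 512 or 4096 * bytes pass through untouched.) 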
*/ if (softc->params.blksize > 2048 && softc->params.blksize <= 2352) softc->params.blksize = 2048; free(rcap_buf, M_SCSICD); *size = softc->params.disksize; return (error); } static int cd6byteworkaround(union ccb *ccb) { u_int8_t *cdb; struct cam_periph *periph; struct cd_softc *softc; struct cd_mode_params *params; int frozen, found; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct cd_softc *)periph->softc; cdb = ccb->csio.cdb_io.cdb_bytes; if ((ccb->ccb_h.flags & CAM_CDB_POINTER) || ((cdb[0] != MODE_SENSE_6) && (cdb[0] != MODE_SELECT_6))) return (0); /* * Because there is no convenient place to stash the overall * cd_mode_params structure pointer, we have to grab it like this. * This means that ALL MODE_SENSE and MODE_SELECT requests in the * cd(4) driver MUST go through cdgetmode() and cdsetmode()! * * XXX It would be nice if, at some point, we could increase the * number of available peripheral private pointers. Both pointers * are currently used in most every peripheral driver. */ found = 0; STAILQ_FOREACH(params, &softc->mode_queue, links) { if (params->mode_buf == ccb->csio.data_ptr) { found = 1; break; } } /* * This shouldn't happen. All mode sense and mode select * operations in the cd(4) driver MUST go through cdgetmode() and * cdsetmode()! */ if (found == 0) { xpt_print(periph->path, "mode buffer not found in mode queue!\n"); return (0); } params->cdb_size = 10; softc->minimum_command_size = 10; xpt_print(ccb->ccb_h.path, "%s(6) failed, increasing minimum CDB size to 10 bytes\n", (cdb[0] == MODE_SENSE_6) ? "MODE_SENSE" : "MODE_SELECT"); if (cdb[0] == MODE_SENSE_6) { struct scsi_mode_sense_10 ms10; struct scsi_mode_sense_6 *ms6; int len; ms6 = (struct scsi_mode_sense_6 *)cdb; bzero(&ms10, sizeof(ms10)); ms10.opcode = MODE_SENSE_10; ms10.byte2 = ms6->byte2; ms10.page = ms6->page; /* * 10 byte mode header, block descriptor, * sizeof(union cd_pages) */ len = sizeof(struct cd_mode_data_10); ccb->csio.dxfer_len = len; scsi_ulto2b(len, ms10.length); ms10.control = ms6->control; bcopy(&ms10, cdb, 10); ccb->csio.cdb_len = 10; } else { struct scsi_mode_select_10 ms10; struct scsi_mode_select_6 *ms6; struct scsi_mode_header_6 *header6; struct scsi_mode_header_10 *header10; struct scsi_mode_page_header *page_header; int blk_desc_len, page_num, page_size, len; ms6 = (struct scsi_mode_select_6 *)cdb; bzero(&ms10, sizeof(ms10)); ms10.opcode = MODE_SELECT_10; ms10.byte2 = ms6->byte2; header6 = (struct scsi_mode_header_6 *)params->mode_buf; header10 = (struct scsi_mode_header_10 *)params->mode_buf; page_header = find_mode_page_6(header6); page_num = page_header->page_code; blk_desc_len = header6->blk_desc_len; page_size = cdgetpagesize(page_num); if (page_size != (page_header->page_length + sizeof(*page_header))) page_size = page_header->page_length + sizeof(*page_header); len = sizeof(*header10) + blk_desc_len + page_size; len = min(params->alloc_len, len); /* * Since the 6 byte parameter header is shorter than the 10 * byte parameter header, we need to copy the actual mode * page data, and the block descriptor, if any, so things wind * up in the right place. The regions will overlap, but * bcopy() does the right thing. */ bcopy(params->mode_buf + sizeof(*header6), params->mode_buf + sizeof(*header10), len - sizeof(*header10)); /* Make sure these fields are set correctly. 
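* The mode data length is reserved (must be zero) in a MODE SELECT * parameter list, and a medium_type of 0 (Default) avoids drives that * reject an echoed value, so both are cleared below. 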
*/ scsi_ulto2b(0, header10->data_length); header10->medium_type = 0; scsi_ulto2b(blk_desc_len, header10->blk_desc_len); ccb->csio.dxfer_len = len; scsi_ulto2b(len, ms10.length); ms10.control = ms6->control; bcopy(&ms10, cdb, 10); ccb->csio.cdb_len = 10; } frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_action(ccb); if (frozen) { cam_release_devq(ccb->ccb_h.path, /*relsim_flags*/0, /*openings*/0, /*timeout*/0, /*getcount_only*/0); } return (ERESTART); } static int cderror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct cd_softc *softc; struct cam_periph *periph; int error, error_code, sense_key, asc, ascq; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct cd_softc *)periph->softc; error = 0; /* * We use a status of CAM_REQ_INVALID as shorthand -- if a 6 byte * CDB comes back with this particular error, try transforming it * into the 10 byte version. */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) { error = cd6byteworkaround(ccb); } else if (scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (sense_key == SSD_KEY_ILLEGAL_REQUEST) error = cd6byteworkaround(ccb); else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x28 && ascq == 0x00) disk_media_changed(softc->disk, M_NOWAIT); else if (sense_key == SSD_KEY_NOT_READY && asc == 0x3a && (softc->flags & CD_FLAG_SAW_MEDIA)) { softc->flags &= ~CD_FLAG_SAW_MEDIA; disk_media_gone(softc->disk, M_NOWAIT); } } if (error == ERESTART) return (error); /* * XXX * Until we have a better way of doing pack validation, * don't treat UAs as errors. */ sense_flags |= SF_RETRY_UA; if (softc->quirks & CD_Q_RETRY_BUSY) sense_flags |= SF_RETRY_BUSY; return (cam_periph_error(ccb, cam_flags, sense_flags)); } static void cdmediapoll(void *arg) { struct cam_periph *periph = arg; struct cd_softc *softc = periph->softc; if (softc->state == CD_STATE_NORMAL && !softc->tur && softc->outstanding_cmds == 0) { - if (cam_periph_acquire(periph) == CAM_REQ_CMP) { + if (cam_periph_acquire(periph) == 0) { softc->tur = 1; xpt_schedule(periph, CAM_PRIORITY_NORMAL); } } /* Queue us up again */ if (cd_poll_period != 0) callout_schedule(&softc->mediapoll_c, cd_poll_period * hz); } /* * Read table of contents */ static int cdreadtoc(struct cam_periph *periph, u_int32_t mode, u_int32_t start, u_int8_t *data, u_int32_t len, u_int32_t sense_flags) { struct scsi_read_toc *scsi_cmd; u_int32_t ntoc; struct ccb_scsiio *csio; union ccb *ccb; int error; ntoc = len; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* flags */ CAM_DIR_IN, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ data, /* dxfer_len */ len, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_read_toc), /* timeout */ 50000); scsi_cmd = (struct scsi_read_toc *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); if (mode == CD_MSF_FORMAT) scsi_cmd->byte2 |= CD_MSF; scsi_cmd->from_track = start; /* scsi_ulto2b(ntoc, (u_int8_t *)scsi_cmd->data_len); */ scsi_cmd->data_len[0] = (ntoc) >> 8; scsi_cmd->data_len[1] = (ntoc) & 0xff; scsi_cmd->op_code = READ_TOC; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA | sense_flags); xpt_release_ccb(ccb); return(error); } static int cdreadsubchannel(struct cam_periph *periph, u_int32_t mode, u_int32_t format, int track, struct cd_sub_channel_info *data, u_int32_t len) { struct scsi_read_subchannel *scsi_cmd; struct ccb_scsiio *csio; 
union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* flags */ CAM_DIR_IN, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ (u_int8_t *)data, /* dxfer_len */ len, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_read_subchannel), /* timeout */ 50000); scsi_cmd = (struct scsi_read_subchannel *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = READ_SUBCHANNEL; if (mode == CD_MSF_FORMAT) scsi_cmd->byte1 |= CD_MSF; scsi_cmd->byte2 = SRS_SUBQ; scsi_cmd->subchan_format = format; scsi_cmd->track = track; scsi_ulto2b(len, (u_int8_t *)scsi_cmd->data_len); scsi_cmd->control = 0; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } /* * All MODE_SENSE requests in the cd(4) driver MUST go through this * routine. See comments in cd6byteworkaround() for details. */ static int cdgetmode(struct cam_periph *periph, struct cd_mode_params *data, u_int32_t page) { struct ccb_scsiio *csio; struct cd_softc *softc; union ccb *ccb; int param_len; int error; softc = (struct cd_softc *)periph->softc; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; data->cdb_size = softc->minimum_command_size; if (data->cdb_size < 10) param_len = sizeof(struct cd_mode_data); else param_len = sizeof(struct cd_mode_data_10); /* Don't say we've got more room than we actually allocated */ param_len = min(param_len, data->alloc_len); scsi_mode_sense_len(csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* dbd */ 0, /* page_code */ SMS_PAGE_CTRL_CURRENT, /* page */ page, /* param_buf */ data->mode_buf, /* param_len */ param_len, /* minimum_cmd_size */ softc->minimum_command_size, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); /* * It would be nice not to have to do this, but there's no * available pointer in the CCB that would allow us to stuff the * mode params structure in there and retrieve it in * cd6byteworkaround(), so we can set the cdb size. The cdb size * lets the caller know what CDB size we ended up using, so they * can find the actual mode page offset. */ STAILQ_INSERT_TAIL(&softc->mode_queue, data, links); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); STAILQ_REMOVE(&softc->mode_queue, data, cd_mode_params, links); /* * This is a bit of belt-and-suspenders checking, but if we run * into a situation where the target sends back multiple block * descriptors, we might not have enough space in the buffer to * see the whole mode page. Better to return an error than * potentially access memory beyond our malloced region. */ if (error == 0) { u_int32_t data_len; if (data->cdb_size == 10) { struct scsi_mode_header_10 *hdr10; hdr10 = (struct scsi_mode_header_10 *)data->mode_buf; data_len = scsi_2btoul(hdr10->data_length); data_len += sizeof(hdr10->data_length); } else { struct scsi_mode_header_6 *hdr6; hdr6 = (struct scsi_mode_header_6 *)data->mode_buf; data_len = hdr6->data_length; data_len += sizeof(hdr6->data_length); } /* * Complain if there is more mode data available than we * allocated space for. This could potentially happen if * we miscalculated the page length for some reason, if the * drive returns multiple block descriptors, or if it sets * the data length incorrectly. 
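* (For example, a drive that appends a block descriptor we did not * expect can report more data than alloc_len even though the command * succeeded; that case is failed with ENOSPC below rather than risk * reading past the malloced buffer.) 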
*/ if (data_len > data->alloc_len) { xpt_print(periph->path, "allocated modepage %d length " "%d < returned length %d\n", page, data->alloc_len, data_len); error = ENOSPC; } } return (error); } /* * All MODE_SELECT requests in the cd(4) driver MUST go through this * routine. See comments in cd6byteworkaround() for details. */ static int cdsetmode(struct cam_periph *periph, struct cd_mode_params *data) { struct ccb_scsiio *csio; struct cd_softc *softc; union ccb *ccb; int cdb_size, param_len; int error; softc = (struct cd_softc *)periph->softc; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; error = 0; /* * If the data is formatted for the 10 byte version of the mode * select parameter list, we need to use the 10 byte CDB. * Otherwise, we use whatever the stored minimum command size is. */ if (data->cdb_size == 10) cdb_size = data->cdb_size; else cdb_size = softc->minimum_command_size; if (cdb_size >= 10) { struct scsi_mode_header_10 *mode_header; u_int32_t data_len; mode_header = (struct scsi_mode_header_10 *)data->mode_buf; data_len = scsi_2btoul(mode_header->data_length); scsi_ulto2b(0, mode_header->data_length); /* * SONY drives do not allow a mode select with a medium_type * value that has just been returned by a mode sense; use a * medium_type of 0 (Default) instead. */ mode_header->medium_type = 0; /* * Pass back whatever the drive passed to us, plus the size * of the data length field. */ param_len = data_len + sizeof(mode_header->data_length); } else { struct scsi_mode_header_6 *mode_header; mode_header = (struct scsi_mode_header_6 *)data->mode_buf; param_len = mode_header->data_length + 1; mode_header->data_length = 0; /* * SONY drives do not allow a mode select with a medium_type * value that has just been returned by a mode sense; use a * medium_type of 0 (Default) instead. */ mode_header->medium_type = 0; } /* Don't say we've got more room than we actually allocated */ param_len = min(param_len, data->alloc_len); scsi_mode_select_len(csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* scsi_page_fmt */ 1, /* save_pages */ 0, /* param_buf */ data->mode_buf, /* param_len */ param_len, /* minimum_cmd_size */ cdb_size, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); /* See comments in cdgetmode() and cd6byteworkaround(). */ STAILQ_INSERT_TAIL(&softc->mode_queue, data, links); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); STAILQ_REMOVE(&softc->mode_queue, data, cd_mode_params, links); return (error); } static int cdplay(struct cam_periph *periph, u_int32_t blk, u_int32_t len) { struct ccb_scsiio *csio; union ccb *ccb; int error; u_int8_t cdb_len; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; /* * Use the smallest possible command to perform the operation. */ if ((len & 0xffff0000) == 0) { /* * We can fit in a 10 byte cdb. 
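* PLAY AUDIO(10) only carries a 16-bit transfer length, which is why * the high 16 bits of len were tested above; longer plays fall through * to PLAY AUDIO(12) and its 32-bit length field. 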
*/ struct scsi_play_10 *scsi_cmd; scsi_cmd = (struct scsi_play_10 *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_10; scsi_ulto4b(blk, (u_int8_t *)scsi_cmd->blk_addr); scsi_ulto2b(len, (u_int8_t *)scsi_cmd->xfer_len); cdb_len = sizeof(*scsi_cmd); } else { struct scsi_play_12 *scsi_cmd; scsi_cmd = (struct scsi_play_12 *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_12; scsi_ulto4b(blk, (u_int8_t *)scsi_cmd->blk_addr); scsi_ulto4b(len, (u_int8_t *)scsi_cmd->xfer_len); cdb_len = sizeof(*scsi_cmd); } cam_fill_csio(csio, /*retries*/ cd_retry_count, cddone, /*flags*/CAM_DIR_NONE, MSG_SIMPLE_Q_TAG, /*dataptr*/NULL, /*datalen*/0, /*sense_len*/SSD_FULL_SIZE, cdb_len, /*timeout*/50 * 1000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdplaymsf(struct cam_periph *periph, u_int32_t startm, u_int32_t starts, u_int32_t startf, u_int32_t endm, u_int32_t ends, u_int32_t endf) { struct scsi_play_msf *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_play_msf), /* timeout */ 50000); scsi_cmd = (struct scsi_play_msf *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_MSF; scsi_cmd->start_m = startm; scsi_cmd->start_s = starts; scsi_cmd->start_f = startf; scsi_cmd->end_m = endm; scsi_cmd->end_s = ends; scsi_cmd->end_f = endf; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdplaytracks(struct cam_periph *periph, u_int32_t strack, u_int32_t sindex, u_int32_t etrack, u_int32_t eindex) { struct scsi_play_track *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_play_track), /* timeout */ 50000); scsi_cmd = (struct scsi_play_track *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_TRACK; scsi_cmd->start_track = strack; scsi_cmd->start_index = sindex; scsi_cmd->end_track = etrack; scsi_cmd->end_index = eindex; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdpause(struct cam_periph *periph, u_int32_t go) { struct scsi_pause *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_pause), /* timeout */ 50000); scsi_cmd = (struct scsi_pause *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PAUSE; scsi_cmd->resume = go; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); 
xpt_release_ccb(ccb); return(error); } static int cdstartunit(struct cam_periph *periph, int load) { union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_start_stop(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* start */ TRUE, /* load_eject */ load, /* immediate */ FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdstopunit(struct cam_periph *periph, u_int32_t eject) { union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_start_stop(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* start */ FALSE, /* load_eject */ eject, /* immediate */ FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdsetspeed(struct cam_periph *periph, u_int32_t rdspeed, u_int32_t wrspeed) { struct scsi_set_speed *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; /* Preserve old behavior: units in multiples of CDROM speed */ if (rdspeed < 177) rdspeed *= 177; if (wrspeed < 177) wrspeed *= 177; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_set_speed), /* timeout */ 50000); scsi_cmd = (struct scsi_set_speed *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SET_CD_SPEED; scsi_ulto2b(rdspeed, scsi_cmd->readspeed); scsi_ulto2b(wrspeed, scsi_cmd->writespeed); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdreportkey(struct cam_periph *periph, struct dvd_authinfo *authinfo) { union ccb *ccb; u_int8_t *databuf; u_int32_t lba; int error; int length; error = 0; databuf = NULL; lba = 0; switch (authinfo->format) { case DVD_REPORT_AGID: length = sizeof(struct scsi_report_key_data_agid); break; case DVD_REPORT_CHALLENGE: length = sizeof(struct scsi_report_key_data_challenge); break; case DVD_REPORT_KEY1: length = sizeof(struct scsi_report_key_data_key1_key2); break; case DVD_REPORT_TITLE_KEY: length = sizeof(struct scsi_report_key_data_title); /* The lba field is only set for the title key */ lba = authinfo->lba; break; case DVD_REPORT_ASF: length = sizeof(struct scsi_report_key_data_asf); break; case DVD_REPORT_RPC: length = sizeof(struct scsi_report_key_data_rpc); break; case DVD_INVALIDATE_AGID: length = 0; break; default: return (EINVAL); } if (length != 0) { databuf = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); } else databuf = NULL; cam_periph_lock(periph); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_report_key(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* lba */ lba, /* agid */ authinfo->agid, /* key_format */ authinfo->format, /* data_ptr */ databuf, /* dxfer_len */ length, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); if (error != 0) goto bailout; if (ccb->csio.resid != 0) { 
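/* * A nonzero residual just means the drive returned less key data * than requested; warn and keep going, since the format-specific * parsing below may still succeed. */ 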
xpt_print(periph->path, "warning, residual for report key " "command is %d\n", ccb->csio.resid); } switch(authinfo->format) { case DVD_REPORT_AGID: { struct scsi_report_key_data_agid *agid_data; agid_data = (struct scsi_report_key_data_agid *)databuf; authinfo->agid = (agid_data->agid & RKD_AGID_MASK) >> RKD_AGID_SHIFT; break; } case DVD_REPORT_CHALLENGE: { struct scsi_report_key_data_challenge *chal_data; chal_data = (struct scsi_report_key_data_challenge *)databuf; bcopy(chal_data->challenge_key, authinfo->keychal, min(sizeof(chal_data->challenge_key), sizeof(authinfo->keychal))); break; } case DVD_REPORT_KEY1: { struct scsi_report_key_data_key1_key2 *key1_data; key1_data = (struct scsi_report_key_data_key1_key2 *)databuf; bcopy(key1_data->key1, authinfo->keychal, min(sizeof(key1_data->key1), sizeof(authinfo->keychal))); break; } case DVD_REPORT_TITLE_KEY: { struct scsi_report_key_data_title *title_data; title_data = (struct scsi_report_key_data_title *)databuf; authinfo->cpm = (title_data->byte0 & RKD_TITLE_CPM) >> RKD_TITLE_CPM_SHIFT; authinfo->cp_sec = (title_data->byte0 & RKD_TITLE_CP_SEC) >> RKD_TITLE_CP_SEC_SHIFT; authinfo->cgms = (title_data->byte0 & RKD_TITLE_CMGS_MASK) >> RKD_TITLE_CMGS_SHIFT; bcopy(title_data->title_key, authinfo->keychal, min(sizeof(title_data->title_key), sizeof(authinfo->keychal))); break; } case DVD_REPORT_ASF: { struct scsi_report_key_data_asf *asf_data; asf_data = (struct scsi_report_key_data_asf *)databuf; authinfo->asf = asf_data->success & RKD_ASF_SUCCESS; break; } case DVD_REPORT_RPC: { struct scsi_report_key_data_rpc *rpc_data; rpc_data = (struct scsi_report_key_data_rpc *)databuf; authinfo->reg_type = (rpc_data->byte4 & RKD_RPC_TYPE_MASK) >> RKD_RPC_TYPE_SHIFT; authinfo->vend_rsts = (rpc_data->byte4 & RKD_RPC_VENDOR_RESET_MASK) >> RKD_RPC_VENDOR_RESET_SHIFT; authinfo->user_rsts = rpc_data->byte4 & RKD_RPC_USER_RESET_MASK; authinfo->region = rpc_data->region_mask; authinfo->rpc_scheme = rpc_data->rpc_scheme1; break; } case DVD_INVALIDATE_AGID: break; default: /* This should be impossible, since we checked above */ error = EINVAL; goto bailout; break; /* NOTREACHED */ } bailout: xpt_release_ccb(ccb); cam_periph_unlock(periph); if (databuf != NULL) free(databuf, M_DEVBUF); return(error); } static int cdsendkey(struct cam_periph *periph, struct dvd_authinfo *authinfo) { union ccb *ccb; u_int8_t *databuf; int length; int error; error = 0; databuf = NULL; switch(authinfo->format) { case DVD_SEND_CHALLENGE: { struct scsi_report_key_data_challenge *challenge_data; length = sizeof(*challenge_data); challenge_data = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); databuf = (u_int8_t *)challenge_data; scsi_ulto2b(length - sizeof(challenge_data->data_len), challenge_data->data_len); bcopy(authinfo->keychal, challenge_data->challenge_key, min(sizeof(authinfo->keychal), sizeof(challenge_data->challenge_key))); break; } case DVD_SEND_KEY2: { struct scsi_report_key_data_key1_key2 *key2_data; length = sizeof(*key2_data); key2_data = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); databuf = (u_int8_t *)key2_data; scsi_ulto2b(length - sizeof(key2_data->data_len), key2_data->data_len); bcopy(authinfo->keychal, key2_data->key1, min(sizeof(authinfo->keychal), sizeof(key2_data->key1))); break; } case DVD_SEND_RPC: { struct scsi_send_key_data_rpc *rpc_data; length = sizeof(*rpc_data); rpc_data = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); databuf = (u_int8_t *)rpc_data; scsi_ulto2b(length - sizeof(rpc_data->data_len), rpc_data->data_len); rpc_data->region_code = 
authinfo->region; break; } default: return (EINVAL); } cam_periph_lock(periph); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_send_key(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* agid */ authinfo->agid, /* key_format */ authinfo->format, /* data_ptr */ databuf, /* dxfer_len */ length, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); cam_periph_unlock(periph); if (databuf != NULL) free(databuf, M_DEVBUF); return(error); } static int cdreaddvdstructure(struct cam_periph *periph, struct dvd_struct *dvdstruct) { union ccb *ccb; u_int8_t *databuf; u_int32_t address; int error; int length; error = 0; databuf = NULL; /* The address is reserved for many of the formats */ address = 0; switch(dvdstruct->format) { case DVD_STRUCT_PHYSICAL: length = sizeof(struct scsi_read_dvd_struct_data_physical); break; case DVD_STRUCT_COPYRIGHT: length = sizeof(struct scsi_read_dvd_struct_data_copyright); break; case DVD_STRUCT_DISCKEY: length = sizeof(struct scsi_read_dvd_struct_data_disc_key); break; case DVD_STRUCT_BCA: length = sizeof(struct scsi_read_dvd_struct_data_bca); break; case DVD_STRUCT_MANUFACT: length = sizeof(struct scsi_read_dvd_struct_data_manufacturer); break; case DVD_STRUCT_CMI: return (ENODEV); case DVD_STRUCT_PROTDISCID: length = sizeof(struct scsi_read_dvd_struct_data_prot_discid); break; case DVD_STRUCT_DISCKEYBLOCK: length = sizeof(struct scsi_read_dvd_struct_data_disc_key_blk); break; case DVD_STRUCT_DDS: length = sizeof(struct scsi_read_dvd_struct_data_dds); break; case DVD_STRUCT_MEDIUM_STAT: length = sizeof(struct scsi_read_dvd_struct_data_medium_status); break; case DVD_STRUCT_SPARE_AREA: length = sizeof(struct scsi_read_dvd_struct_data_spare_area); break; case DVD_STRUCT_RMD_LAST: return (ENODEV); case DVD_STRUCT_RMD_RMA: return (ENODEV); case DVD_STRUCT_PRERECORDED: length = sizeof(struct scsi_read_dvd_struct_data_leadin); break; case DVD_STRUCT_UNIQUEID: length = sizeof(struct scsi_read_dvd_struct_data_disc_id); break; case DVD_STRUCT_DCB: return (ENODEV); case DVD_STRUCT_LIST: /* * This is the maximum allocation length for the READ DVD * STRUCTURE command. There's nothing in the MMC3 spec * that indicates a limit in the amount of data that can * be returned from this call, other than the limits * imposed by the 2-byte length variables. 
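 *
 * As a concrete sketch of that bound: scsi_read_dvd_structure()
 * packs the allocation length with scsi_ulto2b() into a two byte
 * big-endian field, so 65535 is the largest encodable request:
 *
 *	uint8_t alloc_len[2];
 *	scsi_ulto2b(65535, alloc_len);	stores 0xff, 0xff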
*/ length = 65535; break; default: return (EINVAL); } if (length != 0) { databuf = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); } else databuf = NULL; cam_periph_lock(periph); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_read_dvd_structure(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* lba */ address, /* layer_number */ dvdstruct->layer_num, /* key_format */ dvdstruct->format, /* agid */ dvdstruct->agid, /* data_ptr */ databuf, /* dxfer_len */ length, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); if (error != 0) goto bailout; switch(dvdstruct->format) { case DVD_STRUCT_PHYSICAL: { struct scsi_read_dvd_struct_data_layer_desc *inlayer; struct dvd_layer *outlayer; struct scsi_read_dvd_struct_data_physical *phys_data; phys_data = (struct scsi_read_dvd_struct_data_physical *)databuf; inlayer = &phys_data->layer_desc; outlayer = (struct dvd_layer *)&dvdstruct->data; dvdstruct->length = sizeof(*inlayer); outlayer->book_type = (inlayer->book_type_version & RDSD_BOOK_TYPE_MASK) >> RDSD_BOOK_TYPE_SHIFT; outlayer->book_version = (inlayer->book_type_version & RDSD_BOOK_VERSION_MASK); outlayer->disc_size = (inlayer->disc_size_max_rate & RDSD_DISC_SIZE_MASK) >> RDSD_DISC_SIZE_SHIFT; outlayer->max_rate = (inlayer->disc_size_max_rate & RDSD_MAX_RATE_MASK); outlayer->nlayers = (inlayer->layer_info & RDSD_NUM_LAYERS_MASK) >> RDSD_NUM_LAYERS_SHIFT; outlayer->track_path = (inlayer->layer_info & RDSD_TRACK_PATH_MASK) >> RDSD_TRACK_PATH_SHIFT; outlayer->layer_type = (inlayer->layer_info & RDSD_LAYER_TYPE_MASK); outlayer->linear_density = (inlayer->density & RDSD_LIN_DENSITY_MASK) >> RDSD_LIN_DENSITY_SHIFT; outlayer->track_density = (inlayer->density & RDSD_TRACK_DENSITY_MASK); outlayer->bca = (inlayer->bca & RDSD_BCA_MASK) >> RDSD_BCA_SHIFT; outlayer->start_sector = scsi_3btoul(inlayer->main_data_start); outlayer->end_sector = scsi_3btoul(inlayer->main_data_end); outlayer->end_sector_l0 = scsi_3btoul(inlayer->end_sector_layer0); break; } case DVD_STRUCT_COPYRIGHT: { struct scsi_read_dvd_struct_data_copyright *copy_data; copy_data = (struct scsi_read_dvd_struct_data_copyright *) databuf; dvdstruct->cpst = copy_data->cps_type; dvdstruct->rmi = copy_data->region_info; dvdstruct->length = 0; break; } default: /* * Tell the user what the overall length is, no matter * what we can actually fit in the data buffer. */ dvdstruct->length = length - ccb->csio.resid - sizeof(struct scsi_read_dvd_struct_data_header); /* * But only actually copy out the smaller of what we read * in or what the structure can take. 
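 *
 * In sketch form, the clamp below is simply:
 *
 *	n = min(sizeof(dvdstruct->data), dvdstruct->length);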
*/ bcopy(databuf + sizeof(struct scsi_read_dvd_struct_data_header), dvdstruct->data, min(sizeof(dvdstruct->data), dvdstruct->length)); break; } bailout: xpt_release_ccb(ccb); cam_periph_unlock(periph); if (databuf != NULL) free(databuf, M_DEVBUF); return(error); } void scsi_report_key(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t lba, u_int8_t agid, u_int8_t key_format, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_report_key *scsi_cmd; scsi_cmd = (struct scsi_report_key *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = REPORT_KEY; scsi_ulto4b(lba, scsi_cmd->lba); scsi_ulto2b(dxfer_len, scsi_cmd->alloc_len); scsi_cmd->agid_keyformat = (agid << RK_KF_AGID_SHIFT) | (key_format & RK_KF_KEYFORMAT_MASK); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len == 0) ? CAM_DIR_NONE : CAM_DIR_IN, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_send_key(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t agid, u_int8_t key_format, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_send_key *scsi_cmd; scsi_cmd = (struct scsi_send_key *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SEND_KEY; scsi_ulto2b(dxfer_len, scsi_cmd->param_len); scsi_cmd->agid_keyformat = (agid << RK_KF_AGID_SHIFT) | (key_format & RK_KF_KEYFORMAT_MASK); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_OUT, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_dvd_structure(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t address, u_int8_t layer_number, u_int8_t format, u_int8_t agid, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_read_dvd_structure *scsi_cmd; scsi_cmd = (struct scsi_read_dvd_structure *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_DVD_STRUCTURE; scsi_ulto4b(address, scsi_cmd->address); scsi_cmd->layer_number = layer_number; scsi_cmd->format = format; scsi_ulto2b(dxfer_len, scsi_cmd->alloc_len); /* The AGID is the top two bits of this byte */ scsi_cmd->agid = agid << 6; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } Index: head/sys/cam/scsi/scsi_da.c =================================================================== --- head/sys/cam/scsi/scsi_da.c (revision 328917) +++ head/sys/cam/scsi/scsi_da.c (revision 328918) @@ -1,6274 +1,6273 @@ /*- * Implementation of SCSI Direct Access Peripheral driver for CAM. * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #ifdef _KERNEL #include "opt_da.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif /* _KERNEL */ #ifndef _KERNEL #include #include #endif /* _KERNEL */ #include #include #include #include #include #include #include #include #ifdef _KERNEL /* * Note that there are probe ordering dependencies here. The order isn't * controlled by this enumeration, but by explicit state transitions in * dastart() and dadone(). Here are some of the dependencies: * * 1. RC should come first, before RC16, unless there is evidence that RC16 * is supported. * 2. BDC needs to come before any of the ATA probes, or the ZONE probe. * 3. The ATA probes should go in this order: * ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE */ typedef enum { DA_STATE_PROBE_WP, DA_STATE_PROBE_RC, DA_STATE_PROBE_RC16, DA_STATE_PROBE_LBP, DA_STATE_PROBE_BLK_LIMITS, DA_STATE_PROBE_BDC, DA_STATE_PROBE_ATA, DA_STATE_PROBE_ATA_LOGDIR, DA_STATE_PROBE_ATA_IDDIR, DA_STATE_PROBE_ATA_SUP, DA_STATE_PROBE_ATA_ZONE, DA_STATE_PROBE_ZONE, DA_STATE_NORMAL } da_state; typedef enum { DA_FLAG_PACK_INVALID = 0x000001, DA_FLAG_NEW_PACK = 0x000002, DA_FLAG_PACK_LOCKED = 0x000004, DA_FLAG_PACK_REMOVABLE = 0x000008, DA_FLAG_NEED_OTAG = 0x000020, DA_FLAG_WAS_OTAG = 0x000040, DA_FLAG_RETRY_UA = 0x000080, DA_FLAG_OPEN = 0x000100, DA_FLAG_SCTX_INIT = 0x000200, DA_FLAG_CAN_RC16 = 0x000400, DA_FLAG_PROBED = 0x000800, DA_FLAG_DIRTY = 0x001000, DA_FLAG_ANNOUNCED = 0x002000, DA_FLAG_CAN_ATA_DMA = 0x004000, DA_FLAG_CAN_ATA_LOG = 0x008000, DA_FLAG_CAN_ATA_IDLOG = 0x010000, DA_FLAG_CAN_ATA_SUPCAP = 0x020000, DA_FLAG_CAN_ATA_ZONE = 0x040000 } da_flags; typedef enum { DA_Q_NONE = 0x00, DA_Q_NO_SYNC_CACHE = 0x01, DA_Q_NO_6_BYTE = 0x02, DA_Q_NO_PREVENT = 0x04, DA_Q_4K = 0x08, DA_Q_NO_RC16 = 0x10, DA_Q_NO_UNMAP = 0x20, DA_Q_RETRY_BUSY = 0x40, DA_Q_SMR_DM = 0x80, DA_Q_STRICT_UNMAP = 0x100 } da_quirks; #define DA_Q_BIT_STRING \ "\020" \ "\001NO_SYNC_CACHE" \ "\002NO_6_BYTE" \ "\003NO_PREVENT" \ "\0044K" \ "\005NO_RC16" \ "\006NO_UNMAP" \ "\007RETRY_BUSY" \ "\010SMR_DM" \ "\011STRICT_UNMAP" typedef enum { DA_CCB_PROBE_RC = 0x01, DA_CCB_PROBE_RC16 = 0x02, DA_CCB_PROBE_LBP = 0x03, DA_CCB_PROBE_BLK_LIMITS = 0x04, DA_CCB_PROBE_BDC = 0x05, DA_CCB_PROBE_ATA = 0x06, DA_CCB_BUFFER_IO = 0x07, DA_CCB_DUMP = 0x0A, DA_CCB_DELETE = 0x0B, DA_CCB_TUR = 0x0C, DA_CCB_PROBE_ZONE = 0x0D, DA_CCB_PROBE_ATA_LOGDIR = 0x0E, DA_CCB_PROBE_ATA_IDDIR = 0x0F, DA_CCB_PROBE_ATA_SUP = 0x10, DA_CCB_PROBE_ATA_ZONE = 0x11, DA_CCB_PROBE_WP = 0x12, DA_CCB_TYPE_MASK = 0x1F, DA_CCB_RETRY_UA = 0x20 } da_ccb_state; /* * Order here is important for method choice * * We prefer ATA_TRIM as tests run against a Sandforce 
2281 SSD attached to * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted in 20% quicker deletes * using ATA_TRIM than the corresponding UNMAP results for a real-world MySQL * import taking 5 mins. * */ typedef enum { DA_DELETE_NONE, DA_DELETE_DISABLE, DA_DELETE_ATA_TRIM, DA_DELETE_UNMAP, DA_DELETE_WS16, DA_DELETE_WS10, DA_DELETE_ZERO, DA_DELETE_MIN = DA_DELETE_ATA_TRIM, DA_DELETE_MAX = DA_DELETE_ZERO } da_delete_methods; /* * For SCSI, host managed drives show up as a separate device type. For * ATA, host managed drives also have a different device signature. * XXX KDM figure out the ATA host managed signature. */ typedef enum { DA_ZONE_NONE = 0x00, DA_ZONE_DRIVE_MANAGED = 0x01, DA_ZONE_HOST_AWARE = 0x02, DA_ZONE_HOST_MANAGED = 0x03 } da_zone_mode; /* * We distinguish between these interface cases in addition to the drive type: * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC * o ATA drive behind a SCSI translation layer that does not know about * ZBC/ZAC, and so needs to be managed via ATA passthrough. In this * case, we would need to share the ATA code with the ada(4) driver. * o SCSI drive. */ typedef enum { DA_ZONE_IF_SCSI, DA_ZONE_IF_ATA_PASS, DA_ZONE_IF_ATA_SAT, } da_zone_interface; typedef enum { DA_ZONE_FLAG_RZ_SUP = 0x0001, DA_ZONE_FLAG_OPEN_SUP = 0x0002, DA_ZONE_FLAG_CLOSE_SUP = 0x0004, DA_ZONE_FLAG_FINISH_SUP = 0x0008, DA_ZONE_FLAG_RWP_SUP = 0x0010, DA_ZONE_FLAG_SUP_MASK = (DA_ZONE_FLAG_RZ_SUP | DA_ZONE_FLAG_OPEN_SUP | DA_ZONE_FLAG_CLOSE_SUP | DA_ZONE_FLAG_FINISH_SUP | DA_ZONE_FLAG_RWP_SUP), DA_ZONE_FLAG_URSWRZ = 0x0020, DA_ZONE_FLAG_OPT_SEQ_SET = 0x0040, DA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080, DA_ZONE_FLAG_MAX_SEQ_SET = 0x0100, DA_ZONE_FLAG_SET_MASK = (DA_ZONE_FLAG_OPT_SEQ_SET | DA_ZONE_FLAG_OPT_NONSEQ_SET | DA_ZONE_FLAG_MAX_SEQ_SET) } da_zone_flags; static struct da_zone_desc { da_zone_flags value; const char *desc; } da_zone_desc_table[] = { {DA_ZONE_FLAG_RZ_SUP, "Report Zones" }, {DA_ZONE_FLAG_OPEN_SUP, "Open" }, {DA_ZONE_FLAG_CLOSE_SUP, "Close" }, {DA_ZONE_FLAG_FINISH_SUP, "Finish" }, {DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" }, }; typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb, struct bio *bp); static da_delete_func_t da_delete_trim; static da_delete_func_t da_delete_unmap; static da_delete_func_t da_delete_ws; static const void * da_delete_functions[] = { NULL, NULL, da_delete_trim, da_delete_unmap, da_delete_ws, da_delete_ws, da_delete_ws }; static const char *da_delete_method_names[] = { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" }; static const char *da_delete_method_desc[] = { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP", "WRITE SAME(10) with UNMAP", "ZERO" }; /* Offsets into our private area for storing information */ #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct disk_params { u_int8_t heads; u_int32_t cylinders; u_int8_t secs_per_track; u_int32_t secsize; /* Number of bytes/sector */ u_int64_t sectors; /* total number sectors */ u_int stripesize; u_int stripeoffset; }; #define UNMAP_RANGE_MAX 0xffffffff #define UNMAP_HEAD_SIZE 8 #define UNMAP_RANGE_SIZE 16 #define UNMAP_MAX_RANGES 2048 /* Protocol Max is 4095 */ #define UNMAP_BUF_SIZE ((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \ UNMAP_HEAD_SIZE) #define WS10_MAX_BLKS 0xffff #define WS16_MAX_BLKS 0xffffffff #define ATA_TRIM_MAX_RANGES ((UNMAP_BUF_SIZE / \ (ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE) #define DA_WORK_TUR (1 << 16) typedef enum { DA_REF_OPEN = 1, DA_REF_OPEN_HOLD,
DA_REF_CLOSE_HOLD, DA_REF_PROBE_HOLD, DA_REF_TUR, DA_REF_GEOM, DA_REF_SYSCTL, DA_REF_REPROBE, DA_REF_MAX /* KEEP LAST */ } da_ref_token; struct da_softc { struct cam_iosched_softc *cam_iosched; struct bio_queue_head delete_run_queue; LIST_HEAD(, ccb_hdr) pending_ccbs; int refcount; /* Active xpt_action() calls */ da_state state; da_flags flags; da_quirks quirks; int minimum_cmd_size; int error_inject; int trim_max_ranges; int delete_available; /* Delete methods possibly available */ da_zone_mode zone_mode; da_zone_interface zone_interface; da_zone_flags zone_flags; struct ata_gp_log_dir ata_logdir; int valid_logdir_len; struct ata_identify_log_pages ata_iddir; int valid_iddir_len; uint64_t optimal_seq_zones; uint64_t optimal_nonseq_zones; uint64_t max_seq_zones; u_int maxio; uint32_t unmap_max_ranges; uint32_t unmap_max_lba; /* Max LBAs in UNMAP req */ uint32_t unmap_gran; uint32_t unmap_gran_align; uint64_t ws_max_blks; da_delete_methods delete_method_pref; da_delete_methods delete_method; da_delete_func_t *delete_func; int unmappedio; int rotating; struct disk_params params; struct disk *disk; union ccb saved_ccb; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; struct callout sendordered_c; uint64_t wwpn; uint8_t unmap_buf[UNMAP_BUF_SIZE]; struct scsi_read_capacity_data_long rcaplong; struct callout mediapoll_c; int ref_flags[DA_REF_MAX]; #ifdef CAM_IO_STATS struct sysctl_ctx_list sysctl_stats_ctx; struct sysctl_oid *sysctl_stats_tree; u_int errors; u_int timeouts; u_int invalidations; #endif #define DA_ANNOUNCETMP_SZ 80 char announce_temp[DA_ANNOUNCETMP_SZ]; #define DA_ANNOUNCE_SZ 400 char announcebuf[DA_ANNOUNCE_SZ]; }; #define dadeleteflag(softc, delete_method, enable) \ if (enable) { \ softc->delete_available |= (1 << delete_method); \ } else { \ softc->delete_available &= ~(1 << delete_method); \ } struct da_quirk_entry { struct scsi_inquiry_pattern inq_pat; da_quirks quirks; }; static const char quantum[] = "QUANTUM"; static const char microp[] = "MICROP"; static struct da_quirk_entry da_quirk_table[] = { /* SPI, FC devices */ { /* * Fujitsu M2513A MO drives. * Tested devices: M2513A2 firmware versions 1200 & 1300. * (dip switch selects whether T_DIRECT or T_OPTICAL device) * Reported by: W.Scholten */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* See above. */ {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This particular Fujitsu drive doesn't like the * synchronize cache command. * Reported by: Tom Jackson */ {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This drive doesn't like the synchronize cache command * either. Reported by: Matthew Jacob * in NetBSD PR kern/6027, August 24, 1998. */ {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This drive doesn't like the synchronize cache command * either. Reported by: Hellmuth Michaelis (hm@kts.org) * (PR 8882). */ {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: Blaz Zupan */ {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: Blaz Zupan */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. 
*/ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: walter@pelissero.de */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't work correctly with 6 byte reads/writes. * Returns illegal request, and points to byte 9 of the * 6-byte CDB. * Reported by: Adam McDougall */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE }, { /* See above. */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE }, { /* * Doesn't like the synchronize cache command. * Reported by: walter@pelissero.de */ {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * The CISS RAID controllers do not support SYNC_CACHE */ {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * The STEC SSDs sometimes hang on UNMAP. */ {T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"}, /*quirks*/ DA_Q_NO_UNMAP }, { /* * VMware returns BUSY status when storage has transient * connectivity problems, so better wait. * Also VMware returns odd errors on misaligned UNMAPs. */ {T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"}, /*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP }, /* USB mass storage devices supported by umass(4) */ { /* * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player * PR: kern/51675 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Power Quotient Int. (PQI) USB flash key * PR: kern/53067 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Creative Nomad MUVO mp3 player (USB) * PR: kern/53094 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * Jungsoft NEXDISK USB flash key * PR: kern/54737 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * FreeDik USB Mini Data Drive * PR: kern/54786 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Sigmatel USB Flash MP3 Player * PR: kern/57046 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * Neuros USB Digital Audio Computer * PR: kern/63645 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SEAGRAND NP-900 MP3 Player * PR: kern/64563 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * iRiver iFP MP3 player (with UMS Firmware) * PR: kern/54881, i386/63941, kern/66124 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01 * PR: kern/70158 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * ZICPlay USB MP3 Player with FM * PR: kern/75057 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * TEAC USB floppy mechanisms */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Kingston DataTraveler II+ USB Pen-Drive. 
* Reported by: Pawel Jakub Dawidek */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * USB DISK Pro PMAP * Reported by: jhs * PR: usb/96381 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Motorola E398 Mobile Phone (TransFlash memory card). * Reported by: Wojciech A. Koszek * PR: usb/89889 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Qware BeatZkey! Pro * PR: usb/79164 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Time DPA20B 1GB MP3 Player * PR: usb/81846 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Samsung USB key 128Mb * PR: usb/90081 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Kingston DataTraveler 2.0 USB Flash memory. * PR: usb/89196 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Creative MUVO Slim mp3 player (USB) * PR: usb/86131 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3) * PR: usb/80487 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SanDisk Micro Cruzer 128MB * PR: usb/75970 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * TOSHIBA TransMemory USB sticks * PR: kern/94660 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * PNY USB 3.0 Flash Drives */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16 }, { /* * PNY USB Flash keys * PR: usb/75578, usb/72344, usb/65436 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Genesys GL3224 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", "120?"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16 }, { /* * Genesys 6-in-1 Card Reader * PR: usb/94647 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Rekam Digital CAMERA * PR: usb/98713 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * iRiver H10 MP3 player * PR: usb/102547 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * iRiver U10 MP3 player * PR: usb/92306 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * X-Micro Flash Disk * PR: usb/96901 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * EasyMP3 EM732X USB 2.0 Flash MP3 Player * PR: usb/96546 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*", "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Denver MP3 player * PR: usb/107101 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Philips USB Key Audio KEY013 * PR: usb/68412 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT }, { /* * JNC MP3 Player * PR: usb/94439 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*", "*"}, /*quirks*/ 
DA_Q_NO_SYNC_CACHE }, { /* * SAMSUNG MP0402H * PR: usb/108427 */ {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * I/O Magic USB flash - Giga Bank * PR: usb/108810 */ {T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * JoyFly 128mb USB Flash Drive * PR: 96133 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * ChipsBnk usb stick * PR: 103702 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A * PR: 129858 */ {T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Samsung YP-U3 mp3-player * PR: 125398 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { {T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*", "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Sony Cyber-Shot DSC cameras * PR: usb/137035 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT }, { {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3", "1.00"}, /*quirks*/ DA_Q_NO_PREVENT }, { /* At least several Transcend USB sticks lie about RC16. */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*", "*"}, /*quirks*/ DA_Q_NO_RC16 }, { /* * I-O Data USB Flash Disk * PR: usb/211716 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_RC16 }, /* ATA/SATA devices over SAS/USB/... */ { /* Hitachi Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" }, /*quirks*/DA_Q_4K }, { /* Micron Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" },
/*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, 
"WDC WD??", "?PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* * Olympus FE-210 camera */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * LG UP3S MP3 player */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Laser MP3-2GA13 MP3 player */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * LaCie external 250GB Hard drive des by Porsche * Submitted by: Ben Stuyts * PR: 121474 */ {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, /* SATA SSDs */ { /* * Corsair Force 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Force 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Neutron GTX SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Force GT & GS SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" }, /*quirks*/DA_Q_4K }, { /* * Crucial M4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" }, /*quirks*/DA_Q_4K }, { /* * Crucial RealSSD C300 SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 320 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 330 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 510 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 520 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel S3610 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel X25-M Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" }, /*quirks*/DA_Q_4K }, { /* * Kingston E100 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Kingston HyperX 3k SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Marvell SSDs (entry taken from OpenSolaris) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ 
Agility 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Agility 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Deneva R Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 2 SSDs (inc pro series) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 750 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 750*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 830 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 840 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 845 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 845*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 850 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 843T Series SSDs (MZ7WD*) * Samsung PM851 Series SSDs (MZ7TE*) * Samsung PM853T Series SSDs (MZ7GE*) * Samsung SM863 Series SSDs (MZ7KM*) * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" }, /*quirks*/DA_Q_4K }, { /* * Same as for SAMSUNG MZ7* but enable the quirks for SSD * starting with MZ7* too */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MZ7*", "*" }, /*quirks*/DA_Q_4K }, { /* * SuperTalent TeraDrive CT SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" }, /*quirks*/DA_Q_4K }, { /* * XceedIOPS SATA SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" }, /*quirks*/DA_Q_4K }, { /* * Hama Innostor USB-Stick */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" }, /*quirks*/DA_Q_NO_RC16 }, { /* * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR) * Drive Managed SATA hard drive. This drive doesn't report * in firmware that it is a drive managed SMR drive. 
*/ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS000[23]*", "*" }, /*quirks*/DA_Q_SMR_DM }, { /* * MX-ES USB Drive by Mach Xtreme */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"}, /*quirks*/DA_Q_NO_RC16 }, }; static disk_strategy_t dastrategy; static dumper_t dadump; static periph_init_t dainit; static void daasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void dasysctlinit(void *context, int pending); static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS); static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS); static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS); static int dazonemodesysctl(SYSCTL_HANDLER_ARGS); static int dazonesupsysctl(SYSCTL_HANDLER_ARGS); static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS); static void dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method); static off_t dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method); static void dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method); static void daprobedone(struct cam_periph *periph, union ccb *ccb); static periph_ctor_t daregister; static periph_dtor_t dacleanup; static periph_start_t dastart; static periph_oninv_t daoninvalidate; static void dazonedone(struct cam_periph *periph, union ccb *ccb); static void dadone(struct cam_periph *periph, union ccb *done_ccb); static int daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static void daprevent(struct cam_periph *periph, int action); static void dareprobe(struct cam_periph *periph); static void dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, struct scsi_read_capacity_data_long *rcaplong, size_t rcap_size); static timeout_t dasendorderedtag; static void dashutdown(void *arg, int howto); static timeout_t damediapoll; #ifndef DA_DEFAULT_POLL_PERIOD #define DA_DEFAULT_POLL_PERIOD 3 #endif #ifndef DA_DEFAULT_TIMEOUT #define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */ #endif #ifndef DA_DEFAULT_SOFTTIMEOUT #define DA_DEFAULT_SOFTTIMEOUT 0 #endif #ifndef DA_DEFAULT_RETRY #define DA_DEFAULT_RETRY 4 #endif #ifndef DA_DEFAULT_SEND_ORDERED #define DA_DEFAULT_SEND_ORDERED 1 #endif static int da_poll_period = DA_DEFAULT_POLL_PERIOD; static int da_retry_count = DA_DEFAULT_RETRY; static int da_default_timeout = DA_DEFAULT_TIMEOUT; static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT; static int da_send_ordered = DA_DEFAULT_SEND_ORDERED; static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0, "CAM Direct Access Disk driver"); SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN, &da_poll_period, 0, "Media polling period in seconds"); SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN, &da_retry_count, 0, "Normal I/O retry count"); SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN, &da_default_timeout, 0, "Normal I/O timeout (in seconds)"); SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN, &da_send_ordered, 0, "Send Ordered Tags"); SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout, CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I", "Soft I/O timeout (ms)"); TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout); /* * DA_ORDEREDTAG_INTERVAL determines how often, relative * to the default timeout, we check to see whether an ordered * tagged transaction is appropriate to prevent simple tag * starvation. 
Since we'd like to ensure that there is at least * 1/2 of the timeout length left for a starved transaction to * complete after we've sent an ordered tag, we must poll at least * four times in every timeout period. This takes care of the worst * case where a starved transaction starts during an interval that * meets the requirement "don't send an ordered tag" test so it takes * us two intervals to determine that a tag must be sent. */ #ifndef DA_ORDEREDTAG_INTERVAL #define DA_ORDEREDTAG_INTERVAL 4 #endif static struct periph_driver dadriver = { dainit, "da", TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(da, dadriver); static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers"); /* * This driver takes out references / holds in well defined pairs, never * recursively. These macros / inline functions enforce those rules. They * are only enabled with DA_TRACK_REFS or INVARIANTS. If DA_TRACK_REFS is * defined to be 2 or larger, the tracking also includes debug printfs. */ #if defined(DA_TRACK_REFS) || defined(INVARIANTS) #ifndef DA_TRACK_REFS #define DA_TRACK_REFS 1 #endif #if DA_TRACK_REFS > 1 static const char *da_ref_text[] = { "bogus", "open", "open hold", "close hold", "reprobe hold", "Test Unit Ready", "Geom", "sysctl", "reprobe", "max -- also bogus" }; #define DA_PERIPH_PRINT(periph, msg, args...) \ CAM_PERIPH_PRINT(periph, msg, ##args) #else #define DA_PERIPH_PRINT(periph, msg, args...) #endif static inline void token_sanity(da_ref_token token) { if ((unsigned)token >= DA_REF_MAX) panic("Bad token value passed in %d\n", token); } static inline int da_periph_hold(struct cam_periph *periph, int priority, da_ref_token token) { int err = cam_periph_hold(periph, priority); token_sanity(token); DA_PERIPH_PRINT(periph, "Holding device %s (%d): %d\n", da_ref_text[token], token, err); if (err == 0) { int cnt; struct da_softc *softc = periph->softc; cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1); if (cnt != 0) panic("Re-holding for reason %d, cnt = %d", token, cnt); } return (err); } static inline void da_periph_unhold(struct cam_periph *periph, da_ref_token token) { int cnt; struct da_softc *softc = periph->softc; token_sanity(token); DA_PERIPH_PRINT(periph, "Unholding device %s (%d)\n", da_ref_text[token], token); cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1); if (cnt != 1) panic("Unholding %d with cnt = %d", token, cnt); cam_periph_unhold(periph); } static inline int da_periph_acquire(struct cam_periph *periph, da_ref_token token) { int err = cam_periph_acquire(periph); token_sanity(token); DA_PERIPH_PRINT(periph, "acquiring device %s (%d): %d\n", da_ref_text[token], token, err); - if (err == CAM_REQ_CMP) { + if (err == 0) { int cnt; struct da_softc *softc = periph->softc; cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1); if (cnt != 0) panic("Re-refing for reason %d, cnt = %d", token, cnt); } return (err); } static inline void da_periph_release(struct cam_periph *periph, da_ref_token token) { int cnt; struct da_softc *softc = periph->softc; token_sanity(token); DA_PERIPH_PRINT(periph, "releasing device %s (%d)\n", da_ref_text[token], token); cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1); if (cnt != 1) panic("Releasing %d with cnt = %d", token, cnt); cam_periph_release(periph); } static inline void da_periph_release_locked(struct cam_periph *periph, da_ref_token token) { int cnt; struct da_softc *softc = periph->softc; token_sanity(token); DA_PERIPH_PRINT(periph, "releasing device (locked) %s (%d)\n", da_ref_text[token], 
token); cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1); if (cnt != 1) panic("Unholding %d with cnt = %d", token, cnt); cam_periph_release_locked(periph); } #define cam_periph_hold POISON #define cam_periph_unhold POISON #define cam_periph_acquire POISON #define cam_periph_release POISON #define cam_periph_release_locked POISON #else #define da_periph_hold(periph, prio, token) cam_periph_hold((periph), (prio)) #define da_periph_unhold(periph, token) cam_periph_unhold((periph)) #define da_periph_acquire(periph, token) cam_periph_acquire((periph)) #define da_periph_release(periph, token) cam_periph_release((periph)) #define da_periph_release_locked(periph, token) cam_periph_release_locked((periph)) #endif static int daopen(struct disk *dp) { struct cam_periph *periph; struct da_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; - if (da_periph_acquire(periph, DA_REF_OPEN) != CAM_REQ_CMP) { + if (da_periph_acquire(periph, DA_REF_OPEN) != 0) { return (ENXIO); } cam_periph_lock(periph); if ((error = da_periph_hold(periph, PRIBIO|PCATCH, DA_REF_OPEN_HOLD)) != 0) { cam_periph_unlock(periph); da_periph_release(periph, DA_REF_OPEN); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("daopen\n")); softc = (struct da_softc *)periph->softc; dareprobe(periph); /* Wait for the disk size update. */ error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO, "dareprobe", 0); if (error != 0) xpt_print(periph->path, "unable to retrieve capacity data\n"); if (periph->flags & CAM_PERIPH_INVALID) error = ENXIO; if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 && (softc->quirks & DA_Q_NO_PREVENT) == 0) daprevent(periph, PR_PREVENT); if (error == 0) { softc->flags &= ~DA_FLAG_PACK_INVALID; softc->flags |= DA_FLAG_OPEN; } da_periph_unhold(periph, DA_REF_OPEN_HOLD); cam_periph_unlock(periph); if (error != 0) da_periph_release(periph, DA_REF_OPEN); return (error); } static int daclose(struct disk *dp) { struct cam_periph *periph; struct da_softc *softc; union ccb *ccb; periph = (struct cam_periph *)dp->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("daclose\n")); if (da_periph_hold(periph, PRIBIO, DA_REF_CLOSE_HOLD) == 0) { /* Flush disk cache. */ if ((softc->flags & DA_FLAG_DIRTY) != 0 && (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_synchronize_cache(&ccb->csio, /*retries*/1, /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG, /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE, 5 * 60 * 1000); cam_periph_runccb(ccb, daerror, /*cam_flags*/0, /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR, softc->disk->d_devstat); softc->flags &= ~DA_FLAG_DIRTY; xpt_release_ccb(ccb); } /* Allow medium removal. */ if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 && (softc->quirks & DA_Q_NO_PREVENT) == 0) daprevent(periph, PR_ALLOW); da_periph_unhold(periph, DA_REF_CLOSE_HOLD); } /* * If we've got removeable media, mark the blocksize as * unavailable, since it could change when new media is * inserted. 
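 *
 * (An aside on the open/close pair above, since it is the clearest
 * illustration of the token-paired reference discipline: in sketch
 * form,
 *
 *	da_periph_acquire(periph, DA_REF_OPEN);			daopen()
 *	da_periph_hold(periph, PRIBIO|PCATCH, DA_REF_OPEN_HOLD);
 *	...
 *	da_periph_unhold(periph, DA_REF_OPEN_HOLD);
 *
 *	da_periph_hold(periph, PRIBIO, DA_REF_CLOSE_HOLD);	daclose()
 *	...
 *	da_periph_unhold(periph, DA_REF_CLOSE_HOLD);
 *	da_periph_release(periph, DA_REF_OPEN);
 *
 * so each token's count only ever toggles between 0 and 1, and the
 * tracking code panics on any re-acquire.)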
*/ if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE; softc->flags &= ~DA_FLAG_OPEN; while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1); cam_periph_unlock(periph); da_periph_release(periph, DA_REF_OPEN); return (0); } static void daschedule(struct cam_periph *periph) { struct da_softc *softc = (struct da_softc *)periph->softc; if (softc->state != DA_STATE_NORMAL) return; cam_iosched_schedule(softc->cam_iosched, periph); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void dastrategy(struct bio *bp) { struct cam_periph *periph; struct da_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); /* * If the device has been made invalid, error out */ if ((softc->flags & DA_FLAG_PACK_INVALID)) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp)); /* * Zone commands must be ordered, because they can depend on the * effects of previously issued commands, and they may affect * commands after them. */ if (bp->bio_cmd == BIO_ZONE) bp->bio_flags |= BIO_ORDERED; /* * Place it in the queue of disk activities for this disk */ cam_iosched_queue_work(softc->cam_iosched, bp); /* * Schedule ourselves for performing the work. */ daschedule(periph); cam_periph_unlock(periph); return; } static int dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct cam_periph *periph; struct da_softc *softc; u_int secsize; struct ccb_scsiio csio; struct disk *dp; int error = 0; dp = arg; periph = dp->d_drv1; softc = (struct da_softc *)periph->softc; secsize = softc->params.secsize; if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) return (ENXIO); memset(&csio, 0, sizeof(csio)); if (length > 0) { xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); csio.ccb_h.ccb_state = DA_CCB_DUMP; scsi_read_write(&csio, /*retries*/0, dadone, MSG_ORDERED_Q_TAG, /*read*/SCSI_RW_WRITE, /*byte2*/0, /*minimum_cmd_size*/ softc->minimum_cmd_size, offset / secsize, length / secsize, /*data_ptr*/(u_int8_t *) virtual, /*dxfer_len*/length, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); error = cam_periph_runccb((union ccb *)&csio, cam_periph_error, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if (error != 0) printf("Aborting dump due to I/O error.\n"); return (error); } /* * Sync the disk cache contents to the physical media. 
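 *
 * A begin_lba of 0 combined with an lb_count of 0 asks the device to
 * flush every cached block, so the whole-disk flush below is simply:
 *
 *	scsi_synchronize_cache(&csio, 0, dadone, MSG_SIMPLE_Q_TAG,
 *	    0, 0, SSD_FULL_SIZE, 5 * 1000);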
*/ if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); csio.ccb_h.ccb_state = DA_CCB_DUMP; scsi_synchronize_cache(&csio, /*retries*/0, /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG, /*begin_lba*/0,/* Cover the whole disk */ /*lb_count*/0, SSD_FULL_SIZE, 5 * 1000); error = cam_periph_runccb((union ccb *)&csio, cam_periph_error, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); } return (error); } static int dagetattr(struct bio *bp) { int ret; struct cam_periph *periph; periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, periph->path); cam_periph_unlock(periph); if (ret == 0) bp->bio_completed = bp->bio_length; return ret; } static void dainit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("da: Failed to attach master async callback " "due to status 0x%x!\n", status); } else if (da_send_ordered) { /* Register our shutdown event handler */ if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown, NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) printf("dainit: shutdown event registration failed!\n"); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void dadiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; da_periph_release(periph, DA_REF_GEOM); } static void daoninvalidate(struct cam_periph *periph) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, daasync, periph, periph->path); softc->flags |= DA_FLAG_PACK_INVALID; #ifdef CAM_IO_STATS softc->invalidations++; #endif /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ cam_iosched_flush(softc->cam_iosched, NULL, ENXIO); /* * Tell GEOM that we've gone away, we'll get a callback when it is * done cleaning up its resources. */ disk_gone(softc->disk); } static void dacleanup(struct cam_periph *periph) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; cam_periph_unlock(periph); cam_iosched_fini(softc->cam_iosched); /* * If we can't free the sysctl tree, oh well... 
*/ if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) { #ifdef CAM_IO_STATS if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0) xpt_print(periph->path, "can't remove sysctl stats context\n"); #endif if (sysctl_ctx_free(&softc->sysctl_ctx) != 0) xpt_print(periph->path, "can't remove sysctl context\n"); } callout_drain(&softc->mediapoll_c); disk_destroy(softc->disk); callout_drain(&softc->sendordered_c); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void daasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; struct da_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data) != T_DIRECT && SID_TYPE(&cgd->inq_data) != T_RBC && SID_TYPE(&cgd->inq_data) != T_OPTICAL && SID_TYPE(&cgd->inq_data) != T_ZBC_HM) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(daregister, daoninvalidate, dacleanup, dastart, "da", CAM_PERIPH_BIO, path, daasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("daasync: Unable to attach to new device " "due to status 0x%x\n", status); return; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct da_softc *softc; softc = periph->softc; disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); } break; } case AC_UNIT_ATTENTION: { union ccb *ccb; int error_code, sense_key, asc, ascq; softc = (struct da_softc *)periph->softc; ccb = (union ccb *)arg; /* * Handle all UNIT ATTENTIONs except our own, * as they will be handled by daerror(). */ if (xpt_path_periph(ccb->ccb_h.path) != periph && scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (asc == 0x2A && ascq == 0x09) { xpt_print(ccb->ccb_h.path, "Capacity data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); } else if (asc == 0x28 && ascq == 0x00) { softc->flags &= ~DA_FLAG_PROBED; disk_media_changed(softc->disk, M_NOWAIT); } else if (asc == 0x3F && ascq == 0x03) { xpt_print(ccb->ccb_h.path, "INQUIRY data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); } } break; } case AC_SCSI_AEN: softc = (struct da_softc *)periph->softc; if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { - if (da_periph_acquire(periph, DA_REF_TUR) == CAM_REQ_CMP) { + if (da_periph_acquire(periph, DA_REF_TUR) == 0) { cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); daschedule(periph); } } /* FALLTHROUGH */ case AC_SENT_BDR: case AC_BUS_RESET: { struct ccb_hdr *ccbh; softc = (struct da_softc *)periph->softc; /* * Don't fail on the expected unit attention * that will occur. 
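 *
 * (The AC_UNIT_ATTENTION case above dispatches on ASC/ASCQ pairs; in
 * sketch form, the mapping it implements is the small table below.
 * The table name and layout are illustrative only -- the driver uses
 * an if/else chain, not a table.)
 */
#if 0	/* Illustrative sketch; da_ua_actions is hypothetical. */
static const struct {
    int asc;
    int ascq;
    int reprobe;	/* 1: reprobe the device, 0: media-changed event */
} da_ua_actions[] = {
    { 0x2A, 0x09, 1 },	/* Capacity data has changed */
    { 0x28, 0x00, 0 },	/* Not ready to ready transition */
    { 0x3F, 0x03, 1 },	/* INQUIRY data has changed */
};
#endif
/*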
*/ softc->flags |= DA_FLAG_RETRY_UA; LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) ccbh->ccb_state |= DA_CCB_RETRY_UA; break; } case AC_INQ_CHANGED: softc = (struct da_softc *)periph->softc; softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); break; default: break; } cam_periph_async(periph, code, path, arg); } static void dasysctlinit(void *context, int pending) { struct cam_periph *periph; struct da_softc *softc; char tmpstr[32], tmpstr2[16]; struct ccb_trans_settings cts; periph = (struct cam_periph *)context; /* * periph was held for us when this task was enqueued */ if (periph->flags & CAM_PERIPH_INVALID) { da_periph_release(periph, DA_REF_SYSCTL); return; } softc = (struct da_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= DA_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) { printf("dasysctlinit: unable to allocate sysctl tree\n"); da_periph_release(periph, DA_REF_SYSCTL); return; } /* * Now register the sysctl handler, so the user can change the value on * the fly. */ SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN, softc, 0, dadeletemethodsysctl, "A", "BIO_DELETE execution method"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW, softc, 0, dadeletemaxsysctl, "Q", "Maximum BIO_DELETE size"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW, &softc->minimum_cmd_size, 0, dacmdsizesysctl, "I", "Minimum CDB size"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD, softc, 0, dazonemodesysctl, "A", "Zone Mode"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD, softc, 0, dazonesupsysctl, "A", "Zone Support"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones, "Optimal Number of Open Sequential Write Preferred Zones"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "optimal_nonseq_zones", CTLFLAG_RD, &softc->optimal_nonseq_zones, "Optimal Number of Non-Sequentially Written Sequential Write " "Preferred Zones"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones, "Maximum Number of Open Sequential Write Required Zones"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "error_inject", CTLFLAG_RW, &softc->error_inject, 0, "error_inject leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "unmapped_io", CTLFLAG_RD, &softc->unmappedio, 0, "Unmapped I/O leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "rotating", CTLFLAG_RD, &softc->rotating, 0, "Rotating media"); /* * Add some addressing info. 
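 *
 * (Each CTLTYPE_* entry registered above points at a handler built on
 * the same pattern: export the current value, then commit a new one
 * only when req->newptr is non-NULL.  A minimal sketch of that shape,
 * with a hypothetical handler name, follows; the real handlers such as
 * dadeletemaxsysctl() below add their own validation.)
 */
#if 0	/* Illustrative sketch; da_example_sysctl() is hypothetical. */
static int
da_example_sysctl(SYSCTL_HANDLER_ARGS)
{
    int error, value;

    value = *(int *)arg1;			/* export current value */
    error = sysctl_handle_int(oidp, &value, 0, req);
    if ((error != 0) || (req->newptr == NULL))
        return (error);			/* read-only access, or error */
    *(int *)arg1 = value;			/* commit the new value */
    return (0);
}
#endif
/*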
*/ memset(&cts, 0, sizeof (cts)); xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cam_periph_lock(periph); xpt_action((union ccb *)&cts); cam_periph_unlock(periph); if (cts.ccb_h.status != CAM_REQ_CMP) { da_periph_release(periph, DA_REF_SYSCTL); return; } if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWPN) { softc->wwpn = fc->wwpn; SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "wwpn", CTLFLAG_RD, &softc->wwpn, "World Wide Port Name"); } } #ifdef CAM_IO_STATS /* * Now add some useful stats. * XXX These should live in cam_periph and be common to all periphs */ softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats", CTLFLAG_RD, 0, "Statistics"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "errors", CTLFLAG_RD, &softc->errors, 0, "Transport errors reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "timeouts", CTLFLAG_RD, &softc->timeouts, 0, "Device timeouts reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "pack_invalidations", CTLFLAG_RD, &softc->invalidations, 0, "Device pack invalidations"); #endif cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx, softc->sysctl_tree); da_periph_release(periph, DA_REF_SYSCTL); } static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS) { int error; uint64_t value; struct da_softc *softc; softc = (struct da_softc *)arg1; value = softc->disk->d_delmaxsize; error = sysctl_handle_64(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* only accept values smaller than the calculated value */ if (value > dadeletemaxsize(softc, softc->delete_method)) { return (EINVAL); } softc->disk->d_delmaxsize = value; return (0); } static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS) { int error, value; value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* * Acceptable values here are 6, 10, 12 or 16. 
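 *
 * The if/else chain below rounds the requested size up to the next
 * supported CDB size.  An equivalent table-driven sketch (the table
 * and helper are hypothetical, not part of the driver):
 */
#if 0	/* Illustrative sketch; names are hypothetical. */
static const int da_cdb_sizes[] = { 6, 10, 12, 16 };

static int
da_round_cmd_size(int value)
{
    u_int i;

    /* Round up to the smallest supported CDB size that fits. */
    for (i = 0; i < nitems(da_cdb_sizes) - 1; i++)
        if (value <= da_cdb_sizes[i])
            return (da_cdb_sizes[i]);
    return (16);
}
#endif
/*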
*/ if (value < 6) value = 6; else if ((value > 6) && (value <= 10)) value = 10; else if ((value > 10) && (value <= 12)) value = 12; else if (value > 12) value = 16; *(int *)arg1 = value; return (0); } static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS) { sbintime_t value; int error; value = da_default_softtimeout / SBT_1MS; error = sysctl_handle_int(oidp, (int *)&value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* XXX Should clip this to a reasonable level */ if (value > da_default_timeout * 1000) return (EINVAL); da_default_softtimeout = value * SBT_1MS; return (0); } static void dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method) { softc->delete_method = delete_method; softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method); softc->delete_func = da_delete_functions[delete_method]; if (softc->delete_method > DA_DELETE_DISABLE) softc->disk->d_flags |= DISKFLAG_CANDELETE; else softc->disk->d_flags &= ~DISKFLAG_CANDELETE; } static off_t dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method) { off_t sectors; switch(delete_method) { case DA_DELETE_UNMAP: sectors = (off_t)softc->unmap_max_lba; break; case DA_DELETE_ATA_TRIM: sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges; break; case DA_DELETE_WS16: sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS); break; case DA_DELETE_ZERO: case DA_DELETE_WS10: sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS); break; default: return 0; } return (off_t)softc->params.secsize * omin(sectors, softc->params.sectors); } static void daprobedone(struct cam_periph *periph, union ccb *ccb) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; dadeletemethodchoose(softc, DA_DELETE_NONE); if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) { char buf[80]; int i, sep; snprintf(buf, sizeof(buf), "Delete methods: <"); sep = 0; for (i = 0; i <= DA_DELETE_MAX; i++) { if ((softc->delete_available & (1 << i)) == 0 && i != softc->delete_method) continue; if (sep) strlcat(buf, ",", sizeof(buf)); strlcat(buf, da_delete_method_names[i], sizeof(buf)); if (i == softc->delete_method) strlcat(buf, "(*)", sizeof(buf)); sep = 1; } strlcat(buf, ">", sizeof(buf)); printf("%s%d: %s\n", periph->periph_name, periph->unit_number, buf); } /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the probe lock on the peripheral. * The peripheral will only go away once the last lock * is removed, and we need it around for the CCB release * operation. */ xpt_release_ccb(ccb); softc->state = DA_STATE_NORMAL; softc->flags |= DA_FLAG_PROBED; daschedule(periph); wakeup(&softc->disk->d_mediasize); if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) { softc->flags |= DA_FLAG_ANNOUNCED; da_periph_unhold(periph, DA_REF_PROBE_HOLD); } else da_periph_release_locked(periph, DA_REF_REPROBE); } static void dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method) { int i, methods; /* If available, prefer the method requested by user. */ i = softc->delete_method_pref; methods = softc->delete_available | (1 << DA_DELETE_DISABLE); if (methods & (1 << i)) { dadeletemethodset(softc, i); return; } /* Use the pre-defined order to choose the best performing delete. */ for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) { if (i == DA_DELETE_ZERO) continue; if (softc->delete_available & (1 << i)) { dadeletemethodset(softc, i); return; } } /* Fallback to default. 
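 *
 * The same chooser also runs when a delete method fails at runtime:
 * cmd6workaround() further below clears the failing method's bit in
 * delete_available and re-runs the selection.  Condensed, that
 * recovery sequence is:
 */
#if 0	/* Illustrative condensation of the sequence in cmd6workaround(). */
    da_delete_methods old_method = softc->delete_method;

    dadeleteflag(softc, old_method, 0);		/* mark method unusable */
    dadeletemethodchoose(softc, DA_DELETE_DISABLE);	/* pick next best */
    if (softc->delete_method == DA_DELETE_DISABLE)
        xpt_print(ccb->ccb_h.path, "%s failed, disabling BIO_DELETE\n",
            da_delete_method_desc[old_method]);
#endif
/*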
*/ dadeletemethodset(softc, default_method); } static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS) { char buf[16]; const char *p; struct da_softc *softc; int i, error, value; softc = (struct da_softc *)arg1; value = softc->delete_method; if (value < 0 || value > DA_DELETE_MAX) p = "UNKNOWN"; else p = da_delete_method_names[value]; strncpy(buf, p, sizeof(buf)); error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); for (i = 0; i <= DA_DELETE_MAX; i++) { if (strcmp(buf, da_delete_method_names[i]) == 0) break; } if (i > DA_DELETE_MAX) return (EINVAL); softc->delete_method_pref = i; dadeletemethodchoose(softc, DA_DELETE_NONE); return (0); } static int dazonemodesysctl(SYSCTL_HANDLER_ARGS) { char tmpbuf[40]; struct da_softc *softc; int error; softc = (struct da_softc *)arg1; switch (softc->zone_mode) { case DA_ZONE_DRIVE_MANAGED: snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed"); break; case DA_ZONE_HOST_AWARE: snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware"); break; case DA_ZONE_HOST_MANAGED: snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed"); break; case DA_ZONE_NONE: default: snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned"); break; } error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req); return (error); } static int dazonesupsysctl(SYSCTL_HANDLER_ARGS) { char tmpbuf[180]; struct da_softc *softc; struct sbuf sb; int error, first; unsigned int i; softc = (struct da_softc *)arg1; error = 0; first = 1; sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0); for (i = 0; i < sizeof(da_zone_desc_table) / sizeof(da_zone_desc_table[0]); i++) { if (softc->zone_flags & da_zone_desc_table[i].value) { if (first == 0) sbuf_printf(&sb, ", "); else first = 0; sbuf_cat(&sb, da_zone_desc_table[i].desc); } } if (first == 1) sbuf_printf(&sb, "None"); sbuf_finish(&sb); error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); return (error); } static cam_status daregister(struct cam_periph *periph, void *arg) { struct da_softc *softc; struct ccb_pathinq cpi; struct ccb_getdev *cgd; char tmpstr[80]; caddr_t match; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("daregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc == NULL) { printf("daregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } if (cam_iosched_init(&softc->cam_iosched, periph) != 0) { printf("daregister: Unable to probe new device. " "Unable to allocate iosched memory\n"); free(softc, M_DEVBUF); return(CAM_REQ_CMP_ERR); } LIST_INIT(&softc->pending_ccbs); softc->state = DA_STATE_PROBE_WP; bioq_init(&softc->delete_run_queue); if (SID_IS_REMOVABLE(&cgd->inq_data)) softc->flags |= DA_FLAG_PACK_REMOVABLE; softc->unmap_max_ranges = UNMAP_MAX_RANGES; softc->unmap_max_lba = UNMAP_RANGE_MAX; softc->unmap_gran = 0; softc->unmap_gran_align = 0; softc->ws_max_blks = WS16_MAX_BLKS; softc->trim_max_ranges = ATA_TRIM_MAX_RANGES; softc->rotating = 1; periph->softc = softc; /* * See if this device has any quirks. 
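 *
 * Entries in da_quirk_table pair a scsi_inquiry_pattern with the quirk
 * bits to apply on a match.  A hypothetical entry (the vendor and
 * product strings are invented, purely for illustration) would look
 * like:
 */
#if 0	/* Illustrative sketch; "ACME"/"Turbo*" are made up. */
    {
        /* Hypothetical drive that chokes on SYNCHRONIZE CACHE */
        { T_DIRECT, SIP_MEDIA_FIXED, "ACME", "Turbo*", "*" },
        /*quirks*/DA_Q_NO_SYNC_CACHE
    },
#endif
/*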
*/ match = cam_quirkmatch((caddr_t)&cgd->inq_data, (caddr_t)da_quirk_table, nitems(da_quirk_table), sizeof(*da_quirk_table), scsi_inquiry_match); if (match != NULL) softc->quirks = ((struct da_quirk_entry *)match)->quirks; else softc->quirks = DA_Q_NONE; /* Check if the SIM does not want 6 byte commands */ xpt_path_inq(&cpi, periph->path); if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE)) softc->quirks |= DA_Q_NO_6_BYTE; if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM) softc->zone_mode = DA_ZONE_HOST_MANAGED; else if (softc->quirks & DA_Q_SMR_DM) softc->zone_mode = DA_ZONE_DRIVE_MANAGED; else softc->zone_mode = DA_ZONE_NONE; if (softc->zone_mode != DA_ZONE_NONE) { if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) softc->zone_interface = DA_ZONE_IF_ATA_SAT; else softc->zone_interface = DA_ZONE_IF_ATA_PASS; } else softc->zone_interface = DA_ZONE_IF_SCSI; } TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph); /* * Take an exclusive refcount on the periph while dastart is called * to finish the probe. The reference will be dropped in dadone at * the end of probe. * * XXX if cam_periph_hold returns an error, we don't hold a refcount. */ (void)da_periph_hold(periph, PRIBIO, DA_REF_PROBE_HOLD); /* * Schedule a periodic event to occasionally send an * ordered tag to a device. */ callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0); callout_reset(&softc->sendordered_c, (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, dasendorderedtag, softc); cam_periph_unlock(periph); /* * RBC devices don't have to support READ(6), only READ(10). */ if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC) softc->minimum_cmd_size = 10; else softc->minimum_cmd_size = 6; /* * Load the user's default, if any. */ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size", periph->unit_number); TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size); /* * 6, 10, 12 and 16 are the currently permissible values. */ if (softc->minimum_cmd_size > 12) softc->minimum_cmd_size = 16; else if (softc->minimum_cmd_size > 10) softc->minimum_cmd_size = 12; else if (softc->minimum_cmd_size > 6) softc->minimum_cmd_size = 10; else softc->minimum_cmd_size = 6; /* Predict whether device may support READ CAPACITY(16). */ if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 && (softc->quirks & DA_Q_NO_RC16) == 0) { softc->flags |= DA_FLAG_CAN_RC16; } /* * Register this media as a disk. 
*/ softc->disk = disk_alloc(); softc->disk->d_devstat = devstat_new_entry(periph->periph_name, periph->unit_number, 0, DEVSTAT_BS_UNAVAILABLE, SID_TYPE(&cgd->inq_data) | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_DISK); softc->disk->d_open = daopen; softc->disk->d_close = daclose; softc->disk->d_strategy = dastrategy; softc->disk->d_dump = dadump; softc->disk->d_getattr = dagetattr; softc->disk->d_gone = dadiskgonecb; softc->disk->d_name = "da"; softc->disk->d_drv1 = periph; if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ else if (cpi.maxio > MAXPHYS) softc->maxio = MAXPHYS; /* for safety */ else softc->maxio = cpi.maxio; softc->disk->d_maxsize = softc->maxio; softc->disk->d_unit = periph->unit_number; softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE; if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE; if ((cpi.hba_misc & PIM_UNMAPPED) != 0) { softc->unmappedio = 1; softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO; } cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor, sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr)); strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr)); cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)], cgd->inq_data.product, sizeof(cgd->inq_data.product), sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr)); softc->disk->d_hba_vendor = cpi.hba_vendor; softc->disk->d_hba_device = cpi.hba_device; softc->disk->d_hba_subvendor = cpi.hba_subvendor; softc->disk->d_hba_subdevice = cpi.hba_subdevice; /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * dadiskgonecb()) telling us that our provider has been freed. */ - if (da_periph_acquire(periph, DA_REF_GEOM) != CAM_REQ_CMP) { + if (da_periph_acquire(periph, DA_REF_GEOM) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); /* * Add async callbacks for events of interest. * I don't bother checking if this fails as, * in most cases, the system will function just * fine without them and the only alternative * would be to not attach the device on failure. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION | AC_INQ_CHANGED, daasync, periph, periph->path); /* * Emit an attribute changed notification just in case * physical path information arrived before our async * event handler was registered, but after anyone attaching * to our disk device polled it. */ disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); /* * Schedule a periodic media polling events. 
*/ callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); if ((softc->flags & DA_FLAG_PACK_REMOVABLE) && (cgd->inq_flags & SID_AEN) == 0 && da_poll_period != 0) callout_reset(&softc->mediapoll_c, da_poll_period * hz, damediapoll, periph); xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static int da_zone_bio_to_scsi(int disk_zone_cmd) { switch (disk_zone_cmd) { case DISK_ZONE_OPEN: return ZBC_OUT_SA_OPEN; case DISK_ZONE_CLOSE: return ZBC_OUT_SA_CLOSE; case DISK_ZONE_FINISH: return ZBC_OUT_SA_FINISH; case DISK_ZONE_RWP: return ZBC_OUT_SA_RWP; } return -1; } static int da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp, int *queue_ccb) { struct da_softc *softc; int error; error = 0; if (bp->bio_cmd != BIO_ZONE) { error = EINVAL; goto bailout; } softc = periph->softc; switch (bp->bio_zone.zone_cmd) { case DISK_ZONE_OPEN: case DISK_ZONE_CLOSE: case DISK_ZONE_FINISH: case DISK_ZONE_RWP: { int zone_flags; int zone_sa; uint64_t lba; zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd); if (zone_sa == -1) { xpt_print(periph->path, "Cannot translate zone " "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd); error = EINVAL; goto bailout; } zone_flags = 0; lba = bp->bio_zone.zone_params.rwp.id; if (bp->bio_zone.zone_params.rwp.flags & DISK_ZONE_RWP_FLAG_ALL) zone_flags |= ZBC_OUT_ALL; if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { scsi_zbc_out(&ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*service_action*/ zone_sa, /*zone_id*/ lba, /*zone_flags*/ zone_flags, /*data_ptr*/ NULL, /*dxfer_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); } else { /* * Note that in this case, even though we can * technically use NCQ, we don't bother for several * reasons: * 1. It hasn't been tested on a SAT layer that * supports it. This is new as of SAT-4. * 2. Even when there is a SAT layer that supports * it, that SAT layer will also probably support * ZBC -> ZAC translation, since they are both * in the SAT-4 spec. * 3. Translation will likely be preferable to ATA * passthrough. LSI / Avago at least single * steps ATA passthrough commands in the HBA, * regardless of protocol, so unless that * changes, there is a performance penalty for * doing ATA passthrough no matter whether * you're using NCQ/FPDMA, DMA or PIO. * 4. It requires a 32-byte CDB, which at least at * this point in CAM requires a CDB pointer, which * would require us to allocate an additional bit * of storage separate from the CCB. 
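 *
 * For contrast, the NCQ variant that the reasoning above argues
 * against would pass use_ncq = 1 together with separately allocated
 * 32-byte CDB storage, roughly as sketched here (the cdb32 buffer is
 * hypothetical; its lifetime would have to span the CCB dispatch):
 */
#if 0	/* Illustrative sketch of the deliberately unused NCQ form. */
    static uint8_t cdb32[32];	/* hypothetical 32-byte CDB storage */

    error = scsi_ata_zac_mgmt_out(&ccb->csio,
        /*retries*/ da_retry_count,
        /*cbfcnp*/ dadone,
        /*tag_action*/ MSG_SIMPLE_Q_TAG,
        /*use_ncq*/ 1,
        /*zm_action*/ zone_sa,
        /*zone_id*/ lba,
        /*zone_flags*/ zone_flags,
        /*data_ptr*/ NULL,
        /*dxfer_len*/ 0,
        /*cdb_storage*/ cdb32,
        /*cdb_storage_len*/ sizeof(cdb32),
        /*sense_len*/ SSD_FULL_SIZE,
        /*timeout*/ da_default_timeout * 1000);
#endif
/*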
*/ error = scsi_ata_zac_mgmt_out(&ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*use_ncq*/ 0, /*zm_action*/ zone_sa, /*zone_id*/ lba, /*zone_flags*/ zone_flags, /*data_ptr*/ NULL, /*dxfer_len*/ 0, /*cdb_storage*/ NULL, /*cdb_storage_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (error != 0) { error = EINVAL; xpt_print(periph->path, "scsi_ata_zac_mgmt_out() returned an " "error!"); goto bailout; } } *queue_ccb = 1; break; } case DISK_ZONE_REPORT_ZONES: { uint8_t *rz_ptr; uint32_t num_entries, alloc_size; struct disk_zone_report *rep; rep = &bp->bio_zone.zone_params.report; num_entries = rep->entries_allocated; if (num_entries == 0) { xpt_print(periph->path, "No entries allocated for " "Report Zones request\n"); error = EINVAL; goto bailout; } alloc_size = sizeof(struct scsi_report_zones_hdr) + (sizeof(struct scsi_report_zones_desc) * num_entries); alloc_size = min(alloc_size, softc->disk->d_maxsize); rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO); if (rz_ptr == NULL) { xpt_print(periph->path, "Unable to allocate memory " "for Report Zones request\n"); error = ENOMEM; goto bailout; } if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { scsi_zbc_in(&ccb->csio, /*retries*/ da_retry_count, /*cbcfnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*service_action*/ ZBC_IN_SA_REPORT_ZONES, /*zone_start_lba*/ rep->starting_id, /*zone_options*/ rep->rep_options, /*data_ptr*/ rz_ptr, /*dxfer_len*/ alloc_size, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); } else { /* * Note that in this case, even though we can * technically use NCQ, we don't bother for several * reasons: * 1. It hasn't been tested on a SAT layer that * supports it. This is new as of SAT-4. * 2. Even when there is a SAT layer that supports * it, that SAT layer will also probably support * ZBC -> ZAC translation, since they are both * in the SAT-4 spec. * 3. Translation will likely be preferable to ATA * passthrough. LSI / Avago at least single * steps ATA passthrough commands in the HBA, * regardless of protocol, so unless that * changes, there is a performance penalty for * doing ATA passthrough no matter whether * you're using NCQ/FPDMA, DMA or PIO. * 4. It requires a 32-byte CDB, which at least at * this point in CAM requires a CDB pointer, which * would require us to allocate an additional bit * of storage separate from the CCB. */ error = scsi_ata_zac_mgmt_in(&ccb->csio, /*retries*/ da_retry_count, /*cbcfnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*use_ncq*/ 0, /*zm_action*/ ATA_ZM_REPORT_ZONES, /*zone_id*/ rep->starting_id, /*zone_flags*/ rep->rep_options, /*data_ptr*/ rz_ptr, /*dxfer_len*/ alloc_size, /*cdb_storage*/ NULL, /*cdb_storage_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (error != 0) { error = EINVAL; xpt_print(periph->path, "scsi_ata_zac_mgmt_in() returned an " "error!"); goto bailout; } } /* * For BIO_ZONE, this isn't normally needed. However, it * is used by devstat_end_transaction_bio() to determine * how much data was transferred. */ /* * XXX KDM we have a problem. But I'm not sure how to fix * it. devstat uses bio_bcount - bio_resid to calculate * the amount of data transferred. The GEOM disk code * uses bio_length - bio_resid to calculate the amount of * data in bio_completed. We have different structure * sizes above and below the ada(4) driver. So, if we * use the sizes above, the amount transferred won't be * quite accurate for devstat. 
If we use different sizes * for bio_bcount and bio_length (above and below * respectively), then the residual needs to match one or * the other. Everything is calculated after the bio * leaves the driver, so changing the values around isn't * really an option. For now, just set the count to the * passed in length. This means that the calculations * above (e.g. bio_completed) will be correct, but the * amount of data reported to devstat will be slightly * under or overstated. */ bp->bio_bcount = bp->bio_length; *queue_ccb = 1; break; } case DISK_ZONE_GET_PARAMS: { struct disk_zone_disk_params *params; params = &bp->bio_zone.zone_params.disk_params; bzero(params, sizeof(*params)); switch (softc->zone_mode) { case DA_ZONE_DRIVE_MANAGED: params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED; break; case DA_ZONE_HOST_AWARE: params->zone_mode = DISK_ZONE_MODE_HOST_AWARE; break; case DA_ZONE_HOST_MANAGED: params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED; break; default: case DA_ZONE_NONE: params->zone_mode = DISK_ZONE_MODE_NONE; break; } if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ) params->flags |= DISK_ZONE_DISK_URSWRZ; if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) { params->optimal_seq_zones = softc->optimal_seq_zones; params->flags |= DISK_ZONE_OPT_SEQ_SET; } if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) { params->optimal_nonseq_zones = softc->optimal_nonseq_zones; params->flags |= DISK_ZONE_OPT_NONSEQ_SET; } if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) { params->max_seq_zones = softc->max_seq_zones; params->flags |= DISK_ZONE_MAX_SEQ_SET; } if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP) params->flags |= DISK_ZONE_RZ_SUP; if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP) params->flags |= DISK_ZONE_OPEN_SUP; if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP) params->flags |= DISK_ZONE_CLOSE_SUP; if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP) params->flags |= DISK_ZONE_FINISH_SUP; if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP) params->flags |= DISK_ZONE_RWP_SUP; break; } default: break; } bailout: return (error); } static void dastart(struct cam_periph *periph, union ccb *start_ccb) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n")); skipstate: switch (softc->state) { case DA_STATE_NORMAL: { struct bio *bp; uint8_t tag_code; more: bp = cam_iosched_next_bio(softc->cam_iosched); if (bp == NULL) { if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); scsi_test_unit_ready(&start_ccb->csio, /*retries*/ da_retry_count, dadone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_TUR; xpt_action(start_ccb); } else xpt_release_ccb(start_ccb); break; } if (bp->bio_cmd == BIO_DELETE) { if (softc->delete_func != NULL) { softc->delete_func(periph, start_ccb, bp); goto out; } else { /* Not sure this is possible, but failsafe by lying and saying "sure, done." 
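 *
 * (The delete_func pointer consulted above is installed by
 * dadeletemethodset() from the per-method table da_delete_functions[],
 * so BIO_DELETE dispatch is one indirect call.  Schematically, the
 * table pairs methods with the handlers defined later in this file;
 * the sketch below is illustrative, and da_delete_func_t is the
 * assumed handler typedef, not the table's actual definition.)
 */
#if 0	/* Illustrative sketch of the dispatch-table shape. */
static da_delete_func_t *const da_delete_sketch[] = {
    [DA_DELETE_UNMAP]    = da_delete_unmap,	/* SCSI UNMAP */
    [DA_DELETE_WS16]     = da_delete_ws,	/* WRITE SAME(16) */
    [DA_DELETE_WS10]     = da_delete_ws,	/* WRITE SAME(10) */
    [DA_DELETE_ZERO]     = da_delete_ws,	/* WRITE SAME writing zeroes */
    [DA_DELETE_ATA_TRIM] = da_delete_trim,	/* ATA DSM TRIM via SAT */
};
#endif
/*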
*/ biofinish(bp, NULL, 0); goto more; } } if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); da_periph_release_locked(periph, DA_REF_TUR); } if ((bp->bio_flags & BIO_ORDERED) != 0 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) { softc->flags &= ~DA_FLAG_NEED_OTAG; softc->flags |= DA_FLAG_WAS_OTAG; tag_code = MSG_ORDERED_Q_TAG; } else { tag_code = MSG_SIMPLE_Q_TAG; } switch (bp->bio_cmd) { case BIO_WRITE: case BIO_READ: { void *data_ptr; int rw_op; biotrack(bp, __func__); if (bp->bio_cmd == BIO_WRITE) { softc->flags |= DA_FLAG_DIRTY; rw_op = SCSI_RW_WRITE; } else { rw_op = SCSI_RW_READ; } data_ptr = bp->bio_data; if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) { rw_op |= SCSI_RW_BIO; data_ptr = bp; } scsi_read_write(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/tag_code, rw_op, /*byte2*/0, softc->minimum_cmd_size, /*lba*/bp->bio_pblkno, /*block_count*/bp->bio_bcount / softc->params.secsize, data_ptr, /*dxfer_len*/ bp->bio_bcount, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) start_ccb->csio.bio = bp; #endif break; } case BIO_FLUSH: /* * If we don't support sync cache, or the disk * isn't dirty, FLUSH is a no-op. Use the * allocated * CCB for the next bio if one is * available. */ if ((softc->quirks & DA_Q_NO_SYNC_CACHE) != 0 || (softc->flags & DA_FLAG_DIRTY) == 0) { biodone(bp); goto skipstate; } /* * BIO_FLUSH doesn't currently communicate * range data, so we synchronize the cache * over the whole disk. We also force * ordered tag semantics the flush applies * to all previously queued I/O. */ scsi_synchronize_cache(&start_ccb->csio, /*retries*/1, /*cbfcnp*/dadone, MSG_ORDERED_Q_TAG, /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE, da_default_timeout*1000); /* * Clear the dirty flag before sending the command. * Either this sync cache will be successful, or it * will fail after a retry. If it fails, it is * unlikely to be successful if retried later, so * we'll save ourselves time by just marking the * device clean. 
*/ softc->flags &= ~DA_FLAG_DIRTY; break; case BIO_ZONE: { int error, queue_ccb; queue_ccb = 0; error = da_zone_cmd(periph, start_ccb, bp,&queue_ccb); if ((error != 0) || (queue_ccb == 0)) { biofinish(bp, NULL, error); xpt_release_ccb(start_ccb); return; } break; } } start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO; start_ccb->ccb_h.flags |= CAM_UNLOCKED; start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout); out: LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h, periph_links.le); /* We expect a unit attention from this device */ if ((softc->flags & DA_FLAG_RETRY_UA) != 0) { start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA; softc->flags &= ~DA_FLAG_RETRY_UA; } start_ccb->ccb_h.ccb_bp = bp; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); softc->refcount--; /* May have more work to do, so ensure we stay scheduled */ daschedule(periph); break; } case DA_STATE_PROBE_WP: { void *mode_buf; int mode_buf_len; mode_buf_len = 192; mode_buf = malloc(mode_buf_len, M_SCSIDA, M_NOWAIT); if (mode_buf == NULL) { xpt_print(periph->path, "Unable to send mode sense - " "malloc failure\n"); softc->state = DA_STATE_PROBE_RC; goto skipstate; } scsi_mode_sense_len(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*dbd*/ FALSE, /*pc*/ SMS_PAGE_CTRL_CURRENT, /*page*/ SMS_ALL_PAGES_PAGE, /*param_buf*/ mode_buf, /*param_len*/ mode_buf_len, /*minimum_cmd_size*/ softc->minimum_cmd_size, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_WP; xpt_action(start_ccb); break; } case DA_STATE_PROBE_RC: { struct scsi_read_capacity_data *rcap; rcap = (struct scsi_read_capacity_data *) malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO); if (rcap == NULL) { printf("dastart: Couldn't malloc read_capacity data\n"); /* da_free_periph??? */ break; } scsi_read_capacity(&start_ccb->csio, /*retries*/da_retry_count, dadone, MSG_SIMPLE_Q_TAG, rcap, SSD_FULL_SIZE, /*timeout*/5000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC; xpt_action(start_ccb); break; } case DA_STATE_PROBE_RC16: { struct scsi_read_capacity_data_long *rcaplong; rcaplong = (struct scsi_read_capacity_data_long *) malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO); if (rcaplong == NULL) { printf("dastart: Couldn't malloc read_capacity data\n"); /* da_free_periph??? */ break; } scsi_read_capacity_16(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*lba*/ 0, /*reladr*/ 0, /*pmi*/ 0, /*rcap_buf*/ (uint8_t *)rcaplong, /*rcap_buf_len*/ sizeof(*rcaplong), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16; xpt_action(start_ccb); break; } case DA_STATE_PROBE_LBP: { struct scsi_vpd_logical_block_prov *lbp; if (!scsi_vpd_supported_page(periph, SVPD_LBP)) { /* * If we get here we don't support any SBC-3 delete * methods with UNMAP as the Logical Block Provisioning * VPD page support is required for devices which * support it according to T10/1799-D Revision 31 * however older revisions of the spec don't mandate * this so we currently don't remove these methods * from the available set. 
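 *
 * (For orientation: each DA_STATE_PROBE_* case either issues its
 * command and waits for dadone(), or advances softc->state and jumps
 * back to skipstate when a page or log is unsupported.  The full chain
 * for a zoned, SAT-attached drive is sketched below; the array is
 * illustrative only and assumes the driver's da_state enum.)
 */
#if 0	/* Illustrative sketch of the probe sequence; not driver code. */
static const da_state da_probe_order[] = {
    DA_STATE_PROBE_WP,		/* MODE SENSE: write-protect flag */
    DA_STATE_PROBE_RC,		/* READ CAPACITY(10) */
    DA_STATE_PROBE_RC16,	/* READ CAPACITY(16) */
    DA_STATE_PROBE_LBP,		/* logical block provisioning VPD */
    DA_STATE_PROBE_BLK_LIMITS,	/* block limits VPD */
    DA_STATE_PROBE_BDC,		/* block device characteristics VPD */
    DA_STATE_PROBE_ATA,		/* ATA IDENTIFY via SAT */
    DA_STATE_PROBE_ATA_LOGDIR,	/* ATA log directory */
    DA_STATE_PROBE_ATA_IDDIR,	/* ATA identify-device log list */
    DA_STATE_PROBE_ATA_SUP,	/* supported-capabilities log page */
    DA_STATE_PROBE_ATA_ZONE,	/* ATA zoned-device information log */
    DA_STATE_PROBE_ZONE,	/* zoned block device VPD */
};
#endif
/*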
*/ softc->state = DA_STATE_PROBE_BLK_LIMITS; goto skipstate; } lbp = (struct scsi_vpd_logical_block_prov *) malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO); if (lbp == NULL) { printf("dastart: Couldn't malloc lbp data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)lbp, /*inq_len*/sizeof(*lbp), /*evpd*/TRUE, /*page_code*/SVPD_LBP, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP; xpt_action(start_ccb); break; } case DA_STATE_PROBE_BLK_LIMITS: { struct scsi_vpd_block_limits *block_limits; if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) { /* Not supported skip to next probe */ softc->state = DA_STATE_PROBE_BDC; goto skipstate; } block_limits = (struct scsi_vpd_block_limits *) malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO); if (block_limits == NULL) { printf("dastart: Couldn't malloc block_limits data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)block_limits, /*inq_len*/sizeof(*block_limits), /*evpd*/TRUE, /*page_code*/SVPD_BLOCK_LIMITS, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS; xpt_action(start_ccb); break; } case DA_STATE_PROBE_BDC: { struct scsi_vpd_block_characteristics *bdc; if (!scsi_vpd_supported_page(periph, SVPD_BDC)) { softc->state = DA_STATE_PROBE_ATA; goto skipstate; } bdc = (struct scsi_vpd_block_characteristics *) malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); if (bdc == NULL) { printf("dastart: Couldn't malloc bdc data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)bdc, /*inq_len*/sizeof(*bdc), /*evpd*/TRUE, /*page_code*/SVPD_BDC, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA: { struct ata_params *ata_params; if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { if ((softc->zone_mode == DA_ZONE_HOST_AWARE) || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { /* * Note that if the ATA VPD page isn't * supported, we aren't talking to an ATA * device anyway. Support for that VPD * page is mandatory for SCSI to ATA (SAT) * translation layers. */ softc->state = DA_STATE_PROBE_ZONE; goto skipstate; } daprobedone(periph, start_ccb); break; } ata_params = (struct ata_params*) malloc(sizeof(*ata_params), M_SCSIDA,M_NOWAIT|M_ZERO); if (ata_params == NULL) { xpt_print(periph->path, "Couldn't malloc ata_params " "data\n"); /* da_free_periph??? */ break; } scsi_ata_identify(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*data_ptr*/(u_int8_t *)ata_params, /*dxfer_len*/sizeof(*ata_params), /*sense_len*/SSD_FULL_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_LOGDIR: { struct ata_gp_log_dir *log_dir; int retval; retval = 0; if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) { /* * If we don't have log support, not much point in * trying to probe zone support. 
*/ daprobedone(periph, start_ccb); break; } /* * If we have an ATA device (the SCSI ATA Information VPD * page should be present and the ATA identify should have * succeeded) and it supports logs, ask for the log directory. */ log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO); if (log_dir == NULL) { xpt_print(periph->path, "Couldn't malloc log_dir " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_LOG_DIRECTORY, /*page_number*/ 0, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)log_dir, /*dxfer_len*/ sizeof(*log_dir), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(log_dir, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_IDDIR: { struct ata_identify_log_pages *id_dir; int retval; retval = 0; /* * Check here to see whether the Identify Device log is * supported in the directory of logs. If so, continue * with requesting the log of identify device pages. */ if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) { daprobedone(periph, start_ccb); break; } id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO); if (id_dir == NULL) { xpt_print(periph->path, "Couldn't malloc id_dir " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_PAGE_LIST, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)id_dir, /*dxfer_len*/ sizeof(*id_dir), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(id_dir, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_SUP: { struct ata_identify_log_sup_cap *sup_cap; int retval; retval = 0; /* * Check here to see whether the Supported Capabilities log * is in the list of Identify Device logs. */ if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) { daprobedone(periph, start_ccb); break; } sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO); if (sup_cap == NULL) { xpt_print(periph->path, "Couldn't malloc sup_cap " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_SUP_CAP, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 
                AP_PROTO_DMA : AP_PROTO_PIO_IN,
            /*data_ptr*/ (uint8_t *)sup_cap,
            /*dxfer_len*/ sizeof(*sup_cap),
            /*sense_len*/ SSD_FULL_SIZE,
            /*timeout*/ da_default_timeout * 1000);

        if (retval != 0) {
            xpt_print(periph->path, "scsi_ata_read_log() failed!");
            free(sup_cap, M_SCSIDA);
            daprobedone(periph, start_ccb);
            break;
        }

        start_ccb->ccb_h.ccb_bp = NULL;
        start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP;
        xpt_action(start_ccb);
        break;
    }
    case DA_STATE_PROBE_ATA_ZONE:
    {
        struct ata_zoned_info_log *ata_zone;
        int retval;

        retval = 0;

        /*
         * Check here to see whether the zoned device information
         * page is supported.  If so, continue on to request it.
         * If not, skip to DA_STATE_PROBE_LOG or done.
         */
        if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) {
            daprobedone(periph, start_ccb);
            break;
        }
        ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA,
                  M_NOWAIT|M_ZERO);
        if (ata_zone == NULL) {
            xpt_print(periph->path, "Couldn't malloc ata_zone "
                "data\n");
            daprobedone(periph, start_ccb);
            break;
        }

        retval = scsi_ata_read_log(&start_ccb->csio,
            /*retries*/ da_retry_count,
            /*cbfcnp*/ dadone,
            /*tag_action*/ MSG_SIMPLE_Q_TAG,
            /*log_address*/ ATA_IDENTIFY_DATA_LOG,
            /*page_number*/ ATA_IDL_ZDI,
            /*block_count*/ 1,
            /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
                AP_PROTO_DMA : AP_PROTO_PIO_IN,
            /*data_ptr*/ (uint8_t *)ata_zone,
            /*dxfer_len*/ sizeof(*ata_zone),
            /*sense_len*/ SSD_FULL_SIZE,
            /*timeout*/ da_default_timeout * 1000);

        if (retval != 0) {
            xpt_print(periph->path, "scsi_ata_read_log() failed!");
            free(ata_zone, M_SCSIDA);
            daprobedone(periph, start_ccb);
            break;
        }
        start_ccb->ccb_h.ccb_bp = NULL;
        start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE;

        xpt_action(start_ccb);
        break;
    }
    case DA_STATE_PROBE_ZONE:
    {
        struct scsi_vpd_zoned_bdc *bdc;

        /*
         * Note that this page will be supported for SCSI protocol
         * devices that support ZBC (SMR devices), as well as ATA
         * protocol devices that are behind a SAT (SCSI to ATA
         * Translation) layer that supports converting ZBC commands
         * to their ZAC equivalents.
         */
        if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) {
            daprobedone(periph, start_ccb);
            break;
        }
        bdc = (struct scsi_vpd_zoned_bdc *)
            malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);

        if (bdc == NULL) {
            xpt_release_ccb(start_ccb);
            xpt_print(periph->path, "Couldn't malloc zone VPD "
                "data\n");
            break;
        }
        scsi_inquiry(&start_ccb->csio,
                 /*retries*/da_retry_count,
                 /*cbfcnp*/dadone,
                 /*tag_action*/MSG_SIMPLE_Q_TAG,
                 /*inq_buf*/(u_int8_t *)bdc,
                 /*inq_len*/sizeof(*bdc),
                 /*evpd*/TRUE,
                 /*page_code*/SVPD_ZONED_BDC,
                 /*sense_len*/SSD_FULL_SIZE,
                 /*timeout*/da_default_timeout * 1000);
        start_ccb->ccb_h.ccb_bp = NULL;
        start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE;
        xpt_action(start_ccb);
        break;
    }
    }
}

/*
 * In each of the methods below, while it's the caller's
 * responsibility to ensure the request will fit into a
 * single device request, we might have changed the delete
 * method due to the device incorrectly advertising either
 * its supported methods or limits.
 *
 * To prevent this causing further issues, we validate the
 * request against the method's limits and warn, which would
 * otherwise be unnecessary.
 */
static void
da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
{
    struct da_softc *softc = (struct da_softc *)periph->softc;
    struct bio *bp1;
    uint8_t *buf = softc->unmap_buf;
    struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE];
    uint64_t lba, lastlba = (uint64_t)-1;
    uint64_t totalcount = 0;
    uint64_t count;
    uint32_t c, lastcount = 0, ranges = 0;

    /*
     * Currently this doesn't take the UNMAP
     * Granularity and Granularity Alignment
     * fields into account.
     *
     * This could result in both suboptimal UNMAP
     * requests as well as UNMAP calls unmapping
     * fewer LBAs than requested.
     */
    bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
    bp1 = bp;
    do {
        /*
         * Note: ada and da are different in how they store the
         * pending bp's in a trim.  ada stores all of them in the
         * trim_req.bps.  da stores all but the first one in the
         * delete_run_queue.  ada then completes all the bps in
         * its adadone() loop.  da completes all the bps in the
         * delete_run_queue in dadone, and relies on the biodone
         * after to complete.  This should be reconciled since there's
         * no real reason to do it differently. XXX
         */
        if (bp1 != bp)
            bioq_insert_tail(&softc->delete_run_queue, bp1);
        lba = bp1->bio_pblkno;
        count = bp1->bio_bcount / softc->params.secsize;

        /* Try to extend the previous range. */
        if (lba == lastlba) {
            c = omin(count, UNMAP_RANGE_MAX - lastcount);
            lastlba += c;
            lastcount += c;
            scsi_ulto4b(lastcount, d[ranges - 1].length);
            count -= c;
            lba += c;
            totalcount += c;
        } else if ((softc->quirks & DA_Q_STRICT_UNMAP) &&
            softc->unmap_gran != 0) {
            /* Align length of the previous range. */
            if ((c = lastcount % softc->unmap_gran) != 0) {
                if (lastcount <= c) {
                    totalcount -= lastcount;
                    lastlba = (uint64_t)-1;
                    lastcount = 0;
                    ranges--;
                } else {
                    totalcount -= c;
                    lastlba -= c;
                    lastcount -= c;
                    scsi_ulto4b(lastcount,
                        d[ranges - 1].length);
                }
            }
            /* Align beginning of the new range. */
            c = (lba - softc->unmap_gran_align) % softc->unmap_gran;
            if (c != 0) {
                c = softc->unmap_gran - c;
                if (count <= c) {
                    count = 0;
                } else {
                    lba += c;
                    count -= c;
                }
            }
        }

        while (count > 0) {
            c = omin(count, UNMAP_RANGE_MAX);
            if (totalcount + c > softc->unmap_max_lba ||
                ranges >= softc->unmap_max_ranges) {
                xpt_print(periph->path,
                    "%s issuing short delete %ld > %ld"
                    " || %d >= %d",
                    da_delete_method_desc[softc->delete_method],
                    totalcount + c, softc->unmap_max_lba,
                    ranges, softc->unmap_max_ranges);
                break;
            }
            scsi_u64to8b(lba, d[ranges].lba);
            scsi_ulto4b(c, d[ranges].length);
            lba += c;
            totalcount += c;
            ranges++;
            count -= c;
            lastlba = lba;
            lastcount = c;
        }
        bp1 = cam_iosched_next_trim(softc->cam_iosched);
        if (bp1 == NULL)
            break;
        if (ranges >= softc->unmap_max_ranges ||
            totalcount + bp1->bio_bcount /
            softc->params.secsize > softc->unmap_max_lba) {
            cam_iosched_put_back_trim(softc->cam_iosched, bp1);
            break;
        }
    } while (1);

    /* Align length of the last range.
*/ if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 && (c = lastcount % softc->unmap_gran) != 0) { if (lastcount <= c) ranges--; else scsi_ulto4b(lastcount - c, d[ranges - 1].length); } scsi_ulto2b(ranges * 16 + 6, &buf[0]); scsi_ulto2b(ranges * 16, &buf[2]); scsi_unmap(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*byte2*/0, /*data_ptr*/ buf, /*dxfer_len*/ ranges * 16 + 8, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } static void da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc = (struct da_softc *)periph->softc; struct bio *bp1; uint8_t *buf = softc->unmap_buf; uint64_t lastlba = (uint64_t)-1; uint64_t count; uint64_t lba; uint32_t lastcount = 0, c, requestcount; int ranges = 0, off, block_count; bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); bp1 = bp; do { if (bp1 != bp)//XXX imp XXX bioq_insert_tail(&softc->delete_run_queue, bp1); lba = bp1->bio_pblkno; count = bp1->bio_bcount / softc->params.secsize; requestcount = count; /* Try to extend the previous range. */ if (lba == lastlba) { c = omin(count, ATA_DSM_RANGE_MAX - lastcount); lastcount += c; off = (ranges - 1) * 8; buf[off + 6] = lastcount & 0xff; buf[off + 7] = (lastcount >> 8) & 0xff; count -= c; lba += c; } while (count > 0) { c = omin(count, ATA_DSM_RANGE_MAX); off = ranges * 8; buf[off + 0] = lba & 0xff; buf[off + 1] = (lba >> 8) & 0xff; buf[off + 2] = (lba >> 16) & 0xff; buf[off + 3] = (lba >> 24) & 0xff; buf[off + 4] = (lba >> 32) & 0xff; buf[off + 5] = (lba >> 40) & 0xff; buf[off + 6] = c & 0xff; buf[off + 7] = (c >> 8) & 0xff; lba += c; ranges++; count -= c; lastcount = c; if (count != 0 && ranges == softc->trim_max_ranges) { xpt_print(periph->path, "%s issuing short delete %ld > %ld\n", da_delete_method_desc[softc->delete_method], requestcount, (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX); break; } } lastlba = lba; bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (bp1->bio_bcount / softc->params.secsize > (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); block_count = howmany(ranges, ATA_DSM_BLK_RANGES); scsi_ata_trim(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, block_count, /*data_ptr*/buf, /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } /* * We calculate ws_max_blks here based off d_delmaxsize instead * of using softc->ws_max_blks as it is absolute max for the * device not the protocol max which may well be lower. 
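 *
 * The WRITE SAME variants handled below differ only in CDB size and in
 * whether the SWS_UNMAP bit is set; DA_DELETE_ZERO writes zeroes while
 * WS10/WS16 request unmapping.  That selection reduces to the sketch
 * below (the helper is hypothetical; da_delete_ws() inlines it):
 */
#if 0	/* Illustrative sketch; da_ws_params() is hypothetical. */
static void
da_ws_params(da_delete_methods m, uint8_t *byte2, int *cdb_size)
{
    *byte2 = (m == DA_DELETE_ZERO) ? 0 : SWS_UNMAP;	/* unmap vs. zero */
    *cdb_size = (m == DA_DELETE_WS16) ? 16 : 10;	/* WS(16) vs. WS(10) */
}
#endif
/*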
*/ static void da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc; struct bio *bp1; uint64_t ws_max_blks; uint64_t lba; uint64_t count; /* forward compat with WS32 */ softc = (struct da_softc *)periph->softc; ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize; lba = bp->bio_pblkno; count = 0; bp1 = bp; do { if (bp1 != bp)//XXX imp XXX bioq_insert_tail(&softc->delete_run_queue, bp1); count += bp1->bio_bcount / softc->params.secsize; if (count > ws_max_blks) { xpt_print(periph->path, "%s issuing short delete %ld > %ld\n", da_delete_method_desc[softc->delete_method], count, ws_max_blks); count = omin(count, ws_max_blks); break; } bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (lba + count != bp1->bio_pblkno || count + bp1->bio_bcount / softc->params.secsize > ws_max_blks) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); scsi_write_same(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*byte2*/softc->delete_method == DA_DELETE_ZERO ? 0 : SWS_UNMAP, softc->delete_method == DA_DELETE_WS16 ? 16 : 10, /*lba*/lba, /*block_count*/count, /*data_ptr*/ __DECONST(void *, zero_region), /*dxfer_len*/ softc->params.secsize, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } static int cmd6workaround(union ccb *ccb) { struct scsi_rw_6 cmd6; struct scsi_rw_10 *cmd10; struct da_softc *softc; u_int8_t *cdb; struct bio *bp; int frozen; cdb = ccb->csio.cdb_io.cdb_bytes; softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc; if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) { da_delete_methods old_method = softc->delete_method; /* * Typically there are two reasons for failure here * 1. Delete method was detected as supported but isn't * 2. Delete failed due to invalid params e.g. too big * * While we will attempt to choose an alternative delete method * this may result in short deletes if the existing delete * requests from geom are big for the new method chosen. * * This method assumes that the error which triggered this * will not retry the io otherwise a panic will occur */ dadeleteflag(softc, old_method, 0); dadeletemethodchoose(softc, DA_DELETE_DISABLE); if (softc->delete_method == DA_DELETE_DISABLE) xpt_print(ccb->ccb_h.path, "%s failed, disabling BIO_DELETE\n", da_delete_method_desc[old_method]); else xpt_print(ccb->ccb_h.path, "%s failed, switching to %s BIO_DELETE\n", da_delete_method_desc[old_method], da_delete_method_desc[softc->delete_method]); while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL) cam_iosched_queue_work(softc->cam_iosched, bp); cam_iosched_queue_work(softc->cam_iosched, (struct bio *)ccb->ccb_h.ccb_bp); ccb->ccb_h.ccb_bp = NULL; return (0); } /* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && (*cdb == PREVENT_ALLOW) && (softc->quirks & DA_Q_NO_PREVENT) == 0) { if (bootverbose) xpt_print(ccb->ccb_h.path, "PREVENT ALLOW MEDIUM REMOVAL not supported.\n"); softc->quirks |= DA_Q_NO_PREVENT; return (0); } /* Detect unsupported SYNCHRONIZE CACHE(10). 
*/ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && (*cdb == SYNCHRONIZE_CACHE) && (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { if (bootverbose) xpt_print(ccb->ccb_h.path, "SYNCHRONIZE CACHE(10) not supported.\n"); softc->quirks |= DA_Q_NO_SYNC_CACHE; softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE; return (0); } /* Translation only possible if CDB is an array and cmd is R/W6 */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 || (*cdb != READ_6 && *cdb != WRITE_6)) return 0; xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, " "increasing minimum_cmd_size to 10.\n"); softc->minimum_cmd_size = 10; bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6)); cmd10 = (struct scsi_rw_10 *)cdb; cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10; cmd10->byte2 = 0; scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr); cmd10->reserved = 0; scsi_ulto2b(cmd6.length, cmd10->length); cmd10->control = cmd6.control; ccb->csio.cdb_len = sizeof(*cmd10); /* Requeue request, unfreezing queue if necessary */ frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_action(ccb); if (frozen) { cam_release_devq(ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } return (ERESTART); } static void dazonedone(struct cam_periph *periph, union ccb *ccb) { struct da_softc *softc; struct bio *bp; softc = periph->softc; bp = (struct bio *)ccb->ccb_h.ccb_bp; switch (bp->bio_zone.zone_cmd) { case DISK_ZONE_OPEN: case DISK_ZONE_CLOSE: case DISK_ZONE_FINISH: case DISK_ZONE_RWP: break; case DISK_ZONE_REPORT_ZONES: { uint32_t avail_len; struct disk_zone_report *rep; struct scsi_report_zones_hdr *hdr; struct scsi_report_zones_desc *desc; struct disk_zone_rep_entry *entry; uint32_t hdr_len, num_avail; uint32_t num_to_fill, i; int ata; rep = &bp->bio_zone.zone_params.report; avail_len = ccb->csio.dxfer_len - ccb->csio.resid; /* * Note that bio_resid isn't normally used for zone * commands, but it is used by devstat_end_transaction_bio() * to determine how much data was transferred. Because * the size of the SCSI/ATA data structures is different * than the size of the BIO interface structures, the * amount of data actually transferred from the drive will * be different than the amount of data transferred to * the user. */ bp->bio_resid = ccb->csio.resid; hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr; if (avail_len < sizeof(*hdr)) { /* * Is there a better error than EIO here? We asked * for at least the header, and we got less than * that. */ bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; break; } if (softc->zone_interface == DA_ZONE_IF_ATA_PASS) ata = 1; else ata = 0; hdr_len = ata ? le32dec(hdr->length) : scsi_4btoul(hdr->length); if (hdr_len > 0) rep->entries_available = hdr_len / sizeof(*desc); else rep->entries_available = 0; /* * NOTE: using the same values for the BIO version of the * same field as the SCSI/ATA values. This means we could * get some additional values that aren't defined in bio.h * if more values of the same field are defined later. */ rep->header.same = hdr->byte4 & SRZ_SAME_MASK; rep->header.maximum_lba = ata ? le64dec(hdr->maximum_lba) : scsi_8btou64(hdr->maximum_lba); /* * If the drive reports no entries that match the query, * we're done. */ if (hdr_len == 0) { rep->entries_filled = 0; break; } num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc), hdr_len / sizeof(*desc)); /* * If the drive didn't return any data, then we're done. 
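 *
 * (The ata flag above selects the decode routine because ATA log data
 * is little-endian while SCSI data is big-endian; the same two-way
 * decode recurs for every multi-byte field in this function.  A
 * hypothetical helper capturing the pattern:)
 */
#if 0	/* Illustrative sketch; da_zone_dec64() is hypothetical. */
static uint64_t
da_zone_dec64(int ata, const uint8_t *field)
{
    /* ATA log pages are little-endian; SCSI fields are big-endian. */
    return (ata ? le64dec(field) : scsi_8btou64(field));
}
#endif
/*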
*/ if (num_avail == 0) { rep->entries_filled = 0; break; } num_to_fill = min(num_avail, rep->entries_allocated); /* * If the user didn't allocate any entries for us to fill, * we're done. */ if (num_to_fill == 0) { rep->entries_filled = 0; break; } for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0]; i < num_to_fill; i++, desc++, entry++) { /* * NOTE: we're mapping the values here directly * from the SCSI/ATA bit definitions to the bio.h * definitions. There is also a warning in * disk_zone.h, but the impact is that if * additional values are added in the SCSI/ATA * specs these will be visible to consumers of * this interface. */ entry->zone_type = desc->zone_type & SRZ_TYPE_MASK; entry->zone_condition = (desc->zone_flags & SRZ_ZONE_COND_MASK) >> SRZ_ZONE_COND_SHIFT; entry->zone_flags |= desc->zone_flags & (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET); entry->zone_length = ata ? le64dec(desc->zone_length) : scsi_8btou64(desc->zone_length); entry->zone_start_lba = ata ? le64dec(desc->zone_start_lba) : scsi_8btou64(desc->zone_start_lba); entry->write_pointer_lba = ata ? le64dec(desc->write_pointer_lba) : scsi_8btou64(desc->write_pointer_lba); } rep->entries_filled = num_to_fill; break; } case DISK_ZONE_GET_PARAMS: default: /* * In theory we should not get a GET_PARAMS bio, since it * should be handled without queueing the command to the * drive. */ panic("%s: Invalid zone command %d", __func__, bp->bio_zone.zone_cmd); break; } if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) free(ccb->csio.data_ptr, M_SCSIDA); } static void dadone(struct cam_periph *periph, union ccb *done_ccb) { struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; da_ccb_state state; softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n")); csio = &done_ccb->csio; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (csio->bio != NULL) biotrack(csio->bio, __func__); #endif state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK; switch (state) { case DA_CCB_BUFFER_IO: case DA_CCB_DELETE: { struct bio *bp, *bp1; cam_periph_lock(periph); bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { int error; int sf; if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0) sf = SF_RETRY_UA; else sf = 0; error = daerror(done_ccb, CAM_RETRY_SELTO, sf); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ cam_periph_unlock(periph); return; } bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if (error != 0) { int queued_error; /* * return all queued I/O with EIO, so that * the client can retry these I/Os in the * proper order should it attempt to recover. */ queued_error = EIO; if (error == ENXIO && (softc->flags & DA_FLAG_PACK_INVALID)== 0) { /* * Catastrophic error. Mark our pack as * invalid. */ /* * XXX See if this is really a media * XXX change first?
*/ xpt_print(periph->path, "Invalidating pack\n"); softc->flags |= DA_FLAG_PACK_INVALID; #ifdef CAM_IO_STATS softc->invalidations++; #endif queued_error = ENXIO; } cam_iosched_flush(softc->cam_iosched, NULL, queued_error); if (bp != NULL) { bp->bio_error = error; bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } } else if (bp != NULL) { if (state == DA_CCB_DELETE) bp->bio_resid = 0; else bp->bio_resid = csio->resid; bp->bio_error = 0; if (bp->bio_resid != 0) bp->bio_flags |= BIO_ERROR; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } else if (bp != NULL) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) panic("REQ_CMP with QFRZN"); if (bp->bio_cmd == BIO_ZONE) dazonedone(periph, done_ccb); else if (state == DA_CCB_DELETE) bp->bio_resid = 0; else bp->bio_resid = csio->resid; if ((csio->resid > 0) && (bp->bio_cmd != BIO_ZONE)) bp->bio_flags |= BIO_ERROR; if (softc->error_inject != 0) { bp->bio_error = softc->error_inject; bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; softc->error_inject = 0; } } if (bp != NULL) biotrack(bp, __func__); LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); if (LIST_EMPTY(&softc->pending_ccbs)) softc->flags |= DA_FLAG_WAS_OTAG; /* * We need to call cam_iosched before we call biodone so that we * don't measure any activity that happens in the completion * routine, which in the case of sendfile can be quite * extensive. */ cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb); xpt_release_ccb(done_ccb); if (state == DA_CCB_DELETE) { TAILQ_HEAD(, bio) queue; TAILQ_INIT(&queue); TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue); softc->delete_run_queue.insert_point = NULL; /* * Normally, the xpt_release_ccb() above would make sure * that when we have more work to do, that work would * get kicked off. However, we specifically keep * delete_running set to 0 before the call above to * allow other I/O to progress when many BIO_DELETE * requests are pushed down. We set delete_running to 0 * and call daschedule again so that we don't stall if * there are no other I/Os pending apart from BIO_DELETEs. 
*/ cam_iosched_trim_done(softc->cam_iosched); daschedule(periph); cam_periph_unlock(periph); while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, bp1, bio_queue); bp1->bio_error = bp->bio_error; if (bp->bio_flags & BIO_ERROR) { bp1->bio_flags |= BIO_ERROR; bp1->bio_resid = bp1->bio_bcount; } else bp1->bio_resid = 0; biodone(bp1); } } else { daschedule(periph); cam_periph_unlock(periph); } if (bp != NULL) biodone(bp); return; } case DA_CCB_PROBE_WP: { struct scsi_mode_header_6 *mode_hdr6; struct scsi_mode_header_10 *mode_hdr10; uint8_t dev_spec; if (softc->minimum_cmd_size > 6) { mode_hdr10 = (struct scsi_mode_header_10 *)csio->data_ptr; dev_spec = mode_hdr10->dev_spec; } else { mode_hdr6 = (struct scsi_mode_header_6 *)csio->data_ptr; dev_spec = mode_hdr6->dev_spec; } if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) { if ((dev_spec & 0x80) != 0) softc->disk->d_flags |= DISKFLAG_WRITE_PROTECT; else softc->disk->d_flags &= ~DISKFLAG_WRITE_PROTECT; } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); xpt_release_ccb(done_ccb); if ((softc->flags & DA_FLAG_CAN_RC16) != 0) softc->state = DA_STATE_PROBE_RC16; else softc->state = DA_STATE_PROBE_RC; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_RC: case DA_CCB_PROBE_RC16: { struct scsi_read_capacity_data *rdcap; struct scsi_read_capacity_data_long *rcaplong; char *announce_buf; int lbp; lbp = 0; rdcap = NULL; rcaplong = NULL; /* XXX TODO: can this be a malloc? */ announce_buf = softc->announce_temp; bzero(announce_buf, DA_ANNOUNCETMP_SZ); if (state == DA_CCB_PROBE_RC) rdcap =(struct scsi_read_capacity_data *)csio->data_ptr; else rcaplong = (struct scsi_read_capacity_data_long *) csio->data_ptr; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { struct disk_params *dp; uint32_t block_size; uint64_t maxsector; u_int lalba; /* Lowest aligned LBA. */ if (state == DA_CCB_PROBE_RC) { block_size = scsi_4btoul(rdcap->length); maxsector = scsi_4btoul(rdcap->addr); lalba = 0; /* * According to SBC-2, if the standard 10 * byte READ CAPACITY command returns 2^32, * we should issue the 16 byte version of * the command, since the device in question * has more sectors than can be represented * with the short version of the command. */ if (maxsector == 0xffffffff) { free(rdcap, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_RC16; xpt_schedule(periph, priority); return; } } else { block_size = scsi_4btoul(rcaplong->length); maxsector = scsi_8btou64(rcaplong->addr); lalba = scsi_2btoul(rcaplong->lalba_lbp); } /* * Because GEOM code just will panic us if we * give them an 'illegal' value we'll avoid that * here. */ if (block_size == 0) { block_size = 512; if (maxsector == 0) maxsector = -1; } if (block_size >= MAXPHYS) { xpt_print(periph->path, "unsupportable block size %ju\n", (uintmax_t) block_size); announce_buf = NULL; cam_periph_invalidate(periph); } else { /* * We pass rcaplong into dasetgeom(), * because it will only use it if it is * non-NULL. 
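*/
/*
 * [Editor's sketch, not part of this revision] DA_CCB_PROBE_WP above
 * reads the write-protect flag out of the MODE SENSE header's
 * device-specific parameter: byte 2 of the 6-byte header, byte 3 of
 * the 10-byte header, WP in bit 7 for direct-access devices.
 */
#if 0
#include <stdint.h>

static int
mode_sense_write_protected(const uint8_t *hdr, int used_10_byte_cmd)
{
	uint8_t dev_spec = used_10_byte_cmd ? hdr[3] : hdr[2];

	return ((dev_spec & 0x80) != 0);	/* WP bit */
}
#endif
/*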
*/ dasetgeom(periph, block_size, maxsector, rcaplong, sizeof(*rcaplong)); lbp = (lalba & SRC16_LBPME_A); dp = &softc->params; snprintf(announce_buf, DA_ANNOUNCETMP_SZ, "%juMB (%ju %u byte sectors)", ((uintmax_t)dp->secsize * dp->sectors) / (1024 * 1024), (uintmax_t)dp->sectors, dp->secsize); } } else { int error; /* * Retry any UNIT ATTENTION type errors. They * are expected at boot. */ error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } else if (error != 0) { int asc, ascq; int sense_key, error_code; int have_sense; cam_status status; struct ccb_getdev cgd; /* Don't wedge this device's queue */ status = done_ccb->ccb_h.status; if ((status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if (scsi_extract_sense_ccb(done_ccb, &error_code, &sense_key, &asc, &ascq)) have_sense = TRUE; else have_sense = FALSE; /* * If we tried READ CAPACITY(16) and failed, * fallback to READ CAPACITY(10). */ if ((state == DA_CCB_PROBE_RC16) && (softc->flags & DA_FLAG_CAN_RC16) && (((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) || ((have_sense) && (error_code == SSD_CURRENT_ERROR) && (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) { softc->flags &= ~DA_FLAG_CAN_RC16; free(rdcap, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_RC; xpt_schedule(periph, priority); return; } /* * Attach to anything that claims to be a * direct access or optical disk device, * as long as it doesn't return a "Logical * unit not supported" (0x25) error. * "Internal Target Failure" (0x44) is also * special and typically means that the * device is a SATA drive behind a SATL * translation that's fallen into a * terminally fatal state. */ if ((have_sense) && (asc != 0x25) && (asc != 0x44) && (error_code == SSD_CURRENT_ERROR)) { const char *sense_key_desc; const char *asc_desc; dasetgeom(periph, 512, -1, NULL, 0); scsi_sense_desc(sense_key, asc, ascq, &cgd.inq_data, &sense_key_desc, &asc_desc); snprintf(announce_buf, DA_ANNOUNCETMP_SZ, "Attempt to query device " "size failed: %s, %s", sense_key_desc, asc_desc); } else { if (have_sense) scsi_sense_print( &done_ccb->csio); else { xpt_print(periph->path, "got CAM status %#x\n", done_ccb->ccb_h.status); } xpt_print(periph->path, "fatal error, " "failed to attach to device\n"); announce_buf = NULL; /* * Free up resources. */ cam_periph_invalidate(periph); } } } free(csio->data_ptr, M_SCSIDA); if (announce_buf != NULL && ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) { struct sbuf sb; sbuf_new(&sb, softc->announcebuf, DA_ANNOUNCE_SZ, SBUF_FIXEDLEN); xpt_announce_periph_sbuf(periph, &sb, announce_buf); xpt_announce_quirks_sbuf(periph, &sb, softc->quirks, DA_Q_BIT_STRING); sbuf_finish(&sb); sbuf_putbuf(&sb); /* * Create our sysctl variables, now that we know * we have successfully attached. */ /* increase the refcount */ - if (da_periph_acquire(periph, DA_REF_SYSCTL) == CAM_REQ_CMP) { + if (da_periph_acquire(periph, DA_REF_SYSCTL) == 0) { taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); } else { /* XXX This message is useless! */ xpt_print(periph->path, "fatal error, " "could not acquire reference count\n"); } } /* We already probed the device.
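*/
/*
 * [Editor's sketch, not part of this revision] The capacity probe
 * above walks a small state machine: READ CAPACITY(10) escalates to
 * the 16-byte form when it saturates at 0xffffffff, and the 16-byte
 * form falls back to the 10-byte one when the drive rejects it with
 * ILLEGAL REQUEST (or CAM_REQ_INVALID). Hypothetical state names:
 */
#if 0
#include <stdint.h>

enum rc_state { RC10, RC16, RC_DONE };

static enum rc_state
next_rc_state(enum rc_state cur, int completed_ok, uint64_t maxsector,
    int illegal_request)
{
	if (cur == RC10 && completed_ok && maxsector == 0xffffffffULL)
		return (RC16);		/* > 2^32 sectors, need RC(16) */
	if (cur == RC16 && !completed_ok && illegal_request)
		return (RC10);		/* drive lacks RC(16) support */
	return (RC_DONE);
}
#endif
/*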
*/ if (softc->flags & DA_FLAG_PROBED) { daprobedone(periph, done_ccb); return; } /* Ensure re-probe doesn't see old delete. */ softc->delete_available = 0; dadeleteflag(softc, DA_DELETE_ZERO, 1); if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) { /* * Based on older SBC-3 spec revisions * any of the UNMAP methods "may" be * available via LBP given this flag so * we flag all of them as available and * then remove those which further * probes confirm aren't available * later. * * We could also check readcap(16) p_type * flag to exclude one or more invalid * write same (X) types here */ dadeleteflag(softc, DA_DELETE_WS16, 1); dadeleteflag(softc, DA_DELETE_WS10, 1); dadeleteflag(softc, DA_DELETE_UNMAP, 1); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_LBP; xpt_schedule(periph, priority); return; } xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_BDC; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_LBP: { struct scsi_vpd_logical_block_prov *lbp; lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { /* * T10/1799-D Revision 31 states at least one of these * must be supported but we don't currently enforce this. */ dadeleteflag(softc, DA_DELETE_WS16, (lbp->flags & SVPD_LBP_WS16)); dadeleteflag(softc, DA_DELETE_WS10, (lbp->flags & SVPD_LBP_WS10)); dadeleteflag(softc, DA_DELETE_UNMAP, (lbp->flags & SVPD_LBP_UNMAP)); } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } /* * Failure indicates we don't support any SBC-3 * delete methods with UNMAP */ } } free(lbp, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_BLK_LIMITS; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_BLK_LIMITS: { struct scsi_vpd_block_limits *block_limits; block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t max_txfer_len = scsi_4btoul( block_limits->max_txfer_len); uint32_t max_unmap_lba_cnt = scsi_4btoul( block_limits->max_unmap_lba_cnt); uint32_t max_unmap_blk_cnt = scsi_4btoul( block_limits->max_unmap_blk_cnt); uint32_t unmap_gran = scsi_4btoul( block_limits->opt_unmap_grain); uint32_t unmap_gran_align = scsi_4btoul( block_limits->unmap_grain_align); uint64_t ws_max_blks = scsi_8btou64( block_limits->max_write_same_length); if (max_txfer_len != 0) { softc->disk->d_maxsize = MIN(softc->maxio, (off_t)max_txfer_len * softc->params.secsize); } /* * We should already support UNMAP but we check lba * and block count to be sure */ if (max_unmap_lba_cnt != 0x00L && max_unmap_blk_cnt != 0x00L) { softc->unmap_max_lba = max_unmap_lba_cnt; softc->unmap_max_ranges = min(max_unmap_blk_cnt, UNMAP_MAX_RANGES); if (unmap_gran > 1) { softc->unmap_gran = unmap_gran; if (unmap_gran_align & 0x80000000) { softc->unmap_gran_align = unmap_gran_align & 0x7fffffff; } } } else { /* * Unexpected UNMAP limits which means the * device doesn't actually support UNMAP */ dadeleteflag(softc, DA_DELETE_UNMAP, 0); } if (ws_max_blks != 0x00L) softc->ws_max_blks = ws_max_blks; } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* 
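*/
/*
 * [Editor's sketch, not part of this revision] The Block Limits VPD
 * parsing above treats the top bit of UNMAP GRANULARITY ALIGNMENT as
 * a validity flag (UGAVALID); only the low 31 bits carry the actual
 * alignment, hence the 0x80000000 test.
 */
#if 0
#include <stdint.h>

static int
decode_unmap_align(uint32_t raw, uint32_t *align)
{
	if ((raw & 0x80000000u) == 0)
		return (0);		/* field not reported as valid */
	*align = raw & 0x7fffffffu;
	return (1);
}
#endif
/*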
Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } /* * Failure here doesn't mean UNMAP is not * supported as this is an optional page. */ softc->unmap_max_lba = 1; softc->unmap_max_ranges = 1; } } free(block_limits, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_BDC; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_BDC: { struct scsi_vpd_block_device_characteristics *bdc; bdc = (struct scsi_vpd_block_device_characteristics *) csio->data_ptr; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; /* * Disable queue sorting for non-rotational media * by default. */ u_int16_t old_rate = softc->disk->d_rotation_rate; valid_len = csio->dxfer_len - csio->resid; if (SBDC_IS_PRESENT(bdc, valid_len, medium_rotation_rate)) { softc->disk->d_rotation_rate = scsi_2btoul(bdc->medium_rotation_rate); if (softc->disk->d_rotation_rate == SVPD_BDC_RATE_NON_ROTATING) { cam_iosched_set_sort_queue( softc->cam_iosched, 0); softc->rotating = 0; } if (softc->disk->d_rotation_rate != old_rate) { disk_attr_changed(softc->disk, "GEOM::rotation_rate", M_NOWAIT); } } if ((SBDC_IS_PRESENT(bdc, valid_len, flags)) && (softc->zone_mode == DA_ZONE_NONE)) { int ata_proto; if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) ata_proto = 1; else ata_proto = 0; /* * The Zoned field will only be set for * Drive Managed and Host Aware drives. If * they are Host Managed, the device type * in the standard INQUIRY data should be * set to T_ZBC_HM (0x14). */ if ((bdc->flags & SVPD_ZBC_MASK) == SVPD_HAW_ZBC) { softc->zone_mode = DA_ZONE_HOST_AWARE; softc->zone_interface = (ata_proto) ? DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI; } else if ((bdc->flags & SVPD_ZBC_MASK) == SVPD_DM_ZBC) { softc->zone_mode =DA_ZONE_DRIVE_MANAGED; softc->zone_interface = (ata_proto) ? DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI; } else if ((bdc->flags & SVPD_ZBC_MASK) != SVPD_ZBC_NR) { xpt_print(periph->path, "Unknown zoned " "type %#x", bdc->flags & SVPD_ZBC_MASK); } } } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(bdc, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_ATA; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_ATA: { int i; struct ata_params *ata_params; int continue_probe; int error; int16_t *ptr; ata_params = (struct ata_params *)csio->data_ptr; ptr = (uint16_t *)ata_params; continue_probe = 0; error = 0; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint16_t old_rate; for (i = 0; i < sizeof(*ata_params) / 2; i++) ptr[i] = le16toh(ptr[i]); if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM && (softc->quirks & DA_Q_NO_UNMAP) == 0) { dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1); if (ata_params->max_dsm_blocks != 0) softc->trim_max_ranges = min( softc->trim_max_ranges, ata_params->max_dsm_blocks * ATA_DSM_BLK_RANGES); } /* * Disable queue sorting for non-rotational media * by default. 
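*/
/*
 * [Editor's sketch, not part of this revision] ATA IDENTIFY data is
 * 256 little-endian 16-bit words; the loop above swaps them in place
 * with le16toh(). Decoding from raw bytes is equivalent and portable.
 * The TRIM budget is then max_dsm_blocks * 64, since each 512-byte
 * DSM block holds 64 eight-byte ranges (ATA_DSM_BLK_RANGES).
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static void
ident_words_to_host(uint16_t *dst, const uint8_t *raw, size_t nwords)
{
	size_t i;

	for (i = 0; i < nwords; i++)
		dst[i] = (uint16_t)(raw[2 * i] | (raw[2 * i + 1] << 8));
}

static uint32_t
trim_range_limit(uint32_t cur_limit, uint16_t max_dsm_blocks)
{
	uint32_t ranges = (uint32_t)max_dsm_blocks * 64;

	/* A zero block count means the drive didn't say; keep ours. */
	return (ranges != 0 && ranges < cur_limit ? ranges : cur_limit);
}
#endif
/*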
*/ old_rate = softc->disk->d_rotation_rate; softc->disk->d_rotation_rate = ata_params->media_rotation_rate; if (softc->disk->d_rotation_rate == ATA_RATE_NON_ROTATING) { cam_iosched_set_sort_queue(softc->cam_iosched, 0); softc->rotating = 0; } if (softc->disk->d_rotation_rate != old_rate) { disk_attr_changed(softc->disk, "GEOM::rotation_rate", M_NOWAIT); } if (ata_params->capabilities1 & ATA_SUPPORT_DMA) softc->flags |= DA_FLAG_CAN_ATA_DMA; if (ata_params->support.extension & ATA_SUPPORT_GENLOG) softc->flags |= DA_FLAG_CAN_ATA_LOG; /* * At this point, if we have a SATA host aware drive, * we communicate via ATA passthrough unless the * SAT layer supports ZBC -> ZAC translation. In * that case, */ /* * XXX KDM figure out how to detect a host managed * SATA drive. */ if (softc->zone_mode == DA_ZONE_NONE) { /* * Note that we don't override the zone * mode or interface if it has already been * set. This is because it has either been * set as a quirk, or when we probed the * SCSI Block Device Characteristics page, * the zoned field was set. The latter * means that the SAT layer supports ZBC to * ZAC translation, and we would prefer to * use that if it is available. */ if ((ata_params->support3 & ATA_SUPPORT_ZONE_MASK) == ATA_SUPPORT_ZONE_HOST_AWARE) { softc->zone_mode = DA_ZONE_HOST_AWARE; softc->zone_interface = DA_ZONE_IF_ATA_PASS; } else if ((ata_params->support3 & ATA_SUPPORT_ZONE_MASK) == ATA_SUPPORT_ZONE_DEV_MANAGED) { softc->zone_mode =DA_ZONE_DRIVE_MANAGED; softc->zone_interface = DA_ZONE_IF_ATA_PASS; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(ata_params, M_SCSIDA); if ((softc->zone_mode == DA_ZONE_HOST_AWARE) || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { /* * If the ATA IDENTIFY failed, we could be talking * to a SCSI drive, although that seems unlikely, * since the drive did report that it supported the * ATA Information VPD page. If the ATA IDENTIFY * succeeded, and the SAT layer doesn't support * ZBC -> ZAC translation, continue on to get the * directory of ATA logs, and complete the rest of * the ZAC probe. If the SAT layer does support * ZBC -> ZAC translation, we want to use that, * and we'll probe the SCSI Zoned Block Device * Characteristics VPD page next. */ if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_LOG) && (softc->zone_interface == DA_ZONE_IF_ATA_PASS)) softc->state = DA_STATE_PROBE_ATA_LOGDIR; else softc->state = DA_STATE_PROBE_ZONE; continue_probe = 1; } if (continue_probe != 0) { xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } else daprobedone(periph, done_ccb); return; } case DA_CCB_PROBE_ATA_LOGDIR: { int error; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { error = 0; softc->valid_logdir_len = 0; bzero(&softc->ata_logdir, sizeof(softc->ata_logdir)); softc->valid_logdir_len = csio->dxfer_len - csio->resid; if (softc->valid_logdir_len > 0) bcopy(csio->data_ptr, &softc->ata_logdir, min(softc->valid_logdir_len, sizeof(softc->ata_logdir))); /* * Figure out whether the Identify Device log is * supported. The General Purpose log directory * has a header, and lists the number of pages * available for each GP log identified by the * offset into the list. 
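*/
/*
 * [Editor's sketch, not part of this revision] As the comment above
 * says, the General Purpose Log Directory is 512 bytes of
 * little-endian words: word 0 holds the version (0x0001) and word N
 * holds the page count behind log address N. The check below indexes
 * it at ATA_IDENTIFY_DATA_LOG; a generic helper looks like this:
 */
#if 0
#include <stdint.h>

static int
gp_log_page_count(const uint8_t dir[512], uint8_t log_addr)
{
	uint16_t version = (uint16_t)(dir[0] | (dir[1] << 8));

	if (log_addr == 0 || version != 0x0001)
		return (-1);	/* word 0 is the version, not a count */
	return (dir[2 * log_addr] | (dir[2 * log_addr + 1] << 8));
}
#endif
/*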
*/ if ((softc->valid_logdir_len >= ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t))) && (le16dec(softc->ata_logdir.header) == ATA_GP_LOG_DIR_VERSION) && (le16dec(&softc->ata_logdir.num_pages[ (ATA_IDENTIFY_DATA_LOG * sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){ softc->flags |= DA_FLAG_CAN_ATA_IDLOG; } else { softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG; } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA log directory, * then ATA logs are effectively not * supported even if the bit is set in the * identify data. */ softc->flags &= ~(DA_FLAG_CAN_ATA_LOG | DA_FLAG_CAN_ATA_IDLOG); if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) { softc->state = DA_STATE_PROBE_ATA_IDDIR; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } daprobedone(periph, done_ccb); return; } case DA_CCB_PROBE_ATA_IDDIR: { int error; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { off_t entries_offset, max_entries; error = 0; softc->valid_iddir_len = 0; bzero(&softc->ata_iddir, sizeof(softc->ata_iddir)); softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP | DA_FLAG_CAN_ATA_ZONE); softc->valid_iddir_len = csio->dxfer_len - csio->resid; if (softc->valid_iddir_len > 0) bcopy(csio->data_ptr, &softc->ata_iddir, min(softc->valid_iddir_len, sizeof(softc->ata_iddir))); entries_offset = __offsetof(struct ata_identify_log_pages,entries); max_entries = softc->valid_iddir_len - entries_offset; if ((softc->valid_iddir_len > (entries_offset + 1)) && (le64dec(softc->ata_iddir.header) == ATA_IDLOG_REVISION) && (softc->ata_iddir.entry_count > 0)) { int num_entries, i; num_entries = softc->ata_iddir.entry_count; num_entries = min(num_entries, softc->valid_iddir_len - entries_offset); for (i = 0; i < num_entries && i < max_entries; i++) { if (softc->ata_iddir.entries[i] == ATA_IDL_SUP_CAP) softc->flags |= DA_FLAG_CAN_ATA_SUPCAP; else if (softc->ata_iddir.entries[i]== ATA_IDL_ZDI) softc->flags |= DA_FLAG_CAN_ATA_ZONE; if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) break; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA Identify Data log * directory, then it effectively isn't * supported even if the ATA Log directory * has a non-zero number of pages present for * this log.
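*/
/*
 * [Editor's sketch, not part of this revision] The IDDIR handling
 * above scans the Identify Device Data log's page list for the
 * Supported Capabilities and Zoned Device Information pages and stops
 * early once both are seen. Stand-alone form, with page numbers
 * mirroring ATA_IDL_SUP_CAP / ATA_IDL_ZDI:
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static void
scan_iddir(const uint8_t *entries, size_t n, int *has_sup_cap,
    int *has_zdi)
{
	size_t i;

	*has_sup_cap = *has_zdi = 0;
	for (i = 0; i < n; i++) {
		if (entries[i] == 0x03)		/* Supported Capabilities */
			*has_sup_cap = 1;
		else if (entries[i] == 0x09)	/* Zoned Device Info */
			*has_zdi = 1;
		if (*has_sup_cap && *has_zdi)
			break;			/* nothing left to find */
	}
}
#endif
/*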
*/ softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) { softc->state = DA_STATE_PROBE_ATA_SUP; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } daprobedone(periph, done_ccb); return; } case DA_CCB_PROBE_ATA_SUP: { int error; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; size_t needed_size; struct ata_identify_log_sup_cap *sup_cap; error = 0; sup_cap = (struct ata_identify_log_sup_cap *) csio->data_ptr; valid_len = csio->dxfer_len - csio->resid; needed_size = __offsetof(struct ata_identify_log_sup_cap, sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap); if (valid_len >= needed_size) { uint64_t zoned, zac_cap; zoned = le64dec(sup_cap->zoned_cap); if (zoned & ATA_ZONED_VALID) { /* * This should have already been * set, because this is also in the * ATA identify data. */ if ((zoned & ATA_ZONED_MASK) == ATA_SUPPORT_ZONE_HOST_AWARE) softc->zone_mode = DA_ZONE_HOST_AWARE; else if ((zoned & ATA_ZONED_MASK) == ATA_SUPPORT_ZONE_DEV_MANAGED) softc->zone_mode = DA_ZONE_DRIVE_MANAGED; } zac_cap = le64dec(sup_cap->sup_zac_cap); if (zac_cap & ATA_SUP_ZAC_CAP_VALID) { if (zac_cap & ATA_REPORT_ZONES_SUP) softc->zone_flags |= DA_ZONE_FLAG_RZ_SUP; if (zac_cap & ATA_ND_OPEN_ZONE_SUP) softc->zone_flags |= DA_ZONE_FLAG_OPEN_SUP; if (zac_cap & ATA_ND_CLOSE_ZONE_SUP) softc->zone_flags |= DA_ZONE_FLAG_CLOSE_SUP; if (zac_cap & ATA_ND_FINISH_ZONE_SUP) softc->zone_flags |= DA_ZONE_FLAG_FINISH_SUP; if (zac_cap & ATA_ND_RWP_SUP) softc->zone_flags |= DA_ZONE_FLAG_RWP_SUP; } else { /* * This field was introduced in * ACS-4, r08 on April 28th, 2015. * If the drive firmware was written * to an earlier spec, it won't have * the field. So, assume all * commands are supported. */ softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA Identify Data * Supported Capabilities page, clear the * flag... */ softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP; /* * And clear zone capabilities. 
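*/
/*
 * [Editor's sketch, not part of this revision] The qword fields in
 * these ATA log pages carry their own "contents valid" flag, which is
 * why the code above tests ATA_ZONED_VALID / ATA_SUP_ZAC_CAP_VALID
 * before trusting the payload. Generic form, assuming the flag is
 * bit 63:
 */
#if 0
#include <stdint.h>

static int
qword_field(uint64_t raw, uint64_t payload_mask, uint64_t *out)
{
	const uint64_t valid = 1ULL << 63;

	if ((raw & valid) == 0)
		return (0);	/* drive did not fill in this field */
	*out = raw & payload_mask;
	return (1);
}
#endif
/*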
*/ softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) { softc->state = DA_STATE_PROBE_ATA_ZONE; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } daprobedone(periph, done_ccb); return; } case DA_CCB_PROBE_ATA_ZONE: { int error; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { struct ata_zoned_info_log *zi_log; uint32_t valid_len; size_t needed_size; zi_log = (struct ata_zoned_info_log *)csio->data_ptr; valid_len = csio->dxfer_len - csio->resid; needed_size = __offsetof(struct ata_zoned_info_log, version_info) + 1 + sizeof(zi_log->version_info); if (valid_len >= needed_size) { uint64_t tmpvar; tmpvar = le64dec(zi_log->zoned_cap); if (tmpvar & ATA_ZDI_CAP_VALID) { if (tmpvar & ATA_ZDI_CAP_URSWRZ) softc->zone_flags |= DA_ZONE_FLAG_URSWRZ; else softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ; } tmpvar = le64dec(zi_log->optimal_seq_zones); if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) { softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_seq_zones = (tmpvar & ATA_ZDI_OPT_SEQ_MASK); } else { softc->zone_flags &= ~DA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_seq_zones = 0; } tmpvar =le64dec(zi_log->optimal_nonseq_zones); if (tmpvar & ATA_ZDI_OPT_NS_VALID) { softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET; softc->optimal_nonseq_zones = (tmpvar & ATA_ZDI_OPT_NS_MASK); } else { softc->zone_flags &= ~DA_ZONE_FLAG_OPT_NONSEQ_SET; softc->optimal_nonseq_zones = 0; } tmpvar = le64dec(zi_log->max_seq_req_zones); if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) { softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; softc->max_seq_zones = (tmpvar & ATA_ZDI_MAX_SEQ_MASK); } else { softc->zone_flags &= ~DA_ZONE_FLAG_MAX_SEQ_SET; softc->max_seq_zones = 0; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { softc->flags &= ~DA_FLAG_CAN_ATA_ZONE; softc->flags &= ~DA_ZONE_FLAG_SET_MASK; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); daprobedone(periph, done_ccb); return; } case DA_CCB_PROBE_ZONE: { int error; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; size_t needed_len; struct scsi_vpd_zoned_bdc *zoned_bdc; error = 0; zoned_bdc = (struct scsi_vpd_zoned_bdc *) csio->data_ptr; valid_len = csio->dxfer_len - csio->resid; needed_len = __offsetof(struct scsi_vpd_zoned_bdc, max_seq_req_zones) + 1 + sizeof(zoned_bdc->max_seq_req_zones); if ((valid_len >= needed_len) && (scsi_2btoul(zoned_bdc->page_length) >= SVPD_ZBDC_PL)) { if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ) softc->zone_flags |= DA_ZONE_FLAG_URSWRZ; else softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ; softc->optimal_seq_zones = scsi_4btoul(zoned_bdc->optimal_seq_zones); softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_nonseq_zones = scsi_4btoul( zoned_bdc->optimal_nonseq_zones); softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET; softc->max_seq_zones = scsi_4btoul(zoned_bdc->max_seq_req_zones); softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; } /* * All of the zone commands are mandatory for SCSI * devices. * * XXX KDM this is valid as of September 2015. 
* Re-check this assumption once the SAT spec is * updated to support SCSI ZBC to ATA ZAC mapping. * Since ATA allows zone commands to be reported * as supported or not, this may not necessarily * be true for an ATA device behind a SAT (SCSI to * ATA Translation) layer. */ softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK; } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } daprobedone(periph, done_ccb); return; } case DA_CCB_DUMP: /* No-op. We're polling */ return; case DA_CCB_TUR: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART) return; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } xpt_release_ccb(done_ccb); da_periph_release_locked(periph, DA_REF_TUR); return; } default: break; } xpt_release_ccb(done_ccb); } static void dareprobe(struct cam_periph *periph) { struct da_softc *softc; - cam_status status; + int status; softc = (struct da_softc *)periph->softc; /* Probe in progress; don't interfere. */ if (softc->state != DA_STATE_NORMAL) return; status = da_periph_acquire(periph, DA_REF_REPROBE); - KASSERT(status == CAM_REQ_CMP, - ("dareprobe: cam_periph_acquire failed")); + KASSERT(status == 0, ("dareprobe: cam_periph_acquire failed")); softc->state = DA_STATE_PROBE_WP; xpt_schedule(periph, CAM_PRIORITY_DEV); } static int daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct da_softc *softc; struct cam_periph *periph; int error, error_code, sense_key, asc, ascq; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (ccb->csio.bio != NULL) biotrack(ccb->csio.bio, __func__); #endif periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct da_softc *)periph->softc; /* * Automatically detect devices that do not support * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs. */ error = 0; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) { error = cmd6workaround(ccb); } else if (scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (sense_key == SSD_KEY_ILLEGAL_REQUEST) error = cmd6workaround(ccb); /* * If the target replied with CAPACITY DATA HAS CHANGED UA, * query the capacity and notify upper layers. 
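*/
/*
 * [Editor's sketch, not part of this revision] daerror() below
 * open-codes the unit-attention handling; the same dispatch can be
 * written as a table. Action names are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

enum ua_action { UA_NONE, UA_REPROBE, UA_MEDIA_CHANGED };

static const struct ua_entry {
	uint8_t asc, ascq;
	enum ua_action act;
} ua_table[] = {
	{ 0x2a, 0x09, UA_REPROBE },	  /* capacity data has changed */
	{ 0x28, 0x00, UA_MEDIA_CHANGED }, /* not ready to ready change */
	{ 0x3f, 0x03, UA_REPROBE },	  /* INQUIRY data has changed */
};

static enum ua_action
ua_lookup(uint8_t asc, uint8_t ascq)
{
	size_t i;

	for (i = 0; i < sizeof(ua_table) / sizeof(ua_table[0]); i++)
		if (ua_table[i].asc == asc && ua_table[i].ascq == ascq)
			return (ua_table[i].act);
	return (UA_NONE);
}
#endif
/*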
*/ else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x2A && ascq == 0x09) { xpt_print(periph->path, "Capacity data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); sense_flags |= SF_NO_PRINT; } else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x28 && ascq == 0x00) { softc->flags &= ~DA_FLAG_PROBED; disk_media_changed(softc->disk, M_NOWAIT); } else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x3F && ascq == 0x03) { xpt_print(periph->path, "INQUIRY data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); sense_flags |= SF_NO_PRINT; } else if (sense_key == SSD_KEY_NOT_READY && asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { softc->flags |= DA_FLAG_PACK_INVALID; disk_media_gone(softc->disk, M_NOWAIT); } } if (error == ERESTART) return (ERESTART); #ifdef CAM_IO_STATS switch (ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_CMD_TIMEOUT: softc->timeouts++; break; case CAM_REQ_ABORTED: case CAM_REQ_CMP_ERR: case CAM_REQ_TERMIO: case CAM_UNREC_HBA_ERROR: case CAM_DATA_RUN_ERR: softc->errors++; break; default: break; } #endif /* * XXX * Until we have a better way of doing pack validation, * don't treat UAs as errors. */ sense_flags |= SF_RETRY_UA; if (softc->quirks & DA_Q_RETRY_BUSY) sense_flags |= SF_RETRY_BUSY; return(cam_periph_error(ccb, cam_flags, sense_flags)); } static void damediapoll(void *arg) { struct cam_periph *periph = arg; struct da_softc *softc = periph->softc; if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) && LIST_EMPTY(&softc->pending_ccbs)) { - if (da_periph_acquire(periph, DA_REF_TUR) == CAM_REQ_CMP) { + if (da_periph_acquire(periph, DA_REF_TUR) == 0) { cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); daschedule(periph); } } /* Queue us up again */ if (da_poll_period != 0) callout_schedule(&softc->mediapoll_c, da_poll_period * hz); } static void daprevent(struct cam_periph *periph, int action) { struct da_softc *softc; union ccb *ccb; int error; softc = (struct da_softc *)periph->softc; if (((action == PR_ALLOW) && (softc->flags & DA_FLAG_PACK_LOCKED) == 0) || ((action == PR_PREVENT) && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) { return; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_prevent(&ccb->csio, /*retries*/1, /*cbcfp*/dadone, MSG_SIMPLE_Q_TAG, action, SSD_FULL_SIZE, 5000); error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat); if (error == 0) { if (action == PR_ALLOW) softc->flags &= ~DA_FLAG_PACK_LOCKED; else softc->flags |= DA_FLAG_PACK_LOCKED; } xpt_release_ccb(ccb); } static void dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len) { struct ccb_calc_geometry ccg; struct da_softc *softc; struct disk_params *dp; u_int lbppbe, lalba; int error; softc = (struct da_softc *)periph->softc; dp = &softc->params; dp->secsize = block_len; dp->sectors = maxsector + 1; if (rcaplong != NULL) { lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE; lalba = scsi_2btoul(rcaplong->lalba_lbp); lalba &= SRC16_LALBA_A; } else { lbppbe = 0; lalba = 0; } if (lbppbe > 0) { dp->stripesize = block_len << lbppbe; dp->stripeoffset = (dp->stripesize - block_len * lalba) % dp->stripesize; } else if (softc->quirks & DA_Q_4K) { dp->stripesize = 4096; dp->stripeoffset = 0; } else if (softc->unmap_gran != 0) { dp->stripesize = block_len * softc->unmap_gran; dp->stripeoffset = (dp->stripesize - block_len * softc->unmap_gran_align) % dp->stripesize; } else { 
dp->stripesize = 0; dp->stripeoffset = 0; } /* * Have the controller provide us with a geometry * for this disk. The only time the geometry * matters is when we boot and the controller * is the only one knowledgeable enough to come * up with something that will make this a bootable * device. */ xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL); ccg.ccb_h.func_code = XPT_CALC_GEOMETRY; ccg.block_size = dp->secsize; ccg.volume_size = dp->sectors; ccg.heads = 0; ccg.secs_per_track = 0; ccg.cylinders = 0; xpt_action((union ccb*)&ccg); if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { /* * We don't know what went wrong here- but just pick * a geometry so we don't have nasty things like divide * by zero. */ dp->heads = 255; dp->secs_per_track = 255; dp->cylinders = dp->sectors / (255 * 255); if (dp->cylinders == 0) { dp->cylinders = 1; } } else { dp->heads = ccg.heads; dp->secs_per_track = ccg.secs_per_track; dp->cylinders = ccg.cylinders; } /* * If the user supplied a read capacity buffer, and if it is * different than the previous buffer, update the data in the EDT. * If it's the same, we don't bother. This avoids sending an * update every time someone opens this device. */ if ((rcaplong != NULL) && (bcmp(rcaplong, &softc->rcaplong, min(sizeof(softc->rcaplong), rcap_len)) != 0)) { struct ccb_dev_advinfo cdai; xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.buftype = CDAI_TYPE_RCAPLONG; cdai.flags = CDAI_FLAG_STORE; cdai.bufsiz = rcap_len; cdai.buf = (uint8_t *)rcaplong; xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if (cdai.ccb_h.status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: failed to set read " "capacity advinfo\n", __func__); /* Use cam_error_print() to decode the status */ cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS, CAM_EPF_ALL); } else { bcopy(rcaplong, &softc->rcaplong, min(sizeof(softc->rcaplong), rcap_len)); } } softc->disk->d_sectorsize = softc->params.secsize; softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors; softc->disk->d_stripesize = softc->params.stripesize; softc->disk->d_stripeoffset = softc->params.stripeoffset; /* XXX: these are not actually "firmware" values, so they may be wrong */ softc->disk->d_fwsectors = softc->params.secs_per_track; softc->disk->d_fwheads = softc->params.heads; softc->disk->d_devstat->block_size = softc->params.secsize; softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE; error = disk_resize(softc->disk, M_NOWAIT); if (error != 0) xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error); } static void dasendorderedtag(void *arg) { struct da_softc *softc = arg; if (da_send_ordered) { if (!LIST_EMPTY(&softc->pending_ccbs)) { if ((softc->flags & DA_FLAG_WAS_OTAG) == 0) softc->flags |= DA_FLAG_NEED_OTAG; softc->flags &= ~DA_FLAG_WAS_OTAG; } } /* Queue us up again */ callout_reset(&softc->sendordered_c, (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, dasendorderedtag, softc); } /* * Step through all DA peripheral drivers, and if the device is still open, * sync the disk cache to physical media. */ static void dashutdown(void * arg, int howto) { struct cam_periph *periph; struct da_softc *softc; union ccb *ccb; int error; CAM_PERIPH_FOREACH(periph, &dadriver) { softc = (struct da_softc *)periph->softc; if (SCHEDULER_STOPPED()) { /* If we paniced with the lock held, do not recurse. 
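*/
/*
 * [Editor's sketch, not part of this revision] The geometry fallback
 * in dasetgeom() above fabricates 255 heads by 255 sectors per track
 * and derives cylinders from the capacity, clamping to at least 1 so
 * nothing downstream divides by zero.
 */
#if 0
#include <stdint.h>

static void
fallback_chs(uint64_t sectors, uint32_t *cyls, uint32_t *heads,
    uint32_t *secs)
{
	*heads = 255;
	*secs = 255;
	*cyls = (uint32_t)(sectors / (255 * 255));
	if (*cyls == 0)
		*cyls = 1;
}
#endif
/*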
*/ if (!cam_periph_owned(periph) && (softc->flags & DA_FLAG_OPEN)) { dadump(softc->disk, NULL, 0, 0, 0); } continue; } cam_periph_lock(periph); /* * We only sync the cache if the drive is still open, and * if the drive is capable of it.. */ if (((softc->flags & DA_FLAG_OPEN) == 0) || (softc->quirks & DA_Q_NO_SYNC_CACHE)) { cam_periph_unlock(periph); continue; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_synchronize_cache(&ccb->csio, /*retries*/0, /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG, /*begin_lba*/0, /* whole disk */ /*lb_count*/0, SSD_FULL_SIZE, 60 * 60 * 1000); error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); xpt_release_ccb(ccb); cam_periph_unlock(periph); } } #else /* !_KERNEL */ /* * XXX These are only left out of the kernel build to silence warnings. If, * for some reason these functions are used in the kernel, the ifdefs should * be moved so they are included both in the kernel and userland. */ void scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_format_unit *scsi_cmd; scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = FORMAT_UNIT; scsi_cmd->byte2 = byte2; scsi_ulto2b(ileave, scsi_cmd->interleave); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t list_format, uint32_t addr_desc_index, uint8_t *data_ptr, uint32_t dxfer_len, int minimum_cmd_size, uint8_t sense_len, uint32_t timeout) { uint8_t cdb_len; /* * These conditions allow using the 10 byte command. Otherwise we * need to use the 12 byte command. */ if ((minimum_cmd_size <= 10) && (addr_desc_index == 0) && (dxfer_len <= SRDD10_MAX_LENGTH)) { struct scsi_read_defect_data_10 *cdb10; cdb10 = (struct scsi_read_defect_data_10 *) &csio->cdb_io.cdb_bytes; cdb_len = sizeof(*cdb10); bzero(cdb10, cdb_len); cdb10->opcode = READ_DEFECT_DATA_10; cdb10->format = list_format; scsi_ulto2b(dxfer_len, cdb10->alloc_length); } else { struct scsi_read_defect_data_12 *cdb12; cdb12 = (struct scsi_read_defect_data_12 *) &csio->cdb_io.cdb_bytes; cdb_len = sizeof(*cdb12); bzero(cdb12, cdb_len); cdb12->opcode = READ_DEFECT_DATA_12; cdb12->format = list_format; scsi_ulto4b(dxfer_len, cdb12->alloc_length); scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index); } cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, data_ptr, dxfer_len, sense_len, cdb_len, timeout); } void scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int16_t control, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_sanitize *scsi_cmd; scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = SANITIZE; scsi_cmd->byte2 = byte2; scsi_cmd->control = control; scsi_ulto2b(dxfer_len, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? 
CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } #endif /* _KERNEL */ void scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t service_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len, uint32_t timeout) { struct scsi_zbc_out *scsi_cmd; scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = ZBC_OUT; scsi_cmd->service_action = service_action; scsi_u64to8b(zone_id, scsi_cmd->zone_id); scsi_cmd->zone_flags = zone_flags; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba, uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len, uint32_t timeout) { struct scsi_zbc_in *scsi_cmd; scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = ZBC_IN; scsi_cmd->service_action = service_action; scsi_ulto4b(dxfer_len, scsi_cmd->length); scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba); scsi_cmd->zone_options = zone_options; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } int scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int use_ncq, uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t *cdb_storage, size_t cdb_storage_len, uint8_t sense_len, uint32_t timeout) { uint8_t command_out, protocol, ata_flags; uint16_t features_out; uint32_t sectors_out, auxiliary; int retval; retval = 0; if (use_ncq == 0) { command_out = ATA_ZAC_MANAGEMENT_OUT; features_out = (zm_action & 0xf) | (zone_flags << 8); ata_flags = AP_FLAG_BYT_BLOK_BLOCKS; if (dxfer_len == 0) { protocol = AP_PROTO_NON_DATA; ata_flags |= AP_FLAG_TLEN_NO_DATA; sectors_out = 0; } else { protocol = AP_PROTO_DMA; ata_flags |= AP_FLAG_TLEN_SECT_CNT | AP_FLAG_TDIR_TO_DEV; sectors_out = ((dxfer_len >> 9) & 0xffff); } auxiliary = 0; } else { ata_flags = AP_FLAG_BYT_BLOK_BLOCKS; if (dxfer_len == 0) { command_out = ATA_NCQ_NON_DATA; features_out = ATA_NCQ_ZAC_MGMT_OUT; /* * We're assuming the SCSI to ATA translation layer * will set the NCQ tag number in the tag field. * That isn't clear from the SAT-4 spec (as of rev 05). */ sectors_out = 0; ata_flags |= AP_FLAG_TLEN_NO_DATA; } else { command_out = ATA_SEND_FPDMA_QUEUED; /* * Note that we're defaulting to normal priority, * and assuming that the SCSI to ATA translation * layer will insert the NCQ tag number in the tag * field. That isn't clear in the SAT-4 spec (as * of rev 05). */ sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8; ata_flags |= AP_FLAG_TLEN_FEAT | AP_FLAG_TDIR_TO_DEV; /* * For SEND FPDMA QUEUED, the transfer length is * encoded in the FEATURE register, and 0 means * that 65536 512 byte blocks are to be transferred. * In practice, it seems unlikely that we'll see * a transfer that large, and it may confuse the * SAT layer, because generally that means that * 0 bytes should be transferred.
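*/
/*
 * [Editor's sketch, not part of this revision] As the comment above
 * explains, SEND/RECEIVE FPDMA QUEUED carry the 512-byte block count
 * in the FEATURES register, where 0 encodes 65536 blocks and anything
 * larger cannot be represented at all. Mirrored as a helper:
 */
#if 0
#include <stdint.h>

static int
fpdma_feature_count(uint32_t dxfer_len, uint16_t *features)
{
	if (dxfer_len == 65536u * 512) {
		*features = 0;			/* the special encoding */
		return (0);
	}
	if (dxfer_len <= 65535u * 512) {
		*features = (uint16_t)(dxfer_len >> 9);
		return (0);
	}
	return (-1);				/* too large to encode */
}
#endif
/*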
*/ if (dxfer_len == (65536 * 512)) { features_out = 0; } else if (dxfer_len <= (65535 * 512)) { features_out = ((dxfer_len >> 9) & 0xffff); } else { /* The transfer is too big. */ retval = 1; goto bailout; } auxiliary = (zm_action & 0xf) | (zone_flags << 8); protocol = AP_PROTO_FPDMA; } protocol |= AP_EXTEND; retval = scsi_ata_pass(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, /*protocol*/ protocol, /*ata_flags*/ ata_flags, /*features*/ features_out, /*sector_count*/ sectors_out, /*lba*/ zone_id, /*command*/ command_out, /*device*/ 0, /*icc*/ 0, /*auxiliary*/ auxiliary, /*control*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*cdb_storage*/ cdb_storage, /*cdb_storage_len*/ cdb_storage_len, /*minimum_cmd_size*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout); bailout: return (retval); } int scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int use_ncq, uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t *cdb_storage, size_t cdb_storage_len, uint8_t sense_len, uint32_t timeout) { uint8_t command_out, protocol; uint16_t features_out, sectors_out; uint32_t auxiliary; int ata_flags; int retval; retval = 0; ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS; if (use_ncq == 0) { command_out = ATA_ZAC_MANAGEMENT_IN; /* XXX KDM put a macro here */ features_out = (zm_action & 0xf) | (zone_flags << 8); sectors_out = dxfer_len >> 9; /* XXX KDM macro */ protocol = AP_PROTO_DMA; ata_flags |= AP_FLAG_TLEN_SECT_CNT; auxiliary = 0; } else { ata_flags |= AP_FLAG_TLEN_FEAT; command_out = ATA_RECV_FPDMA_QUEUED; sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8; /* * For RECEIVE FPDMA QUEUED, the transfer length is * encoded in the FEATURE register, and 0 means * that 65536 512 byte blocks are to be transferred. * In practice, it seems unlikely that we'll see * a transfer that large, and it may confuse the * SAT layer, because generally that means that * 0 bytes should be transferred. */ if (dxfer_len == (65536 * 512)) { features_out = 0; } else if (dxfer_len <= (65535 * 512)) { features_out = ((dxfer_len >> 9) & 0xffff); } else { /* The transfer is too big. */ retval = 1; goto bailout; } auxiliary = (zm_action & 0xf) | (zone_flags << 8); protocol = AP_PROTO_FPDMA; } protocol |= AP_EXTEND; retval = scsi_ata_pass(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, /*protocol*/ protocol, /*ata_flags*/ ata_flags, /*features*/ features_out, /*sector_count*/ sectors_out, /*lba*/ zone_id, /*command*/ command_out, /*device*/ 0, /*icc*/ 0, /*auxiliary*/ auxiliary, /*control*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */ /*cdb_storage*/ cdb_storage, /*cdb_storage_len*/ cdb_storage_len, /*minimum_cmd_size*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout); bailout: return (retval); } Index: head/sys/cam/scsi/scsi_enc.c =================================================================== --- head/sys/cam/scsi/scsi_enc.c (revision 328917) +++ head/sys/cam/scsi/scsi_enc.c (revision 328918) @@ -1,1046 +1,1046 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2000 Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_ses.h" MALLOC_DEFINE(M_SCSIENC, "SCSI ENC", "SCSI ENC buffers"); /* Enclosure type independent driver */ static d_open_t enc_open; static d_close_t enc_close; static d_ioctl_t enc_ioctl; static periph_init_t enc_init; static periph_ctor_t enc_ctor; static periph_oninv_t enc_oninvalidate; static periph_dtor_t enc_dtor; static void enc_async(void *, uint32_t, struct cam_path *, void *); static enctyp enc_type(struct ccb_getdev *); SYSCTL_NODE(_kern_cam, OID_AUTO, enc, CTLFLAG_RD, 0, "CAM Enclosure Services driver"); static struct periph_driver encdriver = { enc_init, "ses", TAILQ_HEAD_INITIALIZER(encdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(enc, encdriver); static struct cdevsw enc_cdevsw = { .d_version = D_VERSION, .d_open = enc_open, .d_close = enc_close, .d_ioctl = enc_ioctl, .d_name = "ses", .d_flags = D_TRACKCLOSE, }; static void enc_init(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, enc_async, NULL, NULL); if (status != CAM_REQ_CMP) { printf("enc: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void enc_devgonecb(void *arg) { struct cam_periph *periph; struct enc_softc *enc; struct mtx *mtx; int i; periph = (struct cam_periph *)arg; mtx = cam_periph_mtx(periph); mtx_lock(mtx); enc = (struct enc_softc *)periph->softc; /* * When we get this callback, we will get no more close calls from * devfs. So if we have any dangling opens, we need to release the * reference held for that particular context. */ for (i = 0; i < enc->open_count; i++) cam_periph_release_locked(periph); enc->open_count = 0; /* * Release the reference held for the device node, it is gone now. */ cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. 
*/ mtx_unlock(mtx); } static void enc_oninvalidate(struct cam_periph *periph) { struct enc_softc *enc; enc = periph->softc; enc->enc_flags |= ENC_FLAG_INVALID; /* If the sub-driver has an invalidate routine, call it */ if (enc->enc_vec.softc_invalidate != NULL) enc->enc_vec.softc_invalidate(enc); /* * Unregister any async callbacks. */ xpt_register_async(0, enc_async, periph, periph->path); /* * Shutdown our daemon. */ enc->enc_flags |= ENC_FLAG_SHUTDOWN; if (enc->enc_daemon != NULL) { /* Signal the ses daemon to terminate. */ wakeup(enc->enc_daemon); } callout_drain(&enc->status_updater); destroy_dev_sched_cb(enc->enc_dev, enc_devgonecb, periph); } static void enc_dtor(struct cam_periph *periph) { struct enc_softc *enc; enc = periph->softc; /* If the sub-driver has a cleanup routine, call it */ if (enc->enc_vec.softc_cleanup != NULL) enc->enc_vec.softc_cleanup(enc); if (enc->enc_boot_hold_ch.ich_func != NULL) { config_intrhook_disestablish(&enc->enc_boot_hold_ch); enc->enc_boot_hold_ch.ich_func = NULL; } ENC_FREE(enc); } static void enc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch(code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; path_id_t path_id; cgd = (struct ccb_getdev *)arg; if (arg == NULL) { break; } if (enc_type(cgd) == ENC_NONE) { /* * Schedule announcement of the ENC bindings for * this device if it is managed by a SEP. */ path_id = xpt_path_path_id(path); xpt_lock_buses(); TAILQ_FOREACH(periph, &encdriver.units, unit_links) { struct enc_softc *softc; softc = (struct enc_softc *)periph->softc; if (xpt_path_path_id(periph->path) != path_id || softc == NULL || (softc->enc_flags & ENC_FLAG_INITIALIZED) == 0 || softc->enc_vec.device_found == NULL) continue; softc->enc_vec.device_found(softc); } xpt_unlock_buses(); return; } status = cam_periph_alloc(enc_ctor, enc_oninvalidate, enc_dtor, NULL, "ses", CAM_PERIPH_BIO, path, enc_async, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) { printf("enc_async: Unable to probe new device due to " "status 0x%x\n", status); } break; } default: cam_periph_async(periph, code, path, arg); break; } } static int enc_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct enc_softc *softc; int error = 0; periph = (struct cam_periph *)dev->si_drv1; - if (cam_periph_acquire(periph) != CAM_REQ_CMP) + if (cam_periph_acquire(periph) != 0) return (ENXIO); cam_periph_lock(periph); softc = (struct enc_softc *)periph->softc; if ((softc->enc_flags & ENC_FLAG_INITIALIZED) == 0) { error = ENXIO; goto out; } if (softc->enc_flags & ENC_FLAG_INVALID) { error = ENXIO; goto out; } out: if (error != 0) cam_periph_release_locked(periph); else softc->open_count++; cam_periph_unlock(periph); return (error); } static int enc_close(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct enc_softc *enc; struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; mtx = cam_periph_mtx(periph); mtx_lock(mtx); enc = periph->softc; enc->open_count--; cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. 
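*/
/*
 * [Editor's note] The change running through this revision is that
 * cam_periph_acquire() and the da_periph_acquire() wrapper now return
 * an errno-style int rather than a cam_status, so success is 0 instead
 * of CAM_REQ_CMP (which is nonzero). The updated call-site pattern, as
 * in enc_open() above:
 */
#if 0
	if (cam_periph_acquire(periph) != 0)	/* errno convention */
		return (ENXIO);			/* no reference taken */
#endif
/*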
* * cam_periph_release() avoids this problem using the same method, * but we're manually acquiring and dropping the lock here to * protect the open count and avoid another lock acquisition and * release. */ mtx_unlock(mtx); return (0); } int enc_error(union ccb *ccb, uint32_t cflags, uint32_t sflags) { struct enc_softc *softc; struct cam_periph *periph; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct enc_softc *)periph->softc; return (cam_periph_error(ccb, cflags, sflags)); } static int enc_ioctl(struct cdev *dev, u_long cmd, caddr_t arg_addr, int flag, struct thread *td) { struct cam_periph *periph; encioc_enc_status_t tmp; encioc_string_t sstr; encioc_elm_status_t elms; encioc_elm_desc_t elmd; encioc_elm_devnames_t elmdn; encioc_element_t *uelm; enc_softc_t *enc; enc_cache_t *cache; void *addr; int error, i; if (arg_addr) addr = *((caddr_t *) arg_addr); else addr = NULL; periph = (struct cam_periph *)dev->si_drv1; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering encioctl\n")); cam_periph_lock(periph); enc = (struct enc_softc *)periph->softc; cache = &enc->enc_cache; /* * Now check to see whether we're initialized or not. * This actually should never fail as we're not supposed * to get past enc_open w/o successfully initializing * things. */ if ((enc->enc_flags & ENC_FLAG_INITIALIZED) == 0) { cam_periph_unlock(periph); return (ENXIO); } cam_periph_unlock(periph); error = 0; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("trying to do ioctl %#lx\n", cmd)); /* * If this command can change the device's state, * we must have the device open for writing. * * For commands that get information about the * device- we don't need to lock the peripheral * if we aren't running a command. The periph * also can't go away while a user process has * it open. */ switch (cmd) { case ENCIOC_GETNELM: case ENCIOC_GETELMMAP: case ENCIOC_GETENCSTAT: case ENCIOC_GETELMSTAT: case ENCIOC_GETELMDESC: case ENCIOC_GETELMDEVNAMES: case ENCIOC_GETENCNAME: case ENCIOC_GETENCID: break; default: if ((flag & FWRITE) == 0) { return (EBADF); } } /* * XXX The values read here are only valid for the current * configuration generation. We need these ioctls * to also pass in/out a generation number. 
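* Without a generation number, an element index that was valid when the caller issued ENCIOC_GETNELM may name a different element by the time a follow-up ENCIOC_GETELMSTAT arrives, if the cache was rebuilt in between.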
*/ sx_slock(&enc->enc_cache_lock); switch (cmd) { case ENCIOC_GETNELM: error = copyout(&cache->nelms, addr, sizeof (cache->nelms)); break; case ENCIOC_GETELMMAP: for (uelm = addr, i = 0; i != cache->nelms; i++) { encioc_element_t kelm; kelm.elm_idx = i; kelm.elm_subenc_id = cache->elm_map[i].subenclosure; kelm.elm_type = cache->elm_map[i].enctype; error = copyout(&kelm, &uelm[i], sizeof(kelm)); if (error) break; } break; case ENCIOC_GETENCSTAT: cam_periph_lock(periph); error = enc->enc_vec.get_enc_status(enc, 1); if (error) { cam_periph_unlock(periph); break; } tmp = cache->enc_status; cam_periph_unlock(periph); error = copyout(&tmp, addr, sizeof(tmp)); cache->enc_status = tmp; break; case ENCIOC_SETENCSTAT: error = copyin(addr, &tmp, sizeof(tmp)); if (error) break; cam_periph_lock(periph); error = enc->enc_vec.set_enc_status(enc, tmp, 1); cam_periph_unlock(periph); break; case ENCIOC_GETSTRING: case ENCIOC_SETSTRING: case ENCIOC_GETENCNAME: case ENCIOC_GETENCID: if (enc->enc_vec.handle_string == NULL) { error = EINVAL; break; } error = copyin(addr, &sstr, sizeof(sstr)); if (error) break; cam_periph_lock(periph); error = enc->enc_vec.handle_string(enc, &sstr, cmd); cam_periph_unlock(periph); break; case ENCIOC_GETELMSTAT: error = copyin(addr, &elms, sizeof(elms)); if (error) break; if (elms.elm_idx >= cache->nelms) { error = EINVAL; break; } cam_periph_lock(periph); error = enc->enc_vec.get_elm_status(enc, &elms, 1); cam_periph_unlock(periph); if (error) break; error = copyout(&elms, addr, sizeof(elms)); break; case ENCIOC_GETELMDESC: error = copyin(addr, &elmd, sizeof(elmd)); if (error) break; if (elmd.elm_idx >= cache->nelms) { error = EINVAL; break; } if (enc->enc_vec.get_elm_desc != NULL) { error = enc->enc_vec.get_elm_desc(enc, &elmd); if (error) break; } else elmd.elm_desc_len = 0; error = copyout(&elmd, addr, sizeof(elmd)); break; case ENCIOC_GETELMDEVNAMES: if (enc->enc_vec.get_elm_devnames == NULL) { error = EINVAL; break; } error = copyin(addr, &elmdn, sizeof(elmdn)); if (error) break; if (elmdn.elm_idx >= cache->nelms) { error = EINVAL; break; } cam_periph_lock(periph); error = (*enc->enc_vec.get_elm_devnames)(enc, &elmdn); cam_periph_unlock(periph); if (error) break; error = copyout(&elmdn, addr, sizeof(elmdn)); break; case ENCIOC_SETELMSTAT: error = copyin(addr, &elms, sizeof(elms)); if (error) break; if (elms.elm_idx >= cache->nelms) { error = EINVAL; break; } cam_periph_lock(periph); error = enc->enc_vec.set_elm_status(enc, &elms, 1); cam_periph_unlock(periph); break; case ENCIOC_INIT: cam_periph_lock(periph); error = enc->enc_vec.init_enc(enc); cam_periph_unlock(periph); break; default: cam_periph_lock(periph); error = cam_periph_ioctl(periph, cmd, arg_addr, enc_error); cam_periph_unlock(periph); break; } sx_sunlock(&enc->enc_cache_lock); return (error); } int enc_runcmd(struct enc_softc *enc, char *cdb, int cdbl, char *dptr, int *dlenp) { int error, dlen, tdlen; ccb_flags ddf; union ccb *ccb; CAM_DEBUG(enc->periph->path, CAM_DEBUG_TRACE, ("entering enc_runcmd\n")); if (dptr) { if ((dlen = *dlenp) < 0) { dlen = -dlen; ddf = CAM_DIR_OUT; } else { ddf = CAM_DIR_IN; } } else { dlen = 0; ddf = CAM_DIR_NONE; } if (cdbl > IOCDBLEN) { cdbl = IOCDBLEN; } ccb = cam_periph_getccb(enc->periph, CAM_PRIORITY_NORMAL); if (enc->enc_type == ENC_SEMB_SES || enc->enc_type == ENC_SEMB_SAFT) { tdlen = min(dlen, 1020); tdlen = (tdlen + 3) & ~3; cam_fill_ataio(&ccb->ataio, 0, NULL, ddf, 0, dptr, tdlen, 30 * 1000); if (cdb[0] == RECEIVE_DIAGNOSTIC) ata_28bit_cmd(&ccb->ataio, ATA_SEP_ATTN, cdb[2], 
0x02, tdlen / 4); else if (cdb[0] == SEND_DIAGNOSTIC) ata_28bit_cmd(&ccb->ataio, ATA_SEP_ATTN, dlen > 0 ? dptr[0] : 0, 0x82, tdlen / 4); else if (cdb[0] == READ_BUFFER) ata_28bit_cmd(&ccb->ataio, ATA_SEP_ATTN, cdb[2], 0x00, tdlen / 4); else ata_28bit_cmd(&ccb->ataio, ATA_SEP_ATTN, dlen > 0 ? dptr[0] : 0, 0x80, tdlen / 4); } else { tdlen = dlen; cam_fill_csio(&ccb->csio, 0, NULL, ddf, MSG_SIMPLE_Q_TAG, dptr, dlen, sizeof (struct scsi_sense_data), cdbl, 60 * 1000); bcopy(cdb, ccb->csio.cdb_io.cdb_bytes, cdbl); } error = cam_periph_runccb(ccb, enc_error, ENC_CFLAGS, ENC_FLAGS, NULL); if (error) { if (dptr) { *dlenp = dlen; } } else { if (dptr) { if (ccb->ccb_h.func_code == XPT_ATA_IO) *dlenp = ccb->ataio.resid; else *dlenp = ccb->csio.resid; *dlenp += tdlen - dlen; } } xpt_release_ccb(ccb); CAM_DEBUG(enc->periph->path, CAM_DEBUG_SUBTRACE, ("exiting enc_runcmd: *dlenp = %d\n", *dlenp)); return (error); } void enc_log(struct enc_softc *enc, const char *fmt, ...) { va_list ap; printf("%s%d: ", enc->periph->periph_name, enc->periph->unit_number); va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); } /* * The code after this point runs on many platforms, * so forgive the slightly awkward and nonconforming * appearance. */ /* * Is this a device that supports enclosure services? * * It's a pretty simple ruleset- if it is device type * 0x0D (13), it's an ENCLOSURE device. */ #define SAFTE_START 44 #define SAFTE_END 50 #define SAFTE_LEN (SAFTE_END - SAFTE_START) static enctyp enc_type(struct ccb_getdev *cgd) { int buflen; unsigned char *iqd; if (cgd->protocol == PROTO_SEMB) { iqd = (unsigned char *)&cgd->ident_data; if (STRNCMP(iqd + 43, "S-E-S", 5) == 0) return (ENC_SEMB_SES); else if (STRNCMP(iqd + 43, "SAF-TE", 6) == 0) return (ENC_SEMB_SAFT); return (ENC_NONE); } else if (cgd->protocol != PROTO_SCSI) return (ENC_NONE); iqd = (unsigned char *)&cgd->inq_data; buflen = min(sizeof(cgd->inq_data), SID_ADDITIONAL_LENGTH(&cgd->inq_data)); if ((iqd[0] & 0x1f) == T_ENCLOSURE) { if ((iqd[2] & 0x7) > 2) { return (ENC_SES); } else { return (ENC_SES_SCSI2); } return (ENC_NONE); } #ifdef SES_ENABLE_PASSTHROUGH if ((iqd[6] & 0x40) && (iqd[2] & 0x7) >= 2) { /* * PassThrough Device. */ return (ENC_SES_PASSTHROUGH); } #endif /* * The comparison is short for a reason- * some vendors were chopping it short. */ if (buflen < SAFTE_END - 2) { return (ENC_NONE); } if (STRNCMP((char *)&iqd[SAFTE_START], "SAF-TE", SAFTE_LEN - 2) == 0) { return (ENC_SAFT); } return (ENC_NONE); } /*================== Enclosure Monitoring/Processing Daemon ==================*/ /** * \brief Queue an update request for a given action, if needed. * * \param enc SES softc to queue the request for. * \param action Action requested. */ void enc_update_request(enc_softc_t *enc, uint32_t action) { if ((enc->pending_actions & (0x1 << action)) == 0) { enc->pending_actions |= (0x1 << action); ENC_DLOG(enc, "%s: queueing requested action %d\n", __func__, action); if (enc->current_action == ENC_UPDATE_NONE) wakeup(enc->enc_daemon); } else { ENC_DLOG(enc, "%s: ignoring requested action %d - " "Already queued\n", __func__, action); } } /** * \brief Invoke the handler of the highest priority pending * state in the SES state machine. * * \param enc The SES instance invoking the state machine.
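* * Pending actions are kept as a bitmask; ffs() selects the lowest set bit, so lower-numbered actions are serviced first.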
*/ static void enc_fsm_step(enc_softc_t *enc) { union ccb *ccb; uint8_t *buf; struct enc_fsm_state *cur_state; int error; uint32_t xfer_len; ENC_DLOG(enc, "%s enter %p\n", __func__, enc); enc->current_action = ffs(enc->pending_actions) - 1; enc->pending_actions &= ~(0x1 << enc->current_action); cur_state = &enc->enc_fsm_states[enc->current_action]; buf = NULL; if (cur_state->buf_size != 0) { cam_periph_unlock(enc->periph); buf = malloc(cur_state->buf_size, M_SCSIENC, M_WAITOK|M_ZERO); cam_periph_lock(enc->periph); } error = 0; ccb = NULL; if (cur_state->fill != NULL) { ccb = cam_periph_getccb(enc->periph, CAM_PRIORITY_NORMAL); error = cur_state->fill(enc, cur_state, ccb, buf); if (error != 0) goto done; error = cam_periph_runccb(ccb, cur_state->error, ENC_CFLAGS, ENC_FLAGS|SF_QUIET_IR, NULL); } if (ccb != NULL) { if (ccb->ccb_h.func_code == XPT_ATA_IO) xfer_len = ccb->ataio.dxfer_len - ccb->ataio.resid; else xfer_len = ccb->csio.dxfer_len - ccb->csio.resid; } else xfer_len = 0; cam_periph_unlock(enc->periph); cur_state->done(enc, cur_state, ccb, &buf, error, xfer_len); cam_periph_lock(enc->periph); done: ENC_DLOG(enc, "%s exit - result %d\n", __func__, error); ENC_FREE_AND_NULL(buf); if (ccb != NULL) xpt_release_ccb(ccb); } /** * \invariant Called with cam_periph mutex held. */ static void enc_status_updater(void *arg) { enc_softc_t *enc; enc = arg; if (enc->enc_vec.poll_status != NULL) enc->enc_vec.poll_status(enc); } static void enc_daemon(void *arg) { enc_softc_t *enc; enc = arg; cam_periph_lock(enc->periph); while ((enc->enc_flags & ENC_FLAG_SHUTDOWN) == 0) { if (enc->pending_actions == 0) { struct intr_config_hook *hook; /* * Reset callout and msleep, or * issue timed task completion * status command. */ enc->current_action = ENC_UPDATE_NONE; /* * We've been through our state machine at least * once. Allow the transition to userland. */ hook = &enc->enc_boot_hold_ch; if (hook->ich_func != NULL) { config_intrhook_disestablish(hook); hook->ich_func = NULL; } callout_reset(&enc->status_updater, 60*hz, enc_status_updater, enc); cam_periph_sleep(enc->periph, enc->enc_daemon, PUSER, "idle", 0); } else { enc_fsm_step(enc); } } enc->enc_daemon = NULL; cam_periph_unlock(enc->periph); cam_periph_release(enc->periph); kproc_exit(0); } static int enc_kproc_init(enc_softc_t *enc) { int result; callout_init_mtx(&enc->status_updater, cam_periph_mtx(enc->periph), 0); - if (cam_periph_acquire(enc->periph) != CAM_REQ_CMP) + if (cam_periph_acquire(enc->periph) != 0) return (ENXIO); result = kproc_create(enc_daemon, enc, &enc->enc_daemon, /*flags*/0, /*stackpgs*/0, "enc_daemon%d", enc->periph->unit_number); if (result == 0) { /* Do an initial load of all page data. */ cam_periph_lock(enc->periph); enc->enc_vec.poll_status(enc); cam_periph_unlock(enc->periph); } else cam_periph_release(enc->periph); return (result); } /** * \brief Interrupt configuration hook callback associated with * enc_boot_hold_ch. * * Since interrupts are always functional at the time of enclosure * configuration, there is nothing to be done when the callback occurs. * This hook is only registered to hold up boot processing while initial * enclosure processing occurs. * * \param arg The enclosure softc, but currently unused in this callback.
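* * The hook is disestablished by enc_daemon() after its first pass through the state machine (or by enc_dtor() on teardown), which is what releases the boot hold.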
*/ static void enc_nop_confighook_cb(void *arg __unused) { } static cam_status enc_ctor(struct cam_periph *periph, void *arg) { cam_status status = CAM_REQ_CMP_ERR; int err; enc_softc_t *enc; struct ccb_getdev *cgd; char *tname; struct make_dev_args args; struct sbuf sb; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("enc_ctor: no getdev CCB, can't register device\n"); goto out; } enc = ENC_MALLOCZ(sizeof(*enc)); if (enc == NULL) { printf("enc_ctor: Unable to probe new device. " "Unable to allocate enc\n"); goto out; } enc->periph = periph; enc->current_action = ENC_UPDATE_INVALID; enc->enc_type = enc_type(cgd); sx_init(&enc->enc_cache_lock, "enccache"); switch (enc->enc_type) { case ENC_SES: case ENC_SES_SCSI2: case ENC_SES_PASSTHROUGH: case ENC_SEMB_SES: err = ses_softc_init(enc); break; case ENC_SAFT: case ENC_SEMB_SAFT: err = safte_softc_init(enc); break; case ENC_NONE: default: ENC_FREE(enc); return (CAM_REQ_CMP_ERR); } if (err) { xpt_print(periph->path, "error %d initializing\n", err); goto out; } /* * Hold off userland until we have made at least one pass * through our state machine so that physical path data is * present. */ if (enc->enc_vec.poll_status != NULL) { enc->enc_boot_hold_ch.ich_func = enc_nop_confighook_cb; enc->enc_boot_hold_ch.ich_arg = enc; config_intrhook_establish(&enc->enc_boot_hold_ch); } /* * The softc field is set only once the enc is fully initialized * so that we can rely on this field to detect partially * initialized periph objects in the AC_FOUND_DEVICE handler. */ periph->softc = enc; cam_periph_unlock(periph); if (enc->enc_vec.poll_status != NULL) { err = enc_kproc_init(enc); if (err) { xpt_print(periph->path, "error %d starting enc_daemon\n", err); goto out; } } /* * Acquire a reference to the periph before we create the devfs * instance for it. We'll release this reference once the devfs * instance has been freed. */ - if (cam_periph_acquire(periph) != CAM_REQ_CMP) { + if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } make_dev_args_init(&args); args.mda_devsw = &enc_cdevsw; args.mda_unit = periph->unit_number; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = periph; err = make_dev_s(&args, &enc->enc_dev, "%s%d", periph->periph_name, periph->unit_number); cam_periph_lock(periph); if (err != 0) { cam_periph_release_locked(periph); return (CAM_REQ_CMP_ERR); } enc->enc_flags |= ENC_FLAG_INITIALIZED; /* * Add an async callback so that we get notified if this * device goes away. 
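* AC_LOST_DEVICE lands in the default case of enc_async() and is handed to cam_periph_async(), whose standard handling of that event invalidates the periph.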
*/ xpt_register_async(AC_LOST_DEVICE, enc_async, periph, periph->path); switch (enc->enc_type) { default: case ENC_NONE: tname = "No ENC device"; break; case ENC_SES_SCSI2: tname = "SCSI-2 ENC Device"; break; case ENC_SES: tname = "SCSI-3 ENC Device"; break; case ENC_SES_PASSTHROUGH: tname = "ENC Passthrough Device"; break; case ENC_SAFT: tname = "SAF-TE Compliant Device"; break; case ENC_SEMB_SES: tname = "SEMB SES Device"; break; case ENC_SEMB_SAFT: tname = "SEMB SAF-TE Device"; break; } sbuf_new(&sb, enc->announce_buf, ENC_ANNOUNCE_SZ, SBUF_FIXEDLEN); xpt_announce_periph_sbuf(periph, &sb, tname); sbuf_finish(&sb); sbuf_putbuf(&sb); status = CAM_REQ_CMP; out: if (status != CAM_REQ_CMP) enc_dtor(periph); return (status); } Index: head/sys/cam/scsi/scsi_pass.c =================================================================== --- head/sys/cam/scsi/scsi_pass.c (revision 328917) +++ head/sys/cam/scsi/scsi_pass.c (revision 328918) @@ -1,2276 +1,2274 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef enum { PASS_FLAG_OPEN = 0x01, PASS_FLAG_LOCKED = 0x02, PASS_FLAG_INVALID = 0x04, PASS_FLAG_INITIAL_PHYSPATH = 0x08, PASS_FLAG_ZONE_INPROG = 0x10, PASS_FLAG_ZONE_VALID = 0x20, PASS_FLAG_UNMAPPED_CAPABLE = 0x40, PASS_FLAG_ABANDONED_REF_SET = 0x80 } pass_flags; typedef enum { PASS_STATE_NORMAL } pass_state; typedef enum { PASS_CCB_BUFFER_IO, PASS_CCB_QUEUED_IO } pass_ccb_types; #define ccb_type ppriv_field0 #define ccb_ioreq ppriv_ptr1 /* * The maximum number of memory segments we preallocate. 
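* Requests with more segments fall back to malloc(9) in passmemsetup() and are flagged PASS_IO_USER_SEG_MALLOC or PASS_IO_KERN_SEG_MALLOC so that passiocleanup() knows to free them.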
*/ #define PASS_MAX_SEGS 16 typedef enum { PASS_IO_NONE = 0x00, PASS_IO_USER_SEG_MALLOC = 0x01, PASS_IO_KERN_SEG_MALLOC = 0x02, PASS_IO_ABANDONED = 0x04 } pass_io_flags; struct pass_io_req { union ccb ccb; union ccb *alloced_ccb; union ccb *user_ccb_ptr; camq_entry user_periph_links; ccb_ppriv_area user_periph_priv; struct cam_periph_map_info mapinfo; pass_io_flags flags; ccb_flags data_flags; int num_user_segs; bus_dma_segment_t user_segs[PASS_MAX_SEGS]; int num_kern_segs; bus_dma_segment_t kern_segs[PASS_MAX_SEGS]; bus_dma_segment_t *user_segptr; bus_dma_segment_t *kern_segptr; int num_bufs; uint32_t dirs[CAM_PERIPH_MAXMAPS]; uint32_t lengths[CAM_PERIPH_MAXMAPS]; uint8_t *user_bufs[CAM_PERIPH_MAXMAPS]; uint8_t *kern_bufs[CAM_PERIPH_MAXMAPS]; struct bintime start_time; TAILQ_ENTRY(pass_io_req) links; }; struct pass_softc { pass_state state; pass_flags flags; u_int8_t pd_type; union ccb saved_ccb; int open_count; u_int maxio; struct devstat *device_stats; struct cdev *dev; struct cdev *alias_dev; struct task add_physpath_task; struct task shutdown_kqueue_task; struct selinfo read_select; TAILQ_HEAD(, pass_io_req) incoming_queue; TAILQ_HEAD(, pass_io_req) active_queue; TAILQ_HEAD(, pass_io_req) abandoned_queue; TAILQ_HEAD(, pass_io_req) done_queue; struct cam_periph *periph; char zone_name[12]; char io_zone_name[12]; uma_zone_t pass_zone; uma_zone_t pass_io_zone; size_t io_zone_size; }; static d_open_t passopen; static d_close_t passclose; static d_ioctl_t passioctl; static d_ioctl_t passdoioctl; static d_poll_t passpoll; static d_kqfilter_t passkqfilter; static void passreadfiltdetach(struct knote *kn); static int passreadfilt(struct knote *kn, long hint); static periph_init_t passinit; static periph_ctor_t passregister; static periph_oninv_t passoninvalidate; static periph_dtor_t passcleanup; static periph_start_t passstart; static void pass_shutdown_kqueue(void *context, int pending); static void pass_add_physpath(void *context, int pending); static void passasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void passdone(struct cam_periph *periph, union ccb *done_ccb); static int passcreatezone(struct cam_periph *periph); static void passiocleanup(struct pass_softc *softc, struct pass_io_req *io_req); static int passcopysglist(struct cam_periph *periph, struct pass_io_req *io_req, ccb_flags direction); static int passmemsetup(struct cam_periph *periph, struct pass_io_req *io_req); static int passmemdone(struct cam_periph *periph, struct pass_io_req *io_req); static int passerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb); static struct periph_driver passdriver = { passinit, "pass", TAILQ_HEAD_INITIALIZER(passdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(pass, passdriver); static struct cdevsw pass_cdevsw = { .d_version = D_VERSION, .d_flags = D_TRACKCLOSE, .d_open = passopen, .d_close = passclose, .d_ioctl = passioctl, .d_poll = passpoll, .d_kqfilter = passkqfilter, .d_name = "pass", }; static struct filterops passread_filtops = { .f_isfd = 1, .f_detach = passreadfiltdetach, .f_event = passreadfilt }; static MALLOC_DEFINE(M_SCSIPASS, "scsi_pass", "scsi passthrough buffers"); static void passinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". 
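* Passing a NULL path makes the registration global, so this callback fires for every device discovered on any bus.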
*/ status = xpt_register_async(AC_FOUND_DEVICE, passasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("pass: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void passrejectios(struct cam_periph *periph) { struct pass_io_req *io_req, *io_req2; struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; /* * The user can no longer get status for I/O on the done queue, so * clean up all outstanding I/O on the done queue. */ TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) { TAILQ_REMOVE(&softc->done_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); } /* * The underlying device is gone, so we can't issue these I/Os. * The devfs node has been shut down, so we can't return status to * the user. Free any I/O left on the incoming queue. */ TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links, io_req2) { TAILQ_REMOVE(&softc->incoming_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); } /* * Normally we would put I/Os on the abandoned queue and acquire a * reference when we saw the final close. But, the device went * away and devfs may have moved everything off to deadfs by the * time the I/O done callback is called; as a result, we won't see * any more closes. So, if we have any active I/Os, we need to put * them on the abandoned queue. When the abandoned queue is empty, * we'll release the remaining reference (see below) to the peripheral. */ TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links, io_req2) { TAILQ_REMOVE(&softc->active_queue, io_req, links); io_req->flags |= PASS_IO_ABANDONED; TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req, links); } /* * If we put any I/O on the abandoned queue, acquire a reference. */ if ((!TAILQ_EMPTY(&softc->abandoned_queue)) && ((softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0)) { cam_periph_doacquire(periph); softc->flags |= PASS_FLAG_ABANDONED_REF_SET; } } static void passdevgonecb(void *arg) { struct cam_periph *periph; struct mtx *mtx; struct pass_softc *softc; int i; periph = (struct cam_periph *)arg; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = (struct pass_softc *)periph->softc; KASSERT(softc->open_count >= 0, ("Negative open count %d", softc->open_count)); /* * When we get this callback, we will get no more close calls from * devfs. So if we have any dangling opens, we need to release the * reference held for that particular context. */ for (i = 0; i < softc->open_count; i++) cam_periph_release_locked(periph); softc->open_count = 0; /* * Release the reference held for the device node, it is gone now. * Accordingly, inform all queued I/Os of their fate. */ cam_periph_release_locked(periph); passrejectios(periph); /* * We reference the SIM lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. */ mtx_unlock(mtx); /* * We have to remove our kqueue context from a thread because it * may sleep. It would be nice if we could get a callback from * kqueue when it is done cleaning up resources. */ taskqueue_enqueue(taskqueue_thread, &softc->shutdown_kqueue_task); } static void passoninvalidate(struct cam_periph *periph) { struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; /* * De-register any async callbacks. 
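* An event mask of 0 removes the registration previously made for this callback and path.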
*/ xpt_register_async(0, passasync, periph, periph->path); softc->flags |= PASS_FLAG_INVALID; /* * Tell devfs this device has gone away, and ask for a callback * when it has cleaned up its state. */ destroy_dev_sched_cb(softc->dev, passdevgonecb, periph); } static void passcleanup(struct cam_periph *periph) { struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); KASSERT(TAILQ_EMPTY(&softc->active_queue), ("%s called when there are commands on the active queue!\n", __func__)); KASSERT(TAILQ_EMPTY(&softc->abandoned_queue), ("%s called when there are commands on the abandoned queue!\n", __func__)); KASSERT(TAILQ_EMPTY(&softc->incoming_queue), ("%s called when there are commands on the incoming queue!\n", __func__)); KASSERT(TAILQ_EMPTY(&softc->done_queue), ("%s called when there are commands on the done queue!\n", __func__)); devstat_remove_entry(softc->device_stats); cam_periph_unlock(periph); /* * We call taskqueue_drain() for the physpath task to make sure it * is complete. We drop the lock because this can potentially * sleep. XXX KDM that is bad. Need a way to get a callback when * a taskqueue is drained. * * Note that we don't drain the kqueue shutdown task queue. This * is because we hold a reference on the periph for kqueue, and * release that reference from the kqueue shutdown task queue. So * we cannot come into this routine unless we've released that * reference. Also, because that could be the last reference, we * could be called from the cam_periph_release() call in * pass_shutdown_kqueue(). In that case, the taskqueue_drain() * would deadlock. It would be preferable if we had a way to * get a callback when a taskqueue is done. */ taskqueue_drain(taskqueue_thread, &softc->add_physpath_task); cam_periph_lock(periph); free(softc, M_DEVBUF); } static void pass_shutdown_kqueue(void *context, int pending) { struct cam_periph *periph; struct pass_softc *softc; periph = context; softc = periph->softc; knlist_clear(&softc->read_select.si_note, /*is_locked*/ 0); knlist_destroy(&softc->read_select.si_note); /* * Release the reference we held for kqueue. */ cam_periph_release(periph); } static void pass_add_physpath(void *context, int pending) { struct cam_periph *periph; struct pass_softc *softc; struct mtx *mtx; char *physpath; /* * If we have one, create a devfs alias for our * physical path. */ periph = context; softc = periph->softc; physpath = malloc(MAXPATHLEN, M_DEVBUF, M_WAITOK); mtx = cam_periph_mtx(periph); mtx_lock(mtx); if (periph->flags & CAM_PERIPH_INVALID) goto out; if (xpt_getattr(physpath, MAXPATHLEN, "GEOM::physpath", periph->path) == 0 && strlen(physpath) != 0) { mtx_unlock(mtx); make_dev_physpath_alias(MAKEDEV_WAITOK, &softc->alias_dev, softc->dev, softc->alias_dev, physpath); mtx_lock(mtx); } out: /* * Now that we've made our alias, we no longer have to have a * reference to the device. */ if ((softc->flags & PASS_FLAG_INITIAL_PHYSPATH) == 0) softc->flags |= PASS_FLAG_INITIAL_PHYSPATH; /* * We always acquire a reference to the periph before queueing this * task queue function, so it won't go away before we run. 
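* One reference is taken per enqueue, and the taskqueue may coalesce several enqueues into a single call, so one reference is dropped for each pending invocation.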
*/ while (pending-- > 0) cam_periph_release_locked(periph); mtx_unlock(mtx); free(physpath, M_DEVBUF); } static void passasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(passregister, passoninvalidate, passcleanup, passstart, "pass", CAM_PERIPH_BIO, path, passasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("passasync: Unable to attach new device " "due to status %#x: %s\n", status, entry ? entry->status_text : "Unknown"); } break; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct pass_softc *softc; - cam_status status; softc = (struct pass_softc *)periph->softc; /* * Acquire a reference to the periph before we * start the taskqueue, so that we don't run into * a situation where the periph goes away before * the task queue has a chance to run. */ - status = cam_periph_acquire(periph); - if (status != CAM_REQ_CMP) + if (cam_periph_acquire(periph) != 0) break; taskqueue_enqueue(taskqueue_thread, &softc->add_physpath_task); } break; } default: cam_periph_async(periph, code, path, arg); break; } } static cam_status passregister(struct cam_periph *periph, void *arg) { struct pass_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; struct make_dev_args args; int error, no_tags; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("%s: no getdev CCB, can't register device\n", __func__); return(CAM_REQ_CMP_ERR); } softc = (struct pass_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT); if (softc == NULL) { printf("%s: Unable to probe new device. " "Unable to allocate softc\n", __func__); return(CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); softc->state = PASS_STATE_NORMAL; if (cgd->protocol == PROTO_SCSI || cgd->protocol == PROTO_ATAPI) softc->pd_type = SID_TYPE(&cgd->inq_data); else if (cgd->protocol == PROTO_SATAPM) softc->pd_type = T_ENCLOSURE; else softc->pd_type = T_DIRECT; periph->softc = softc; softc->periph = periph; TAILQ_INIT(&softc->incoming_queue); TAILQ_INIT(&softc->active_queue); TAILQ_INIT(&softc->abandoned_queue); TAILQ_INIT(&softc->done_queue); snprintf(softc->zone_name, sizeof(softc->zone_name), "%s%d", periph->periph_name, periph->unit_number); snprintf(softc->io_zone_name, sizeof(softc->io_zone_name), "%s%dIO", periph->periph_name, periph->unit_number); softc->io_zone_size = MAXPHYS; knlist_init_mtx(&softc->read_select.si_note, cam_periph_mtx(periph)); xpt_path_inq(&cpi, periph->path); if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ else if (cpi.maxio > MAXPHYS) softc->maxio = MAXPHYS; /* for safety */ else softc->maxio = cpi.maxio; /* real value */ if (cpi.hba_misc & PIM_UNMAPPED) softc->flags |= PASS_FLAG_UNMAPPED_CAPABLE; /* * We pass in 0 for a blocksize, since we don't * know what the blocksize of this device is, if * it even has a blocksize. */ cam_periph_unlock(periph); no_tags = (cgd->inq_data.flags & SID_CmdQue) == 0; softc->device_stats = devstat_new_entry("pass", periph->unit_number, 0, DEVSTAT_NO_BLOCKSIZE | (no_tags ? 
DEVSTAT_NO_ORDERED_TAGS : 0), softc->pd_type | XPORT_DEVSTAT_TYPE(cpi.transport) | DEVSTAT_TYPE_PASS, DEVSTAT_PRIORITY_PASS); /* * Initialize the taskqueue handler for shutting down kqueue. */ TASK_INIT(&softc->shutdown_kqueue_task, /*priority*/ 0, pass_shutdown_kqueue, periph); /* * Acquire a reference to the periph that we can release once we've * cleaned up the kqueue. */ - if (cam_periph_acquire(periph) != CAM_REQ_CMP) { + if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } /* * Acquire a reference to the periph before we create the devfs * instance for it. We'll release this reference once the devfs * instance has been freed. */ - if (cam_periph_acquire(periph) != CAM_REQ_CMP) { + if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } /* Register the device */ make_dev_args_init(&args); args.mda_devsw = &pass_cdevsw; args.mda_unit = periph->unit_number; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = periph; error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name, periph->unit_number); if (error != 0) { cam_periph_lock(periph); cam_periph_release_locked(periph); return (CAM_REQ_CMP_ERR); } /* * Hold a reference to the periph before we create the physical * path alias so it can't go away. */ - if (cam_periph_acquire(periph) != CAM_REQ_CMP) { + if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } cam_periph_lock(periph); TASK_INIT(&softc->add_physpath_task, /*priority*/0, pass_add_physpath, periph); /* * See if physical path information is already available. */ taskqueue_enqueue(taskqueue_thread, &softc->add_physpath_task); /* * Add an async callback so that we get notified if * this device goes away or its physical path * (stored in the advanced info data of the EDT) has * changed. */ xpt_register_async(AC_LOST_DEVICE | AC_ADVINFO_CHANGED, passasync, periph, periph->path); if (bootverbose) xpt_announce_periph(periph, NULL); return(CAM_REQ_CMP); } static int passopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; - if (cam_periph_acquire(periph) != CAM_REQ_CMP) + if (cam_periph_acquire(periph) != 0) return (ENXIO); cam_periph_lock(periph); softc = (struct pass_softc *)periph->softc; if (softc->flags & PASS_FLAG_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(ENXIO); } /* * Don't allow access when we're running at a high securelevel. */ error = securelevel_gt(td->td_ucred, 1); if (error) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(error); } /* * Only allow read-write access. */ if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(EPERM); } /* * We don't allow nonblocking access. 
*/ if ((flags & O_NONBLOCK) != 0) { xpt_print(periph->path, "can't do nonblocking access\n"); cam_periph_release_locked(periph); cam_periph_unlock(periph); return(EINVAL); } softc->open_count++; cam_periph_unlock(periph); return (error); } static int passclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = periph->softc; softc->open_count--; if (softc->open_count == 0) { struct pass_io_req *io_req, *io_req2; TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) { TAILQ_REMOVE(&softc->done_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); } TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links, io_req2) { TAILQ_REMOVE(&softc->incoming_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); } /* * If there are any active I/Os, we need to forcibly acquire a * reference to the peripheral so that we don't go away * before they complete. We'll release the reference when * the abandoned queue is empty. */ io_req = TAILQ_FIRST(&softc->active_queue); if ((io_req != NULL) && (softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0) { cam_periph_doacquire(periph); softc->flags |= PASS_FLAG_ABANDONED_REF_SET; } /* * Since the I/O in the active queue is not under our * control, just set a flag so that we can clean it up when * it completes and put it on the abandoned queue. This * will prevent our sending spurious completions in the * event that the device is opened again before these I/Os * complete. */ TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links, io_req2) { TAILQ_REMOVE(&softc->active_queue, io_req, links); io_req->flags |= PASS_IO_ABANDONED; TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req, links); } } cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. * * cam_periph_release() avoids this problem using the same method, * but we're manually acquiring and dropping the lock here to * protect the open count and avoid another lock acquisition and * release. */ mtx_unlock(mtx); return (0); } static void passstart(struct cam_periph *periph, union ccb *start_ccb) { struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; switch (softc->state) { case PASS_STATE_NORMAL: { struct pass_io_req *io_req; /* * Check for any queued I/O requests that require an * allocated slot. */ io_req = TAILQ_FIRST(&softc->incoming_queue); if (io_req == NULL) { xpt_release_ccb(start_ccb); break; } TAILQ_REMOVE(&softc->incoming_queue, io_req, links); TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links); /* * Merge the user's CCB into the allocated CCB. */ xpt_merge_ccb(start_ccb, &io_req->ccb); start_ccb->ccb_h.ccb_type = PASS_CCB_QUEUED_IO; start_ccb->ccb_h.ccb_ioreq = io_req; start_ccb->ccb_h.cbfcnp = passdone; io_req->alloced_ccb = start_ccb; binuptime(&io_req->start_time); devstat_start_transaction(softc->device_stats, &io_req->start_time); xpt_action(start_ccb); /* * If we have any more I/O waiting, schedule ourselves again. 
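* xpt_schedule() arranges for passstart() to be called again once another CCB becomes available for this periph.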
*/ if (!TAILQ_EMPTY(&softc->incoming_queue)) xpt_schedule(periph, CAM_PRIORITY_NORMAL); break; } default: break; } } static void passdone(struct cam_periph *periph, union ccb *done_ccb) { struct pass_softc *softc; struct ccb_scsiio *csio; softc = (struct pass_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); csio = &done_ccb->csio; switch (csio->ccb_h.ccb_type) { case PASS_CCB_QUEUED_IO: { struct pass_io_req *io_req; io_req = done_ccb->ccb_h.ccb_ioreq; #if 0 xpt_print(periph->path, "%s: called for user CCB %p\n", __func__, io_req->user_ccb_ptr); #endif if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) && (done_ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) && ((io_req->flags & PASS_IO_ABANDONED) == 0)) { int error; error = passerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } } /* * Copy the allocated CCB contents back to the malloced CCB * so we can give status back to the user when he requests it. */ bcopy(done_ccb, &io_req->ccb, sizeof(*done_ccb)); /* * Log data/transaction completion with devstat(9). */ switch (done_ccb->ccb_h.func_code) { case XPT_SCSI_IO: devstat_end_transaction(softc->device_stats, done_ccb->csio.dxfer_len - done_ccb->csio.resid, done_ccb->csio.tag_action & 0x3, ((done_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ? DEVSTAT_NO_DATA : (done_ccb->ccb_h.flags & CAM_DIR_OUT) ? DEVSTAT_WRITE : DEVSTAT_READ, NULL, &io_req->start_time); break; case XPT_ATA_IO: devstat_end_transaction(softc->device_stats, done_ccb->ataio.dxfer_len - done_ccb->ataio.resid, 0, /* Not used in ATA */ ((done_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ? DEVSTAT_NO_DATA : (done_ccb->ccb_h.flags & CAM_DIR_OUT) ? DEVSTAT_WRITE : DEVSTAT_READ, NULL, &io_req->start_time); break; case XPT_SMP_IO: /* * XXX KDM this isn't quite right, but there isn't * currently an easy way to represent a bidirectional * transfer in devstat. The only way to do it * and have the byte counts come out right would * mean that we would have to record two * transactions, one for the request and one for the * response. For now, so that we report something, * just treat the entire thing as a read. */ devstat_end_transaction(softc->device_stats, done_ccb->smpio.smp_request_len + done_ccb->smpio.smp_response_len, DEVSTAT_TAG_SIMPLE, DEVSTAT_READ, NULL, &io_req->start_time); break; default: devstat_end_transaction(softc->device_stats, 0, DEVSTAT_TAG_NONE, DEVSTAT_NO_DATA, NULL, &io_req->start_time); break; } /* * In the normal case, take the completed I/O off of the * active queue and put it on the done queue. Notify the * user that we have a completed I/O. */ if ((io_req->flags & PASS_IO_ABANDONED) == 0) { TAILQ_REMOVE(&softc->active_queue, io_req, links); TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links); selwakeuppri(&softc->read_select, PRIBIO); KNOTE_LOCKED(&softc->read_select.si_note, 0); } else { /* * In the case of an abandoned I/O (final close * without fetching the I/O), take it off of the * abandoned queue and free it. */ TAILQ_REMOVE(&softc->abandoned_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); /* * Release the done_ccb here, since we may wind up * freeing the peripheral when we decrement the * reference count below. */ xpt_release_ccb(done_ccb); /* * If the abandoned queue is empty, we can release * our reference to the periph since we won't have * any more completions coming.
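* The reference dropped here is the one taken when the first request was moved onto the abandoned queue (tracked by PASS_FLAG_ABANDONED_REF_SET).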
*/ if ((TAILQ_EMPTY(&softc->abandoned_queue)) && (softc->flags & PASS_FLAG_ABANDONED_REF_SET)) { softc->flags &= ~PASS_FLAG_ABANDONED_REF_SET; cam_periph_release_locked(periph); } /* * We have already released the CCB, so we can * return. */ return; } break; } } xpt_release_ccb(done_ccb); } static int passcreatezone(struct cam_periph *periph) { struct pass_softc *softc; int error; error = 0; softc = (struct pass_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); KASSERT(((softc->flags & PASS_FLAG_ZONE_VALID) == 0), ("%s called when the pass(4) zone is valid!\n", __func__)); KASSERT((softc->pass_zone == NULL), ("%s called when the pass(4) zone is allocated!\n", __func__)); if ((softc->flags & PASS_FLAG_ZONE_INPROG) == 0) { /* * We're the first context through, so we need to create * the pass(4) UMA zone for I/O requests. */ softc->flags |= PASS_FLAG_ZONE_INPROG; /* * uma_zcreate() does a blocking (M_WAITOK) allocation, * so we cannot hold a mutex while we call it. */ cam_periph_unlock(periph); softc->pass_zone = uma_zcreate(softc->zone_name, sizeof(struct pass_io_req), NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/ 0); softc->pass_io_zone = uma_zcreate(softc->io_zone_name, softc->io_zone_size, NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/ 0); cam_periph_lock(periph); if ((softc->pass_zone == NULL) || (softc->pass_io_zone == NULL)) { if (softc->pass_zone == NULL) xpt_print(periph->path, "unable to allocate " "IO Req UMA zone\n"); else xpt_print(periph->path, "unable to allocate " "IO UMA zone\n"); softc->flags &= ~PASS_FLAG_ZONE_INPROG; goto bailout; } /* * Set the flags appropriately and notify any other waiters. */ softc->flags &= ~PASS_FLAG_ZONE_INPROG; softc->flags |= PASS_FLAG_ZONE_VALID; wakeup(&softc->pass_zone); } else { /* * In this case, the UMA zone has not yet been created, but * another context is in the process of creating it. We * need to sleep until the creation is either done or has * failed. */ while ((softc->flags & PASS_FLAG_ZONE_INPROG) && ((softc->flags & PASS_FLAG_ZONE_VALID) == 0)) { error = msleep(&softc->pass_zone, cam_periph_mtx(periph), PRIBIO, "paszon", 0); if (error != 0) goto bailout; } /* * If the zone creation failed, no luck for the user.
*/ if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0){ error = ENOMEM; goto bailout; } } bailout: return (error); } static void passiocleanup(struct pass_softc *softc, struct pass_io_req *io_req) { union ccb *ccb; u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; int i, numbufs; ccb = &io_req->ccb; switch (ccb->ccb_h.func_code) { case XPT_DEV_MATCH: numbufs = min(io_req->num_bufs, 2); if (numbufs == 1) { data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; } else { data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; } break; case XPT_SCSI_IO: case XPT_CONT_TARGET_IO: data_ptrs[0] = &ccb->csio.data_ptr; numbufs = min(io_req->num_bufs, 1); break; case XPT_ATA_IO: data_ptrs[0] = &ccb->ataio.data_ptr; numbufs = min(io_req->num_bufs, 1); break; case XPT_SMP_IO: numbufs = min(io_req->num_bufs, 2); data_ptrs[0] = &ccb->smpio.smp_request; data_ptrs[1] = &ccb->smpio.smp_response; break; case XPT_DEV_ADVINFO: numbufs = min(io_req->num_bufs, 1); data_ptrs[0] = (uint8_t **)&ccb->cdai.buf; break; case XPT_NVME_IO: case XPT_NVME_ADMIN: data_ptrs[0] = &ccb->nvmeio.data_ptr; numbufs = min(io_req->num_bufs, 1); break; default: /* allow ourselves to be swapped once again */ return; break; /* NOTREACHED */ } if (io_req->flags & PASS_IO_USER_SEG_MALLOC) { free(io_req->user_segptr, M_SCSIPASS); io_req->user_segptr = NULL; } /* * We only want to free memory we malloced. */ if (io_req->data_flags == CAM_DATA_VADDR) { for (i = 0; i < io_req->num_bufs; i++) { if (io_req->kern_bufs[i] == NULL) continue; free(io_req->kern_bufs[i], M_SCSIPASS); io_req->kern_bufs[i] = NULL; } } else if (io_req->data_flags == CAM_DATA_SG) { for (i = 0; i < io_req->num_kern_segs; i++) { if ((uint8_t *)(uintptr_t) io_req->kern_segptr[i].ds_addr == NULL) continue; uma_zfree(softc->pass_io_zone, (uint8_t *)(uintptr_t) io_req->kern_segptr[i].ds_addr); io_req->kern_segptr[i].ds_addr = 0; } } if (io_req->flags & PASS_IO_KERN_SEG_MALLOC) { free(io_req->kern_segptr, M_SCSIPASS); io_req->kern_segptr = NULL; } if (io_req->data_flags != CAM_DATA_PADDR) { for (i = 0; i < numbufs; i++) { /* * Restore the user's buffer pointers to their * previous values. */ if (io_req->user_bufs[i] != NULL) *data_ptrs[i] = io_req->user_bufs[i]; } } } static int passcopysglist(struct cam_periph *periph, struct pass_io_req *io_req, ccb_flags direction) { bus_size_t kern_watermark, user_watermark, len_copied, len_to_copy; bus_dma_segment_t *user_sglist, *kern_sglist; int i, j, error; error = 0; kern_watermark = 0; user_watermark = 0; len_to_copy = 0; len_copied = 0; user_sglist = io_req->user_segptr; kern_sglist = io_req->kern_segptr; for (i = 0, j = 0; i < io_req->num_user_segs && j < io_req->num_kern_segs;) { uint8_t *user_ptr, *kern_ptr; len_to_copy = min(user_sglist[i].ds_len -user_watermark, kern_sglist[j].ds_len - kern_watermark); user_ptr = (uint8_t *)(uintptr_t)user_sglist[i].ds_addr; user_ptr = user_ptr + user_watermark; kern_ptr = (uint8_t *)(uintptr_t)kern_sglist[j].ds_addr; kern_ptr = kern_ptr + kern_watermark; user_watermark += len_to_copy; kern_watermark += len_to_copy; if (!useracc(user_ptr, len_to_copy, (direction == CAM_DIR_IN) ? 
VM_PROT_WRITE : VM_PROT_READ)) { xpt_print(periph->path, "%s: unable to access user " "S/G list element %p len %zu\n", __func__, user_ptr, len_to_copy); error = EFAULT; goto bailout; } if (direction == CAM_DIR_IN) { error = copyout(kern_ptr, user_ptr, len_to_copy); if (error != 0) { xpt_print(periph->path, "%s: copyout of %u " "bytes from %p to %p failed with " "error %d\n", __func__, len_to_copy, kern_ptr, user_ptr, error); goto bailout; } } else { error = copyin(user_ptr, kern_ptr, len_to_copy); if (error != 0) { xpt_print(periph->path, "%s: copyin of %u " "bytes from %p to %p failed with " "error %d\n", __func__, len_to_copy, user_ptr, kern_ptr, error); goto bailout; } } len_copied += len_to_copy; if (user_sglist[i].ds_len == user_watermark) { i++; user_watermark = 0; } if (kern_sglist[j].ds_len == kern_watermark) { j++; kern_watermark = 0; } } bailout: return (error); } static int passmemsetup(struct cam_periph *periph, struct pass_io_req *io_req) { union ccb *ccb; struct pass_softc *softc; int numbufs, i; uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; uint32_t lengths[CAM_PERIPH_MAXMAPS]; uint32_t dirs[CAM_PERIPH_MAXMAPS]; uint32_t num_segs; uint16_t *seg_cnt_ptr; size_t maxmap; int error; cam_periph_assert(periph, MA_NOTOWNED); softc = periph->softc; error = 0; ccb = &io_req->ccb; maxmap = 0; num_segs = 0; seg_cnt_ptr = NULL; switch(ccb->ccb_h.func_code) { case XPT_DEV_MATCH: if (ccb->cdm.match_buf_len == 0) { printf("%s: invalid match buffer length 0\n", __func__); return(EINVAL); } if (ccb->cdm.pattern_buf_len > 0) { data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; lengths[0] = ccb->cdm.pattern_buf_len; dirs[0] = CAM_DIR_OUT; data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; lengths[1] = ccb->cdm.match_buf_len; dirs[1] = CAM_DIR_IN; numbufs = 2; } else { data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; lengths[0] = ccb->cdm.match_buf_len; dirs[0] = CAM_DIR_IN; numbufs = 1; } io_req->data_flags = CAM_DATA_VADDR; break; case XPT_SCSI_IO: case XPT_CONT_TARGET_IO: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return(0); /* * The user shouldn't be able to supply a bio. */ if ((ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO) return (EINVAL); io_req->data_flags = ccb->ccb_h.flags & CAM_DATA_MASK; data_ptrs[0] = &ccb->csio.data_ptr; lengths[0] = ccb->csio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; num_segs = ccb->csio.sglist_cnt; seg_cnt_ptr = &ccb->csio.sglist_cnt; numbufs = 1; maxmap = softc->maxio; break; case XPT_ATA_IO: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return(0); /* * We only support a single virtual address for ATA I/O. 
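* Any other data flag (e.g. CAM_DATA_SG) is rejected with EINVAL just below.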
*/ if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) return (EINVAL); io_req->data_flags = CAM_DATA_VADDR; data_ptrs[0] = &ccb->ataio.data_ptr; lengths[0] = ccb->ataio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; numbufs = 1; maxmap = softc->maxio; break; case XPT_SMP_IO: io_req->data_flags = CAM_DATA_VADDR; data_ptrs[0] = &ccb->smpio.smp_request; lengths[0] = ccb->smpio.smp_request_len; dirs[0] = CAM_DIR_OUT; data_ptrs[1] = &ccb->smpio.smp_response; lengths[1] = ccb->smpio.smp_response_len; dirs[1] = CAM_DIR_IN; numbufs = 2; maxmap = softc->maxio; break; case XPT_DEV_ADVINFO: if (ccb->cdai.bufsiz == 0) return (0); io_req->data_flags = CAM_DATA_VADDR; data_ptrs[0] = (uint8_t **)&ccb->cdai.buf; lengths[0] = ccb->cdai.bufsiz; dirs[0] = CAM_DIR_IN; numbufs = 1; break; case XPT_NVME_ADMIN: case XPT_NVME_IO: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return (0); io_req->data_flags = ccb->ccb_h.flags & CAM_DATA_MASK; data_ptrs[0] = &ccb->nvmeio.data_ptr; lengths[0] = ccb->nvmeio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; num_segs = ccb->nvmeio.sglist_cnt; seg_cnt_ptr = &ccb->nvmeio.sglist_cnt; numbufs = 1; maxmap = softc->maxio; break; default: return(EINVAL); break; /* NOTREACHED */ } io_req->num_bufs = numbufs; /* * If there is a maximum, check to make sure that the user's * request fits within the limit. In general, we should only have * a maximum length for requests that go to hardware. Otherwise it * is whatever we're able to malloc. */ for (i = 0; i < numbufs; i++) { io_req->user_bufs[i] = *data_ptrs[i]; io_req->dirs[i] = dirs[i]; io_req->lengths[i] = lengths[i]; if (maxmap == 0) continue; if (lengths[i] <= maxmap) continue; xpt_print(periph->path, "%s: data length %u > max allowed %u " "bytes\n", __func__, lengths[i], maxmap); error = EINVAL; goto bailout; } switch (io_req->data_flags) { case CAM_DATA_VADDR: /* Map or copy the buffer into kernel address space */ for (i = 0; i < numbufs; i++) { uint8_t *tmp_buf; /* * If for some reason no length is specified, we * don't need to allocate anything. */ if (io_req->lengths[i] == 0) continue; /* * Make sure that the user's buffer is accessible * to that process. */ if (!useracc(io_req->user_bufs[i], io_req->lengths[i], (io_req->dirs[i] == CAM_DIR_IN) ? VM_PROT_WRITE : VM_PROT_READ)) { xpt_print(periph->path, "%s: user address %p " "length %u is not accessible\n", __func__, io_req->user_bufs[i], io_req->lengths[i]); error = EFAULT; goto bailout; } tmp_buf = malloc(lengths[i], M_SCSIPASS, M_WAITOK | M_ZERO); io_req->kern_bufs[i] = tmp_buf; *data_ptrs[i] = tmp_buf; #if 0 xpt_print(periph->path, "%s: malloced %p len %u, user " "buffer %p, operation: %s\n", __func__, tmp_buf, lengths[i], io_req->user_bufs[i], (dirs[i] == CAM_DIR_IN) ? "read" : "write"); #endif /* * We only need to copy in if the user is writing. */ if (dirs[i] != CAM_DIR_OUT) continue; error = copyin(io_req->user_bufs[i], io_req->kern_bufs[i], lengths[i]); if (error != 0) { xpt_print(periph->path, "%s: copy of user " "buffer from %p to %p failed with " "error %d\n", __func__, io_req->user_bufs[i], io_req->kern_bufs[i], error); goto bailout; } } break; case CAM_DATA_PADDR: /* Pass down the pointer as-is */ break; case CAM_DATA_SG: { size_t sg_length, size_to_go, alloc_size; uint32_t num_segs_needed; /* * Copy the user S/G list in, and then copy in the * individual segments. */ /* * We shouldn't see this, but check just in case. 
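* Every function code that reaches the CAM_DATA_SG case sets numbufs to 1 above, so this check is purely defensive.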
*/ if (numbufs != 1) { xpt_print(periph->path, "%s: cannot currently handle " "more than one S/G list per CCB\n", __func__); error = EINVAL; goto bailout; } /* * We have to have at least one segment. */ if (num_segs == 0) { xpt_print(periph->path, "%s: CAM_DATA_SG flag set, " "but sglist_cnt=0!\n", __func__); error = EINVAL; goto bailout; } /* * Make sure the user specified the total length and didn't * just leave it to us to decode the S/G list. */ if (lengths[0] == 0) { xpt_print(periph->path, "%s: no dxfer_len specified, " "but CAM_DATA_SG flag is set!\n", __func__); error = EINVAL; goto bailout; } /* * We allocate buffers in io_zone_size increments for an * S/G list. This will generally be MAXPHYS. */ if (lengths[0] <= softc->io_zone_size) num_segs_needed = 1; else { num_segs_needed = lengths[0] / softc->io_zone_size; if ((lengths[0] % softc->io_zone_size) != 0) num_segs_needed++; } /* Figure out the size of the S/G list */ sg_length = num_segs * sizeof(bus_dma_segment_t); io_req->num_user_segs = num_segs; io_req->num_kern_segs = num_segs_needed; /* Save the user's S/G list pointer for later restoration */ io_req->user_bufs[0] = *data_ptrs[0]; /* * If we have enough segments allocated by default to handle * the length of the user's S/G list, */ if (num_segs > PASS_MAX_SEGS) { io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) * num_segs, M_SCSIPASS, M_WAITOK | M_ZERO); io_req->flags |= PASS_IO_USER_SEG_MALLOC; } else io_req->user_segptr = io_req->user_segs; if (!useracc(*data_ptrs[0], sg_length, VM_PROT_READ)) { xpt_print(periph->path, "%s: unable to access user " "S/G list at %p\n", __func__, *data_ptrs[0]); error = EFAULT; goto bailout; } error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length); if (error != 0) { xpt_print(periph->path, "%s: copy of user S/G list " "from %p to %p failed with error %d\n", __func__, *data_ptrs[0], io_req->user_segptr, error); goto bailout; } if (num_segs_needed > PASS_MAX_SEGS) { io_req->kern_segptr = malloc(sizeof(bus_dma_segment_t) * num_segs_needed, M_SCSIPASS, M_WAITOK | M_ZERO); io_req->flags |= PASS_IO_KERN_SEG_MALLOC; } else { io_req->kern_segptr = io_req->kern_segs; } /* * Allocate the kernel S/G list. */ for (size_to_go = lengths[0], i = 0; size_to_go > 0 && i < num_segs_needed; i++, size_to_go -= alloc_size) { uint8_t *kern_ptr; alloc_size = min(size_to_go, softc->io_zone_size); kern_ptr = uma_zalloc(softc->pass_io_zone, M_WAITOK); io_req->kern_segptr[i].ds_addr = (bus_addr_t)(uintptr_t)kern_ptr; io_req->kern_segptr[i].ds_len = alloc_size; } if (size_to_go > 0) { printf("%s: size_to_go = %zu, software error!\n", __func__, size_to_go); error = EINVAL; goto bailout; } *data_ptrs[0] = (uint8_t *)io_req->kern_segptr; *seg_cnt_ptr = io_req->num_kern_segs; /* * We only need to copy data here if the user is writing. */ if (dirs[0] == CAM_DIR_OUT) error = passcopysglist(periph, io_req, dirs[0]); break; } case CAM_DATA_SG_PADDR: { size_t sg_length; /* * We shouldn't see this, but check just in case. */ if (numbufs != 1) { printf("%s: cannot currently handle more than one " "S/G list per CCB\n", __func__); error = EINVAL; goto bailout; } /* * We have to have at least one segment. */ if (num_segs == 0) { xpt_print(periph->path, "%s: CAM_DATA_SG_PADDR flag " "set, but sglist_cnt=0!\n", __func__); error = EINVAL; goto bailout; } /* * Make sure the user specified the total length and didn't * just leave it to us to decode the S/G list. 
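* For the physical-address case no kernel-side data buffers are allocated; the segment list is passed through as-is, so only its length and shape are validated here.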
*/ if (lengths[0] == 0) { xpt_print(periph->path, "%s: no dxfer_len specified, " "but CAM_DATA_SG flag is set!\n", __func__); error = EINVAL; goto bailout; } /* Figure out the size of the S/G list */ sg_length = num_segs * sizeof(bus_dma_segment_t); io_req->num_user_segs = num_segs; io_req->num_kern_segs = io_req->num_user_segs; /* Save the user's S/G list pointer for later restoration */ io_req->user_bufs[0] = *data_ptrs[0]; if (num_segs > PASS_MAX_SEGS) { io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) * num_segs, M_SCSIPASS, M_WAITOK | M_ZERO); io_req->flags |= PASS_IO_USER_SEG_MALLOC; } else io_req->user_segptr = io_req->user_segs; io_req->kern_segptr = io_req->user_segptr; error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length); if (error != 0) { xpt_print(periph->path, "%s: copy of user S/G list " "from %p to %p failed with error %d\n", __func__, *data_ptrs[0], io_req->user_segptr, error); goto bailout; } break; } default: case CAM_DATA_BIO: /* * A user shouldn't be attaching a bio to the CCB. It * isn't a user-accessible structure. */ error = EINVAL; break; } bailout: if (error != 0) passiocleanup(softc, io_req); return (error); } static int passmemdone(struct cam_periph *periph, struct pass_io_req *io_req) { struct pass_softc *softc; int error; int i; error = 0; softc = (struct pass_softc *)periph->softc; switch (io_req->data_flags) { case CAM_DATA_VADDR: /* * Copy back to the user buffer if this was a read. */ for (i = 0; i < io_req->num_bufs; i++) { if (io_req->dirs[i] != CAM_DIR_IN) continue; error = copyout(io_req->kern_bufs[i], io_req->user_bufs[i], io_req->lengths[i]); if (error != 0) { xpt_print(periph->path, "Unable to copy %u " "bytes from %p to user address %p\n", io_req->lengths[i], io_req->kern_bufs[i], io_req->user_bufs[i]); goto bailout; } } break; case CAM_DATA_PADDR: /* Do nothing. The pointer is a physical address already */ break; case CAM_DATA_SG: /* * Copy back to the user buffer if this was a read. * Restore the user's S/G list buffer pointer. */ if (io_req->dirs[0] == CAM_DIR_IN) error = passcopysglist(periph, io_req, io_req->dirs[0]); break; case CAM_DATA_SG_PADDR: /* * Restore the user's S/G list buffer pointer. No need to * copy. */ break; default: case CAM_DATA_BIO: error = EINVAL; break; } bailout: /* * Reset the user's pointers to their original values and free * allocated memory. */ passiocleanup(softc, io_req); return (error); } static int passioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { int error; if ((error = passdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) { error = cam_compat_ioctl(dev, cmd, addr, flag, td, passdoioctl); } return (error); } static int passdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; int error; uint32_t priority; periph = (struct cam_periph *)dev->si_drv1; cam_periph_lock(periph); softc = (struct pass_softc *)periph->softc; error = 0; switch (cmd) { case CAMIOCOMMAND: { union ccb *inccb; union ccb *ccb; int ccb_malloced; inccb = (union ccb *)addr; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (inccb->ccb_h.func_code == XPT_SCSI_IO) inccb->csio.bio = NULL; #endif if (inccb->ccb_h.flags & CAM_UNLOCKED) { error = EINVAL; break; } /* * Some CCB types, like scan bus and scan lun can only go * through the transport layer device. 
*/ if (inccb->ccb_h.func_code & XPT_FC_XPT_ONLY) { xpt_print(periph->path, "CCB function code %#x is " "restricted to the XPT device\n", inccb->ccb_h.func_code); error = ENODEV; break; } /* Compatibility for RL/priority-unaware code. */ priority = inccb->ccb_h.pinfo.priority; if (priority <= CAM_PRIORITY_OOB) priority += CAM_PRIORITY_OOB + 1; /* * Non-immediate CCBs need a CCB from the per-device pool * of CCBs, which is scheduled by the transport layer. * Immediate CCBs and user-supplied CCBs should just be * malloced. */ if ((inccb->ccb_h.func_code & XPT_FC_QUEUED) && ((inccb->ccb_h.func_code & XPT_FC_USER_CCB) == 0)) { ccb = cam_periph_getccb(periph, priority); ccb_malloced = 0; } else { ccb = xpt_alloc_ccb_nowait(); if (ccb != NULL) xpt_setup_ccb(&ccb->ccb_h, periph->path, priority); ccb_malloced = 1; } if (ccb == NULL) { xpt_print(periph->path, "unable to allocate CCB\n"); error = ENOMEM; break; } error = passsendccb(periph, ccb, inccb); if (ccb_malloced) xpt_free_ccb(ccb); else xpt_release_ccb(ccb); break; } case CAMIOQUEUE: { struct pass_io_req *io_req; union ccb **user_ccb, *ccb; xpt_opcode fc; if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0) { error = passcreatezone(periph); if (error != 0) goto bailout; } /* * We're going to do a blocking allocation for this I/O * request, so we have to drop the lock. */ cam_periph_unlock(periph); io_req = uma_zalloc(softc->pass_zone, M_WAITOK | M_ZERO); ccb = &io_req->ccb; user_ccb = (union ccb **)addr; /* * Unlike the CAMIOCOMMAND ioctl above, we only have a * pointer to the user's CCB, so we have to copy the whole * thing in to a buffer we have allocated (above) instead * of allowing the ioctl code to malloc a buffer and copy * it in. * * This is an advantage for this asynchronous interface, * since we don't want the memory to get freed while the * CCB is outstanding. */ #if 0 xpt_print(periph->path, "Copying user CCB %p to " "kernel address %p\n", *user_ccb, ccb); #endif error = copyin(*user_ccb, ccb, sizeof(*ccb)); if (error != 0) { xpt_print(periph->path, "Copy of user CCB %p to " "kernel address %p failed with error %d\n", *user_ccb, ccb, error); goto camioqueue_error; } #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (ccb->ccb_h.func_code == XPT_SCSI_IO) ccb->csio.bio = NULL; #endif if (ccb->ccb_h.flags & CAM_UNLOCKED) { error = EINVAL; goto camioqueue_error; } if (ccb->ccb_h.flags & CAM_CDB_POINTER) { if (ccb->csio.cdb_len > IOCDBLEN) { error = EINVAL; goto camioqueue_error; } error = copyin(ccb->csio.cdb_io.cdb_ptr, ccb->csio.cdb_io.cdb_bytes, ccb->csio.cdb_len); if (error != 0) goto camioqueue_error; ccb->ccb_h.flags &= ~CAM_CDB_POINTER; } /* * Some CCB types, like scan bus and scan lun can only go * through the transport layer device. */ if (ccb->ccb_h.func_code & XPT_FC_XPT_ONLY) { xpt_print(periph->path, "CCB function code %#x is " "restricted to the XPT device\n", ccb->ccb_h.func_code); error = ENODEV; goto camioqueue_error; } /* * Save the user's CCB pointer as well as his linked list * pointers and peripheral private area so that we can * restore these later. */ io_req->user_ccb_ptr = *user_ccb; io_req->user_periph_links = ccb->ccb_h.periph_links; io_req->user_periph_priv = ccb->ccb_h.periph_priv; /* * Now that we've saved the user's values, we can set our * own peripheral private entry. */ ccb->ccb_h.ccb_ioreq = io_req; /* Compatibility for RL/priority-unaware code. 
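		 * A user-supplied priority that falls within the reserved
		 * out-of-band range (numerically at or below
		 * CAM_PRIORITY_OOB) is shifted past it, so user CCBs cannot
		 * compete with internal out-of-band operations.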
*/ priority = ccb->ccb_h.pinfo.priority; if (priority <= CAM_PRIORITY_OOB) priority += CAM_PRIORITY_OOB + 1; /* * Setup fields in the CCB like the path and the priority. * The path in particular cannot be done in userland, since * it is a pointer to a kernel data structure. */ xpt_setup_ccb_flags(&ccb->ccb_h, periph->path, priority, ccb->ccb_h.flags); /* * Setup our done routine. There is no way for the user to * have a valid pointer here. */ ccb->ccb_h.cbfcnp = passdone; fc = ccb->ccb_h.func_code; /* * If this function code has memory that can be mapped in * or out, we need to call passmemsetup(). */ if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO) || (fc == XPT_SMP_IO) || (fc == XPT_DEV_MATCH) || (fc == XPT_DEV_ADVINFO) || (fc == XPT_NVME_ADMIN) || (fc == XPT_NVME_IO)) { error = passmemsetup(periph, io_req); if (error != 0) goto camioqueue_error; } else io_req->mapinfo.num_bufs_used = 0; cam_periph_lock(periph); /* * Everything goes on the incoming queue initially. */ TAILQ_INSERT_TAIL(&softc->incoming_queue, io_req, links); /* * If the CCB is queued, and is not a user CCB, then * we need to allocate a slot for it. Call xpt_schedule() * so that our start routine will get called when a CCB is * available. */ if ((fc & XPT_FC_QUEUED) && ((fc & XPT_FC_USER_CCB) == 0)) { xpt_schedule(periph, priority); break; } /* * At this point, the CCB in question is either an * immediate CCB (like XPT_DEV_ADVINFO) or it is a user CCB * and therefore should be malloced, not allocated via a slot. * Remove the CCB from the incoming queue and add it to the * active queue. */ TAILQ_REMOVE(&softc->incoming_queue, io_req, links); TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links); xpt_action(ccb); /* * If this is not a queued CCB (i.e. it is an immediate CCB), * then it is already done. We need to put it on the done * queue for the user to fetch. */ if ((fc & XPT_FC_QUEUED) == 0) { TAILQ_REMOVE(&softc->active_queue, io_req, links); TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links); } break; camioqueue_error: uma_zfree(softc->pass_zone, io_req); cam_periph_lock(periph); break; } case CAMIOGET: { union ccb **user_ccb; struct pass_io_req *io_req; int old_error; user_ccb = (union ccb **)addr; old_error = 0; io_req = TAILQ_FIRST(&softc->done_queue); if (io_req == NULL) { error = ENOENT; break; } /* * Remove the I/O from the done queue. */ TAILQ_REMOVE(&softc->done_queue, io_req, links); /* * We have to drop the lock during the copyout because the * copyout can result in VM faults that require sleeping. */ cam_periph_unlock(periph); /* * Do any needed copies (e.g. for reads) and revert the * pointers in the CCB back to the user's pointers. */ error = passmemdone(periph, io_req); old_error = error; io_req->ccb.ccb_h.periph_links = io_req->user_periph_links; io_req->ccb.ccb_h.periph_priv = io_req->user_periph_priv; #if 0 xpt_print(periph->path, "Copying to user CCB %p from " "kernel address %p\n", *user_ccb, &io_req->ccb); #endif error = copyout(&io_req->ccb, *user_ccb, sizeof(union ccb)); if (error != 0) { xpt_print(periph->path, "Copy to user CCB %p from " "kernel address %p failed with error %d\n", *user_ccb, &io_req->ccb, error); } /* * Prefer the first error we got back, and make sure we * don't overwrite bad status with good. */ if (old_error != 0) error = old_error; cam_periph_lock(periph); /* * At this point, if there was an error, we could potentially * re-queue the I/O and try again. But why? The error * would almost certainly happen again. We might as well * not leak memory. 
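		 *
		 * (For reference, a minimal user-space consumer of this
		 * asynchronous interface, where the descriptor name and
		 * error handling are assumptions rather than part of this
		 * driver, looks roughly like:
		 *
		 *	union ccb ccb, *ccbp = &ccb;
		 *	struct pollfd pfd = { .fd = pass_fd,
		 *	    .events = POLLIN };
		 *
		 *	... fill out the CCB, e.g. with cam_fill_csio() ...
		 *	if (ioctl(pass_fd, CAMIOQUEUE, &ccbp) == -1)
		 *		err(1, "CAMIOQUEUE");
		 *	if (poll(&pfd, 1, INFTIM) == -1)
		 *		err(1, "poll");
		 *	if (ioctl(pass_fd, CAMIOGET, &ccbp) == -1)
		 *		err(1, "CAMIOGET");
		 *
		 * CAMIOGET performs the copyout above and hands the
		 * completed CCB back to the caller.)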
*/ uma_zfree(softc->pass_zone, io_req); break; } default: error = cam_periph_ioctl(periph, cmd, addr, passerror); break; } bailout: cam_periph_unlock(periph); return(error); } static int passpoll(struct cdev *dev, int poll_events, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; int revents; periph = (struct cam_periph *)dev->si_drv1; softc = (struct pass_softc *)periph->softc; revents = poll_events & (POLLOUT | POLLWRNORM); if ((poll_events & (POLLIN | POLLRDNORM)) != 0) { cam_periph_lock(periph); if (!TAILQ_EMPTY(&softc->done_queue)) { revents |= poll_events & (POLLIN | POLLRDNORM); } cam_periph_unlock(periph); if (revents == 0) selrecord(td, &softc->read_select); } return (revents); } static int passkqfilter(struct cdev *dev, struct knote *kn) { struct cam_periph *periph; struct pass_softc *softc; periph = (struct cam_periph *)dev->si_drv1; softc = (struct pass_softc *)periph->softc; kn->kn_hook = (caddr_t)periph; kn->kn_fop = &passread_filtops; knlist_add(&softc->read_select.si_note, kn, 0); return (0); } static void passreadfiltdetach(struct knote *kn) { struct cam_periph *periph; struct pass_softc *softc; periph = (struct cam_periph *)kn->kn_hook; softc = (struct pass_softc *)periph->softc; knlist_remove(&softc->read_select.si_note, kn, 0); } static int passreadfilt(struct knote *kn, long hint) { struct cam_periph *periph; struct pass_softc *softc; int retval; periph = (struct cam_periph *)kn->kn_hook; softc = (struct pass_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); if (TAILQ_EMPTY(&softc->done_queue)) retval = 0; else retval = 1; return (retval); } /* * Generally, "ccb" should be the CCB supplied by the kernel. "inccb" * should be the CCB that is copied in from the user. */ static int passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb) { struct pass_softc *softc; struct cam_periph_map_info mapinfo; uint8_t *cmd; xpt_opcode fc; int error; softc = (struct pass_softc *)periph->softc; /* * There are some fields in the CCB header that need to be * preserved, the rest we get from the user. */ xpt_merge_ccb(ccb, inccb); if (ccb->ccb_h.flags & CAM_CDB_POINTER) { cmd = __builtin_alloca(ccb->csio.cdb_len); error = copyin(ccb->csio.cdb_io.cdb_ptr, cmd, ccb->csio.cdb_len); if (error) return (error); ccb->csio.cdb_io.cdb_ptr = cmd; } /* */ ccb->ccb_h.cbfcnp = passdone; /* * Let cam_periph_mapmem do a sanity check on the data pointer format. * Even if no data transfer is needed, it's a cheap check and it * simplifies the code. */ fc = ccb->ccb_h.func_code; if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO) || (fc == XPT_SMP_IO) || (fc == XPT_DEV_MATCH) || (fc == XPT_DEV_ADVINFO) || (fc == XPT_MMC_IO) || (fc == XPT_NVME_ADMIN) || (fc == XPT_NVME_IO)) { bzero(&mapinfo, sizeof(mapinfo)); /* * cam_periph_mapmem calls into proc and vm functions that can * sleep as well as trigger I/O, so we can't hold the lock. * Dropping it here is reasonably safe. */ cam_periph_unlock(periph); error = cam_periph_mapmem(ccb, &mapinfo, softc->maxio); cam_periph_lock(periph); /* * cam_periph_mapmem returned an error, we can't continue. * Return the error to the user. */ if (error) return(error); } else /* Ensure that the unmap call later on is a no-op. */ mapinfo.num_bufs_used = 0; /* * If the user wants us to perform any error recovery, then honor * that request. Otherwise, it's up to the user to perform any * error recovery. */ cam_periph_runccb(ccb, (ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) ? 
passerror : NULL, /* cam_flags */ CAM_RETRY_SELTO, /* sense_flags */ SF_RETRY_UA | SF_NO_PRINT, softc->device_stats); cam_periph_unmapmem(ccb, &mapinfo); ccb->ccb_h.cbfcnp = NULL; ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv; bcopy(ccb, inccb, sizeof(union ccb)); return(0); } static int passerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct cam_periph *periph; struct pass_softc *softc; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct pass_softc *)periph->softc; return(cam_periph_error(ccb, cam_flags, sense_flags)); } Index: head/sys/cam/scsi/scsi_pt.c =================================================================== --- head/sys/cam/scsi/scsi_pt.c (revision 328917) +++ head/sys/cam/scsi/scsi_pt.c (revision 328918) @@ -1,637 +1,637 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Implementation of SCSI Processor Target Peripheral driver for CAM. * * Copyright (c) 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_pt.h" typedef enum { PT_STATE_PROBE, PT_STATE_NORMAL } pt_state; typedef enum { PT_FLAG_NONE = 0x00, PT_FLAG_OPEN = 0x01, PT_FLAG_DEVICE_INVALID = 0x02, PT_FLAG_RETRY_UA = 0x04 } pt_flags; typedef enum { PT_CCB_BUFFER_IO = 0x01, PT_CCB_RETRY_UA = 0x04, PT_CCB_BUFFER_IO_UA = PT_CCB_BUFFER_IO|PT_CCB_RETRY_UA } pt_ccb_state; /* Offsets into our private area for storing information */ #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct pt_softc { struct bio_queue_head bio_queue; struct devstat *device_stats; LIST_HEAD(, ccb_hdr) pending_ccbs; pt_state state; pt_flags flags; union ccb saved_ccb; int io_timeout; struct cdev *dev; }; static d_open_t ptopen; static d_close_t ptclose; static d_strategy_t ptstrategy; static periph_init_t ptinit; static void ptasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static periph_ctor_t ptctor; static periph_oninv_t ptoninvalidate; static periph_dtor_t ptdtor; static periph_start_t ptstart; static void ptdone(struct cam_periph *periph, union ccb *done_ccb); static d_ioctl_t ptioctl; static int pterror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); void scsi_send_receive(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int tag_action, int readop, u_int byte2, u_int32_t xfer_len, u_int8_t *data_ptr, u_int8_t sense_len, u_int32_t timeout); static struct periph_driver ptdriver = { ptinit, "pt", TAILQ_HEAD_INITIALIZER(ptdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(pt, ptdriver); static struct cdevsw pt_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = ptopen, .d_close = ptclose, .d_read = physread, .d_write = physwrite, .d_ioctl = ptioctl, .d_strategy = ptstrategy, .d_name = "pt", }; #ifndef SCSI_PT_DEFAULT_TIMEOUT #define SCSI_PT_DEFAULT_TIMEOUT 60 #endif static int ptopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct pt_softc *softc; int error = 0; periph = (struct cam_periph *)dev->si_drv1; - if (cam_periph_acquire(periph) != CAM_REQ_CMP) + if (cam_periph_acquire(periph) != 0) return (ENXIO); softc = (struct pt_softc *)periph->softc; cam_periph_lock(periph); if (softc->flags & PT_FLAG_DEVICE_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(ENXIO); } if ((softc->flags & PT_FLAG_OPEN) == 0) softc->flags |= PT_FLAG_OPEN; else { error = EBUSY; cam_periph_release(periph); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ptopen: dev=%s\n", devtoname(dev))); cam_periph_unlock(periph); return (error); } static int ptclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct pt_softc *softc; periph = (struct cam_periph *)dev->si_drv1; softc = (struct pt_softc *)periph->softc; cam_periph_lock(periph); softc->flags &= ~PT_FLAG_OPEN; cam_periph_release_locked(periph); cam_periph_unlock(periph); return (0); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. 
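 *
 * (For example, a single read(2) on the pt device arrives here as one
 * bio via physread() and is eventually issued to the target as a single
 * RECEIVE command by ptstart().)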
*/ static void ptstrategy(struct bio *bp) { struct cam_periph *periph; struct pt_softc *softc; periph = (struct cam_periph *)bp->bio_dev->si_drv1; bp->bio_resid = bp->bio_bcount; if (periph == NULL) { biofinish(bp, NULL, ENXIO); return; } cam_periph_lock(periph); softc = (struct pt_softc *)periph->softc; /* * If the device has been made invalid, error out */ if ((softc->flags & PT_FLAG_DEVICE_INVALID)) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } /* * Place it in the queue of disk activities for this disk */ bioq_insert_tail(&softc->bio_queue, bp); /* * Schedule ourselves for performing the work. */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); cam_periph_unlock(periph); return; } static void ptinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, ptasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("pt: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static cam_status ptctor(struct cam_periph *periph, void *arg) { struct pt_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; struct make_dev_args args; int error; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("ptregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct pt_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT); if (softc == NULL) { printf("daregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); LIST_INIT(&softc->pending_ccbs); softc->state = PT_STATE_NORMAL; bioq_init(&softc->bio_queue); softc->io_timeout = SCSI_PT_DEFAULT_TIMEOUT * 1000; periph->softc = softc; xpt_path_inq(&cpi, periph->path); cam_periph_unlock(periph); make_dev_args_init(&args); args.mda_devsw = &pt_cdevsw; args.mda_unit = periph->unit_number; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = periph; error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name, periph->unit_number); if (error != 0) { cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } softc->device_stats = devstat_new_entry("pt", periph->unit_number, 0, DEVSTAT_NO_BLOCKSIZE, SID_TYPE(&cgd->inq_data) | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_OTHER); cam_periph_lock(periph); /* * Add async callbacks for bus reset and * bus device reset calls. I don't bother * checking if this fails as, in most cases, * the system will function just fine without * them and the only alternative would be to * not attach the device on failure. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE, ptasync, periph, periph->path); /* Tell the user we've attached to the device */ xpt_announce_periph(periph, NULL); return(CAM_REQ_CMP); } static void ptoninvalidate(struct cam_periph *periph) { struct pt_softc *softc; softc = (struct pt_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, ptasync, periph, periph->path); softc->flags |= PT_FLAG_DEVICE_INVALID; /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. 
 */
	bioq_flush(&softc->bio_queue, NULL, ENXIO);
}

static void
ptdtor(struct cam_periph *periph)
{
	struct pt_softc *softc;

	softc = (struct pt_softc *)periph->softc;

	devstat_remove_entry(softc->device_stats);
	cam_periph_unlock(periph);
	destroy_dev(softc->dev);
	cam_periph_lock(periph);
	free(softc, M_DEVBUF);
}

static void
ptasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		if (cgd->protocol != PROTO_SCSI)
			break;
		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
			break;
		if (SID_TYPE(&cgd->inq_data) != T_PROCESSOR)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(ptctor, ptoninvalidate, ptdtor,
		    ptstart, "pt", CAM_PERIPH_BIO, path, ptasync,
		    AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
			printf("ptasync: Unable to attach to new device "
			    "due to status 0x%x\n", status);
		break;
	}
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct pt_softc *softc;
		struct ccb_hdr *ccbh;

		softc = (struct pt_softc *)periph->softc;
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= PT_FLAG_RETRY_UA;
		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
			ccbh->ccb_state |= PT_CCB_RETRY_UA;
	}
	/* FALLTHROUGH */
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}

static void
ptstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct pt_softc *softc;
	struct bio *bp;

	softc = (struct pt_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ptstart\n"));

	/*
	 * See if there is a buf with work for us to do..
	 */
	bp = bioq_first(&softc->bio_queue);
	if (bp == NULL) {
		xpt_release_ccb(start_ccb);
	} else {
		bioq_remove(&softc->bio_queue, bp);
		devstat_start_transaction_bio(softc->device_stats, bp);

		scsi_send_receive(&start_ccb->csio,
		    /*retries*/4,
		    ptdone,
		    MSG_SIMPLE_Q_TAG,
		    bp->bio_cmd == BIO_READ,
		    /*byte2*/0,
		    bp->bio_bcount,
		    bp->bio_data,
		    /*sense_len*/SSD_FULL_SIZE,
		    /*timeout*/softc->io_timeout);

		start_ccb->ccb_h.ccb_state = PT_CCB_BUFFER_IO_UA;

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h,
		    periph_links.le);

		start_ccb->ccb_h.ccb_bp = bp;
		bp = bioq_first(&softc->bio_queue);

		xpt_action(start_ccb);

		if (bp != NULL) {
			/* Have more work to do, so ensure we stay scheduled */
			xpt_schedule(periph, CAM_PRIORITY_NORMAL);
		}
	}
}

static void
ptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct pt_softc *softc;
	struct ccb_scsiio *csio;

	softc = (struct pt_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ptdone\n"));

	csio = &done_ccb->csio;
	switch (csio->ccb_h.ccb_state) {
	case PT_CCB_BUFFER_IO:
	case PT_CCB_BUFFER_IO_UA:
	{
		struct bio *bp;

		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;
			int sf;

			if ((csio->ccb_h.ccb_state & PT_CCB_RETRY_UA) != 0)
				sf = SF_RETRY_UA;
			else
				sf = 0;

			error = pterror(done_ccb, CAM_RETRY_SELTO, sf);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			}
			if (error != 0) {
				if (error == ENXIO) {
					/*
					 * Catastrophic error. Mark our device
					 * as invalid.
*/ xpt_print(periph->path, "Invalidating device\n"); softc->flags |= PT_FLAG_DEVICE_INVALID; } /* * return all queued I/O with EIO, so that * the client can retry these I/Os in the * proper order should it attempt to recover. */ bioq_flush(&softc->bio_queue, NULL, EIO); bp->bio_error = error; bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } else { bp->bio_resid = csio->resid; bp->bio_error = 0; if (bp->bio_resid != 0) { /* Short transfer ??? */ bp->bio_flags |= BIO_ERROR; } } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } else { bp->bio_resid = csio->resid; if (bp->bio_resid != 0) bp->bio_flags |= BIO_ERROR; } /* * Block out any asynchronous callbacks * while we touch the pending ccb list. */ LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); biofinish(bp, softc->device_stats, 0); break; } } xpt_release_ccb(done_ccb); } static int pterror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct pt_softc *softc; struct cam_periph *periph; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct pt_softc *)periph->softc; return(cam_periph_error(ccb, cam_flags, sense_flags)); } static int ptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct cam_periph *periph; struct pt_softc *softc; int error = 0; periph = (struct cam_periph *)dev->si_drv1; softc = (struct pt_softc *)periph->softc; cam_periph_lock(periph); switch(cmd) { case PTIOCGETTIMEOUT: if (softc->io_timeout >= 1000) *(int *)addr = softc->io_timeout / 1000; else *(int *)addr = 0; break; case PTIOCSETTIMEOUT: if (*(int *)addr < 1) { error = EINVAL; break; } softc->io_timeout = *(int *)addr * 1000; break; default: error = cam_periph_ioctl(periph, cmd, addr, pterror); break; } cam_periph_unlock(periph); return(error); } void scsi_send_receive(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int tag_action, int readop, u_int byte2, u_int32_t xfer_len, u_int8_t *data_ptr, u_int8_t sense_len, u_int32_t timeout) { struct scsi_send_receive *scsi_cmd; scsi_cmd = (struct scsi_send_receive *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = readop ? RECEIVE : SEND; scsi_cmd->byte2 = byte2; scsi_ulto3b(xfer_len, scsi_cmd->xfer_len); scsi_cmd->control = 0; cam_fill_csio(csio, retries, cbfcnp, /*flags*/readop ? CAM_DIR_IN : CAM_DIR_OUT, tag_action, data_ptr, xfer_len, sense_len, sizeof(*scsi_cmd), timeout); } Index: head/sys/cam/scsi/scsi_sa.c =================================================================== --- head/sys/cam/scsi/scsi_sa.c (revision 328917) +++ head/sys/cam/scsi/scsi_sa.c (revision 328918) @@ -1,5918 +1,5918 @@ /*- * Implementation of SCSI Sequential Access Peripheral driver for CAM. * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1999, 2000 Matthew Jacob * Copyright (c) 2013, 2014, 2015 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #ifdef _KERNEL #include #include #endif #include #include #include #include #include #include #ifdef _KERNEL #include #include #include #include #endif #include #include #ifndef _KERNEL #include #include #endif #include #include #include #include #include #include #include #include #ifdef _KERNEL #include "opt_sa.h" #ifndef SA_IO_TIMEOUT #define SA_IO_TIMEOUT 32 #endif #ifndef SA_SPACE_TIMEOUT #define SA_SPACE_TIMEOUT 1 * 60 #endif #ifndef SA_REWIND_TIMEOUT #define SA_REWIND_TIMEOUT 2 * 60 #endif #ifndef SA_ERASE_TIMEOUT #define SA_ERASE_TIMEOUT 4 * 60 #endif #ifndef SA_REP_DENSITY_TIMEOUT #define SA_REP_DENSITY_TIMEOUT 90 #endif #define SCSIOP_TIMEOUT (60 * 1000) /* not an option */ #define IO_TIMEOUT (SA_IO_TIMEOUT * 60 * 1000) #define REWIND_TIMEOUT (SA_REWIND_TIMEOUT * 60 * 1000) #define ERASE_TIMEOUT (SA_ERASE_TIMEOUT * 60 * 1000) #define SPACE_TIMEOUT (SA_SPACE_TIMEOUT * 60 * 1000) #define REP_DENSITY_TIMEOUT (SA_REP_DENSITY_TIMEOUT * 60 * 1000) /* * Additional options that can be set for config: SA_1FM_AT_EOT */ #ifndef UNUSED_PARAMETER #define UNUSED_PARAMETER(x) x = x #endif #define QFRLS(ccb) \ if (((ccb)->ccb_h.status & CAM_DEV_QFRZN) != 0) \ cam_release_devq((ccb)->ccb_h.path, 0, 0, 0, FALSE) /* * Driver states */ static MALLOC_DEFINE(M_SCSISA, "SCSI sa", "SCSI sequential access buffers"); typedef enum { SA_STATE_NORMAL, SA_STATE_ABNORMAL } sa_state; #define ccb_pflags ppriv_field0 #define ccb_bp ppriv_ptr1 /* bits in ccb_pflags */ #define SA_POSITION_UPDATED 0x1 typedef enum { SA_FLAG_OPEN = 0x0001, SA_FLAG_FIXED = 0x0002, SA_FLAG_TAPE_LOCKED = 0x0004, SA_FLAG_TAPE_MOUNTED = 0x0008, SA_FLAG_TAPE_WP = 0x0010, SA_FLAG_TAPE_WRITTEN = 0x0020, SA_FLAG_EOM_PENDING = 0x0040, SA_FLAG_EIO_PENDING = 0x0080, SA_FLAG_EOF_PENDING = 0x0100, SA_FLAG_ERR_PENDING = (SA_FLAG_EOM_PENDING|SA_FLAG_EIO_PENDING| SA_FLAG_EOF_PENDING), SA_FLAG_INVALID = 0x0200, SA_FLAG_COMP_ENABLED = 0x0400, SA_FLAG_COMP_SUPP = 0x0800, SA_FLAG_COMP_UNSUPP = 0x1000, SA_FLAG_TAPE_FROZEN = 0x2000, SA_FLAG_PROTECT_SUPP = 0x4000, SA_FLAG_COMPRESSION = (SA_FLAG_COMP_SUPP|SA_FLAG_COMP_ENABLED| SA_FLAG_COMP_UNSUPP), SA_FLAG_SCTX_INIT = 0x8000 } sa_flags; typedef enum { SA_MODE_REWIND = 0x00, SA_MODE_NOREWIND = 0x01, SA_MODE_OFFLINE = 0x02 } sa_mode; typedef enum { SA_PARAM_NONE = 0x000, SA_PARAM_BLOCKSIZE = 0x001, SA_PARAM_DENSITY = 0x002, SA_PARAM_COMPRESSION = 0x004, SA_PARAM_BUFF_MODE = 0x008, SA_PARAM_NUMBLOCKS = 0x010, SA_PARAM_WP = 0x020, SA_PARAM_SPEED = 0x040, SA_PARAM_DENSITY_EXT = 0x080, SA_PARAM_LBP = 0x100, SA_PARAM_ALL = 0x1ff } sa_params; typedef enum { SA_QUIRK_NONE = 0x000, SA_QUIRK_NOCOMP = 0x001, /* Can't deal with compression at all*/ SA_QUIRK_FIXED = 0x002, /* Force fixed mode */ SA_QUIRK_VARIABLE = 0x004, 
/* Force variable mode */ SA_QUIRK_2FM = 0x008, /* Needs Two File Marks at EOD */ SA_QUIRK_1FM = 0x010, /* No more than 1 File Mark at EOD */ SA_QUIRK_NODREAD = 0x020, /* Don't try and dummy read density */ SA_QUIRK_NO_MODESEL = 0x040, /* Don't do mode select at all */ SA_QUIRK_NO_CPAGE = 0x080, /* Don't use DEVICE COMPRESSION page */ SA_QUIRK_NO_LONG_POS = 0x100 /* No long position information */ } sa_quirks; #define SA_QUIRK_BIT_STRING \ "\020" \ "\001NOCOMP" \ "\002FIXED" \ "\003VARIABLE" \ "\0042FM" \ "\0051FM" \ "\006NODREAD" \ "\007NO_MODESEL" \ "\010NO_CPAGE" \ "\011NO_LONG_POS" #define SAMODE(z) (dev2unit(z) & 0x3) #define SA_IS_CTRL(z) (dev2unit(z) & (1 << 4)) #define SA_NOT_CTLDEV 0 #define SA_CTLDEV 1 #define SA_ATYPE_R 0 #define SA_ATYPE_NR 1 #define SA_ATYPE_ER 2 #define SA_NUM_ATYPES 3 #define SAMINOR(ctl, access) \ ((ctl << 4) | (access & 0x3)) struct sa_devs { struct cdev *ctl_dev; struct cdev *r_dev; struct cdev *nr_dev; struct cdev *er_dev; }; #define SASBADDBASE(sb, indent, data, xfmt, name, type, xsize, desc) \ sbuf_printf(sb, "%*s<%s type=\"%s\" size=\"%zd\" " \ "fmt=\"%s\" desc=\"%s\">" #xfmt "\n", indent, "", \ #name, #type, xsize, #xfmt, desc ? desc : "", data, #name); #define SASBADDINT(sb, indent, data, fmt, name) \ SASBADDBASE(sb, indent, data, fmt, name, int, sizeof(data), \ NULL) #define SASBADDINTDESC(sb, indent, data, fmt, name, desc) \ SASBADDBASE(sb, indent, data, fmt, name, int, sizeof(data), \ desc) #define SASBADDUINT(sb, indent, data, fmt, name) \ SASBADDBASE(sb, indent, data, fmt, name, uint, sizeof(data), \ NULL) #define SASBADDUINTDESC(sb, indent, data, fmt, name, desc) \ SASBADDBASE(sb, indent, data, fmt, name, uint, sizeof(data), \ desc) #define SASBADDFIXEDSTR(sb, indent, data, fmt, name) \ SASBADDBASE(sb, indent, data, fmt, name, str, sizeof(data), \ NULL) #define SASBADDFIXEDSTRDESC(sb, indent, data, fmt, name, desc) \ SASBADDBASE(sb, indent, data, fmt, name, str, sizeof(data), \ desc) #define SASBADDVARSTR(sb, indent, data, fmt, name, maxlen) \ SASBADDBASE(sb, indent, data, fmt, name, str, maxlen, NULL) #define SASBADDVARSTRDESC(sb, indent, data, fmt, name, maxlen, desc) \ SASBADDBASE(sb, indent, data, fmt, name, str, maxlen, desc) #define SASBADDNODE(sb, indent, name) { \ sbuf_printf(sb, "%*s<%s type=\"%s\">\n", indent, "", #name, \ "node"); \ indent += 2; \ } #define SASBADDNODENUM(sb, indent, name, num) { \ sbuf_printf(sb, "%*s<%s type=\"%s\" num=\"%d\">\n", indent, "", \ #name, "node", num); \ indent += 2; \ } #define SASBENDNODE(sb, indent, name) { \ indent -= 2; \ sbuf_printf(sb, "%*s\n", indent, "", #name); \ } #define SA_DENSITY_TYPES 4 struct sa_prot_state { int initialized; uint32_t prot_method; uint32_t pi_length; uint32_t lbp_w; uint32_t lbp_r; uint32_t rbdp; }; struct sa_prot_info { struct sa_prot_state cur_prot_state; struct sa_prot_state pending_prot_state; }; /* * A table mapping protection parameters to their types and values. 
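 * The value pointers are left NULL here; sapopulateprots() later points
 * each entry at the corresponding field of a specific sa_prot_state,
 * using the offset recorded in the entry.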
*/ struct sa_prot_map { char *name; mt_param_set_type param_type; off_t offset; uint32_t min_val; uint32_t max_val; uint32_t *value; } sa_prot_table[] = { { "prot_method", MT_PARAM_SET_UNSIGNED, __offsetof(struct sa_prot_state, prot_method), /*min_val*/ 0, /*max_val*/ 255, NULL }, { "pi_length", MT_PARAM_SET_UNSIGNED, __offsetof(struct sa_prot_state, pi_length), /*min_val*/ 0, /*max_val*/ SA_CTRL_DP_PI_LENGTH_MASK, NULL }, { "lbp_w", MT_PARAM_SET_UNSIGNED, __offsetof(struct sa_prot_state, lbp_w), /*min_val*/ 0, /*max_val*/ 1, NULL }, { "lbp_r", MT_PARAM_SET_UNSIGNED, __offsetof(struct sa_prot_state, lbp_r), /*min_val*/ 0, /*max_val*/ 1, NULL }, { "rbdp", MT_PARAM_SET_UNSIGNED, __offsetof(struct sa_prot_state, rbdp), /*min_val*/ 0, /*max_val*/ 1, NULL } }; #define SA_NUM_PROT_ENTS nitems(sa_prot_table) #define SA_PROT_ENABLED(softc) ((softc->flags & SA_FLAG_PROTECT_SUPP) \ && (softc->prot_info.cur_prot_state.initialized != 0) \ && (softc->prot_info.cur_prot_state.prot_method != 0)) #define SA_PROT_LEN(softc) softc->prot_info.cur_prot_state.pi_length struct sa_softc { sa_state state; sa_flags flags; sa_quirks quirks; u_int si_flags; struct cam_periph *periph; struct bio_queue_head bio_queue; int queue_count; struct devstat *device_stats; struct sa_devs devs; int open_count; int num_devs_to_destroy; int blk_gran; int blk_mask; int blk_shift; u_int32_t max_blk; u_int32_t min_blk; u_int32_t maxio; u_int32_t cpi_maxio; int allow_io_split; int inject_eom; int set_pews_status; u_int32_t comp_algorithm; u_int32_t saved_comp_algorithm; u_int32_t media_blksize; u_int32_t last_media_blksize; u_int32_t media_numblks; u_int8_t media_density; u_int8_t speed; u_int8_t scsi_rev; u_int8_t dsreg; /* mtio mt_dsreg, redux */ int buffer_mode; int filemarks; union ccb saved_ccb; int last_resid_was_io; uint8_t density_type_bits[SA_DENSITY_TYPES]; int density_info_valid[SA_DENSITY_TYPES]; uint8_t density_info[SA_DENSITY_TYPES][SRDS_MAX_LENGTH]; struct sa_prot_info prot_info; int sili; int eot_warn; /* * Current position information. -1 means that the given value is * unknown. fileno and blkno are always calculated. blkno is * relative to the previous file mark. rep_fileno and rep_blkno * are as reported by the drive, if it supports the long form * report for the READ POSITION command. rep_blkno is relative to * the beginning of the partition. * * bop means that the drive is at the beginning of the partition. * eop means that the drive is between early warning and end of * partition, inside the current partition. 
* bpew means that the position is in a PEWZ (Programmable Early * Warning Zone) */ daddr_t partition; /* Absolute from BOT */ daddr_t fileno; /* Relative to beginning of partition */ daddr_t blkno; /* Relative to last file mark */ daddr_t rep_blkno; /* Relative to beginning of partition */ daddr_t rep_fileno; /* Relative to beginning of partition */ int bop; /* Beginning of Partition */ int eop; /* End of Partition */ int bpew; /* Beyond Programmable Early Warning */ /* * Latched Error Info */ struct { struct scsi_sense_data _last_io_sense; u_int64_t _last_io_resid; u_int8_t _last_io_cdb[CAM_MAX_CDBLEN]; struct scsi_sense_data _last_ctl_sense; u_int64_t _last_ctl_resid; u_int8_t _last_ctl_cdb[CAM_MAX_CDBLEN]; #define last_io_sense errinfo._last_io_sense #define last_io_resid errinfo._last_io_resid #define last_io_cdb errinfo._last_io_cdb #define last_ctl_sense errinfo._last_ctl_sense #define last_ctl_resid errinfo._last_ctl_resid #define last_ctl_cdb errinfo._last_ctl_cdb } errinfo; /* * Misc other flags/state */ u_int32_t : 29, open_rdonly : 1, /* open read-only */ open_pending_mount : 1, /* open pending mount */ ctrl_mode : 1; /* control device open */ struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; }; struct sa_quirk_entry { struct scsi_inquiry_pattern inq_pat; /* matching pattern */ sa_quirks quirks; /* specific quirk type */ u_int32_t prefblk; /* preferred blocksize when in fixed mode */ }; static struct sa_quirk_entry sa_quirk_table[] = { { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "OnStream", "ADR*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_NODREAD | SA_QUIRK_1FM|SA_QUIRK_NO_MODESEL, 32768 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "Python 06408*", "*"}, SA_QUIRK_NODREAD, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "Python 25601*", "*"}, SA_QUIRK_NOCOMP|SA_QUIRK_NODREAD, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "Python*", "*"}, SA_QUIRK_NODREAD, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "VIPER 150*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "VIPER 2525 25462", "-011"}, SA_QUIRK_NOCOMP|SA_QUIRK_1FM|SA_QUIRK_NODREAD, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "VIPER 2525*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 1024 }, #if 0 { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "HP", "C15*", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_NO_CPAGE, 0, }, #endif { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "HP", "C56*", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_2FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "HP", "T20*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "HP", "T4000*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "HP", "HP-88780*", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_2FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY", "*", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_2FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "M4 DATA", "123107 SCSI*", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_2FM, 0 }, { /* jreynold@primenet.com */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "Seagate", "STT8000N*", "*"}, SA_QUIRK_1FM, 0 }, { /* mike@sentex.net */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "Seagate", "STT20000*", "*"}, SA_QUIRK_1FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "SEAGATE", "DAT 06241-XXX", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_2FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 3600", "U07:"}, SA_QUIRK_NOCOMP|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 3800", "*"}, SA_QUIRK_NOCOMP|SA_QUIRK_1FM, 
512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 4100", "*"}, SA_QUIRK_NOCOMP|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 4200", "*"}, SA_QUIRK_NOCOMP|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " SLR*", "*"}, SA_QUIRK_1FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "5525ES*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "51000*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 1024 } }; static d_open_t saopen; static d_close_t saclose; static d_strategy_t sastrategy; static d_ioctl_t saioctl; static periph_init_t sainit; static periph_ctor_t saregister; static periph_oninv_t saoninvalidate; static periph_dtor_t sacleanup; static periph_start_t sastart; static void saasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void sadone(struct cam_periph *periph, union ccb *start_ccb); static int saerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int samarkswanted(struct cam_periph *); static int sacheckeod(struct cam_periph *periph); static int sagetparams(struct cam_periph *periph, sa_params params_to_get, u_int32_t *blocksize, u_int8_t *density, u_int32_t *numblocks, int *buff_mode, u_int8_t *write_protect, u_int8_t *speed, int *comp_supported, int *comp_enabled, u_int32_t *comp_algorithm, sa_comp_t *comp_page, struct scsi_control_data_prot_subpage *prot_page, int dp_size, int prot_changeable); static int sasetprot(struct cam_periph *periph, struct sa_prot_state *new_prot); static int sasetparams(struct cam_periph *periph, sa_params params_to_set, u_int32_t blocksize, u_int8_t density, u_int32_t comp_algorithm, u_int32_t sense_flags); static int sasetsili(struct cam_periph *periph, struct mtparamset *ps, int num_params); static int saseteotwarn(struct cam_periph *periph, struct mtparamset *ps, int num_params); static void safillprot(struct sa_softc *softc, int *indent, struct sbuf *sb); static void sapopulateprots(struct sa_prot_state *cur_state, struct sa_prot_map *new_table, int table_ents); static struct sa_prot_map *safindprotent(char *name, struct sa_prot_map *table, int table_ents); static int sasetprotents(struct cam_periph *periph, struct mtparamset *ps, int num_params); static struct sa_param_ent *safindparament(struct mtparamset *ps); static int saparamsetlist(struct cam_periph *periph, struct mtsetlist *list, int need_copy); static int saextget(struct cdev *dev, struct cam_periph *periph, struct sbuf *sb, struct mtextget *g); static int saparamget(struct sa_softc *softc, struct sbuf *sb); static void saprevent(struct cam_periph *periph, int action); static int sarewind(struct cam_periph *periph); static int saspace(struct cam_periph *periph, int count, scsi_space_code code); static void sadevgonecb(void *arg); static void sasetupdev(struct sa_softc *softc, struct cdev *dev); static int samount(struct cam_periph *, int, struct cdev *); static int saretension(struct cam_periph *periph); static int sareservereleaseunit(struct cam_periph *periph, int reserve); static int saloadunload(struct cam_periph *periph, int load); static int saerase(struct cam_periph *periph, int longerase); static int sawritefilemarks(struct cam_periph *periph, int nmarks, int setmarks, int immed); static int sagetpos(struct cam_periph *periph); static int sardpos(struct cam_periph *periph, int, u_int32_t *); static int sasetpos(struct cam_periph *periph, int, struct mtlocate *); static void safilldenstypesb(struct sbuf *sb, int 
*indent, uint8_t *buf, int buf_len, int is_density); static void safilldensitysb(struct sa_softc *softc, int *indent, struct sbuf *sb); #ifndef SA_DEFAULT_IO_SPLIT #define SA_DEFAULT_IO_SPLIT 0 #endif static int sa_allow_io_split = SA_DEFAULT_IO_SPLIT; /* * Tunable to allow the user to set a global allow_io_split value. Note * that this WILL GO AWAY in FreeBSD 11.0. Silently splitting the I/O up * is bad behavior, because it hides the true tape block size from the * application. */ static SYSCTL_NODE(_kern_cam, OID_AUTO, sa, CTLFLAG_RD, 0, "CAM Sequential Access Tape Driver"); SYSCTL_INT(_kern_cam_sa, OID_AUTO, allow_io_split, CTLFLAG_RDTUN, &sa_allow_io_split, 0, "Default I/O split value"); static struct periph_driver sadriver = { sainit, "sa", TAILQ_HEAD_INITIALIZER(sadriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(sa, sadriver); /* For 2.2-stable support */ #ifndef D_TAPE #define D_TAPE 0 #endif static struct cdevsw sa_cdevsw = { .d_version = D_VERSION, .d_open = saopen, .d_close = saclose, .d_read = physread, .d_write = physwrite, .d_ioctl = saioctl, .d_strategy = sastrategy, .d_name = "sa", .d_flags = D_TAPE | D_TRACKCLOSE, }; static int saopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct sa_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; - if (cam_periph_acquire(periph) != CAM_REQ_CMP) { + if (cam_periph_acquire(periph) != 0) { return (ENXIO); } cam_periph_lock(periph); softc = (struct sa_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE|CAM_DEBUG_INFO, ("saopen(%s): softc=0x%x\n", devtoname(dev), softc->flags)); if (SA_IS_CTRL(dev)) { softc->ctrl_mode = 1; softc->open_count++; cam_periph_unlock(periph); return (0); } if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } if (softc->flags & SA_FLAG_OPEN) { error = EBUSY; } else if (softc->flags & SA_FLAG_INVALID) { error = ENXIO; } else { /* * Preserve whether this is a read_only open. */ softc->open_rdonly = (flags & O_RDWR) == O_RDONLY; /* * The function samount ensures media is loaded and ready. * It also does a device RESERVE if the tape isn't yet mounted. * * If the mount fails and this was a non-blocking open, * make this a 'open_pending_mount' action. 
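	 *
	 * (That is, something like
	 *
	 *	fd = open("/dev/nsa0", O_RDONLY | O_NONBLOCK);
	 *
	 * succeeds even with no tape loaded; the actual mount is then
	 * retried on the first read, write or ioctl. The device path is
	 * only an example.)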
*/ error = samount(periph, flags, dev); if (error && (flags & O_NONBLOCK)) { softc->flags |= SA_FLAG_OPEN; softc->open_pending_mount = 1; softc->open_count++; cam_periph_unhold(periph); cam_periph_unlock(periph); return (0); } } if (error) { cam_periph_unhold(periph); cam_periph_unlock(periph); cam_periph_release(periph); return (error); } saprevent(periph, PR_PREVENT); softc->flags |= SA_FLAG_OPEN; softc->open_count++; cam_periph_unhold(periph); cam_periph_unlock(periph); return (error); } static int saclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct sa_softc *softc; int mode, error, writing, tmp, i; int closedbits = SA_FLAG_OPEN; mode = SAMODE(dev); periph = (struct cam_periph *)dev->si_drv1; cam_periph_lock(periph); softc = (struct sa_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE|CAM_DEBUG_INFO, ("saclose(%s): softc=0x%x\n", devtoname(dev), softc->flags)); softc->open_rdonly = 0; if (SA_IS_CTRL(dev)) { softc->ctrl_mode = 0; softc->open_count--; cam_periph_unlock(periph); cam_periph_release(periph); return (0); } if (softc->open_pending_mount) { softc->flags &= ~SA_FLAG_OPEN; softc->open_pending_mount = 0; softc->open_count--; cam_periph_unlock(periph); cam_periph_release(periph); return (0); } if ((error = cam_periph_hold(periph, PRIBIO)) != 0) { cam_periph_unlock(periph); return (error); } /* * Were we writing the tape? */ writing = (softc->flags & SA_FLAG_TAPE_WRITTEN) != 0; /* * See whether or not we need to write filemarks. If this * fails, we probably have to assume we've lost tape * position. */ error = sacheckeod(periph); if (error) { xpt_print(periph->path, "failed to write terminating filemark(s)\n"); softc->flags |= SA_FLAG_TAPE_FROZEN; } /* * Whatever we end up doing, allow users to eject tapes from here on. */ saprevent(periph, PR_ALLOW); /* * Decide how to end... */ if ((softc->flags & SA_FLAG_TAPE_MOUNTED) == 0) { closedbits |= SA_FLAG_TAPE_FROZEN; } else switch (mode) { case SA_MODE_OFFLINE: /* * An 'offline' close is an unconditional release of * frozen && mount conditions, irrespective of whether * these operations succeeded. The reason for this is * to allow at least some kind of programmatic way * around our state getting all fouled up. If somebody * issues an 'offline' command, that will be allowed * to clear state. */ (void) sarewind(periph); (void) saloadunload(periph, FALSE); closedbits |= SA_FLAG_TAPE_MOUNTED|SA_FLAG_TAPE_FROZEN; break; case SA_MODE_REWIND: /* * If the rewind fails, return an error- if anyone cares, * but not overwriting any previous error. * * We don't clear the notion of mounted here, but we do * clear the notion of frozen if we successfully rewound. */ tmp = sarewind(periph); if (tmp) { if (error != 0) error = tmp; } else { closedbits |= SA_FLAG_TAPE_FROZEN; } break; case SA_MODE_NOREWIND: /* * If we're not rewinding/unloading the tape, find out * whether we need to back up over one of two filemarks * we wrote (if we wrote two filemarks) so that appends * from this point on will be sane. 
 */
		if (error == 0 && writing && (softc->quirks & SA_QUIRK_2FM)) {
			tmp = saspace(periph, -1, SS_FILEMARKS);
			if (tmp) {
				xpt_print(periph->path, "unable to backspace "
				    "over one of double filemarks at end of "
				    "tape\n");
				xpt_print(periph->path, "it is possible that "
				    "this device needs a SA_QUIRK_1FM quirk "
				    "set for it\n");
				softc->flags |= SA_FLAG_TAPE_FROZEN;
			}
		}
		break;
	default:
		xpt_print(periph->path, "unknown mode 0x%x in saclose\n",
		    mode);
		/* NOTREACHED */
		break;
	}

	/*
	 * We wish to note here that there are no more filemarks to be written.
	 */
	softc->filemarks = 0;
	softc->flags &= ~SA_FLAG_TAPE_WRITTEN;

	/*
	 * And we are no longer open for business.
	 */
	softc->flags &= ~closedbits;
	softc->open_count--;

	/*
	 * Invalidate any density information that depends on having tape
	 * media in the drive.
	 */
	for (i = 0; i < SA_DENSITY_TYPES; i++) {
		if (softc->density_type_bits[i] & SRDS_MEDIA)
			softc->density_info_valid[i] = 0;
	}

	/*
	 * Inform users if tape state is frozen....
	 */
	if (softc->flags & SA_FLAG_TAPE_FROZEN) {
		xpt_print(periph->path, "tape is now frozen- use an OFFLINE, "
		    "REWIND or MTEOM command to clear this state.\n");
	}

	/* release the device if it is no longer mounted */
	if ((softc->flags & SA_FLAG_TAPE_MOUNTED) == 0)
		sareservereleaseunit(periph, FALSE);

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	cam_periph_release(periph);

	return (error);
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand. The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
sastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct sa_softc *softc;

	bp->bio_resid = bp->bio_bcount;
	if (SA_IS_CTRL(bp->bio_dev)) {
		biofinish(bp, NULL, EINVAL);
		return;
	}
	periph = (struct cam_periph *)bp->bio_dev->si_drv1;
	cam_periph_lock(periph);

	softc = (struct sa_softc *)periph->softc;

	if (softc->flags & SA_FLAG_INVALID) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	if (softc->flags & SA_FLAG_TAPE_FROZEN) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, EPERM);
		return;
	}

	/*
	 * This should actually never occur as the write(2)
	 * system call traps attempts to write to a read-only
	 * file descriptor.
	 */
	if (bp->bio_cmd == BIO_WRITE && softc->open_rdonly) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, EBADF);
		return;
	}

	if (softc->open_pending_mount) {
		int error = samount(periph, 0, bp->bio_dev);
		if (error) {
			cam_periph_unlock(periph);
			biofinish(bp, NULL, ENXIO);
			return;
		}
		saprevent(periph, PR_PREVENT);
		softc->open_pending_mount = 0;
	}

	/*
	 * If it's a null transfer, return immediately
	 */
	if (bp->bio_bcount == 0) {
		cam_periph_unlock(periph);
		biodone(bp);
		return;
	}

	/* valid request? */
	if (softc->flags & SA_FLAG_FIXED) {
		/*
		 * Fixed block device. The byte count must
		 * be a multiple of our block size.
		 */
		if (((softc->blk_mask != ~0) &&
		    ((bp->bio_bcount & softc->blk_mask) != 0)) ||
		    ((softc->blk_mask == ~0) &&
		    ((bp->bio_bcount % softc->min_blk) != 0))) {
			xpt_print(periph->path, "Invalid request. Fixed block "
			    "device requests must be a multiple of %d bytes\n",
			    softc->min_blk);
			cam_periph_unlock(periph);
			biofinish(bp, NULL, EINVAL);
			return;
		}
	} else if ((bp->bio_bcount > softc->max_blk) ||
	    (bp->bio_bcount < softc->min_blk) ||
	    (bp->bio_bcount & softc->blk_mask) != 0) {
		xpt_print_path(periph->path);
		printf("Invalid request.
Variable block " "device requests must be "); if (softc->blk_mask != 0) { printf("a multiple of %d ", (0x1 << softc->blk_gran)); } printf("between %d and %d bytes\n", softc->min_blk, softc->max_blk); cam_periph_unlock(periph); biofinish(bp, NULL, EINVAL); return; } /* * Place it at the end of the queue. */ bioq_insert_tail(&softc->bio_queue, bp); softc->queue_count++; #if 0 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("sastrategy: queuing a %ld %s byte %s\n", bp->bio_bcount, (softc->flags & SA_FLAG_FIXED)? "fixed" : "variable", (bp->bio_cmd == BIO_READ)? "read" : "write")); #endif if (softc->queue_count > 1) { CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("sastrategy: queue count now %d\n", softc->queue_count)); } /* * Schedule ourselves for performing the work. */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); cam_periph_unlock(periph); return; } static int sasetsili(struct cam_periph *periph, struct mtparamset *ps, int num_params) { uint32_t sili_blocksize; struct sa_softc *softc; int error; error = 0; softc = (struct sa_softc *)periph->softc; if (ps->value_type != MT_PARAM_SET_SIGNED) { snprintf(ps->error_str, sizeof(ps->error_str), "sili is a signed parameter"); goto bailout; } if ((ps->value.value_signed < 0) || (ps->value.value_signed > 1)) { snprintf(ps->error_str, sizeof(ps->error_str), "invalid sili value %jd", (intmax_t)ps->value.value_signed); goto bailout_error; } /* * We only set the SILI flag in variable block * mode. You'll get a check condition in fixed * block mode if things don't line up in any case. */ if (softc->flags & SA_FLAG_FIXED) { snprintf(ps->error_str, sizeof(ps->error_str), "can't set sili bit in fixed block mode"); goto bailout_error; } if (softc->sili == ps->value.value_signed) goto bailout; if (ps->value.value_signed == 1) sili_blocksize = 4; else sili_blocksize = 0; error = sasetparams(periph, SA_PARAM_BLOCKSIZE, sili_blocksize, 0, 0, SF_QUIET_IR); if (error != 0) { snprintf(ps->error_str, sizeof(ps->error_str), "sasetparams() returned error %d", error); goto bailout_error; } softc->sili = ps->value.value_signed; bailout: ps->status = MT_PARAM_STATUS_OK; return (error); bailout_error: ps->status = MT_PARAM_STATUS_ERROR; if (error == 0) error = EINVAL; return (error); } static int saseteotwarn(struct cam_periph *periph, struct mtparamset *ps, int num_params) { struct sa_softc *softc; int error; error = 0; softc = (struct sa_softc *)periph->softc; if (ps->value_type != MT_PARAM_SET_SIGNED) { snprintf(ps->error_str, sizeof(ps->error_str), "eot_warn is a signed parameter"); ps->status = MT_PARAM_STATUS_ERROR; goto bailout; } if ((ps->value.value_signed < 0) || (ps->value.value_signed > 1)) { snprintf(ps->error_str, sizeof(ps->error_str), "invalid eot_warn value %jd\n", (intmax_t)ps->value.value_signed); ps->status = MT_PARAM_STATUS_ERROR; goto bailout; } softc->eot_warn = ps->value.value_signed; ps->status = MT_PARAM_STATUS_OK; bailout: if (ps->status != MT_PARAM_STATUS_OK) error = EINVAL; return (error); } static void safillprot(struct sa_softc *softc, int *indent, struct sbuf *sb) { int tmpint; SASBADDNODE(sb, *indent, protection); if (softc->flags & SA_FLAG_PROTECT_SUPP) tmpint = 1; else tmpint = 0; SASBADDINTDESC(sb, *indent, tmpint, %d, protection_supported, "Set to 1 if protection information is supported"); if ((tmpint != 0) && (softc->prot_info.cur_prot_state.initialized != 0)) { struct sa_prot_state *prot; prot = &softc->prot_info.cur_prot_state; SASBADDUINTDESC(sb, *indent, prot->prot_method, %u, prot_method, "Current Protection Method"); SASBADDUINTDESC(sb, 
*indent, prot->pi_length, %u, pi_length, "Length of Protection Information"); SASBADDUINTDESC(sb, *indent, prot->lbp_w, %u, lbp_w, "Check Protection on Writes"); SASBADDUINTDESC(sb, *indent, prot->lbp_r, %u, lbp_r, "Check and Include Protection on Reads"); SASBADDUINTDESC(sb, *indent, prot->rbdp, %u, rbdp, "Transfer Protection Information for RECOVER " "BUFFERED DATA command"); } SASBENDNODE(sb, *indent, protection); } static void sapopulateprots(struct sa_prot_state *cur_state, struct sa_prot_map *new_table, int table_ents) { int i; bcopy(sa_prot_table, new_table, min(table_ents * sizeof(*new_table), sizeof(sa_prot_table))); table_ents = min(table_ents, SA_NUM_PROT_ENTS); for (i = 0; i < table_ents; i++) new_table[i].value = (uint32_t *)((uint8_t *)cur_state + new_table[i].offset); return; } static struct sa_prot_map * safindprotent(char *name, struct sa_prot_map *table, int table_ents) { char *prot_name = "protection."; int i, prot_len; prot_len = strlen(prot_name); /* * This shouldn't happen, but we check just in case. */ if (strncmp(name, prot_name, prot_len) != 0) goto bailout; for (i = 0; i < table_ents; i++) { if (strcmp(&name[prot_len], table[i].name) != 0) continue; return (&table[i]); } bailout: return (NULL); } static int sasetprotents(struct cam_periph *periph, struct mtparamset *ps, int num_params) { struct sa_softc *softc; struct sa_prot_map prot_ents[SA_NUM_PROT_ENTS]; struct sa_prot_state new_state; int error; int i; softc = (struct sa_softc *)periph->softc; error = 0; /* * Make sure that this tape drive supports protection information. * Otherwise we can't set anything. */ if ((softc->flags & SA_FLAG_PROTECT_SUPP) == 0) { snprintf(ps[0].error_str, sizeof(ps[0].error_str), "Protection information is not supported for this device"); ps[0].status = MT_PARAM_STATUS_ERROR; goto bailout; } /* * We can't operate with physio(9) splitting enabled, because there * is no way to insure (especially in variable block mode) that * what the user writes (with a checksum block at the end) will * make it into the sa(4) driver intact. */ if ((softc->si_flags & SI_NOSPLIT) == 0) { snprintf(ps[0].error_str, sizeof(ps[0].error_str), "Protection information cannot be enabled with I/O " "splitting"); ps[0].status = MT_PARAM_STATUS_ERROR; goto bailout; } /* * Take the current cached protection state and use that as the * basis for our new entries. */ bcopy(&softc->prot_info.cur_prot_state, &new_state, sizeof(new_state)); /* * Populate the table mapping property names to pointers into the * state structure. */ sapopulateprots(&new_state, prot_ents, SA_NUM_PROT_ENTS); /* * For each parameter the user passed in, make sure the name, type * and value are valid. 
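	 *
	 * (An illustrative caller, with values chosen only for the
	 * example, would fill out one entry like
	 *
	 *	struct mtparamset ps;
	 *
	 *	memset(&ps, 0, sizeof(ps));
	 *	strlcpy(ps.value_name, "protection.prot_method",
	 *	    sizeof(ps.value_name));
	 *	ps.value_type = MT_PARAM_SET_UNSIGNED;
	 *	ps.value.value_unsigned = 1;
	 *
	 * and hand it to the driver through the mtio(4) parameter-set
	 * ioctl (MTIOCPARAMSET, assumed here), which is dispatched by
	 * saioctl() below.)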
*/ for (i = 0; i < num_params; i++) { struct sa_prot_map *ent; ent = safindprotent(ps[i].value_name, prot_ents, SA_NUM_PROT_ENTS); if (ent == NULL) { ps[i].status = MT_PARAM_STATUS_ERROR; snprintf(ps[i].error_str, sizeof(ps[i].error_str), "Invalid protection entry name %s", ps[i].value_name); error = EINVAL; goto bailout; } if (ent->param_type != ps[i].value_type) { ps[i].status = MT_PARAM_STATUS_ERROR; snprintf(ps[i].error_str, sizeof(ps[i].error_str), "Supplied type %d does not match actual type %d", ps[i].value_type, ent->param_type); error = EINVAL; goto bailout; } if ((ps[i].value.value_unsigned < ent->min_val) || (ps[i].value.value_unsigned > ent->max_val)) { ps[i].status = MT_PARAM_STATUS_ERROR; snprintf(ps[i].error_str, sizeof(ps[i].error_str), "Value %ju is outside valid range %u - %u", (uintmax_t)ps[i].value.value_unsigned, ent->min_val, ent->max_val); error = EINVAL; goto bailout; } *(ent->value) = ps[i].value.value_unsigned; } /* * Actually send the protection settings to the drive. */ error = sasetprot(periph, &new_state); if (error != 0) { for (i = 0; i < num_params; i++) { ps[i].status = MT_PARAM_STATUS_ERROR; snprintf(ps[i].error_str, sizeof(ps[i].error_str), "Unable to set parameter, see dmesg(8)"); } goto bailout; } /* * Let the user know that his settings were stored successfully. */ for (i = 0; i < num_params; i++) ps[i].status = MT_PARAM_STATUS_OK; bailout: return (error); } /* * Entry handlers generally only handle a single entry. Node handlers will * handle a contiguous range of parameters to set in a single call. */ typedef enum { SA_PARAM_TYPE_ENTRY, SA_PARAM_TYPE_NODE } sa_param_type; struct sa_param_ent { char *name; sa_param_type param_type; int (*set_func)(struct cam_periph *periph, struct mtparamset *ps, int num_params); } sa_param_table[] = { {"sili", SA_PARAM_TYPE_ENTRY, sasetsili }, {"eot_warn", SA_PARAM_TYPE_ENTRY, saseteotwarn }, {"protection.", SA_PARAM_TYPE_NODE, sasetprotents } }; static struct sa_param_ent * safindparament(struct mtparamset *ps) { unsigned int i; for (i = 0; i < nitems(sa_param_table); i++){ /* * For entries, we compare all of the characters. For * nodes, we only compare the first N characters. The node * handler will decode the rest. */ if (sa_param_table[i].param_type == SA_PARAM_TYPE_ENTRY) { if (strcmp(ps->value_name, sa_param_table[i].name) != 0) continue; } else { if (strncmp(ps->value_name, sa_param_table[i].name, strlen(sa_param_table[i].name)) != 0) continue; } return (&sa_param_table[i]); } return (NULL); } /* * Go through a list of parameters, coalescing contiguous parameters with * the same parent node into a single call to a set_func. */ static int saparamsetlist(struct cam_periph *periph, struct mtsetlist *list, int need_copy) { int i, contig_ents; int error; struct mtparamset *params, *first; struct sa_param_ent *first_ent; error = 0; params = NULL; if (list->num_params == 0) /* Nothing to do */ goto bailout; /* * Verify that the user has the correct structure size. */ if ((list->num_params * sizeof(struct mtparamset)) != list->param_len) { xpt_print(periph->path, "%s: length of params %d != " "sizeof(struct mtparamset) %zd * num_params %d\n", __func__, list->param_len, sizeof(struct mtparamset), list->num_params); error = EINVAL; goto bailout; } if (need_copy != 0) { /* * XXX KDM will dropping the lock cause an issue here? 
*/ cam_periph_unlock(periph); params = malloc(list->param_len, M_SCSISA, M_WAITOK | M_ZERO); error = copyin(list->params, params, list->param_len); cam_periph_lock(periph); if (error != 0) goto bailout; } else { params = list->params; } contig_ents = 0; first = NULL; first_ent = NULL; for (i = 0; i < list->num_params; i++) { struct sa_param_ent *ent; ent = safindparament(¶ms[i]); if (ent == NULL) { snprintf(params[i].error_str, sizeof(params[i].error_str), "%s: cannot find parameter %s", __func__, params[i].value_name); params[i].status = MT_PARAM_STATUS_ERROR; break; } if (first != NULL) { if (first_ent == ent) { /* * We're still in a contiguous list of * parameters that can be handled by one * node handler. */ contig_ents++; continue; } else { error = first_ent->set_func(periph, first, contig_ents); first = NULL; first_ent = NULL; contig_ents = 0; if (error != 0) { error = 0; break; } } } if (ent->param_type == SA_PARAM_TYPE_NODE) { first = ¶ms[i]; first_ent = ent; contig_ents = 1; } else { error = ent->set_func(periph, ¶ms[i], 1); if (error != 0) { error = 0; break; } } } if (first != NULL) first_ent->set_func(periph, first, contig_ents); bailout: if (need_copy != 0) { if (error != EFAULT) { cam_periph_unlock(periph); copyout(params, list->params, list->param_len); cam_periph_lock(periph); } free(params, M_SCSISA); } return (error); } static int sagetparams_common(struct cdev *dev, struct cam_periph *periph) { struct sa_softc *softc; u_int8_t write_protect; int comp_enabled, comp_supported, error; softc = (struct sa_softc *)periph->softc; if (softc->open_pending_mount) return (0); /* The control device may issue getparams() if there are no opens. */ if (SA_IS_CTRL(dev) && (softc->flags & SA_FLAG_OPEN) != 0) return (0); error = sagetparams(periph, SA_PARAM_ALL, &softc->media_blksize, &softc->media_density, &softc->media_numblks, &softc->buffer_mode, &write_protect, &softc->speed, &comp_supported, &comp_enabled, &softc->comp_algorithm, NULL, NULL, 0, 0); if (error) return (error); if (write_protect) softc->flags |= SA_FLAG_TAPE_WP; else softc->flags &= ~SA_FLAG_TAPE_WP; softc->flags &= ~SA_FLAG_COMPRESSION; if (comp_supported) { if (softc->saved_comp_algorithm == 0) softc->saved_comp_algorithm = softc->comp_algorithm; softc->flags |= SA_FLAG_COMP_SUPP; if (comp_enabled) softc->flags |= SA_FLAG_COMP_ENABLED; } else softc->flags |= SA_FLAG_COMP_UNSUPP; return (0); } #define PENDING_MOUNT_CHECK(softc, periph, dev) \ if (softc->open_pending_mount) { \ error = samount(periph, 0, dev); \ if (error) { \ break; \ } \ saprevent(periph, PR_PREVENT); \ softc->open_pending_mount = 0; \ } static int saioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { struct cam_periph *periph; struct sa_softc *softc; scsi_space_code spaceop; int didlockperiph = 0; int mode; int error = 0; mode = SAMODE(dev); error = 0; /* shut up gcc */ spaceop = 0; /* shut up gcc */ periph = (struct cam_periph *)dev->si_drv1; cam_periph_lock(periph); softc = (struct sa_softc *)periph->softc; /* * Check for control mode accesses. We allow MTIOCGET and * MTIOCERRSTAT (but need to be the only one open in order * to clear latched status), and MTSETBSIZE, MTSETDNSTY * and MTCOMP (but need to be the only one accessing this * device to run those). */ if (SA_IS_CTRL(dev)) { switch (cmd) { case MTIOCGETEOTMODEL: case MTIOCGET: case MTIOCEXTGET: case MTIOCPARAMGET: case MTIOCRBLIM: break; case MTIOCERRSTAT: /* * If the periph isn't already locked, lock it * so our MTIOCERRSTAT can reset latched error stats. 
* * If the periph is already locked, skip it because * we're just getting status and it'll be up to the * other thread that has this device open to do * an MTIOCERRSTAT that would clear latched status. */ if ((periph->flags & CAM_PERIPH_LOCKED) == 0) { error = cam_periph_hold(periph, PRIBIO|PCATCH); if (error != 0) { cam_periph_unlock(periph); return (error); } didlockperiph = 1; } break; case MTIOCTOP: { struct mtop *mt = (struct mtop *) arg; /* * Check to make sure it's an OP we can perform * with no media inserted. */ switch (mt->mt_op) { case MTSETBSIZ: case MTSETDNSTY: case MTCOMP: mt = NULL; /* FALLTHROUGH */ default: break; } if (mt != NULL) { break; } /* FALLTHROUGH */ } case MTIOCSETEOTMODEL: /* * We need to acquire the peripheral here rather * than at open time because we are sharing writable * access to data structures. */ error = cam_periph_hold(periph, PRIBIO|PCATCH); if (error != 0) { cam_periph_unlock(periph); return (error); } didlockperiph = 1; break; default: cam_periph_unlock(periph); return (EINVAL); } } /* * Find the device that the user is talking about */ switch (cmd) { case MTIOCGET: { struct mtget *g = (struct mtget *)arg; error = sagetparams_common(dev, periph); if (error) break; bzero(g, sizeof(struct mtget)); g->mt_type = MT_ISAR; if (softc->flags & SA_FLAG_COMP_UNSUPP) { g->mt_comp = MT_COMP_UNSUPP; g->mt_comp0 = MT_COMP_UNSUPP; g->mt_comp1 = MT_COMP_UNSUPP; g->mt_comp2 = MT_COMP_UNSUPP; g->mt_comp3 = MT_COMP_UNSUPP; } else { if ((softc->flags & SA_FLAG_COMP_ENABLED) == 0) { g->mt_comp = MT_COMP_DISABLED; } else { g->mt_comp = softc->comp_algorithm; } g->mt_comp0 = softc->comp_algorithm; g->mt_comp1 = softc->comp_algorithm; g->mt_comp2 = softc->comp_algorithm; g->mt_comp3 = softc->comp_algorithm; } g->mt_density = softc->media_density; g->mt_density0 = softc->media_density; g->mt_density1 = softc->media_density; g->mt_density2 = softc->media_density; g->mt_density3 = softc->media_density; g->mt_blksiz = softc->media_blksize; g->mt_blksiz0 = softc->media_blksize; g->mt_blksiz1 = softc->media_blksize; g->mt_blksiz2 = softc->media_blksize; g->mt_blksiz3 = softc->media_blksize; g->mt_fileno = softc->fileno; g->mt_blkno = softc->blkno; g->mt_dsreg = (short) softc->dsreg; /* * Yes, we know that this is likely to overflow */ if (softc->last_resid_was_io) { if ((g->mt_resid = (short) softc->last_io_resid) != 0) { if (SA_IS_CTRL(dev) == 0 || didlockperiph) { softc->last_io_resid = 0; } } } else { if ((g->mt_resid = (short)softc->last_ctl_resid) != 0) { if (SA_IS_CTRL(dev) == 0 || didlockperiph) { softc->last_ctl_resid = 0; } } } error = 0; break; } case MTIOCEXTGET: case MTIOCPARAMGET: { struct mtextget *g = (struct mtextget *)arg; char *tmpstr2; struct sbuf *sb; /* * Report drive status using an XML format. */ /* * XXX KDM will dropping the lock cause any problems here? 
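 *
 * (A sketch of how a userland consumer might drive this interface;
 * hypothetical, though mt(1) follows the same pattern:
 *
 *	struct mtextget g = { .alloc_len = 4096 };
 *	g.status_xml = malloc(g.alloc_len);
 *	while (ioctl(fd, MTIOCEXTGET, &g) == 0 &&
 *	    g.status == MT_EXT_GET_NEED_MORE_SPACE) {
 *		g.alloc_len *= 2;
 *		g.status_xml = realloc(g.status_xml, g.alloc_len);
 *	}
 *
 * i.e. grow the buffer until the XML status fits.)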
*/ cam_periph_unlock(periph); sb = sbuf_new(NULL, NULL, g->alloc_len, SBUF_FIXEDLEN); if (sb == NULL) { g->status = MT_EXT_GET_ERROR; snprintf(g->error_str, sizeof(g->error_str), "Unable to allocate %d bytes for status info", g->alloc_len); cam_periph_lock(periph); goto extget_bailout; } cam_periph_lock(periph); if (cmd == MTIOCEXTGET) error = saextget(dev, periph, sb, g); else error = saparamget(softc, sb); if (error != 0) goto extget_bailout; error = sbuf_finish(sb); if (error == ENOMEM) { g->status = MT_EXT_GET_NEED_MORE_SPACE; error = 0; } else if (error != 0) { g->status = MT_EXT_GET_ERROR; snprintf(g->error_str, sizeof(g->error_str), "Error %d returned from sbuf_finish()", error); } else g->status = MT_EXT_GET_OK; error = 0; tmpstr2 = sbuf_data(sb); g->fill_len = strlen(tmpstr2) + 1; cam_periph_unlock(periph); error = copyout(tmpstr2, g->status_xml, g->fill_len); cam_periph_lock(periph); extget_bailout: sbuf_delete(sb); break; } case MTIOCPARAMSET: { struct mtsetlist list; struct mtparamset *ps = (struct mtparamset *)arg; bzero(&list, sizeof(list)); list.num_params = 1; list.param_len = sizeof(*ps); list.params = ps; error = saparamsetlist(periph, &list, /*need_copy*/ 0); break; } case MTIOCSETLIST: { struct mtsetlist *list = (struct mtsetlist *)arg; error = saparamsetlist(periph, list, /*need_copy*/ 1); break; } case MTIOCERRSTAT: { struct scsi_tape_errors *sep = &((union mterrstat *)arg)->scsi_errstat; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("saioctl: MTIOCERRSTAT\n")); bzero(sep, sizeof(*sep)); sep->io_resid = softc->last_io_resid; bcopy((caddr_t) &softc->last_io_sense, sep->io_sense, sizeof (sep->io_sense)); bcopy((caddr_t) &softc->last_io_cdb, sep->io_cdb, sizeof (sep->io_cdb)); sep->ctl_resid = softc->last_ctl_resid; bcopy((caddr_t) &softc->last_ctl_sense, sep->ctl_sense, sizeof (sep->ctl_sense)); bcopy((caddr_t) &softc->last_ctl_cdb, sep->ctl_cdb, sizeof (sep->ctl_cdb)); if ((SA_IS_CTRL(dev) == 0 && !softc->open_pending_mount) || didlockperiph) bzero((caddr_t) &softc->errinfo, sizeof (softc->errinfo)); error = 0; break; } case MTIOCTOP: { struct mtop *mt; int count; PENDING_MOUNT_CHECK(softc, periph, dev); mt = (struct mtop *)arg; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("saioctl: op=0x%x count=0x%x\n", mt->mt_op, mt->mt_count)); count = mt->mt_count; switch (mt->mt_op) { case MTWEOF: /* write an end-of-file marker */ /* * We don't need to clear the SA_FLAG_TAPE_WRITTEN * flag because by keeping track of filemarks * we have last written we know whether or not * we need to write more when we close the device. 
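 *
 * (Editor's sketch of a typical request that lands here, not from
 * the original source:
 *
 *	struct mtop mt = { .mt_op = MTWEOF, .mt_count = 2 };
 *	ioctl(fd, MTIOCTOP, &mt);
 *
 * writes two filemarks and waits for them to complete; MTWEOFI
 * below is the immediate, non-waiting variant.)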
*/ error = sawritefilemarks(periph, count, FALSE, FALSE); break; case MTWEOFI: /* write an end-of-file marker without waiting */ error = sawritefilemarks(periph, count, FALSE, TRUE); break; case MTWSS: /* write a setmark */ error = sawritefilemarks(periph, count, TRUE, FALSE); break; case MTBSR: /* backward space record */ case MTFSR: /* forward space record */ case MTBSF: /* backward space file */ case MTFSF: /* forward space file */ case MTBSS: /* backward space setmark */ case MTFSS: /* forward space setmark */ case MTEOD: /* space to end of recorded medium */ { int nmarks; spaceop = SS_FILEMARKS; nmarks = softc->filemarks; error = sacheckeod(periph); if (error) { xpt_print(periph->path, "EOD check prior to spacing failed\n"); softc->flags |= SA_FLAG_EIO_PENDING; break; } nmarks -= softc->filemarks; switch(mt->mt_op) { case MTBSR: count = -count; /* FALLTHROUGH */ case MTFSR: spaceop = SS_BLOCKS; break; case MTBSF: count = -count; /* FALLTHROUGH */ case MTFSF: break; case MTBSS: count = -count; /* FALLTHROUGH */ case MTFSS: spaceop = SS_SETMARKS; break; case MTEOD: spaceop = SS_EOD; count = 0; nmarks = 0; break; default: error = EINVAL; break; } if (error) break; nmarks = softc->filemarks; /* * XXX: Why are we checking again? */ error = sacheckeod(periph); if (error) break; nmarks -= softc->filemarks; error = saspace(periph, count - nmarks, spaceop); /* * At this point, clear that we've written the tape * and that we've written any filemarks. We really * don't know what the application wishes to do next- * the sacheckeod's will make sure we terminated the * tape correctly if we'd been writing, but the next * action the user application takes will set again * whether we need to write filemarks. */ softc->flags &= ~(SA_FLAG_TAPE_WRITTEN|SA_FLAG_TAPE_FROZEN); softc->filemarks = 0; break; } case MTREW: /* rewind */ PENDING_MOUNT_CHECK(softc, periph, dev); (void) sacheckeod(periph); error = sarewind(periph); /* see above */ softc->flags &= ~(SA_FLAG_TAPE_WRITTEN|SA_FLAG_TAPE_FROZEN); softc->flags &= ~SA_FLAG_ERR_PENDING; softc->filemarks = 0; break; case MTERASE: /* erase */ PENDING_MOUNT_CHECK(softc, periph, dev); error = saerase(periph, count); softc->flags &= ~(SA_FLAG_TAPE_WRITTEN|SA_FLAG_TAPE_FROZEN); softc->flags &= ~SA_FLAG_ERR_PENDING; break; case MTRETENS: /* re-tension tape */ PENDING_MOUNT_CHECK(softc, periph, dev); error = saretension(periph); softc->flags &= ~(SA_FLAG_TAPE_WRITTEN|SA_FLAG_TAPE_FROZEN); softc->flags &= ~SA_FLAG_ERR_PENDING; break; case MTOFFL: /* rewind and put the drive offline */ PENDING_MOUNT_CHECK(softc, periph, dev); (void) sacheckeod(periph); /* see above */ softc->flags &= ~SA_FLAG_TAPE_WRITTEN; softc->filemarks = 0; error = sarewind(periph); /* clear the frozen flag anyway */ softc->flags &= ~SA_FLAG_TAPE_FROZEN; /* * Be sure to allow media removal before ejecting.
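 *
 * (A userland "mt -f /dev/nsa0 offline", i.e. MTIOCTOP with mt_op
 * MTOFFL, is the usual way to reach this path; the PR_ALLOW below
 * undoes the PR_PREVENT that PENDING_MOUNT_CHECK may have taken.)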
*/ saprevent(periph, PR_ALLOW); if (error == 0) { error = saloadunload(periph, FALSE); if (error == 0) { softc->flags &= ~SA_FLAG_TAPE_MOUNTED; } } break; case MTLOAD: error = saloadunload(periph, TRUE); break; case MTNOP: /* no operation, sets status only */ case MTCACHE: /* enable controller cache */ case MTNOCACHE: /* disable controller cache */ error = 0; break; case MTSETBSIZ: /* Set block size for device */ PENDING_MOUNT_CHECK(softc, periph, dev); if ((softc->sili != 0) && (count != 0)) { xpt_print(periph->path, "Can't enter fixed " "block mode with SILI enabled\n"); error = EINVAL; break; } error = sasetparams(periph, SA_PARAM_BLOCKSIZE, count, 0, 0, 0); if (error == 0) { softc->last_media_blksize = softc->media_blksize; softc->media_blksize = count; if (count) { softc->flags |= SA_FLAG_FIXED; if (powerof2(count)) { softc->blk_shift = ffs(count) - 1; softc->blk_mask = count - 1; } else { softc->blk_mask = ~0; softc->blk_shift = 0; } /* * Make the user's desire 'persistent'. */ softc->quirks &= ~SA_QUIRK_VARIABLE; softc->quirks |= SA_QUIRK_FIXED; } else { softc->flags &= ~SA_FLAG_FIXED; if (softc->max_blk == 0) { softc->max_blk = ~0; } softc->blk_shift = 0; if (softc->blk_gran != 0) { softc->blk_mask = softc->blk_gran - 1; } else { softc->blk_mask = 0; } /* * Make the user's desire 'persistent'. */ softc->quirks |= SA_QUIRK_VARIABLE; softc->quirks &= ~SA_QUIRK_FIXED; } } break; case MTSETDNSTY: /* Set density for device and mode */ PENDING_MOUNT_CHECK(softc, periph, dev); if (count > UCHAR_MAX) { error = EINVAL; break; } else { error = sasetparams(periph, SA_PARAM_DENSITY, 0, count, 0, 0); } break; case MTCOMP: /* enable compression */ PENDING_MOUNT_CHECK(softc, periph, dev); /* * Some devices don't support compression, and * don't like it if you ask them for the * compression page. 
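 *
 * (Userland view, as an assumed example: "mt -f /dev/nsa0 comp on"
 * arrives here as MTCOMP with a count of 1, "comp off" as a count
 * of 0, and a numeric argument would pass a specific algorithm
 * value straight through to sasetparams() below.)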
*/ if ((softc->quirks & SA_QUIRK_NOCOMP) || (softc->flags & SA_FLAG_COMP_UNSUPP)) { error = ENODEV; break; } error = sasetparams(periph, SA_PARAM_COMPRESSION, 0, 0, count, SF_NO_PRINT); break; default: error = EINVAL; } break; } case MTIOCIEOT: case MTIOCEEOT: error = 0; break; case MTIOCRDSPOS: PENDING_MOUNT_CHECK(softc, periph, dev); error = sardpos(periph, 0, (u_int32_t *) arg); break; case MTIOCRDHPOS: PENDING_MOUNT_CHECK(softc, periph, dev); error = sardpos(periph, 1, (u_int32_t *) arg); break; case MTIOCSLOCATE: case MTIOCHLOCATE: { struct mtlocate locate_info; int hard; bzero(&locate_info, sizeof(locate_info)); locate_info.logical_id = *((uint32_t *)arg); if (cmd == MTIOCSLOCATE) hard = 0; else hard = 1; PENDING_MOUNT_CHECK(softc, periph, dev); error = sasetpos(periph, hard, &locate_info); break; } case MTIOCEXTLOCATE: PENDING_MOUNT_CHECK(softc, periph, dev); error = sasetpos(periph, /*hard*/ 0, (struct mtlocate *)arg); softc->flags &= ~(SA_FLAG_TAPE_WRITTEN|SA_FLAG_TAPE_FROZEN); softc->flags &= ~SA_FLAG_ERR_PENDING; softc->filemarks = 0; break; case MTIOCGETEOTMODEL: error = 0; if (softc->quirks & SA_QUIRK_1FM) mode = 1; else mode = 2; *((u_int32_t *) arg) = mode; break; case MTIOCSETEOTMODEL: error = 0; switch (*((u_int32_t *) arg)) { case 1: softc->quirks &= ~SA_QUIRK_2FM; softc->quirks |= SA_QUIRK_1FM; break; case 2: softc->quirks &= ~SA_QUIRK_1FM; softc->quirks |= SA_QUIRK_2FM; break; default: error = EINVAL; break; } break; case MTIOCRBLIM: { struct mtrblim *rblim; rblim = (struct mtrblim *)arg; rblim->granularity = softc->blk_gran; rblim->min_block_length = softc->min_blk; rblim->max_block_length = softc->max_blk; break; } default: error = cam_periph_ioctl(periph, cmd, arg, saerror); break; } /* * Check to see if we cleared a frozen state */ if (error == 0 && (softc->flags & SA_FLAG_TAPE_FROZEN)) { switch(cmd) { case MTIOCRDSPOS: case MTIOCRDHPOS: case MTIOCSLOCATE: case MTIOCHLOCATE: /* * XXX KDM look at this. */ softc->fileno = (daddr_t) -1; softc->blkno = (daddr_t) -1; softc->rep_blkno = (daddr_t) -1; softc->rep_fileno = (daddr_t) -1; softc->partition = (daddr_t) -1; softc->flags &= ~SA_FLAG_TAPE_FROZEN; xpt_print(periph->path, "tape state now unfrozen.\n"); break; default: break; } } if (didlockperiph) { cam_periph_unhold(periph); } cam_periph_unlock(periph); return (error); } static void sainit(void) { cam_status status; /* * Install a global async callback. */ status = xpt_register_async(AC_FOUND_DEVICE, saasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("sa: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void sadevgonecb(void *arg) { struct cam_periph *periph; struct mtx *mtx; struct sa_softc *softc; periph = (struct cam_periph *)arg; softc = (struct sa_softc *)periph->softc; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc->num_devs_to_destroy--; if (softc->num_devs_to_destroy == 0) { int i; /* * When we have gotten all of our callbacks, we will get * no more close calls from devfs. So if we have any * dangling opens, we need to release the reference held * for that particular context. */ for (i = 0; i < softc->open_count; i++) cam_periph_release_locked(periph); softc->open_count = 0; /* * Release the reference held for devfs, all of our * instances are gone now. */ cam_periph_release_locked(periph); } /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. 
If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. */ mtx_unlock(mtx); } static void saoninvalidate(struct cam_periph *periph) { struct sa_softc *softc; softc = (struct sa_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, saasync, periph, periph->path); softc->flags |= SA_FLAG_INVALID; /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ bioq_flush(&softc->bio_queue, NULL, ENXIO); softc->queue_count = 0; /* * Tell devfs that all of our devices have gone away, and ask for a * callback when it has cleaned up its state. */ destroy_dev_sched_cb(softc->devs.ctl_dev, sadevgonecb, periph); destroy_dev_sched_cb(softc->devs.r_dev, sadevgonecb, periph); destroy_dev_sched_cb(softc->devs.nr_dev, sadevgonecb, periph); destroy_dev_sched_cb(softc->devs.er_dev, sadevgonecb, periph); } static void sacleanup(struct cam_periph *periph) { struct sa_softc *softc; softc = (struct sa_softc *)periph->softc; cam_periph_unlock(periph); if ((softc->flags & SA_FLAG_SCTX_INIT) != 0 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) xpt_print(periph->path, "can't remove sysctl context\n"); cam_periph_lock(periph); devstat_remove_entry(softc->device_stats); free(softc, M_SCSISA); } static void saasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data) != T_SEQUENTIAL) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(saregister, saoninvalidate, sacleanup, sastart, "sa", CAM_PERIPH_BIO, path, saasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("saasync: Unable to probe new device " "due to status 0x%x\n", status); break; } default: cam_periph_async(periph, code, path, arg); break; } } static void sasetupdev(struct sa_softc *softc, struct cdev *dev) { dev->si_iosize_max = softc->maxio; dev->si_flags |= softc->si_flags; /* * Keep a count of how many non-alias devices we have created, * so we can make sure we clean them all up on shutdown. Aliases * are cleaned up when we destroy the device they're an alias for. */ if ((dev->si_flags & SI_ALIAS) == 0) softc->num_devs_to_destroy++; } static void sasysctlinit(void *context, int pending) { struct cam_periph *periph; struct sa_softc *softc; char tmpstr[32], tmpstr2[16]; periph = (struct cam_periph *)context; /* * If the periph is invalid, no need to setup the sysctls. 
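 *
 * (Once registration succeeds, the nodes added below surface as
 * kern.cam.sa.<unit>.*; e.g. "sysctl kern.cam.sa.0.maxio" would
 * report this instance's maximum I/O size.  The example path is
 * inferred from the SYSCTL_STATIC_CHILDREN(_kern_cam_sa) parent
 * used below.)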
*/ if (periph->flags & CAM_PERIPH_INVALID) goto bailout; softc = (struct sa_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM SA unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%u", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= SA_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_sa), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) goto bailout; SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "allow_io_split", CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &softc->allow_io_split, 0, "Allow Splitting I/O"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "maxio", CTLFLAG_RD, &softc->maxio, 0, "Maximum I/O size"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "cpi_maxio", CTLFLAG_RD, &softc->cpi_maxio, 0, "Maximum Controller I/O size"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "inject_eom", CTLFLAG_RW, &softc->inject_eom, 0, "Queue EOM for the next write/read"); bailout: /* * Release the reference that was held when this task was enqueued. */ cam_periph_release(periph); } static cam_status saregister(struct cam_periph *periph, void *arg) { struct sa_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; struct make_dev_args args; caddr_t match; char tmpstr[80]; int error; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("saregister: no getdev CCB, can't register device\n"); return (CAM_REQ_CMP_ERR); } softc = (struct sa_softc *) malloc(sizeof (*softc), M_SCSISA, M_NOWAIT | M_ZERO); if (softc == NULL) { printf("saregister: Unable to probe new device. " "Unable to allocate softc\n"); return (CAM_REQ_CMP_ERR); } softc->scsi_rev = SID_ANSI_REV(&cgd->inq_data); softc->state = SA_STATE_NORMAL; softc->fileno = (daddr_t) -1; softc->blkno = (daddr_t) -1; softc->rep_fileno = (daddr_t) -1; softc->rep_blkno = (daddr_t) -1; softc->partition = (daddr_t) -1; softc->bop = -1; softc->eop = -1; softc->bpew = -1; bioq_init(&softc->bio_queue); softc->periph = periph; periph->softc = softc; /* * See if this device has any quirks. */ match = cam_quirkmatch((caddr_t)&cgd->inq_data, (caddr_t)sa_quirk_table, nitems(sa_quirk_table), sizeof(*sa_quirk_table), scsi_inquiry_match); if (match != NULL) { softc->quirks = ((struct sa_quirk_entry *)match)->quirks; softc->last_media_blksize = ((struct sa_quirk_entry *)match)->prefblk; } else softc->quirks = SA_QUIRK_NONE; /* * Long format data for READ POSITION was introduced in SSC, which * was after SCSI-2. (Roughly equivalent to SCSI-3.) If the drive * reports that it is SCSI-2 or older, it is unlikely to support * long position data, but it might. Some drives from that era * claim to be SCSI-2, but do support long position information. * So, instead of immediately disabling long position information * for SCSI-2 devices, we'll try one pass through sagetpos(), and * then disable long position information if we get an error. 
*/ if (cgd->inq_data.version <= SCSI_REV_CCS) softc->quirks |= SA_QUIRK_NO_LONG_POS; if (cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) { struct ccb_dev_advinfo cdai; struct scsi_vpd_extended_inquiry_data ext_inq; bzero(&ext_inq, sizeof(ext_inq)); xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.flags = CDAI_FLAG_NONE; cdai.buftype = CDAI_TYPE_EXT_INQ; cdai.bufsiz = sizeof(ext_inq); cdai.buf = (uint8_t *)&ext_inq; xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if ((cdai.ccb_h.status == CAM_REQ_CMP) && (ext_inq.flags1 & SVPD_EID_SA_SPT_LBP)) softc->flags |= SA_FLAG_PROTECT_SUPP; } xpt_path_inq(&cpi, periph->path); /* * The SA driver supports a blocksize, but we don't know the * blocksize until media is inserted. So, set a flag to * indicate that the blocksize is unavailable right now. */ cam_periph_unlock(periph); softc->device_stats = devstat_new_entry("sa", periph->unit_number, 0, DEVSTAT_BS_UNAVAILABLE, SID_TYPE(&cgd->inq_data) | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_TAPE); /* * Load the default value that is either compiled in, or loaded * in the global kern.cam.sa.allow_io_split tunable. */ softc->allow_io_split = sa_allow_io_split; /* * Load a per-instance tunable, if it exists. NOTE that this * tunable WILL GO AWAY in FreeBSD 11.0. */ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.sa.%u.allow_io_split", periph->unit_number); TUNABLE_INT_FETCH(tmpstr, &softc->allow_io_split); /* * If maxio isn't set, we fall back to DFLTPHYS. Otherwise we take * the smaller of cpi.maxio or MAXPHYS. */ if (cpi.maxio == 0) softc->maxio = DFLTPHYS; else if (cpi.maxio > MAXPHYS) softc->maxio = MAXPHYS; else softc->maxio = cpi.maxio; /* * Record the controller's maximum I/O size so we can report it to * the user later. */ softc->cpi_maxio = cpi.maxio; /* * By default we tell physio that we do not want our I/O split. * The user needs to have a 1:1 mapping between the size of his * write to a tape character device and the size of the write * that actually goes down to the drive. */ if (softc->allow_io_split == 0) softc->si_flags = SI_NOSPLIT; else softc->si_flags = 0; TASK_INIT(&softc->sysctl_task, 0, sasysctlinit, periph); /* * If the SIM supports unmapped I/O, let physio know that we can * handle unmapped buffers. */ if (cpi.hba_misc & PIM_UNMAPPED) softc->si_flags |= SI_UNMAPPED; /* * Acquire a reference to the periph before we create the devfs * instances for it. We'll release this reference once the devfs * instances have been freed.
*/ - if (cam_periph_acquire(periph) != CAM_REQ_CMP) { + if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } make_dev_args_init(&args); args.mda_devsw = &sa_cdevsw; args.mda_si_drv1 = softc->periph; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0660; args.mda_unit = SAMINOR(SA_CTLDEV, SA_ATYPE_R); error = make_dev_s(&args, &softc->devs.ctl_dev, "%s%d.ctl", periph->periph_name, periph->unit_number); if (error != 0) { cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } sasetupdev(softc, softc->devs.ctl_dev); args.mda_unit = SAMINOR(SA_NOT_CTLDEV, SA_ATYPE_R); error = make_dev_s(&args, &softc->devs.r_dev, "%s%d", periph->periph_name, periph->unit_number); if (error != 0) { cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } sasetupdev(softc, softc->devs.r_dev); args.mda_unit = SAMINOR(SA_NOT_CTLDEV, SA_ATYPE_NR); error = make_dev_s(&args, &softc->devs.nr_dev, "n%s%d", periph->periph_name, periph->unit_number); if (error != 0) { cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } sasetupdev(softc, softc->devs.nr_dev); args.mda_unit = SAMINOR(SA_NOT_CTLDEV, SA_ATYPE_ER); error = make_dev_s(&args, &softc->devs.er_dev, "e%s%d", periph->periph_name, periph->unit_number); if (error != 0) { cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } sasetupdev(softc, softc->devs.er_dev); cam_periph_lock(periph); softc->density_type_bits[0] = 0; softc->density_type_bits[1] = SRDS_MEDIA; softc->density_type_bits[2] = SRDS_MEDIUM_TYPE; softc->density_type_bits[3] = SRDS_MEDIUM_TYPE | SRDS_MEDIA; /* * Bump the peripheral refcount for the sysctl thread, in case we * get invalidated before the thread has a chance to run. */ cam_periph_acquire(periph); taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); /* * Add an async callback so that we get * notified if this device goes away. */ xpt_register_async(AC_LOST_DEVICE, saasync, periph, periph->path); xpt_announce_periph(periph, NULL); xpt_announce_quirks(periph, softc->quirks, SA_QUIRK_BIT_STRING); return (CAM_REQ_CMP); } static void sastart(struct cam_periph *periph, union ccb *start_ccb) { struct sa_softc *softc; softc = (struct sa_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sastart\n")); switch (softc->state) { case SA_STATE_NORMAL: { /* Pull a buffer from the queue and get going on it */ struct bio *bp; /* * See if there is a buf with work for us to do.. */ bp = bioq_first(&softc->bio_queue); if (bp == NULL) { xpt_release_ccb(start_ccb); } else if (((softc->flags & SA_FLAG_ERR_PENDING) != 0) || (softc->inject_eom != 0)) { struct bio *done_bp; if (softc->inject_eom != 0) { softc->flags |= SA_FLAG_EOM_PENDING; softc->inject_eom = 0; /* * If we're injecting EOM for writes, we * need to keep PEWS set for 3 queries * to cover 2 position requests from the * kernel via sagetpos(), and then allow * for one for the user to see the BPEW * flag (e.g. via mt status). After that, * it will be cleared. */ if (bp->bio_cmd == BIO_WRITE) softc->set_pews_status = 3; else softc->set_pews_status = 1; } again: softc->queue_count--; bioq_remove(&softc->bio_queue, bp); bp->bio_resid = bp->bio_bcount; done_bp = bp; if ((softc->flags & SA_FLAG_EOM_PENDING) != 0) { /* * We have two different behaviors for * writes when we hit either Early Warning * or the PEWZ (Programmable Early Warning * Zone). 
The default behavior is that * for all writes that are currently * queued after the write where we saw the * early warning, we will return the write * with the residual equal to the count. * i.e. tell the application that 0 bytes * were written. * * The alternate behavior, which is enabled * when eot_warn is set, is that in * addition to setting the residual equal * to the count, we will set the error * to ENOSPC. * * In either case, once queued writes are * cleared out, we clear the error flag * (see below) and the application is free to * attempt to write more. */ if (softc->eot_warn != 0) { bp->bio_flags |= BIO_ERROR; bp->bio_error = ENOSPC; } else bp->bio_error = 0; } else if ((softc->flags & SA_FLAG_EOF_PENDING) != 0) { /* * This can only happen if we're reading * in fixed length mode. In this case, * we dump the rest of the list the * same way. */ bp->bio_error = 0; if (bioq_first(&softc->bio_queue) != NULL) { biodone(done_bp); goto again; } } else if ((softc->flags & SA_FLAG_EIO_PENDING) != 0) { bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; } bp = bioq_first(&softc->bio_queue); /* * Only if we have no other buffers queued up * do we clear the pending error flag. */ if (bp == NULL) softc->flags &= ~SA_FLAG_ERR_PENDING; CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("sastart- ERR_PENDING now 0x%x, bp is %sNULL, " "%d more buffers queued up\n", (softc->flags & SA_FLAG_ERR_PENDING), (bp != NULL)? "not " : " ", softc->queue_count)); xpt_release_ccb(start_ccb); biodone(done_bp); } else { u_int32_t length; bioq_remove(&softc->bio_queue, bp); softc->queue_count--; length = bp->bio_bcount; if ((softc->flags & SA_FLAG_FIXED) != 0) { if (softc->blk_shift != 0) { length = length >> softc->blk_shift; } else if (softc->media_blksize != 0) { length = length / softc->media_blksize; } else { bp->bio_error = EIO; xpt_print(periph->path, "zero blocksize" " for FIXED length writes?\n"); biodone(bp); break; } #if 0 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_INFO, ("issuing a %d fixed record %s\n", length, (bp->bio_cmd == BIO_READ)? "read" : "write")); #endif } else { #if 0 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_INFO, ("issuing a %d variable byte %s\n", length, (bp->bio_cmd == BIO_READ)? "read" : "write")); #endif } devstat_start_transaction_bio(softc->device_stats, bp); /* * Some people have theorized that we should * suppress illegal length indication if we are * running in variable block mode so that we don't * have to request sense every time our requested * block size is larger than the written block. * The residual information from the ccb allows * us to identify this situation anyway. The only * problem with this is that we will not get * information about blocks that are larger than * our read buffer unless we set the block size * in the mode page to something other than 0. * * I believe that this is a non-issue. If user apps * don't adjust their read size to match our record * size, that's just life. Anyway, the typical usage * would be to issue, e.g., 64KB reads and occasionally * have to deal with 512 byte or 1KB intermediate * records. * * That said, though, we now support setting the * SILI bit on reads, and we set the blocksize to 4 * bytes when we do that. This gives us * compatibility with software that wants this, * although the only real difference between that * and not setting the SILI bit on reads is that we * won't get a check condition on reads where our * request size is larger than the block on tape.
* That probably only makes a real difference in * non-packetized SCSI, where you have to go back * to the drive to request sense and thus incur * more latency. */ softc->dsreg = (bp->bio_cmd == BIO_READ)? MTIO_DSREG_RD : MTIO_DSREG_WR; scsi_sa_read_write(&start_ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, (bp->bio_cmd == BIO_READ ? SCSI_RW_READ : SCSI_RW_WRITE) | ((bp->bio_flags & BIO_UNMAPPED) != 0 ? SCSI_RW_BIO : 0), softc->sili, (softc->flags & SA_FLAG_FIXED) != 0, length, (bp->bio_flags & BIO_UNMAPPED) != 0 ? (void *)bp : bp->bio_data, bp->bio_bcount, SSD_FULL_SIZE, IO_TIMEOUT); start_ccb->ccb_h.ccb_pflags &= ~SA_POSITION_UPDATED; start_ccb->ccb_h.ccb_bp = bp; bp = bioq_first(&softc->bio_queue); xpt_action(start_ccb); } if (bp != NULL) { /* Have more work to do, so ensure we stay scheduled */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); } break; } case SA_STATE_ABNORMAL: default: panic("state 0x%x in sastart", softc->state); break; } } static void sadone(struct cam_periph *periph, union ccb *done_ccb) { struct sa_softc *softc; struct ccb_scsiio *csio; struct bio *bp; int error; softc = (struct sa_softc *)periph->softc; csio = &done_ccb->csio; softc->dsreg = MTIO_DSREG_REST; bp = (struct bio *)done_ccb->ccb_h.ccb_bp; error = 0; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if ((error = saerror(done_ccb, 0, 0)) == ERESTART) { /* * A retry was scheduled, so just return. */ return; } } if (error == EIO) { /* * Catastrophic error. Mark the tape as frozen * (we no longer know tape position). * * Return all queued I/O with EIO, and unfreeze * our queue so that future transactions that * attempt to fix this problem can get to the * device. * */ softc->flags |= SA_FLAG_TAPE_FROZEN; bioq_flush(&softc->bio_queue, NULL, EIO); } if (error != 0) { bp->bio_resid = bp->bio_bcount; bp->bio_error = error; bp->bio_flags |= BIO_ERROR; /* * In the error case, position is updated in saerror. */ } else { bp->bio_resid = csio->resid; bp->bio_error = 0; if (csio->resid != 0) { bp->bio_flags |= BIO_ERROR; } if (bp->bio_cmd == BIO_WRITE) { softc->flags |= SA_FLAG_TAPE_WRITTEN; softc->filemarks = 0; } if (!(csio->ccb_h.ccb_pflags & SA_POSITION_UPDATED) && (softc->blkno != (daddr_t) -1)) { if ((softc->flags & SA_FLAG_FIXED) != 0) { u_int32_t l; if (softc->blk_shift != 0) { l = bp->bio_bcount >> softc->blk_shift; } else { l = bp->bio_bcount / softc->media_blksize; } softc->blkno += (daddr_t) l; } else { softc->blkno++; } } } /* * If we had an error (immediate or pending), * release the device queue now. */ if (error || (softc->flags & SA_FLAG_ERR_PENDING)) cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0); if (error || bp->bio_resid) { CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("error %d resid %ld count %ld\n", error, bp->bio_resid, bp->bio_bcount)); } biofinish(bp, softc->device_stats, 0); xpt_release_ccb(done_ccb); } /* * Mount the tape (make sure it's ready for I/O). */ static int samount(struct cam_periph *periph, int oflags, struct cdev *dev) { struct sa_softc *softc; union ccb *ccb; int error; /* * oflags can be checked for 'kind' of open (read-only check) - later * dev can be checked for a control-mode or compression open - later */ UNUSED_PARAMETER(oflags); UNUSED_PARAMETER(dev); softc = (struct sa_softc *)periph->softc; /* * This should determine if something has happened since the last * open/mount that would invalidate the mount. We do *not* want * to retry this command- we just want the status. 
But we only * do this if we're mounted already- if we're not mounted, * we don't care about the unit read state and can instead use * this opportunity to attempt to reserve the tape unit. */ if (softc->flags & SA_FLAG_TAPE_MOUNTED) { ccb = cam_periph_getccb(periph, 1); scsi_test_unit_ready(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, IO_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); if (error == ENXIO) { softc->flags &= ~SA_FLAG_TAPE_MOUNTED; scsi_test_unit_ready(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, IO_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); } else if (error) { /* * We don't need to freeze the tape because we * will now attempt to rewind/load it. */ softc->flags &= ~SA_FLAG_TAPE_MOUNTED; if (CAM_DEBUGGED(periph->path, CAM_DEBUG_INFO)) { xpt_print(periph->path, "error %d on TUR in samount\n", error); } } } else { error = sareservereleaseunit(periph, TRUE); if (error) { return (error); } ccb = cam_periph_getccb(periph, 1); scsi_test_unit_ready(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, IO_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); } if ((softc->flags & SA_FLAG_TAPE_MOUNTED) == 0) { struct scsi_read_block_limits_data *rblim = NULL; int comp_enabled, comp_supported; u_int8_t write_protect, guessing = 0; /* * Clear out old state. */ softc->flags &= ~(SA_FLAG_TAPE_WP|SA_FLAG_TAPE_WRITTEN| SA_FLAG_ERR_PENDING|SA_FLAG_COMPRESSION); softc->filemarks = 0; /* * *Very* first off, make sure we're loaded to BOT. */ scsi_load_unload(&ccb->csio, 2, sadone, MSG_SIMPLE_Q_TAG, FALSE, FALSE, FALSE, 1, SSD_FULL_SIZE, REWIND_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); /* * In case this doesn't work, do a REWIND instead */ if (error) { scsi_rewind(&ccb->csio, 2, sadone, MSG_SIMPLE_Q_TAG, FALSE, SSD_FULL_SIZE, REWIND_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); } if (error) { xpt_release_ccb(ccb); goto exit; } /* * Do a dummy test read to force access to the * media so that the drive will really know what's * there. We actually don't really care what the * blocksize on tape is and don't expect to really * read a full record. */ rblim = (struct scsi_read_block_limits_data *) malloc(8192, M_SCSISA, M_NOWAIT); if (rblim == NULL) { xpt_print(periph->path, "no memory for test read\n"); xpt_release_ccb(ccb); error = ENOMEM; goto exit; } if ((softc->quirks & SA_QUIRK_NODREAD) == 0) { scsi_sa_read_write(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, 1, FALSE, 0, 8192, (void *) rblim, 8192, SSD_FULL_SIZE, IO_TIMEOUT); (void) cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); scsi_rewind(&ccb->csio, 1, sadone, MSG_SIMPLE_Q_TAG, FALSE, SSD_FULL_SIZE, REWIND_TIMEOUT); error = cam_periph_runccb(ccb, saerror, CAM_RETRY_SELTO, SF_NO_PRINT | SF_RETRY_UA, softc->device_stats); if (error) { xpt_print(periph->path, "unable to rewind after test read\n"); xpt_release_ccb(ccb); goto exit; } } /* * Next off, determine block limits. */ scsi_read_block_limits(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, rblim, SSD_FULL_SIZE, SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, CAM_RETRY_SELTO, SF_NO_PRINT | SF_RETRY_UA, softc->device_stats); xpt_release_ccb(ccb); if (error != 0) { /* * If it's less than SCSI-2, READ BLOCK LIMITS is not * a MANDATORY command. Anyway- it doesn't matter- * we can proceed anyway. 
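 *
 * (When the command fails we simply assume no granularity and an
 * unrestricted 0..~0 block size range below.  When it works, note
 * that blk_gran is a power-of-two exponent: a granularity of 2
 * means requests must be a multiple of 4 bytes, matching the
 * "multiple of (1 << blk_gran)" check printed by sastrategy().)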
*/ softc->blk_gran = 0; softc->max_blk = ~0; softc->min_blk = 0; } else { if (softc->scsi_rev >= SCSI_REV_SPC) { softc->blk_gran = RBL_GRAN(rblim); } else { softc->blk_gran = 0; } /* * We take max_blk == min_blk to mean a default to * fixed mode- but note that whatever we get out of * sagetparams below will actually determine whether * we are actually *in* fixed mode. */ softc->max_blk = scsi_3btoul(rblim->maximum); softc->min_blk = scsi_2btoul(rblim->minimum); } /* * Next, perform a mode sense to determine * current density, blocksize, compression etc. */ error = sagetparams(periph, SA_PARAM_ALL, &softc->media_blksize, &softc->media_density, &softc->media_numblks, &softc->buffer_mode, &write_protect, &softc->speed, &comp_supported, &comp_enabled, &softc->comp_algorithm, NULL, NULL, 0, 0); if (error != 0) { /* * We could work a little harder here. We could * adjust our attempts to get information. It * might be an ancient tape drive. If someone * nudges us, we'll do that. */ goto exit; } /* * If no quirk has determined that this is a device that is * preferred to be in fixed or variable mode, now is the time * to find out. */ if ((softc->quirks & (SA_QUIRK_FIXED|SA_QUIRK_VARIABLE)) == 0) { guessing = 1; /* * This could be expensive to find out. Luckily we * only need to do this once. If we start out in * 'default' mode, try and set ourselves to one * of the densities that would determine a wad * of other stuff. Go from highest to lowest. */ if (softc->media_density == SCSI_DEFAULT_DENSITY) { int i; static u_int8_t ctry[] = { SCSI_DENSITY_HALFINCH_PE, SCSI_DENSITY_HALFINCH_6250C, SCSI_DENSITY_HALFINCH_6250, SCSI_DENSITY_HALFINCH_1600, SCSI_DENSITY_HALFINCH_800, SCSI_DENSITY_QIC_4GB, SCSI_DENSITY_QIC_2GB, SCSI_DENSITY_QIC_525_320, SCSI_DENSITY_QIC_150, SCSI_DENSITY_QIC_120, SCSI_DENSITY_QIC_24, SCSI_DENSITY_QIC_11_9TRK, SCSI_DENSITY_QIC_11_4TRK, SCSI_DENSITY_QIC_1320, SCSI_DENSITY_QIC_3080, 0 }; for (i = 0; ctry[i]; i++) { error = sasetparams(periph, SA_PARAM_DENSITY, 0, ctry[i], 0, SF_NO_PRINT); if (error == 0) { softc->media_density = ctry[i]; break; } } } switch (softc->media_density) { case SCSI_DENSITY_QIC_11_4TRK: case SCSI_DENSITY_QIC_11_9TRK: case SCSI_DENSITY_QIC_24: case SCSI_DENSITY_QIC_120: case SCSI_DENSITY_QIC_150: case SCSI_DENSITY_QIC_525_320: case SCSI_DENSITY_QIC_1320: case SCSI_DENSITY_QIC_3080: softc->quirks &= ~SA_QUIRK_2FM; softc->quirks |= SA_QUIRK_FIXED|SA_QUIRK_1FM; softc->last_media_blksize = 512; break; case SCSI_DENSITY_QIC_4GB: case SCSI_DENSITY_QIC_2GB: softc->quirks &= ~SA_QUIRK_2FM; softc->quirks |= SA_QUIRK_FIXED|SA_QUIRK_1FM; softc->last_media_blksize = 1024; break; default: softc->last_media_blksize = softc->media_blksize; softc->quirks |= SA_QUIRK_VARIABLE; break; } } /* * If no quirk has determined that this is a device that needs * to have 2 Filemarks at EOD, now is the time to find out. */ if ((softc->quirks & SA_QUIRK_2FM) == 0) { switch (softc->media_density) { case SCSI_DENSITY_HALFINCH_800: case SCSI_DENSITY_HALFINCH_1600: case SCSI_DENSITY_HALFINCH_6250: case SCSI_DENSITY_HALFINCH_6250C: case SCSI_DENSITY_HALFINCH_PE: softc->quirks &= ~SA_QUIRK_1FM; softc->quirks |= SA_QUIRK_2FM; break; default: break; } } /* * Now validate that some info we got makes sense. 
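 *
 * (Worked example: if READ BLOCK LIMITS reported 1024..1024 while
 * the mode sense says the current blocksize is 512, the test below
 * trips on min_blk > media_blksize, logs the mismatch, and clamps
 * both limits to 512.)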
*/ if ((softc->max_blk < softc->media_blksize) || (softc->min_blk > softc->media_blksize && softc->media_blksize)) { xpt_print(periph->path, "BLOCK LIMITS (%d..%d) could not match current " "block settings (%d)- adjusting\n", softc->min_blk, softc->max_blk, softc->media_blksize); softc->max_blk = softc->min_blk = softc->media_blksize; } /* * Now put ourselves into the right frame of mind based * upon quirks... */ tryagain: /* * If we want to be in FIXED mode and our current blocksize * is not equal to our last blocksize (if nonzero), try and * set ourselves to this last blocksize (as the 'preferred' * block size). The initial quirkmatch at registry sets the * initial 'last' blocksize. If, for whatever reason, this * 'last' blocksize is zero, set the blocksize to 512, * or min_blk if that's larger. */ if ((softc->quirks & SA_QUIRK_FIXED) && (softc->quirks & SA_QUIRK_NO_MODESEL) == 0 && (softc->media_blksize != softc->last_media_blksize)) { softc->media_blksize = softc->last_media_blksize; if (softc->media_blksize == 0) { softc->media_blksize = 512; if (softc->media_blksize < softc->min_blk) { softc->media_blksize = softc->min_blk; } } error = sasetparams(periph, SA_PARAM_BLOCKSIZE, softc->media_blksize, 0, 0, SF_NO_PRINT); if (error) { xpt_print(periph->path, "unable to set fixed blocksize to %d\n", softc->media_blksize); goto exit; } } if ((softc->quirks & SA_QUIRK_VARIABLE) && (softc->media_blksize != 0)) { softc->last_media_blksize = softc->media_blksize; softc->media_blksize = 0; error = sasetparams(periph, SA_PARAM_BLOCKSIZE, 0, 0, 0, SF_NO_PRINT); if (error) { /* * If this fails and we were guessing, just * assume that we got it wrong and go try * fixed block mode. Don't even check against * density code at this point. */ if (guessing) { softc->quirks &= ~SA_QUIRK_VARIABLE; softc->quirks |= SA_QUIRK_FIXED; if (softc->last_media_blksize == 0) softc->last_media_blksize = 512; goto tryagain; } xpt_print(periph->path, "unable to set variable blocksize\n"); goto exit; } } /* * Now that we have the current block size, * set up some parameters for sastart's usage. */ if (softc->media_blksize) { softc->flags |= SA_FLAG_FIXED; if (powerof2(softc->media_blksize)) { softc->blk_shift = ffs(softc->media_blksize) - 1; softc->blk_mask = softc->media_blksize - 1; } else { softc->blk_mask = ~0; softc->blk_shift = 0; } } else { /* * The SCSI-3 spec allows 0 to mean "unspecified". * The SCSI-1 spec allows 0 to mean 'infinite'. * * Either works here. 
*/ if (softc->max_blk == 0) { softc->max_blk = ~0; } softc->blk_shift = 0; if (softc->blk_gran != 0) { softc->blk_mask = softc->blk_gran - 1; } else { softc->blk_mask = 0; } } if (write_protect) softc->flags |= SA_FLAG_TAPE_WP; if (comp_supported) { if (softc->saved_comp_algorithm == 0) softc->saved_comp_algorithm = softc->comp_algorithm; softc->flags |= SA_FLAG_COMP_SUPP; if (comp_enabled) softc->flags |= SA_FLAG_COMP_ENABLED; } else softc->flags |= SA_FLAG_COMP_UNSUPP; if ((softc->buffer_mode == SMH_SA_BUF_MODE_NOBUF) && (softc->quirks & SA_QUIRK_NO_MODESEL) == 0) { error = sasetparams(periph, SA_PARAM_BUFF_MODE, 0, 0, 0, SF_NO_PRINT); if (error == 0) { softc->buffer_mode = SMH_SA_BUF_MODE_SIBUF; } else { xpt_print(periph->path, "unable to set buffered mode\n"); } error = 0; /* not an error */ } if (error == 0) { softc->flags |= SA_FLAG_TAPE_MOUNTED; } exit: if (rblim != NULL) free(rblim, M_SCSISA); if (error != 0) { softc->dsreg = MTIO_DSREG_NIL; } else { softc->fileno = softc->blkno = 0; softc->rep_fileno = softc->rep_blkno = -1; softc->partition = 0; softc->dsreg = MTIO_DSREG_REST; } #ifdef SA_1FM_AT_EOD if ((softc->quirks & SA_QUIRK_2FM) == 0) softc->quirks |= SA_QUIRK_1FM; #else if ((softc->quirks & SA_QUIRK_1FM) == 0) softc->quirks |= SA_QUIRK_2FM; #endif } else xpt_release_ccb(ccb); /* * If we return an error, we're not mounted any more, * so release any device reservation. */ if (error != 0) { (void) sareservereleaseunit(periph, FALSE); } else { /* * Clear I/O residual. */ softc->last_io_resid = 0; softc->last_ctl_resid = 0; } return (error); } /* * How many filemarks do we need to write if we were to terminate the * tape session right now? Note that this can be a negative number */ static int samarkswanted(struct cam_periph *periph) { int markswanted; struct sa_softc *softc; softc = (struct sa_softc *)periph->softc; markswanted = 0; if ((softc->flags & SA_FLAG_TAPE_WRITTEN) != 0) { markswanted++; if (softc->quirks & SA_QUIRK_2FM) markswanted++; } markswanted -= softc->filemarks; return (markswanted); } static int sacheckeod(struct cam_periph *periph) { int error; int markswanted; markswanted = samarkswanted(periph); if (markswanted > 0) { error = sawritefilemarks(periph, markswanted, FALSE, FALSE); } else { error = 0; } return (error); } static int saerror(union ccb *ccb, u_int32_t cflgs, u_int32_t sflgs) { static const char *toobig = "%d-byte tape record bigger than supplied buffer\n"; struct cam_periph *periph; struct sa_softc *softc; struct ccb_scsiio *csio; struct scsi_sense_data *sense; uint64_t resid = 0; int64_t info = 0; cam_status status; int error_code, sense_key, asc, ascq, error, aqvalid, stream_valid; int sense_len; uint8_t stream_bits; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct sa_softc *)periph->softc; csio = &ccb->csio; sense = &csio->sense_data; sense_len = csio->sense_len - csio->sense_resid; scsi_extract_sense_len(sense, sense_len, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); if (asc != -1 && ascq != -1) aqvalid = 1; else aqvalid = 0; if (scsi_get_stream_info(sense, sense_len, NULL, &stream_bits) == 0) stream_valid = 1; else stream_valid = 0; error = 0; status = csio->ccb_h.status & CAM_STATUS_MASK; /* * Calculate/latch up, any residuals... We do this in a funny 2-step * so we can print stuff here if we have CAM_DEBUG enabled for this * unit. 
*/ if (status == CAM_SCSI_STATUS_ERROR) { if (scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &resid, &info) == 0) { if ((softc->flags & SA_FLAG_FIXED) != 0) resid *= softc->media_blksize; } else { resid = csio->dxfer_len; info = resid; if ((softc->flags & SA_FLAG_FIXED) != 0) { if (softc->media_blksize) info /= softc->media_blksize; } } if (csio->cdb_io.cdb_bytes[0] == SA_READ || csio->cdb_io.cdb_bytes[0] == SA_WRITE) { bcopy((caddr_t) sense, (caddr_t) &softc->last_io_sense, sizeof (struct scsi_sense_data)); bcopy(csio->cdb_io.cdb_bytes, softc->last_io_cdb, (int) csio->cdb_len); softc->last_io_resid = resid; softc->last_resid_was_io = 1; } else { bcopy((caddr_t) sense, (caddr_t) &softc->last_ctl_sense, sizeof (struct scsi_sense_data)); bcopy(csio->cdb_io.cdb_bytes, softc->last_ctl_cdb, (int) csio->cdb_len); softc->last_ctl_resid = resid; softc->last_resid_was_io = 0; } CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("CDB[0]=0x%x Key 0x%x " "ASC/ASCQ 0x%x/0x%x CAM STATUS 0x%x flags 0x%x resid %jd " "dxfer_len %d\n", csio->cdb_io.cdb_bytes[0] & 0xff, sense_key, asc, ascq, status, (stream_valid) ? stream_bits : 0, (intmax_t)resid, csio->dxfer_len)); } else { CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Cam Status 0x%x\n", status)); } switch (status) { case CAM_REQ_CMP: return (0); case CAM_SCSI_STATUS_ERROR: /* * If a read/write command, we handle it here. */ if (csio->cdb_io.cdb_bytes[0] == SA_READ || csio->cdb_io.cdb_bytes[0] == SA_WRITE) { break; } /* * If this was just EOM/EOP, Filemark, Setmark or ILI detected * on a non read/write command, we assume it's not an error * and propagate the residual and return. */ if ((aqvalid && asc == 0 && ascq > 0 && ascq <= 5) || (aqvalid == 0 && sense_key == SSD_KEY_NO_SENSE)) { csio->resid = resid; QFRLS(ccb); return (0); } /* * Otherwise, we let the common code handle this. */ return (cam_periph_error(ccb, cflgs, sflgs)); /* * XXX: To Be Fixed * We cannot depend upon CAM honoring retry counts for these. */ case CAM_SCSI_BUS_RESET: case CAM_BDR_SENT: if (ccb->ccb_h.retry_count <= 0) { return (EIO); } /* FALLTHROUGH */ default: return (cam_periph_error(ccb, cflgs, sflgs)); } /* * Handle filemark, end of tape, mismatched record sizes.... * From this point out, we're only handling read/write cases. * Handle writes && reads differently. */ if (csio->cdb_io.cdb_bytes[0] == SA_WRITE) { if (sense_key == SSD_KEY_VOLUME_OVERFLOW) { csio->resid = resid; error = ENOSPC; } else if ((stream_valid != 0) && (stream_bits & SSD_EOM)) { softc->flags |= SA_FLAG_EOM_PENDING; /* * Grotesque as it seems, the few times * I've actually seen a non-zero resid, * the tape drive actually lied and had * written all the data! */ csio->resid = 0; } } else { csio->resid = resid; if (sense_key == SSD_KEY_BLANK_CHECK) { if (softc->quirks & SA_QUIRK_1FM) { error = 0; softc->flags |= SA_FLAG_EOM_PENDING; } else { error = EIO; } } else if ((stream_valid != 0) && (stream_bits & SSD_FILEMARK)){ if (softc->flags & SA_FLAG_FIXED) { error = -1; softc->flags |= SA_FLAG_EOF_PENDING; } /* * Unconditionally, if we detected a filemark on a read, * mark that we've moved a file ahead. */ if (softc->fileno != (daddr_t) -1) { softc->fileno++; softc->blkno = 0; csio->ccb_h.ccb_pflags |= SA_POSITION_UPDATED; } } } /* * Incorrect Length usually applies to read, but can apply to writes. */
*/ if (error == 0 && (stream_valid != 0) && (stream_bits & SSD_ILI)) { if (info < 0) { xpt_print(csio->ccb_h.path, toobig, csio->dxfer_len - info); csio->resid = csio->dxfer_len; error = EIO; } else { csio->resid = resid; if (softc->flags & SA_FLAG_FIXED) { softc->flags |= SA_FLAG_EIO_PENDING; } /* * Bump the block number if we hadn't seen a filemark. * Do this independent of errors (we've moved anyway). */ if ((stream_valid == 0) || (stream_bits & SSD_FILEMARK) == 0) { if (softc->blkno != (daddr_t) -1) { softc->blkno++; csio->ccb_h.ccb_pflags |= SA_POSITION_UPDATED; } } } } if (error <= 0) { /* * Unfreeze the queue if frozen as we're not returning anything * to our waiters that would indicate an I/O error has occurred * (yet). */ QFRLS(ccb); error = 0; } return (error); } static int sagetparams(struct cam_periph *periph, sa_params params_to_get, u_int32_t *blocksize, u_int8_t *density, u_int32_t *numblocks, int *buff_mode, u_int8_t *write_protect, u_int8_t *speed, int *comp_supported, int *comp_enabled, u_int32_t *comp_algorithm, sa_comp_t *tcs, struct scsi_control_data_prot_subpage *prot_page, int dp_size, int prot_changeable) { union ccb *ccb; void *mode_buffer; struct scsi_mode_header_6 *mode_hdr; struct scsi_mode_blk_desc *mode_blk; int mode_buffer_len; struct sa_softc *softc; u_int8_t cpage; int error; cam_status status; softc = (struct sa_softc *)periph->softc; ccb = cam_periph_getccb(periph, 1); if (softc->quirks & SA_QUIRK_NO_CPAGE) cpage = SA_DEVICE_CONFIGURATION_PAGE; else cpage = SA_DATA_COMPRESSION_PAGE; retry: mode_buffer_len = sizeof(*mode_hdr) + sizeof(*mode_blk); if (params_to_get & SA_PARAM_COMPRESSION) { if (softc->quirks & SA_QUIRK_NOCOMP) { *comp_supported = FALSE; params_to_get &= ~SA_PARAM_COMPRESSION; } else mode_buffer_len += sizeof (sa_comp_t); } /* XXX Fix M_NOWAIT */ mode_buffer = malloc(mode_buffer_len, M_SCSISA, M_NOWAIT | M_ZERO); if (mode_buffer == NULL) { xpt_release_ccb(ccb); return (ENOMEM); } mode_hdr = (struct scsi_mode_header_6 *)mode_buffer; mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1]; /* it is safe to retry this */ scsi_mode_sense(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, FALSE, SMS_PAGE_CTRL_CURRENT, (params_to_get & SA_PARAM_COMPRESSION) ? cpage : SMS_VENDOR_SPECIFIC_PAGE, mode_buffer, mode_buffer_len, SSD_FULL_SIZE, SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); status = ccb->ccb_h.status & CAM_STATUS_MASK; if (error == EINVAL && (params_to_get & SA_PARAM_COMPRESSION) != 0) { /* * Hmm. Let's see if we can try another page... * If we've already done that, give up on compression * for this device and remember this for the future * and attempt the request without asking for compression * info. */ if (cpage == SA_DATA_COMPRESSION_PAGE) { cpage = SA_DEVICE_CONFIGURATION_PAGE; goto retry; } softc->quirks |= SA_QUIRK_NOCOMP; free(mode_buffer, M_SCSISA); goto retry; } else if (status == CAM_SCSI_STATUS_ERROR) { /* Tell the user about the fatal error. */ scsi_sense_print(&ccb->csio); goto sagetparamsexit; } /* * If the user only wants the compression information, and * the device doesn't send back the block descriptor, it's * no big deal. If the user wants more than just * compression, though, and the device doesn't pass back the * block descriptor, we need to send another mode sense to * get the block descriptor. 
*/ if ((mode_hdr->blk_desc_len == 0) && (params_to_get & SA_PARAM_COMPRESSION) && (params_to_get & ~(SA_PARAM_COMPRESSION))) { /* * Decrease the mode buffer length by the size of * the compression page, to make sure the data * there doesn't get overwritten. */ mode_buffer_len -= sizeof (sa_comp_t); /* * Now move the compression page that we presumably * got back down the memory chunk a little bit so * it doesn't get spammed. */ bcopy(&mode_hdr[0], &mode_hdr[1], sizeof (sa_comp_t)); bzero(&mode_hdr[0], sizeof (mode_hdr[0])); /* * Now, we issue another mode sense and just ask * for the block descriptor, etc. */ scsi_mode_sense(&ccb->csio, 2, sadone, MSG_SIMPLE_Q_TAG, FALSE, SMS_PAGE_CTRL_CURRENT, SMS_VENDOR_SPECIFIC_PAGE, mode_buffer, mode_buffer_len, SSD_FULL_SIZE, SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); if (error != 0) goto sagetparamsexit; } if (params_to_get & SA_PARAM_BLOCKSIZE) *blocksize = scsi_3btoul(mode_blk->blklen); if (params_to_get & SA_PARAM_NUMBLOCKS) *numblocks = scsi_3btoul(mode_blk->nblocks); if (params_to_get & SA_PARAM_BUFF_MODE) *buff_mode = mode_hdr->dev_spec & SMH_SA_BUF_MODE_MASK; if (params_to_get & SA_PARAM_DENSITY) *density = mode_blk->density; if (params_to_get & SA_PARAM_WP) *write_protect = (mode_hdr->dev_spec & SMH_SA_WP)? TRUE : FALSE; if (params_to_get & SA_PARAM_SPEED) *speed = mode_hdr->dev_spec & SMH_SA_SPEED_MASK; if (params_to_get & SA_PARAM_COMPRESSION) { sa_comp_t *ntcs = (sa_comp_t *) &mode_blk[1]; if (cpage == SA_DATA_COMPRESSION_PAGE) { struct scsi_data_compression_page *cp = &ntcs->dcomp; *comp_supported = (cp->dce_and_dcc & SA_DCP_DCC)? TRUE : FALSE; *comp_enabled = (cp->dce_and_dcc & SA_DCP_DCE)? TRUE : FALSE; *comp_algorithm = scsi_4btoul(cp->comp_algorithm); } else { struct scsi_dev_conf_page *cp = &ntcs->dconf; /* * We don't really know whether this device supports * Data Compression if the algorithm field is * zero. Just say we do. */ *comp_supported = TRUE; *comp_enabled = (cp->sel_comp_alg != SA_COMP_NONE)? TRUE : FALSE; *comp_algorithm = cp->sel_comp_alg; } if (tcs != NULL) bcopy(ntcs, tcs, sizeof (sa_comp_t)); } if ((params_to_get & SA_PARAM_DENSITY_EXT) && (softc->scsi_rev >= SCSI_REV_SPC)) { int i; for (i = 0; i < SA_DENSITY_TYPES; i++) { scsi_report_density_support(&ccb->csio, /*retries*/ 1, /*cbfcnp*/ sadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*media*/ softc->density_type_bits[i] & SRDS_MEDIA, /*medium_type*/ softc->density_type_bits[i] & SRDS_MEDIUM_TYPE, /*data_ptr*/ softc->density_info[i], /*length*/ sizeof(softc->density_info[i]), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ REP_DENSITY_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); status = ccb->ccb_h.status & CAM_STATUS_MASK; /* * Some tape drives won't support this command at * all, but hopefully we'll minimize that with the * check for SPC or greater support above. If they * don't support the default report (neither the * MEDIA nor MEDIUM_TYPE bits set), then there is * really no point in continuing on to look for * other reports. */ if ((error != 0) || (status != CAM_REQ_CMP)) { error = 0; softc->density_info_valid[i] = 0; if (softc->density_type_bits[i] == 0) break; else continue; } softc->density_info_valid[i] = ccb->csio.dxfer_len - ccb->csio.resid; } } /* * Get logical block protection parameters if the drive supports it.
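 *
 * A minimal sketch (assuming only <stdint.h>, not a CAM API) of the
 * big-endian decoding that scsi_2btoul()/scsi_3btoul()/scsi_4btoul()
 * performed on blklen, nblocks and comp_algorithm above:
 *
 *    static uint32_t
 *    example_nbtoul(const uint8_t *b, int n)    // n = 2, 3 or 4
 *    {
 *        uint32_t v = 0;
 *
 *        while (n-- > 0)
 *            v = (v << 8) | *b++;
 *        return (v);
 *    }
 *
 * e.g. example_nbtoul(mode_blk->blklen, 3) recovers the logical
 * block length in bytes.
 *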
*/ if ((params_to_get & SA_PARAM_LBP) && (softc->flags & SA_FLAG_PROTECT_SUPP)) { struct scsi_mode_header_10 *mode10_hdr; struct scsi_control_data_prot_subpage *dp_page; struct scsi_mode_sense_10 *cdb; struct sa_prot_state *prot; int dp_len, returned_len; if (dp_size == 0) dp_size = sizeof(*dp_page); dp_len = sizeof(*mode10_hdr) + dp_size; mode10_hdr = malloc(dp_len, M_SCSISA, M_NOWAIT | M_ZERO); if (mode10_hdr == NULL) { error = ENOMEM; goto sagetparamsexit; } scsi_mode_sense_len(&ccb->csio, /*retries*/ 5, /*cbfcnp*/ sadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*dbd*/ TRUE, /*page_code*/ (prot_changeable == 0) ? SMS_PAGE_CTRL_CURRENT : SMS_PAGE_CTRL_CHANGEABLE, /*page*/ SMS_CONTROL_MODE_PAGE, /*param_buf*/ (uint8_t *)mode10_hdr, /*param_len*/ dp_len, /*minimum_cmd_size*/ 10, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ SCSIOP_TIMEOUT); /* * XXX KDM we need to be able to set the subpage in the * fill function. */ cdb = (struct scsi_mode_sense_10 *)ccb->csio.cdb_io.cdb_bytes; cdb->subpage = SA_CTRL_DP_SUBPAGE_CODE; error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); if (error != 0) { free(mode10_hdr, M_SCSISA); goto sagetparamsexit; } status = ccb->ccb_h.status & CAM_STATUS_MASK; if (status != CAM_REQ_CMP) { error = EINVAL; free(mode10_hdr, M_SCSISA); goto sagetparamsexit; } /* * The returned data length at least has to be long enough * for us to look at length in the mode page header. */ returned_len = ccb->csio.dxfer_len - ccb->csio.resid; if (returned_len < sizeof(mode10_hdr->data_length)) { error = EINVAL; free(mode10_hdr, M_SCSISA); goto sagetparamsexit; } returned_len = min(returned_len, sizeof(mode10_hdr->data_length) + scsi_2btoul(mode10_hdr->data_length)); dp_page = (struct scsi_control_data_prot_subpage *) &mode10_hdr[1]; /* * We also have to have enough data to include the prot_bits * in the subpage. */ if (returned_len < (sizeof(*mode10_hdr) + __offsetof(struct scsi_control_data_prot_subpage, prot_bits) + sizeof(dp_page->prot_bits))) { error = EINVAL; free(mode10_hdr, M_SCSISA); goto sagetparamsexit; } prot = &softc->prot_info.cur_prot_state; prot->prot_method = dp_page->prot_method; prot->pi_length = dp_page->pi_length & SA_CTRL_DP_PI_LENGTH_MASK; prot->lbp_w = (dp_page->prot_bits & SA_CTRL_DP_LBP_W) ? 1 :0; prot->lbp_r = (dp_page->prot_bits & SA_CTRL_DP_LBP_R) ? 1 :0; prot->rbdp = (dp_page->prot_bits & SA_CTRL_DP_RBDP) ? 1 :0; prot->initialized = 1; if (prot_page != NULL) bcopy(dp_page, prot_page, min(sizeof(*prot_page), sizeof(*dp_page))); free(mode10_hdr, M_SCSISA); } if (CAM_DEBUGGED(periph->path, CAM_DEBUG_INFO)) { int idx; char *xyz = mode_buffer; xpt_print_path(periph->path); printf("Mode Sense Data="); for (idx = 0; idx < mode_buffer_len; idx++) printf(" 0x%02x", xyz[idx] & 0xff); printf("\n"); } sagetparamsexit: xpt_release_ccb(ccb); free(mode_buffer, M_SCSISA); return (error); } /* * Set protection information to the pending protection information stored * in the softc. */ static int sasetprot(struct cam_periph *periph, struct sa_prot_state *new_prot) { struct sa_softc *softc; struct scsi_control_data_prot_subpage *dp_page, *dp_changeable; struct scsi_mode_header_10 *mode10_hdr, *mode10_changeable; union ccb *ccb; uint8_t current_speed; size_t dp_size, dp_page_length; int dp_len, buff_mode; int error; softc = (struct sa_softc *)periph->softc; mode10_hdr = NULL; mode10_changeable = NULL; ccb = NULL; /* * Start off with the size set to the actual length of the page * that we have defined. 
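 *
 * The length checks above follow one rule that is easy to restate
 * as a sketch (hypothetical helper, not part of the driver): only
 * the returned length, dxfer_len - resid, is trusted, and a field
 * may be read only if it lies entirely within it.
 *
 *    static int
 *    field_present(size_t returned_len, size_t hdr_len,
 *        size_t field_off, size_t field_len)
 *    {
 *        return (returned_len >= hdr_len + field_off + field_len);
 *    }
 *
 * prot_bits is dereferenced only once field_present() would hold
 * for its offset within the subpage.
 *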
*/ dp_size = sizeof(*dp_changeable); dp_page_length = dp_size - __offsetof(struct scsi_control_data_prot_subpage, prot_method); retry_length: dp_len = sizeof(*mode10_changeable) + dp_size; mode10_changeable = malloc(dp_len, M_SCSISA, M_NOWAIT | M_ZERO); if (mode10_changeable == NULL) { error = ENOMEM; goto bailout; } dp_changeable = (struct scsi_control_data_prot_subpage *)&mode10_changeable[1]; /* * First get the data protection page changeable parameters mask. * We need to know which parameters the drive supports changing. * We also need to know what the drive claims that its page length * is. The reason is that IBM drives in particular are very picky * about the page length. They want it (the length set in the * page structure itself) to be 28 bytes, and they want the * parameter list length specified in the mode select header to be * 40 bytes. So, to work with IBM drives as well as any other tape * drive, find out what the drive claims the page length is, and * make sure that we match that. */ error = sagetparams(periph, SA_PARAM_SPEED | SA_PARAM_LBP, NULL, NULL, NULL, &buff_mode, NULL, &current_speed, NULL, NULL, NULL, NULL, dp_changeable, dp_size, /*prot_changeable*/ 1); if (error != 0) goto bailout; if (scsi_2btoul(dp_changeable->length) > dp_page_length) { dp_page_length = scsi_2btoul(dp_changeable->length); dp_size = dp_page_length + __offsetof(struct scsi_control_data_prot_subpage, prot_method); free(mode10_changeable, M_SCSISA); mode10_changeable = NULL; goto retry_length; } mode10_hdr = malloc(dp_len, M_SCSISA, M_NOWAIT | M_ZERO); if (mode10_hdr == NULL) { error = ENOMEM; goto bailout; } dp_page = (struct scsi_control_data_prot_subpage *)&mode10_hdr[1]; /* * Now grab the actual current settings in the page. */ error = sagetparams(periph, SA_PARAM_SPEED | SA_PARAM_LBP, NULL, NULL, NULL, &buff_mode, NULL, &current_speed, NULL, NULL, NULL, NULL, dp_page, dp_size, /*prot_changeable*/ 0); if (error != 0) goto bailout; /* These two fields need to be 0 for MODE SELECT */ scsi_ulto2b(0, mode10_hdr->data_length); mode10_hdr->medium_type = 0; /* We are not including a block descriptor */ scsi_ulto2b(0, mode10_hdr->blk_desc_len); mode10_hdr->dev_spec = current_speed; /* if set, set single-initiator buffering mode */ if (softc->buffer_mode == SMH_SA_BUF_MODE_SIBUF) { mode10_hdr->dev_spec |= SMH_SA_BUF_MODE_SIBUF; } /* * For each field, make sure that the drive allows changing it * before bringing in the user's setting.
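 *
 * The per-field merging below follows the classic changeable-mask
 * idiom; sketched as a stand-alone helper (illustrative names): a
 * MODE SENSE with page control set to "changeable" returns 1-bits
 * where the drive will accept a change, so writable bits come from
 * the caller and the rest are carried over untouched.
 *
 *    static uint8_t
 *    merge_changeable(uint8_t cur, uint8_t want, uint8_t mask)
 *    {
 *        return ((cur & ~mask) | (want & mask));
 *    }
 *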
*/ if (dp_changeable->prot_method != 0) dp_page->prot_method = new_prot->prot_method; if (dp_changeable->pi_length & SA_CTRL_DP_PI_LENGTH_MASK) { dp_page->pi_length &= ~SA_CTRL_DP_PI_LENGTH_MASK; dp_page->pi_length |= (new_prot->pi_length & SA_CTRL_DP_PI_LENGTH_MASK); } if (dp_changeable->prot_bits & SA_CTRL_DP_LBP_W) { if (new_prot->lbp_w) dp_page->prot_bits |= SA_CTRL_DP_LBP_W; else dp_page->prot_bits &= ~SA_CTRL_DP_LBP_W; } if (dp_changeable->prot_bits & SA_CTRL_DP_LBP_R) { if (new_prot->lbp_r) dp_page->prot_bits |= SA_CTRL_DP_LBP_R; else dp_page->prot_bits &= ~SA_CTRL_DP_LBP_R; } if (dp_changeable->prot_bits & SA_CTRL_DP_RBDP) { if (new_prot->rbdp) dp_page->prot_bits |= SA_CTRL_DP_RBDP; else dp_page->prot_bits &= ~SA_CTRL_DP_RBDP; } ccb = cam_periph_getccb(periph, 1); scsi_mode_select_len(&ccb->csio, /*retries*/ 5, /*cbfcnp*/ sadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*scsi_page_fmt*/ TRUE, /*save_pages*/ FALSE, /*param_buf*/ (uint8_t *)mode10_hdr, /*param_len*/ dp_len, /*minimum_cmd_size*/ 10, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); if (error != 0) goto bailout; if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = EINVAL; goto bailout; } /* * The operation was successful. We could just copy the settings * the user requested, but just in case the drive ignored some of * our settings, let's ask for status again. */ error = sagetparams(periph, SA_PARAM_SPEED | SA_PARAM_LBP, NULL, NULL, NULL, &buff_mode, NULL, &current_speed, NULL, NULL, NULL, NULL, dp_page, dp_size, 0); bailout: if (ccb != NULL) xpt_release_ccb(ccb); free(mode10_hdr, M_SCSISA); free(mode10_changeable, M_SCSISA); return (error); } /* * The purpose of this function is to set one of four different parameters * for a tape drive: * - blocksize * - density * - compression / compression algorithm * - buffering mode * * The assumption is that this will be called from saioctl(), and therefore * from a process context. Thus the waiting malloc calls below. If that * assumption ever changes, the malloc calls should be changed to be * NOWAIT mallocs. * * Any or all of the four parameters may be set when this function is * called. It should handle setting more than one parameter at once. */ static int sasetparams(struct cam_periph *periph, sa_params params_to_set, u_int32_t blocksize, u_int8_t density, u_int32_t calg, u_int32_t sense_flags) { struct sa_softc *softc; u_int32_t current_blocksize; u_int32_t current_calg; u_int8_t current_density; u_int8_t current_speed; int comp_enabled, comp_supported; void *mode_buffer; int mode_buffer_len; struct scsi_mode_header_6 *mode_hdr; struct scsi_mode_blk_desc *mode_blk; sa_comp_t *ccomp, *cpage; int buff_mode; union ccb *ccb = NULL; int error; softc = (struct sa_softc *)periph->softc; ccomp = malloc(sizeof (sa_comp_t), M_SCSISA, M_NOWAIT); if (ccomp == NULL) return (ENOMEM); /* * Since it doesn't make sense to set the number of blocks, or * write protection, we won't try to get the current value. We * always want to get the blocksize, so we can set it back to the * proper value.
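 *
 * The encode-side twin of the decode helpers, as used further down
 * via scsi_ulto3b(blocksize, mode_blk->blklen); a minimal sketch
 * assuming only <stdint.h>:
 *
 *    static void
 *    example_ulto3b(uint32_t v, uint8_t *b)
 *    {
 *        b[0] = (v >> 16) & 0xff;
 *        b[1] = (v >> 8) & 0xff;
 *        b[2] = v & 0xff;
 *    }
 *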
*/ error = sagetparams(periph, params_to_set | SA_PARAM_BLOCKSIZE | SA_PARAM_SPEED, &current_blocksize, &current_density, NULL, &buff_mode, NULL, &current_speed, &comp_supported, &comp_enabled, &current_calg, ccomp, NULL, 0, 0); if (error != 0) { free(ccomp, M_SCSISA); return (error); } mode_buffer_len = sizeof(*mode_hdr) + sizeof(*mode_blk); if (params_to_set & SA_PARAM_COMPRESSION) mode_buffer_len += sizeof (sa_comp_t); mode_buffer = malloc(mode_buffer_len, M_SCSISA, M_NOWAIT | M_ZERO); if (mode_buffer == NULL) { free(ccomp, M_SCSISA); return (ENOMEM); } mode_hdr = (struct scsi_mode_header_6 *)mode_buffer; mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1]; ccb = cam_periph_getccb(periph, 1); retry: if (params_to_set & SA_PARAM_COMPRESSION) { if (mode_blk) { cpage = (sa_comp_t *)&mode_blk[1]; } else { cpage = (sa_comp_t *)&mode_hdr[1]; } bcopy(ccomp, cpage, sizeof (sa_comp_t)); cpage->hdr.pagecode &= ~0x80; } else cpage = NULL; /* * If the caller wants us to set the blocksize, use the one they * pass in. Otherwise, use the blocksize we got back from the * mode sense above. */ if (mode_blk) { if (params_to_set & SA_PARAM_BLOCKSIZE) scsi_ulto3b(blocksize, mode_blk->blklen); else scsi_ulto3b(current_blocksize, mode_blk->blklen); /* * Set density if requested, else preserve old density. * SCSI_SAME_DENSITY only applies to SCSI-2 or better * devices, else density we've latched up in our softc. */ if (params_to_set & SA_PARAM_DENSITY) { mode_blk->density = density; } else if (softc->scsi_rev > SCSI_REV_CCS) { mode_blk->density = SCSI_SAME_DENSITY; } else { mode_blk->density = softc->media_density; } } /* * For mode selects, these two fields must be zero. */ mode_hdr->data_length = 0; mode_hdr->medium_type = 0; /* set the speed to the current value */ mode_hdr->dev_spec = current_speed; /* if set, set single-initiator buffering mode */ if (softc->buffer_mode == SMH_SA_BUF_MODE_SIBUF) { mode_hdr->dev_spec |= SMH_SA_BUF_MODE_SIBUF; } if (mode_blk) mode_hdr->blk_desc_len = sizeof(struct scsi_mode_blk_desc); else mode_hdr->blk_desc_len = 0; /* * First, if the user wants us to set the compression algorithm or * just turn compression on, check to make sure that this drive * supports compression. */ if (params_to_set & SA_PARAM_COMPRESSION) { /* * If the compression algorithm is 0, disable compression. * If the compression algorithm is non-zero, enable * compression and set the compression type to the * specified compression algorithm, unless the algorithm is * MT_COMP_ENABLE. In that case, we look at the * compression algorithm that is currently set and if it is * non-zero, we leave it as-is. If it is zero, and we have * saved a compression algorithm from a time when * compression was enabled before, set the compression to * the saved value. */ switch (ccomp->hdr.pagecode & ~0x80) { case SA_DEVICE_CONFIGURATION_PAGE: { struct scsi_dev_conf_page *dcp = &cpage->dconf; if (calg == 0) { dcp->sel_comp_alg = SA_COMP_NONE; break; } if (calg != MT_COMP_ENABLE) { dcp->sel_comp_alg = calg; } else if (dcp->sel_comp_alg == SA_COMP_NONE && softc->saved_comp_algorithm != 0) { dcp->sel_comp_alg = softc->saved_comp_algorithm; } break; } case SA_DATA_COMPRESSION_PAGE: if (ccomp->dcomp.dce_and_dcc & SA_DCP_DCC) { struct scsi_data_compression_page *dcp = &cpage->dcomp; if (calg == 0) { /* * Disable compression, but leave the * decompression and the capability bit * alone.
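 *
 * In bit terms (annotation only, using the driver's own flag
 * names): DCE enables compression, DCC is the drive-reported
 * "capable" bit, and DDE keeps decompression on so previously
 * compressed tapes stay readable.  The two states written back
 * below are therefore:
 *
 *    dcp->dce_and_dcc = SA_DCP_DCC;                // off
 *    dcp->dce_and_dcc = SA_DCP_DCE | SA_DCP_DCC;   // on
 *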
*/ dcp->dce_and_dcc = SA_DCP_DCC; dcp->dde_and_red |= SA_DCP_DDE; break; } /* enable compression && decompression */ dcp->dce_and_dcc = SA_DCP_DCE | SA_DCP_DCC; dcp->dde_and_red |= SA_DCP_DDE; /* * If there, use compression algorithm from caller. * Otherwise, if there's a saved compression algorithm * and there is no current algorithm, use the saved * algorithm. Else parrot back what we got and hope * for the best. */ if (calg != MT_COMP_ENABLE) { scsi_ulto4b(calg, dcp->comp_algorithm); scsi_ulto4b(calg, dcp->decomp_algorithm); } else if (scsi_4btoul(dcp->comp_algorithm) == 0 && softc->saved_comp_algorithm != 0) { scsi_ulto4b(softc->saved_comp_algorithm, dcp->comp_algorithm); scsi_ulto4b(softc->saved_comp_algorithm, dcp->decomp_algorithm); } break; } /* * Compression does not appear to be supported- * at least via the DATA COMPRESSION page. It * would be too much to ask us to believe that * the page itself is supported, but incorrectly * reports an ability to manipulate data compression, * so we'll assume that this device doesn't support * compression. We can just fall through for that. */ /* FALLTHROUGH */ default: /* * The drive doesn't seem to support compression, * so turn off the set compression bit. */ params_to_set &= ~SA_PARAM_COMPRESSION; xpt_print(periph->path, "device does not seem to support compression\n"); /* * If that was the only thing the user wanted us to set, * clean up allocated resources and return with * 'operation not supported'. */ if (params_to_set == SA_PARAM_NONE) { free(mode_buffer, M_SCSISA); xpt_release_ccb(ccb); return (ENODEV); } /* * That wasn't the only thing the user wanted us to set. * So, decrease the stated mode buffer length by the * size of the compression mode page. */ mode_buffer_len -= sizeof(sa_comp_t); } } /* It is safe to retry this operation */ scsi_mode_select(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, (params_to_set & SA_PARAM_COMPRESSION)? TRUE : FALSE, FALSE, mode_buffer, mode_buffer_len, SSD_FULL_SIZE, SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, sense_flags, softc->device_stats); if (CAM_DEBUGGED(periph->path, CAM_DEBUG_INFO)) { int idx; char *xyz = mode_buffer; xpt_print_path(periph->path); printf("Err%d, Mode Select Data=", error); for (idx = 0; idx < mode_buffer_len; idx++) printf(" 0x%02x", xyz[idx] & 0xff); printf("\n"); } if (error) { /* * If we can, try without setting density/blocksize. */ if (mode_blk) { if ((params_to_set & (SA_PARAM_DENSITY|SA_PARAM_BLOCKSIZE)) == 0) { mode_blk = NULL; goto retry; } } else { mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1]; cpage = (sa_comp_t *)&mode_blk[1]; } /* * If we were setting the blocksize, and that failed, we * want to set it to its original value. If we weren't * setting the blocksize, we don't want to change it. */ scsi_ulto3b(current_blocksize, mode_blk->blklen); /* * Set density if requested, else preserve old density. * SCSI_SAME_DENSITY only applies to SCSI-2 or better * devices, else density we've latched up in our softc. */ if (params_to_set & SA_PARAM_DENSITY) { mode_blk->density = current_density; } else if (softc->scsi_rev > SCSI_REV_CCS) { mode_blk->density = SCSI_SAME_DENSITY; } else { mode_blk->density = softc->media_density; } if (params_to_set & SA_PARAM_COMPRESSION) bcopy(ccomp, cpage, sizeof (sa_comp_t)); /* * The retry count is the only CCB field that might have been * changed that we care about, so reset it back to 1. 
*/ ccb->ccb_h.retry_count = 1; cam_periph_runccb(ccb, saerror, 0, sense_flags, softc->device_stats); } xpt_release_ccb(ccb); if (ccomp != NULL) free(ccomp, M_SCSISA); if (params_to_set & SA_PARAM_COMPRESSION) { if (error) { softc->flags &= ~SA_FLAG_COMP_ENABLED; /* * Even if we get an error setting compression, * do not say that we don't support it. We could * have been wrong, or it may be media specific. * softc->flags &= ~SA_FLAG_COMP_SUPP; */ softc->saved_comp_algorithm = softc->comp_algorithm; softc->comp_algorithm = 0; } else { softc->flags |= SA_FLAG_COMP_ENABLED; softc->comp_algorithm = calg; } } free(mode_buffer, M_SCSISA); return (error); } static int saextget(struct cdev *dev, struct cam_periph *periph, struct sbuf *sb, struct mtextget *g) { int indent, error; char tmpstr[80]; struct sa_softc *softc; int tmpint; uint32_t maxio_tmp; struct ccb_getdev cgd; softc = (struct sa_softc *)periph->softc; error = 0; error = sagetparams_common(dev, periph); if (error) goto extget_bailout; if (!SA_IS_CTRL(dev) && !softc->open_pending_mount) sagetpos(periph); indent = 0; SASBADDNODE(sb, indent, mtextget); /* * Basic CAM peripheral information. */ SASBADDVARSTR(sb, indent, periph->periph_name, %s, periph_name, strlen(periph->periph_name) + 1); SASBADDUINT(sb, indent, periph->unit_number, %u, unit_number); xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if ((cgd.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { g->status = MT_EXT_GET_ERROR; snprintf(g->error_str, sizeof(g->error_str), "Error %#x returned for XPT_GDEV_TYPE CCB", cgd.ccb_h.status); goto extget_bailout; } cam_strvis(tmpstr, cgd.inq_data.vendor, sizeof(cgd.inq_data.vendor), sizeof(tmpstr)); SASBADDVARSTRDESC(sb, indent, tmpstr, %s, vendor, sizeof(cgd.inq_data.vendor) + 1, "SCSI Vendor ID"); cam_strvis(tmpstr, cgd.inq_data.product, sizeof(cgd.inq_data.product), sizeof(tmpstr)); SASBADDVARSTRDESC(sb, indent, tmpstr, %s, product, sizeof(cgd.inq_data.product) + 1, "SCSI Product ID"); cam_strvis(tmpstr, cgd.inq_data.revision, sizeof(cgd.inq_data.revision), sizeof(tmpstr)); SASBADDVARSTRDESC(sb, indent, tmpstr, %s, revision, sizeof(cgd.inq_data.revision) + 1, "SCSI Revision"); if (cgd.serial_num_len > 0) { char *tmpstr2; size_t ts2_len; int ts2_malloc; ts2_len = 0; if (cgd.serial_num_len > sizeof(tmpstr)) { ts2_len = cgd.serial_num_len + 1; ts2_malloc = 1; tmpstr2 = malloc(ts2_len, M_SCSISA, M_NOWAIT | M_ZERO); /* * The 80 characters allocated on the stack above * will handle the vast majority of serial numbers. * If we run into one that is larger than that, and * we can't malloc the length without blocking, * bail out with an out of memory error. */ if (tmpstr2 == NULL) { error = ENOMEM; goto extget_bailout; } } else { ts2_len = sizeof(tmpstr); ts2_malloc = 0; tmpstr2 = tmpstr; } cam_strvis(tmpstr2, cgd.serial_num, cgd.serial_num_len, ts2_len); SASBADDVARSTRDESC(sb, indent, tmpstr2, %s, serial_num, (ssize_t)cgd.serial_num_len + 1, "Serial Number"); if (ts2_malloc != 0) free(tmpstr2, M_SCSISA); } else { /* * We return a serial_num element in any case, but it will * be empty if the device has no serial number. 
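 *
 * The serial number handling above is an instance of a common
 * stack-buffer-with-malloc-fallback pattern; a condensed sketch
 * (hypothetical names, M_NOWAIT because the caller cannot block):
 *
 *    char stackbuf[80], *p = stackbuf;
 *
 *    if (need > sizeof(stackbuf)) {
 *        p = malloc(need, M_TAG, M_NOWAIT | M_ZERO);
 *        if (p == NULL)
 *            return (ENOMEM);    // cannot block, so bail
 *    }
 *    // ... format into p ...
 *    if (p != stackbuf)
 *        free(p, M_TAG);
 *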
*/ tmpstr[0] = '\0'; SASBADDVARSTRDESC(sb, indent, tmpstr, %s, serial_num, (ssize_t)0, "Serial Number"); } SASBADDUINTDESC(sb, indent, softc->maxio, %u, maxio, "Maximum I/O size allowed by driver and controller"); SASBADDUINTDESC(sb, indent, softc->cpi_maxio, %u, cpi_maxio, "Maximum I/O size reported by controller"); SASBADDUINTDESC(sb, indent, softc->max_blk, %u, max_blk, "Maximum block size supported by tape drive and media"); SASBADDUINTDESC(sb, indent, softc->min_blk, %u, min_blk, "Minimum block size supported by tape drive and media"); SASBADDUINTDESC(sb, indent, softc->blk_gran, %u, blk_gran, "Block granularity supported by tape drive and media"); maxio_tmp = min(softc->max_blk, softc->maxio); SASBADDUINTDESC(sb, indent, maxio_tmp, %u, max_effective_iosize, "Maximum possible I/O size"); SASBADDINTDESC(sb, indent, softc->flags & SA_FLAG_FIXED ? 1 : 0, %d, fixed_mode, "Set to 1 for fixed block mode, 0 for variable block"); /* * XXX KDM include SIM, bus, target, LUN? */ if (softc->flags & SA_FLAG_COMP_UNSUPP) tmpint = 0; else tmpint = 1; SASBADDINTDESC(sb, indent, tmpint, %d, compression_supported, "Set to 1 if compression is supported, 0 if not"); if (softc->flags & SA_FLAG_COMP_ENABLED) tmpint = 1; else tmpint = 0; SASBADDINTDESC(sb, indent, tmpint, %d, compression_enabled, "Set to 1 if compression is enabled, 0 if not"); SASBADDUINTDESC(sb, indent, softc->comp_algorithm, %u, compression_algorithm, "Numeric compression algorithm"); safillprot(softc, &indent, sb); SASBADDUINTDESC(sb, indent, softc->media_blksize, %u, media_blocksize, "Block size reported by drive or set by user"); SASBADDINTDESC(sb, indent, (intmax_t)softc->fileno, %jd, calculated_fileno, "Calculated file number, -1 if unknown"); SASBADDINTDESC(sb, indent, (intmax_t)softc->blkno, %jd, calculated_rel_blkno, "Calculated block number relative to file, " "set to -1 if unknown"); SASBADDINTDESC(sb, indent, (intmax_t)softc->rep_fileno, %jd, reported_fileno, "File number reported by drive, -1 if unknown"); SASBADDINTDESC(sb, indent, (intmax_t)softc->rep_blkno, %jd, reported_blkno, "Block number relative to BOP/BOT reported by " "drive, -1 if unknown"); SASBADDINTDESC(sb, indent, (intmax_t)softc->partition, %jd, partition, "Current partition number, 0 is the default"); SASBADDINTDESC(sb, indent, softc->bop, %d, bop, "Set to 1 if drive is at the beginning of partition/tape, 0 if " "not, -1 if unknown"); SASBADDINTDESC(sb, indent, softc->eop, %d, eop, "Set to 1 if drive is past early warning, 0 if not, -1 if unknown"); SASBADDINTDESC(sb, indent, softc->bpew, %d, bpew, "Set to 1 if drive is past programmable early warning, 0 if not, " "-1 if unknown"); SASBADDINTDESC(sb, indent, (intmax_t)softc->last_io_resid, %jd, residual, "Residual for the last I/O"); /* * XXX KDM should we send a string with the current driver * status already decoded instead of a numeric value? 
*/ SASBADDINTDESC(sb, indent, softc->dsreg, %d, dsreg, "Current state of the driver"); safilldensitysb(softc, &indent, sb); SASBENDNODE(sb, indent, mtextget); extget_bailout: return (error); } static int saparamget(struct sa_softc *softc, struct sbuf *sb) { int indent; indent = 0; SASBADDNODE(sb, indent, mtparamget); SASBADDINTDESC(sb, indent, softc->sili, %d, sili, "Suppress an error on underlength variable reads"); SASBADDINTDESC(sb, indent, softc->eot_warn, %d, eot_warn, "Return an error to warn that end of tape is approaching"); safillprot(softc, &indent, sb); SASBENDNODE(sb, indent, mtparamget); return (0); } static void saprevent(struct cam_periph *periph, int action) { struct sa_softc *softc; union ccb *ccb; int error, sf; softc = (struct sa_softc *)periph->softc; if ((action == PR_ALLOW) && (softc->flags & SA_FLAG_TAPE_LOCKED) == 0) return; if ((action == PR_PREVENT) && (softc->flags & SA_FLAG_TAPE_LOCKED) != 0) return; /* * We can be quiet about illegal requests. */ if (CAM_DEBUGGED(periph->path, CAM_DEBUG_INFO)) { sf = 0; } else sf = SF_QUIET_IR; ccb = cam_periph_getccb(periph, 1); /* It is safe to retry this operation */ scsi_prevent(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, action, SSD_FULL_SIZE, SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, sf, softc->device_stats); if (error == 0) { if (action == PR_ALLOW) softc->flags &= ~SA_FLAG_TAPE_LOCKED; else softc->flags |= SA_FLAG_TAPE_LOCKED; } xpt_release_ccb(ccb); } static int sarewind(struct cam_periph *periph) { union ccb *ccb; struct sa_softc *softc; int error; softc = (struct sa_softc *)periph->softc; ccb = cam_periph_getccb(periph, 1); /* It is safe to retry this operation */ scsi_rewind(&ccb->csio, 2, sadone, MSG_SIMPLE_Q_TAG, FALSE, SSD_FULL_SIZE, REWIND_TIMEOUT); softc->dsreg = MTIO_DSREG_REW; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; xpt_release_ccb(ccb); if (error == 0) { softc->partition = softc->fileno = softc->blkno = (daddr_t) 0; softc->rep_fileno = softc->rep_blkno = (daddr_t) 0; } else { softc->fileno = softc->blkno = (daddr_t) -1; softc->partition = (daddr_t) -1; softc->rep_fileno = softc->rep_blkno = (daddr_t) -1; } return (error); } static int saspace(struct cam_periph *periph, int count, scsi_space_code code) { union ccb *ccb; struct sa_softc *softc; int error; softc = (struct sa_softc *)periph->softc; ccb = cam_periph_getccb(periph, 1); /* This cannot be retried */ scsi_space(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, code, count, SSD_FULL_SIZE, SPACE_TIMEOUT); /* * Clear residual because we will be using it. */ softc->last_ctl_resid = 0; softc->dsreg = (count < 0)? MTIO_DSREG_REV : MTIO_DSREG_FWD; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; xpt_release_ccb(ccb); /* * If a spacing operation has failed, we need to invalidate * this mount. * * If the spacing operation was setmarks or to end of recorded data, * we no longer know our relative position. * * If the spacing operation was spacing files in reverse, we * take account of the residual, but still check against less * than zero- if we've gone negative, we must have hit BOT. * * If the spacing operation was spacing records in reverse and * we have a residual, we've either hit BOT or hit a filemark. * In the former case, we know our new record number (0).
In * the latter case, we have absolutely no idea what the real * record number is- we've stopped between the end of the last * record in the previous file and the filemark that stopped * our spacing backwards. */ if (error) { softc->fileno = softc->blkno = (daddr_t) -1; softc->rep_blkno = softc->partition = (daddr_t) -1; softc->rep_fileno = (daddr_t) -1; } else if (code == SS_SETMARKS || code == SS_EOD) { softc->fileno = softc->blkno = (daddr_t) -1; } else if (code == SS_FILEMARKS && softc->fileno != (daddr_t) -1) { softc->fileno += (count - softc->last_ctl_resid); if (softc->fileno < 0) /* we must have hit BOT */ softc->fileno = 0; softc->blkno = 0; } else if (code == SS_BLOCKS && softc->blkno != (daddr_t) -1) { softc->blkno += (count - softc->last_ctl_resid); if (count < 0) { if (softc->last_ctl_resid || softc->blkno < 0) { if (softc->fileno == 0) { softc->blkno = 0; } else { softc->blkno = (daddr_t) -1; } } } } if (error == 0) sagetpos(periph); return (error); } static int sawritefilemarks(struct cam_periph *periph, int nmarks, int setmarks, int immed) { union ccb *ccb; struct sa_softc *softc; int error, nwm = 0; softc = (struct sa_softc *)periph->softc; if (softc->open_rdonly) return (EBADF); ccb = cam_periph_getccb(periph, 1); /* * Clear residual because we will be using it. */ softc->last_ctl_resid = 0; softc->dsreg = MTIO_DSREG_FMK; /* this *must* not be retried */ scsi_write_filemarks(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, immed, setmarks, nmarks, SSD_FULL_SIZE, IO_TIMEOUT); softc->dsreg = MTIO_DSREG_REST; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); if (error == 0 && nmarks) { struct sa_softc *softc = (struct sa_softc *)periph->softc; nwm = nmarks - softc->last_ctl_resid; softc->filemarks += nwm; } xpt_release_ccb(ccb); /* * Update relative positions (if we're doing that). */ if (error) { softc->fileno = softc->blkno = softc->partition = (daddr_t) -1; } else if (softc->fileno != (daddr_t) -1) { softc->fileno += nwm; softc->blkno = 0; } /* * Ask the tape drive for position information. */ sagetpos(periph); /* * If we got valid position information, since we just wrote a file * mark, we know we're at the file mark and block 0 after that * filemark. */ if (softc->rep_fileno != (daddr_t) -1) { softc->fileno = softc->rep_fileno; softc->blkno = 0; } return (error); } static int sagetpos(struct cam_periph *periph) { union ccb *ccb; struct scsi_tape_position_long_data long_pos; struct sa_softc *softc = (struct sa_softc *)periph->softc; int error; if (softc->quirks & SA_QUIRK_NO_LONG_POS) { softc->rep_fileno = (daddr_t) -1; softc->rep_blkno = (daddr_t) -1; softc->bop = softc->eop = softc->bpew = -1; return (EOPNOTSUPP); } bzero(&long_pos, sizeof(long_pos)); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_read_position_10(&ccb->csio, /*retries*/ 1, /*cbfcnp*/ sadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*service_action*/ SA_RPOS_LONG_FORM, /*data_ptr*/ (uint8_t *)&long_pos, /*length*/ sizeof(long_pos), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ SCSIOP_TIMEOUT); softc->dsreg = MTIO_DSREG_RBSY; error = cam_periph_runccb(ccb, saerror, 0, SF_QUIET_IR, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; if (error == 0) { if (long_pos.flags & SA_RPOS_LONG_MPU) { /* * If the drive doesn't know what file mark it is * on, our calculated filemark isn't going to be * accurate either.
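 *
 * The saspace() bookkeeping above boils down to one rule, shown
 * here as a hypothetical helper (a sketch, not driver code): a
 * SPACE for "count" objects that stops early reports the shortfall
 * as a residual, so the position moved by count - resid, and a
 * reverse space that would go negative means BOT was reached.
 *
 *    static daddr_t
 *    advance(daddr_t pos, int count, int resid)
 *    {
 *        if (pos == (daddr_t)-1)
 *            return (pos);            // already unknown
 *        pos += count - resid;
 *        return (pos < 0 ? 0 : pos);  // clamp at BOT
 *    }
 *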
*/ softc->fileno = (daddr_t) -1; softc->rep_fileno = (daddr_t) -1; } else { softc->fileno = softc->rep_fileno = scsi_8btou64(long_pos.logical_file_num); } if (long_pos.flags & SA_RPOS_LONG_LONU) { softc->partition = (daddr_t) -1; softc->rep_blkno = (daddr_t) -1; /* * If the tape drive doesn't know its block * position, we can't claim to know it either. */ softc->blkno = (daddr_t) -1; } else { softc->partition = scsi_4btoul(long_pos.partition); softc->rep_blkno = scsi_8btou64(long_pos.logical_object_num); } if (long_pos.flags & SA_RPOS_LONG_BOP) softc->bop = 1; else softc->bop = 0; if (long_pos.flags & SA_RPOS_LONG_EOP) softc->eop = 1; else softc->eop = 0; if ((long_pos.flags & SA_RPOS_LONG_BPEW) || (softc->set_pews_status != 0)) { softc->bpew = 1; if (softc->set_pews_status > 0) softc->set_pews_status--; } else softc->bpew = 0; } else if (error == EINVAL) { /* * If this drive returned an invalid-request type error, * then it likely doesn't support the long form report. */ softc->quirks |= SA_QUIRK_NO_LONG_POS; } if (error != 0) { softc->rep_fileno = softc->rep_blkno = (daddr_t) -1; softc->partition = (daddr_t) -1; softc->bop = softc->eop = softc->bpew = -1; } xpt_release_ccb(ccb); return (error); } static int sardpos(struct cam_periph *periph, int hard, u_int32_t *blkptr) { struct scsi_tape_position_data loc; union ccb *ccb; struct sa_softc *softc = (struct sa_softc *)periph->softc; int error; /* * We try and flush any buffered writes here if we were writing * and we're trying to get hardware block position. It eats * up performance substantially, but I'm wary of drive firmware. * * I think that *logical* block position is probably okay- * but hardware block position might have to wait for data * to hit media to be valid. Caveat Emptor. */ if (hard && (softc->flags & SA_FLAG_TAPE_WRITTEN)) { error = sawritefilemarks(periph, 0, 0, 0); if (error && error != EACCES) return (error); } ccb = cam_periph_getccb(periph, 1); scsi_read_position(&ccb->csio, 1, sadone, MSG_SIMPLE_Q_TAG, hard, &loc, SSD_FULL_SIZE, SCSIOP_TIMEOUT); softc->dsreg = MTIO_DSREG_RBSY; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; if (error == 0) { if (loc.flags & SA_RPOS_UNCERTAIN) { error = EINVAL; /* nothing is certain */ } else { *blkptr = scsi_4btoul(loc.firstblk); } } xpt_release_ccb(ccb); return (error); } static int sasetpos(struct cam_periph *periph, int hard, struct mtlocate *locate_info) { union ccb *ccb; struct sa_softc *softc; int locate16; int immed, cp; int error; /* * We used to try and flush any buffered writes here. * Now we push this onto user applications to either * flush the pending writes themselves (via a zero count * WRITE FILEMARKS command) or they can trust their tape * drive to do this correctly for them. */ softc = (struct sa_softc *)periph->softc; ccb = cam_periph_getccb(periph, 1); cp = locate_info->flags & MT_LOCATE_FLAG_CHANGE_PART ? 1 : 0; immed = locate_info->flags & MT_LOCATE_FLAG_IMMED ? 1 : 0; /* * Determine whether we have to use LOCATE or LOCATE16. The hard * bit is only possible with LOCATE, but the new ioctls do not * allow setting that bit. So we can't get into the situation of * having the hard bit set with a block address that is larger than * 32-bits. 
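 *
 * Condensed into a predicate (sketch of the decision implemented
 * just below; SA_SPOS_MAX_BLK is the largest address a LOCATE(10)
 * CDB can carry):
 *
 *    static int
 *    need_locate16(int hard, int dest_type, int bam, uint64_t id)
 *    {
 *        if (hard)
 *            return (0);    // hard bit exists only in LOCATE(10)
 *        return (dest_type != MT_LOCATE_DEST_OBJECT ||
 *            bam != MT_LOCATE_BAM_IMPLICIT ||
 *            id > SA_SPOS_MAX_BLK);
 *    }
 *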
*/ if (hard != 0) locate16 = 0; else if ((locate_info->dest_type != MT_LOCATE_DEST_OBJECT) || (locate_info->block_address_mode != MT_LOCATE_BAM_IMPLICIT) || (locate_info->logical_id > SA_SPOS_MAX_BLK)) locate16 = 1; else locate16 = 0; if (locate16 != 0) { scsi_locate_16(&ccb->csio, /*retries*/ 1, /*cbfcnp*/ sadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*immed*/ immed, /*cp*/ cp, /*dest_type*/ locate_info->dest_type, /*bam*/ locate_info->block_address_mode, /*partition*/ locate_info->partition, /*logical_id*/ locate_info->logical_id, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ SPACE_TIMEOUT); } else { scsi_locate_10(&ccb->csio, /*retries*/ 1, /*cbfcnp*/ sadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*immed*/ immed, /*cp*/ cp, /*hard*/ hard, /*partition*/ locate_info->partition, /*block_address*/ locate_info->logical_id, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ SPACE_TIMEOUT); } softc->dsreg = MTIO_DSREG_POS; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; xpt_release_ccb(ccb); /* * We assume the calculated file and block numbers are unknown * unless we have enough information to populate them. */ softc->fileno = softc->blkno = (daddr_t) -1; /* * If the user requested changing the partition and the request * succeeded, note the partition. */ if ((error == 0) && (cp != 0)) softc->partition = locate_info->partition; else softc->partition = (daddr_t) -1; if (error == 0) { switch (locate_info->dest_type) { case MT_LOCATE_DEST_FILE: /* * This is the only case where we can reliably * calculate the file and block numbers. */ softc->fileno = locate_info->logical_id; softc->blkno = 0; break; case MT_LOCATE_DEST_OBJECT: case MT_LOCATE_DEST_SET: case MT_LOCATE_DEST_EOD: default: break; } } /* * Ask the drive for current position information. */ sagetpos(periph); return (error); } static int saretension(struct cam_periph *periph) { union ccb *ccb; struct sa_softc *softc; int error; softc = (struct sa_softc *)periph->softc; ccb = cam_periph_getccb(periph, 1); /* It is safe to retry this operation */ scsi_load_unload(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, FALSE, FALSE, TRUE, TRUE, SSD_FULL_SIZE, ERASE_TIMEOUT); softc->dsreg = MTIO_DSREG_TEN; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; xpt_release_ccb(ccb); if (error == 0) { softc->partition = softc->fileno = softc->blkno = (daddr_t) 0; sagetpos(periph); } else softc->partition = softc->fileno = softc->blkno = (daddr_t) -1; return (error); } static int sareservereleaseunit(struct cam_periph *periph, int reserve) { union ccb *ccb; struct sa_softc *softc; int error; softc = (struct sa_softc *)periph->softc; ccb = cam_periph_getccb(periph, 1); /* It is safe to retry this operation */ scsi_reserve_release_unit(&ccb->csio, 2, sadone, MSG_SIMPLE_Q_TAG, FALSE, 0, SSD_FULL_SIZE, SCSIOP_TIMEOUT, reserve); softc->dsreg = MTIO_DSREG_RBSY; error = cam_periph_runccb(ccb, saerror, 0, SF_RETRY_UA | SF_NO_PRINT, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; xpt_release_ccb(ccb); /* * If the error was Illegal Request, then the device doesn't support * RESERVE/RELEASE. This is not an error. 
*/ if (error == EINVAL) { error = 0; } return (error); } static int saloadunload(struct cam_periph *periph, int load) { union ccb *ccb; struct sa_softc *softc; int error; softc = (struct sa_softc *)periph->softc; ccb = cam_periph_getccb(periph, 1); /* It is safe to retry this operation */ scsi_load_unload(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, FALSE, FALSE, FALSE, load, SSD_FULL_SIZE, REWIND_TIMEOUT); softc->dsreg = (load)? MTIO_DSREG_LD : MTIO_DSREG_UNL; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; xpt_release_ccb(ccb); if (error || load == 0) { softc->partition = softc->fileno = softc->blkno = (daddr_t) -1; softc->rep_fileno = softc->rep_blkno = (daddr_t) -1; } else if (error == 0) { softc->partition = softc->fileno = softc->blkno = (daddr_t) 0; sagetpos(periph); } return (error); } static int saerase(struct cam_periph *periph, int longerase) { union ccb *ccb; struct sa_softc *softc; int error; softc = (struct sa_softc *)periph->softc; if (softc->open_rdonly) return (EBADF); ccb = cam_periph_getccb(periph, 1); scsi_erase(&ccb->csio, 1, sadone, MSG_SIMPLE_Q_TAG, FALSE, longerase, SSD_FULL_SIZE, ERASE_TIMEOUT); softc->dsreg = MTIO_DSREG_ZER; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; xpt_release_ccb(ccb); return (error); } /* * Fill an sbuf with density data in XML format. This particular macro * works for multi-byte integer fields. * * Note that 1 byte fields aren't supported here. The reason is that the * compiler does not evaluate the sizeof(), and assumes that any of the * sizes are possible for a given field. So passing in a multi-byte * field will result in a warning that the assignment makes an integer * from a pointer without a cast, if there is an assignment in the 1 byte * case. */ #define SAFILLDENSSB(dens_data, sb, indent, field, desc_remain, \ len_to_go, cur_offset, desc){ \ size_t cur_field_len; \ \ cur_field_len = sizeof(dens_data->field); \ if (desc_remain < cur_field_len) { \ len_to_go -= desc_remain; \ cur_offset += desc_remain; \ continue; \ } \ len_to_go -= cur_field_len; \ cur_offset += cur_field_len; \ desc_remain -= cur_field_len; \ \ switch (sizeof(dens_data->field)) { \ case 1: \ KASSERT(1 == 0, ("Programmer error, invalid 1 byte " \ "field width for SAFILLDENSFIELD")); \ break; \ case 2: \ SASBADDUINTDESC(sb, indent, \ scsi_2btoul(dens_data->field), %u, field, desc); \ break; \ case 3: \ SASBADDUINTDESC(sb, indent, \ scsi_3btoul(dens_data->field), %u, field, desc); \ break; \ case 4: \ SASBADDUINTDESC(sb, indent, \ scsi_4btoul(dens_data->field), %u, field, desc); \ break; \ case 8: \ SASBADDUINTDESC(sb, indent, \ (uintmax_t)scsi_8btou64(dens_data->field), %ju, \ field, desc); \ break; \ default: \ break; \ } \ }; /* * Fill an sbuf with density data in XML format. This particular macro * works for strings. */ #define SAFILLDENSSBSTR(dens_data, sb, indent, field, desc_remain, \ len_to_go, cur_offset, desc){ \ size_t cur_field_len; \ char tmpstr[32]; \ \ cur_field_len = sizeof(dens_data->field); \ if (desc_remain < cur_field_len) { \ len_to_go -= desc_remain; \ cur_offset += desc_remain; \ continue; \ } \ len_to_go -= cur_field_len; \ cur_offset += cur_field_len; \ desc_remain -= cur_field_len; \ \ cam_strvis(tmpstr, dens_data->field, \ sizeof(dens_data->field), sizeof(tmpstr)); \ SASBADDVARSTRDESC(sb, indent, tmpstr, %s, field, \ strlen(tmpstr) + 1, desc); \ }; /* * Fill an sbuf with density data descriptors. 
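 *
 * A note on the SAFILLDENSSB macro above: sizeof(dens_data->field)
 * is a compile-time constant, so the switch collapses to a single
 * scsi_Nbtoul() call per field.  The same trick as a stripped-down
 * sketch (illustrative macro, not used by the driver):
 *
 *    #define DECODE_FIELD(p, f)                              \
 *        (sizeof((p)->f) == 2 ? scsi_2btoul((p)->f) :        \
 *         sizeof((p)->f) == 3 ? scsi_3btoul((p)->f) :        \
 *                               scsi_4btoul((p)->f))
 *
 * One-byte fields are rejected with a KASSERT instead, because
 * every arm is type-checked even though only one survives, and a
 * plain byte is not the array the decoders expect.
 *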
*/ static void safilldenstypesb(struct sbuf *sb, int *indent, uint8_t *buf, int buf_len, int is_density) { struct scsi_density_hdr *hdr; uint32_t hdr_len; int len_to_go, cur_offset; int length_offset; int num_reports, need_close; /* * We need at least the header length. Note that this isn't an * error, not all tape drives will have every data type. */ if (buf_len < sizeof(*hdr)) goto bailout; hdr = (struct scsi_density_hdr *)buf; hdr_len = scsi_2btoul(hdr->length); len_to_go = min(buf_len - sizeof(*hdr), hdr_len); if (is_density) { length_offset = __offsetof(struct scsi_density_data, bits_per_mm); } else { length_offset = __offsetof(struct scsi_medium_type_data, num_density_codes); } cur_offset = sizeof(*hdr); num_reports = 0; need_close = 0; while (len_to_go > length_offset) { struct scsi_density_data *dens_data; struct scsi_medium_type_data *type_data; int desc_remain; size_t cur_field_len; dens_data = NULL; type_data = NULL; if (is_density) { dens_data =(struct scsi_density_data *)&buf[cur_offset]; if (dens_data->byte2 & SDD_DLV) desc_remain = scsi_2btoul(dens_data->length); else desc_remain = SDD_DEFAULT_LENGTH - length_offset; } else { type_data = (struct scsi_medium_type_data *) &buf[cur_offset]; desc_remain = scsi_2btoul(type_data->length); } len_to_go -= length_offset; desc_remain = min(desc_remain, len_to_go); cur_offset += length_offset; if (need_close != 0) { SASBENDNODE(sb, *indent, density_entry); } SASBADDNODENUM(sb, *indent, density_entry, num_reports); num_reports++; need_close = 1; if (is_density) { SASBADDUINTDESC(sb, *indent, dens_data->primary_density_code, %u, primary_density_code, "Primary Density Code"); SASBADDUINTDESC(sb, *indent, dens_data->secondary_density_code, %u, secondary_density_code, "Secondary Density Code"); SASBADDUINTDESC(sb, *indent, dens_data->byte2 & ~SDD_DLV, %#x, density_flags, "Density Flags"); SAFILLDENSSB(dens_data, sb, *indent, bits_per_mm, desc_remain, len_to_go, cur_offset, "Bits per mm"); SAFILLDENSSB(dens_data, sb, *indent, media_width, desc_remain, len_to_go, cur_offset, "Media width"); SAFILLDENSSB(dens_data, sb, *indent, tracks, desc_remain, len_to_go, cur_offset, "Number of Tracks"); SAFILLDENSSB(dens_data, sb, *indent, capacity, desc_remain, len_to_go, cur_offset, "Capacity"); SAFILLDENSSBSTR(dens_data, sb, *indent, assigning_org, desc_remain, len_to_go, cur_offset, "Assigning Organization"); SAFILLDENSSBSTR(dens_data, sb, *indent, density_name, desc_remain, len_to_go, cur_offset, "Density Name"); SAFILLDENSSBSTR(dens_data, sb, *indent, description, desc_remain, len_to_go, cur_offset, "Description"); } else { int i; SASBADDUINTDESC(sb, *indent, type_data->medium_type, %u, medium_type, "Medium Type"); cur_field_len = __offsetof(struct scsi_medium_type_data, media_width) - __offsetof(struct scsi_medium_type_data, num_density_codes); if (desc_remain < cur_field_len) { len_to_go -= desc_remain; cur_offset += desc_remain; continue; } len_to_go -= cur_field_len; cur_offset += cur_field_len; desc_remain -= cur_field_len; SASBADDINTDESC(sb, *indent, type_data->num_density_codes, %d, num_density_codes, "Number of Density Codes"); SASBADDNODE(sb, *indent, density_code_list); for (i = 0; i < type_data->num_density_codes; i++) { SASBADDUINTDESC(sb, *indent, type_data->primary_density_codes[i], %u, density_code, "Density Code"); } SASBENDNODE(sb, *indent, density_code_list); SAFILLDENSSB(type_data, sb, *indent, media_width, desc_remain, len_to_go, cur_offset, "Media width"); SAFILLDENSSB(type_data, sb, *indent, medium_length, desc_remain, 
len_to_go, cur_offset, "Medium length"); /* * Account for the two reserved bytes. */ cur_field_len = sizeof(type_data->reserved2); if (desc_remain < cur_field_len) { len_to_go -= desc_remain; cur_offset += desc_remain; continue; } len_to_go -= cur_field_len; cur_offset += cur_field_len; desc_remain -= cur_field_len; SAFILLDENSSBSTR(type_data, sb, *indent, assigning_org, desc_remain, len_to_go, cur_offset, "Assigning Organization"); SAFILLDENSSBSTR(type_data, sb, *indent, medium_type_name, desc_remain, len_to_go, cur_offset, "Medium type name"); SAFILLDENSSBSTR(type_data, sb, *indent, description, desc_remain, len_to_go, cur_offset, "Description"); } } if (need_close != 0) { SASBENDNODE(sb, *indent, density_entry); } bailout: return; } /* * Fill an sbuf with density data information */ static void safilldensitysb(struct sa_softc *softc, int *indent, struct sbuf *sb) { int i, is_density; SASBADDNODE(sb, *indent, mtdensity); SASBADDUINTDESC(sb, *indent, softc->media_density, %u, media_density, "Current Medium Density"); is_density = 0; for (i = 0; i < SA_DENSITY_TYPES; i++) { int tmpint; if (softc->density_info_valid[i] == 0) continue; SASBADDNODE(sb, *indent, density_report); if (softc->density_type_bits[i] & SRDS_MEDIUM_TYPE) { tmpint = 1; is_density = 0; } else { tmpint = 0; is_density = 1; } SASBADDINTDESC(sb, *indent, tmpint, %d, medium_type_report, "Medium type report"); if (softc->density_type_bits[i] & SRDS_MEDIA) tmpint = 1; else tmpint = 0; SASBADDINTDESC(sb, *indent, tmpint, %d, media_report, "Media report"); safilldenstypesb(sb, indent, softc->density_info[i], softc->density_info_valid[i], is_density); SASBENDNODE(sb, *indent, density_report); } SASBENDNODE(sb, *indent, mtdensity); } #endif /* _KERNEL */ /* * Read tape block limits command. */ void scsi_read_block_limits(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, struct scsi_read_block_limits_data *rlimit_buf, u_int8_t sense_len, u_int32_t timeout) { struct scsi_read_block_limits *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_IN, tag_action, (u_int8_t *)rlimit_buf, sizeof(*rlimit_buf), sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_read_block_limits *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_BLOCK_LIMITS; } void scsi_sa_read_write(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int readop, int sli, int fixed, u_int32_t length, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_sa_rw *scsi_cmd; int read; read = (readop & SCSI_RW_DIRMASK) == SCSI_RW_READ; scsi_cmd = (struct scsi_sa_rw *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = read ? SA_READ : SA_WRITE; scsi_cmd->sli_fixed = 0; if (sli && read) scsi_cmd->sli_fixed |= SAR_SLI; if (fixed) scsi_cmd->sli_fixed |= SARW_FIXED; scsi_ulto3b(length, scsi_cmd->length); scsi_cmd->control = 0; cam_fill_csio(csio, retries, cbfcnp, (read ? CAM_DIR_IN : CAM_DIR_OUT) | ((readop & SCSI_RW_BIO) != 0 ? 
CAM_DATA_BIO : 0), tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_load_unload(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immediate, int eot, int reten, int load, u_int8_t sense_len, u_int32_t timeout) { struct scsi_load_unload *scsi_cmd; scsi_cmd = (struct scsi_load_unload *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = LOAD_UNLOAD; if (immediate) scsi_cmd->immediate = SLU_IMMED; if (eot) scsi_cmd->eot_reten_load |= SLU_EOT; if (reten) scsi_cmd->eot_reten_load |= SLU_RETEN; if (load) scsi_cmd->eot_reten_load |= SLU_LOAD; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, NULL, 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_rewind(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immediate, u_int8_t sense_len, u_int32_t timeout) { struct scsi_rewind *scsi_cmd; scsi_cmd = (struct scsi_rewind *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = REWIND; if (immediate) scsi_cmd->immediate = SREW_IMMED; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, NULL, 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_space(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, scsi_space_code code, u_int32_t count, u_int8_t sense_len, u_int32_t timeout) { struct scsi_space *scsi_cmd; scsi_cmd = (struct scsi_space *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = SPACE; scsi_cmd->code = code; scsi_ulto3b(count, scsi_cmd->count); scsi_cmd->control = 0; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, NULL, 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_write_filemarks(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immediate, int setmark, u_int32_t num_marks, u_int8_t sense_len, u_int32_t timeout) { struct scsi_write_filemarks *scsi_cmd; scsi_cmd = (struct scsi_write_filemarks *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = WRITE_FILEMARKS; if (immediate) scsi_cmd->byte2 |= SWFMRK_IMMED; if (setmark) scsi_cmd->byte2 |= SWFMRK_WSMK; scsi_ulto3b(num_marks, scsi_cmd->num_marks); cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, NULL, 0, sense_len, sizeof(*scsi_cmd), timeout); } /* * The reserve and release unit commands differ only by their opcodes. 
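 *
 * Every CDB builder in this file follows the same recipe; a
 * self-contained sketch with a hypothetical 6-byte command layout
 * (illustrative names, not a real opcode):
 *
 *    struct example_cdb6 {
 *        uint8_t opcode;
 *        uint8_t byte1;
 *        uint8_t count[3];    // 24-bit big-endian
 *        uint8_t control;
 *    };
 *
 *    static void
 *    example_build_cdb(struct example_cdb6 *c, uint8_t op,
 *        uint32_t count)
 *    {
 *        memset(c, 0, sizeof(*c));    // bzero() in this file
 *        c->opcode = op;
 *        c->count[0] = (count >> 16) & 0xff;
 *        c->count[1] = (count >> 8) & 0xff;
 *        c->count[2] = count & 0xff;
 *    }
 *
 * cam_fill_csio() then binds the CDB to a direction, retry count,
 * sense buffer size and timeout.
 *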
*/ void scsi_reserve_release_unit(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int third_party, int third_party_id, u_int8_t sense_len, u_int32_t timeout, int reserve) { struct scsi_reserve_release_unit *scsi_cmd; scsi_cmd = (struct scsi_reserve_release_unit *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); if (reserve) scsi_cmd->opcode = RESERVE_UNIT; else scsi_cmd->opcode = RELEASE_UNIT; if (third_party) { scsi_cmd->lun_thirdparty |= SRRU_3RD_PARTY; scsi_cmd->lun_thirdparty |= ((third_party_id << SRRU_3RD_SHAMT) & SRRU_3RD_MASK); } cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, NULL, 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_erase(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immediate, int long_erase, u_int8_t sense_len, u_int32_t timeout) { struct scsi_erase *scsi_cmd; scsi_cmd = (struct scsi_erase *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = ERASE; if (immediate) scsi_cmd->lun_imm_long |= SE_IMMED; if (long_erase) scsi_cmd->lun_imm_long |= SE_LONG; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, NULL, 0, sense_len, sizeof(*scsi_cmd), timeout); } /* * Read Tape Position command. */ void scsi_read_position(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int hardsoft, struct scsi_tape_position_data *sbp, u_int8_t sense_len, u_int32_t timeout) { struct scsi_tape_read_position *scmd; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_IN, tag_action, (u_int8_t *)sbp, sizeof (*sbp), sense_len, sizeof(*scmd), timeout); scmd = (struct scsi_tape_read_position *)&csio->cdb_io.cdb_bytes; bzero(scmd, sizeof(*scmd)); scmd->opcode = READ_POSITION; scmd->byte1 = hardsoft; } /* * Read Tape Position command. */ void scsi_read_position_10(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int service_action, u_int8_t *data_ptr, u_int32_t length, u_int32_t sense_len, u_int32_t timeout) { struct scsi_tape_read_position *scmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/data_ptr, /*dxfer_len*/length, sense_len, sizeof(*scmd), timeout); scmd = (struct scsi_tape_read_position *)&csio->cdb_io.cdb_bytes; bzero(scmd, sizeof(*scmd)); scmd->opcode = READ_POSITION; scmd->byte1 = service_action; /* * The length is only currently set (as of SSC4r03) if the extended * form is specified. The other forms have fixed lengths. */ if (service_action == SA_RPOS_EXTENDED_FORM) scsi_ulto2b(length, scmd->length); } /* * Set Tape Position command. */ void scsi_set_position(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int hardsoft, u_int32_t blkno, u_int8_t sense_len, u_int32_t timeout) { struct scsi_tape_locate *scmd; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, (u_int8_t *)NULL, 0, sense_len, sizeof(*scmd), timeout); scmd = (struct scsi_tape_locate *)&csio->cdb_io.cdb_bytes; bzero(scmd, sizeof(*scmd)); scmd->opcode = LOCATE; if (hardsoft) scmd->byte1 |= SA_SPOS_BT; scsi_ulto4b(blkno, scmd->blkaddr); } /* * XXX KDM figure out how to make a compatibility function. 
*/ void scsi_locate_10(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immed, int cp, int hard, int64_t partition, u_int32_t block_address, int sense_len, u_int32_t timeout) { struct scsi_tape_locate *scmd; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, /*data_ptr*/ NULL, /*dxfer_len*/ 0, sense_len, sizeof(*scmd), timeout); scmd = (struct scsi_tape_locate *)&csio->cdb_io.cdb_bytes; bzero(scmd, sizeof(*scmd)); scmd->opcode = LOCATE; if (immed) scmd->byte1 |= SA_SPOS_IMMED; if (cp) scmd->byte1 |= SA_SPOS_CP; if (hard) scmd->byte1 |= SA_SPOS_BT; scsi_ulto4b(block_address, scmd->blkaddr); scmd->partition = partition; } void scsi_locate_16(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immed, int cp, u_int8_t dest_type, int bam, int64_t partition, u_int64_t logical_id, int sense_len, u_int32_t timeout) { struct scsi_locate_16 *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_NONE, tag_action, /*data_ptr*/NULL, /*dxfer_len*/0, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_locate_16 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = LOCATE_16; if (immed) scsi_cmd->byte1 |= SA_LC_IMMEDIATE; if (cp) scsi_cmd->byte1 |= SA_LC_CP; scsi_cmd->byte1 |= (dest_type << SA_LC_DEST_TYPE_SHIFT); scsi_cmd->byte2 |= bam; scsi_cmd->partition = partition; scsi_u64to8b(logical_id, scsi_cmd->logical_id); } void scsi_report_density_support(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int media, int medium_type, u_int8_t *data_ptr, u_int32_t length, u_int32_t sense_len, u_int32_t timeout) { struct scsi_report_density_support *scsi_cmd; scsi_cmd =(struct scsi_report_density_support *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = REPORT_DENSITY_SUPPORT; if (media != 0) scsi_cmd->byte1 |= SRDS_MEDIA; if (medium_type != 0) scsi_cmd->byte1 |= SRDS_MEDIUM_TYPE; scsi_ulto2b(length, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/data_ptr, /*dxfer_len*/length, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_set_capacity(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int byte1, u_int32_t proportion, u_int32_t sense_len, u_int32_t timeout) { struct scsi_set_capacity *scsi_cmd; scsi_cmd = (struct scsi_set_capacity *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SET_CAPACITY; scsi_cmd->byte1 = byte1; scsi_ulto2b(proportion, scsi_cmd->cap_proportion); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_NONE, tag_action, /*data_ptr*/NULL, /*dxfer_len*/0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_format_medium(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int byte1, int byte2, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t sense_len, u_int32_t timeout) { struct scsi_format_medium *scsi_cmd; scsi_cmd = (struct scsi_format_medium*)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = FORMAT_MEDIUM; scsi_cmd->byte1 = byte1; scsi_cmd->byte2 = byte2; scsi_ulto2b(dxfer_len, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/(dxfer_len > 0) ? 
CAM_DIR_OUT : CAM_DIR_NONE, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_allow_overwrite(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int allow_overwrite, int partition, u_int64_t logical_id, u_int32_t sense_len, u_int32_t timeout) { struct scsi_allow_overwrite *scsi_cmd; scsi_cmd = (struct scsi_allow_overwrite *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = ALLOW_OVERWRITE; scsi_cmd->allow_overwrite = allow_overwrite; scsi_cmd->partition = partition; scsi_u64to8b(logical_id, scsi_cmd->logical_id); cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, /*data_ptr*/ NULL, /*dxfer_len*/ 0, sense_len, sizeof(*scsi_cmd), timeout); } Index: head/sys/cam/scsi/scsi_sg.c =================================================================== --- head/sys/cam/scsi/scsi_sg.c (revision 328917) +++ head/sys/cam/scsi/scsi_sg.c (revision 328918) @@ -1,1016 +1,1016 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2007 Scott Long * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * scsi_sg peripheral driver. This driver is meant to implement the Linux * SG passthrough interface for SCSI. 
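 *
 * (Illustrative sketch, not part of this change: a minimal, hypothetical
 * userland consumer of this interface. The device path and the TEST UNIT
 * READY CDB are made up for illustration, and the header providing
 * struct sg_io_hdr and SG_IO is assumed to be the one this driver's
 * interface installs; the fields set here are the ones sgioctl() reads.)
 */
#if 0	/* illustrative userland example, not kernel code */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>

static int
sg_test_unit_ready(const char *dev)
{
	struct sg_io_hdr io;
	unsigned char cdb[6] = { 0 };	/* TEST UNIT READY */
	unsigned char sense[32];
	int fd;

	if ((fd = open(dev, O_RDWR)) < 0)	/* e.g. "/dev/sg0" */
		return (-1);
	memset(&io, 0, sizeof(io));
	io.cmd_len = sizeof(cdb);
	io.cmdp = cdb;
	io.dxfer_direction = SG_DXFER_NONE;
	io.mx_sb_len = sizeof(sense);
	io.sbp = sense;
	io.timeout = 5000;
	return (ioctl(fd, SG_IO, &io));
}
#endif
/*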
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef enum { SG_FLAG_LOCKED = 0x01, SG_FLAG_INVALID = 0x02 } sg_flags; typedef enum { SG_STATE_NORMAL } sg_state; typedef enum { SG_RDWR_FREE, SG_RDWR_INPROG, SG_RDWR_DONE } sg_rdwr_state; typedef enum { SG_CCB_RDWR_IO } sg_ccb_types; #define ccb_type ppriv_field0 #define ccb_rdwr ppriv_ptr1 struct sg_rdwr { TAILQ_ENTRY(sg_rdwr) rdwr_link; int tag; int state; int buf_len; char *buf; union ccb *ccb; union { struct sg_header hdr; struct sg_io_hdr io_hdr; } hdr; }; struct sg_softc { sg_state state; sg_flags flags; int open_count; u_int maxio; struct devstat *device_stats; TAILQ_HEAD(, sg_rdwr) rdwr_done; struct cdev *dev; int sg_timeout; int sg_user_timeout; uint8_t pd_type; union ccb saved_ccb; }; static d_open_t sgopen; static d_close_t sgclose; static d_ioctl_t sgioctl; static d_write_t sgwrite; static d_read_t sgread; static periph_init_t sginit; static periph_ctor_t sgregister; static periph_oninv_t sgoninvalidate; static periph_dtor_t sgcleanup; static void sgasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); static void sgdone(struct cam_periph *periph, union ccb *done_ccb); static int sgsendccb(struct cam_periph *periph, union ccb *ccb); static int sgsendrdwr(struct cam_periph *periph, union ccb *ccb); static int sgerror(union ccb *ccb, uint32_t cam_flags, uint32_t sense_flags); static void sg_scsiio_status(struct ccb_scsiio *csio, u_short *hoststat, u_short *drvstat); static int scsi_group_len(u_char cmd); static struct periph_driver sgdriver = { sginit, "sg", TAILQ_HEAD_INITIALIZER(sgdriver.units), /* gen */ 0 }; PERIPHDRIVER_DECLARE(sg, sgdriver); static struct cdevsw sg_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT | D_TRACKCLOSE, .d_open = sgopen, .d_close = sgclose, .d_ioctl = sgioctl, .d_write = sgwrite, .d_read = sgread, .d_name = "sg", }; static int sg_version = 30125; static void sginit(void) { cam_status status; /* * Install a global async callback. This callback will receive async * callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, sgasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("sg: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void sgdevgonecb(void *arg) { struct cam_periph *periph; struct sg_softc *softc; struct mtx *mtx; int i; periph = (struct cam_periph *)arg; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = (struct sg_softc *)periph->softc; KASSERT(softc->open_count >= 0, ("Negative open count %d", softc->open_count)); /* * When we get this callback, we will get no more close calls from * devfs. So if we have any dangling opens, we need to release the * reference held for that particular context. */ for (i = 0; i < softc->open_count; i++) cam_periph_release_locked(periph); softc->open_count = 0; /* * Release the reference held for the device node, it is gone now. */ cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault.
*/ mtx_unlock(mtx); } static void sgoninvalidate(struct cam_periph *periph) { struct sg_softc *softc; softc = (struct sg_softc *)periph->softc; /* * Deregister any async callbacks. */ xpt_register_async(0, sgasync, periph, periph->path); softc->flags |= SG_FLAG_INVALID; /* * Tell devfs this device has gone away, and ask for a callback * when it has cleaned up its state. */ destroy_dev_sched_cb(softc->dev, sgdevgonecb, periph); /* * XXX Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ } static void sgcleanup(struct cam_periph *periph) { struct sg_softc *softc; softc = (struct sg_softc *)periph->softc; devstat_remove_entry(softc->device_stats); free(softc, M_DEVBUF); } static void sgasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; /* * Allocate a peripheral instance for this device and * start the probe process. */ status = cam_periph_alloc(sgregister, sgoninvalidate, sgcleanup, NULL, "sg", CAM_PERIPH_BIO, path, sgasync, AC_FOUND_DEVICE, cgd); if ((status != CAM_REQ_CMP) && (status != CAM_REQ_INPROG)) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("sgasync: Unable to attach new device " "due to status %#x: %s\n", status, entry ? entry->status_text : "Unknown"); } break; } default: cam_periph_async(periph, code, path, arg); break; } } static cam_status sgregister(struct cam_periph *periph, void *arg) { struct sg_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; struct make_dev_args args; int no_tags, error; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("sgregister: no getdev CCB, can't register device\n"); return (CAM_REQ_CMP_ERR); } softc = malloc(sizeof(*softc), M_DEVBUF, M_ZERO | M_NOWAIT); if (softc == NULL) { printf("sgregister: Unable to allocate softc\n"); return (CAM_REQ_CMP_ERR); } softc->state = SG_STATE_NORMAL; softc->pd_type = SID_TYPE(&cgd->inq_data); softc->sg_timeout = SG_DEFAULT_TIMEOUT / SG_DEFAULT_HZ * hz; softc->sg_user_timeout = SG_DEFAULT_TIMEOUT; TAILQ_INIT(&softc->rdwr_done); periph->softc = softc; xpt_path_inq(&cpi, periph->path); if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ else if (cpi.maxio > MAXPHYS) softc->maxio = MAXPHYS; /* for safety */ else softc->maxio = cpi.maxio; /* real value */ /* * We pass in 0 for all blocksize, since we don't know what the * blocksize of the device is, if it even has a blocksize. */ cam_periph_unlock(periph); no_tags = (cgd->inq_data.flags & SID_CmdQue) == 0; softc->device_stats = devstat_new_entry("sg", periph->unit_number, 0, DEVSTAT_NO_BLOCKSIZE | (no_tags ? DEVSTAT_NO_ORDERED_TAGS : 0), softc->pd_type | XPORT_DEVSTAT_TYPE(cpi.transport) | DEVSTAT_TYPE_PASS, DEVSTAT_PRIORITY_PASS); /* * Acquire a reference to the periph before we create the devfs * instance for it. We'll release this reference once the devfs * instance has been freed. 
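 *
 * (Note, not part of the original comment: as of this revision,
 * cam_periph_acquire() returns an errno-style int, 0 on success, instead
 * of a cam_status code; that is why the hunks below and in
 * proberegister() compare against 0 rather than CAM_REQ_CMP. A minimal
 * sketch of the new convention, illustrative only:)
 */
#if 0
	if (cam_periph_acquire(periph) != 0)	/* 0 == success */
		return (ENXIO);
	/* ... use the periph ... */
	cam_periph_release(periph);
#endif
/*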
*/ - if (cam_periph_acquire(periph) != CAM_REQ_CMP) { + if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } /* Register the device */ make_dev_args_init(&args); args.mda_devsw = &sg_cdevsw; args.mda_unit = periph->unit_number; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = periph; error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name, periph->unit_number); if (error != 0) { cam_periph_lock(periph); cam_periph_release_locked(periph); return (CAM_REQ_CMP_ERR); } if (periph->unit_number < 26) { (void)make_dev_alias(softc->dev, "sg%c", periph->unit_number + 'a'); } else { (void)make_dev_alias(softc->dev, "sg%c%c", ((periph->unit_number / 26) - 1) + 'a', (periph->unit_number % 26) + 'a'); } cam_periph_lock(periph); /* * Add as async callback so that we get * notified if this device goes away. */ xpt_register_async(AC_LOST_DEVICE, sgasync, periph, periph->path); if (bootverbose) xpt_announce_periph(periph, NULL); return (CAM_REQ_CMP); } static void sgdone(struct cam_periph *periph, union ccb *done_ccb) { struct sg_softc *softc; struct ccb_scsiio *csio; softc = (struct sg_softc *)periph->softc; csio = &done_ccb->csio; switch (csio->ccb_h.ccb_type) { case SG_CCB_RDWR_IO: { struct sg_rdwr *rdwr; int state; devstat_end_transaction(softc->device_stats, csio->dxfer_len, csio->tag_action & 0xf, ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ? DEVSTAT_NO_DATA : (csio->ccb_h.flags & CAM_DIR_OUT) ? DEVSTAT_WRITE : DEVSTAT_READ, NULL, NULL); rdwr = done_ccb->ccb_h.ccb_rdwr; state = rdwr->state; rdwr->state = SG_RDWR_DONE; wakeup(rdwr); break; } default: panic("unknown sg CCB type"); } } static int sgopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct sg_softc *softc; int error = 0; periph = (struct cam_periph *)dev->si_drv1; - if (cam_periph_acquire(periph) != CAM_REQ_CMP) + if (cam_periph_acquire(periph) != 0) return (ENXIO); /* * Don't allow access when we're running at a high securelevel. */ error = securelevel_gt(td->td_ucred, 1); if (error) { cam_periph_release(periph); return (error); } cam_periph_lock(periph); softc = (struct sg_softc *)periph->softc; if (softc->flags & SG_FLAG_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return (ENXIO); } softc->open_count++; cam_periph_unlock(periph); return (error); } static int sgclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct sg_softc *softc; struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = periph->softc; softc->open_count--; cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. * * cam_periph_release() avoids this problem using the same method, * but we're manually acquiring and dropping the lock here to * protect the open count and avoid another lock acquisition and * release. 
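 *
 * (Illustrative sketch of the pattern this comment describes, not part
 * of this change: cache the mutex pointer up front, because the lock is
 * not freed together with the periph, then unlock through the cached
 * pointer after the release that may free the periph.)
 */
#if 0
	struct mtx *mtx = cam_periph_mtx(periph);

	mtx_lock(mtx);
	softc->open_count--;
	cam_periph_release_locked(periph);	/* may free "periph" */
	mtx_unlock(mtx);	/* still safe through the cached pointer */
#endif
/*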
*/ mtx_unlock(mtx); return (0); } static int sgioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { union ccb *ccb; struct ccb_scsiio *csio; struct cam_periph *periph; struct sg_softc *softc; struct sg_io_hdr *req; int dir, error; periph = (struct cam_periph *)dev->si_drv1; cam_periph_lock(periph); softc = (struct sg_softc *)periph->softc; error = 0; switch (cmd) { case SG_GET_VERSION_NUM: { int *version = (int *)arg; *version = sg_version; break; } case SG_SET_TIMEOUT: { u_int user_timeout = *(u_int *)arg; softc->sg_user_timeout = user_timeout; softc->sg_timeout = user_timeout / SG_DEFAULT_HZ * hz; break; } case SG_GET_TIMEOUT: /* * The value is returned directly to the syscall. */ td->td_retval[0] = softc->sg_user_timeout; error = 0; break; case SG_IO: req = (struct sg_io_hdr *)arg; if (req->cmd_len > IOCDBLEN) { error = EINVAL; break; } if (req->iovec_count != 0) { error = EOPNOTSUPP; break; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; error = copyin(req->cmdp, &csio->cdb_io.cdb_bytes, req->cmd_len); if (error) { xpt_release_ccb(ccb); break; } switch(req->dxfer_direction) { case SG_DXFER_TO_DEV: dir = CAM_DIR_OUT; break; case SG_DXFER_FROM_DEV: dir = CAM_DIR_IN; break; case SG_DXFER_TO_FROM_DEV: dir = CAM_DIR_IN | CAM_DIR_OUT; break; case SG_DXFER_NONE: default: dir = CAM_DIR_NONE; break; } cam_fill_csio(csio, /*retries*/1, sgdone, dir|CAM_DEV_QFRZDIS, MSG_SIMPLE_Q_TAG, req->dxferp, req->dxfer_len, req->mx_sb_len, req->cmd_len, req->timeout); error = sgsendccb(periph, ccb); if (error) { req->host_status = DID_ERROR; req->driver_status = DRIVER_INVALID; xpt_release_ccb(ccb); break; } req->status = csio->scsi_status; req->masked_status = (csio->scsi_status >> 1) & 0x7f; sg_scsiio_status(csio, &req->host_status, &req->driver_status); req->resid = csio->resid; req->duration = csio->ccb_h.timeout; req->info = 0; if ((csio->ccb_h.status & CAM_AUTOSNS_VALID) && (req->sbp != NULL)) { req->sb_len_wr = req->mx_sb_len - csio->sense_resid; error = copyout(&csio->sense_data, req->sbp, req->sb_len_wr); } xpt_release_ccb(ccb); break; case SG_GET_RESERVED_SIZE: { int *size = (int *)arg; *size = DFLTPHYS; break; } case SG_GET_SCSI_ID: { struct sg_scsi_id *id = (struct sg_scsi_id *)arg; id->host_no = cam_sim_path(xpt_path_sim(periph->path)); id->channel = xpt_path_path_id(periph->path); id->scsi_id = xpt_path_target_id(periph->path); id->lun = xpt_path_lun_id(periph->path); id->scsi_type = softc->pd_type; id->h_cmd_per_lun = 1; id->d_queue_depth = 1; id->unused[0] = 0; id->unused[1] = 0; break; } case SG_GET_SG_TABLESIZE: { int *size = (int *)arg; *size = 0; break; } case SG_EMULATED_HOST: case SG_SET_TRANSFORM: case SG_GET_TRANSFORM: case SG_GET_NUM_WAITING: case SG_SCSI_RESET: case SG_GET_REQUEST_TABLE: case SG_SET_KEEP_ORPHAN: case SG_GET_KEEP_ORPHAN: case SG_GET_ACCESS_COUNT: case SG_SET_FORCE_LOW_DMA: case SG_GET_LOW_DMA: case SG_SET_FORCE_PACK_ID: case SG_GET_PACK_ID: case SG_SET_RESERVED_SIZE: case SG_GET_COMMAND_Q: case SG_SET_COMMAND_Q: case SG_SET_DEBUG: case SG_NEXT_CMD_LEN: default: #ifdef CAMDEBUG printf("sgioctl: rejecting cmd 0x%lx\n", cmd); #endif error = ENODEV; break; } cam_periph_unlock(periph); return (error); } static int sgwrite(struct cdev *dev, struct uio *uio, int ioflag) { union ccb *ccb; struct cam_periph *periph; struct ccb_scsiio *csio; struct sg_softc *sc; struct sg_header *hdr; struct sg_rdwr *rdwr; u_char cdb_cmd; char *buf; int error = 0, cdb_len, buf_len, dir; periph = dev->si_drv1; rdwr = malloc(sizeof(*rdwr), 
M_DEVBUF, M_WAITOK | M_ZERO); hdr = &rdwr->hdr.hdr; /* Copy in the header block and sanity check it */ if (uio->uio_resid < sizeof(*hdr)) { error = EINVAL; goto out_hdr; } error = uiomove(hdr, sizeof(*hdr), uio); if (error) goto out_hdr; /* XXX: We don't support SG 3.x read/write API. */ if (hdr->reply_len < 0) { error = ENODEV; goto out_hdr; } ccb = xpt_alloc_ccb(); if (ccb == NULL) { error = ENOMEM; goto out_hdr; } csio = &ccb->csio; /* * Copy in the CDB block. The designers of the interface didn't * bother to provide a size for this in the header, so we have to * figure it out ourselves. */ if (uio->uio_resid < 1) goto out_ccb; error = uiomove(&cdb_cmd, 1, uio); if (error) goto out_ccb; if (hdr->twelve_byte) cdb_len = 12; else cdb_len = scsi_group_len(cdb_cmd); /* * We've already read the first byte of the CDB and advanced the uio * pointer. Just read the rest. */ csio->cdb_io.cdb_bytes[0] = cdb_cmd; error = uiomove(&csio->cdb_io.cdb_bytes[1], cdb_len - 1, uio); if (error) goto out_ccb; /* * Now set up the data block. Again, the designers didn't bother * to make this reliable. */ buf_len = uio->uio_resid; if (buf_len != 0) { buf = malloc(buf_len, M_DEVBUF, M_WAITOK | M_ZERO); error = uiomove(buf, buf_len, uio); if (error) goto out_buf; dir = CAM_DIR_OUT; } else if (hdr->reply_len != 0) { buf = malloc(hdr->reply_len, M_DEVBUF, M_WAITOK | M_ZERO); buf_len = hdr->reply_len; dir = CAM_DIR_IN; } else { buf = NULL; buf_len = 0; dir = CAM_DIR_NONE; } cam_periph_lock(periph); sc = periph->softc; xpt_setup_ccb(&ccb->ccb_h, periph->path, CAM_PRIORITY_NORMAL); cam_fill_csio(csio, /*retries*/1, sgdone, dir|CAM_DEV_QFRZDIS, MSG_SIMPLE_Q_TAG, buf, buf_len, SG_MAX_SENSE, cdb_len, sc->sg_timeout); /* * Send off the command and hope that it works. This path does not * go through sgstart because the I/O is supposed to be asynchronous. */ rdwr->buf = buf; rdwr->buf_len = buf_len; rdwr->tag = hdr->pack_id; rdwr->ccb = ccb; rdwr->state = SG_RDWR_INPROG; ccb->ccb_h.ccb_rdwr = rdwr; ccb->ccb_h.ccb_type = SG_CCB_RDWR_IO; TAILQ_INSERT_TAIL(&sc->rdwr_done, rdwr, rdwr_link); error = sgsendrdwr(periph, ccb); cam_periph_unlock(periph); return (error); out_buf: free(buf, M_DEVBUF); out_ccb: xpt_free_ccb(ccb); out_hdr: free(rdwr, M_DEVBUF); return (error); } static int sgread(struct cdev *dev, struct uio *uio, int ioflag) { struct ccb_scsiio *csio; struct cam_periph *periph; struct sg_softc *sc; struct sg_header *hdr; struct sg_rdwr *rdwr; u_short hstat, dstat; int error, pack_len, reply_len, pack_id; periph = dev->si_drv1; /* XXX The pack len field needs to be updated and written out instead * of discarded. Not sure how to do that. 
*/ uio->uio_rw = UIO_WRITE; if ((error = uiomove(&pack_len, 4, uio)) != 0) return (error); if ((error = uiomove(&reply_len, 4, uio)) != 0) return (error); if ((error = uiomove(&pack_id, 4, uio)) != 0) return (error); uio->uio_rw = UIO_READ; cam_periph_lock(periph); sc = periph->softc; search: TAILQ_FOREACH(rdwr, &sc->rdwr_done, rdwr_link) { if (rdwr->tag == pack_id) break; } if ((rdwr == NULL) || (rdwr->state != SG_RDWR_DONE)) { if (cam_periph_sleep(periph, rdwr, PCATCH, "sgread", 0) == ERESTART) return (EAGAIN); goto search; } TAILQ_REMOVE(&sc->rdwr_done, rdwr, rdwr_link); cam_periph_unlock(periph); hdr = &rdwr->hdr.hdr; csio = &rdwr->ccb->csio; sg_scsiio_status(csio, &hstat, &dstat); hdr->host_status = hstat; hdr->driver_status = dstat; hdr->target_status = csio->scsi_status >> 1; switch (hstat) { case DID_OK: case DID_PASSTHROUGH: case DID_SOFT_ERROR: hdr->result = 0; break; case DID_NO_CONNECT: case DID_BUS_BUSY: case DID_TIME_OUT: hdr->result = EBUSY; break; case DID_BAD_TARGET: case DID_ABORT: case DID_PARITY: case DID_RESET: case DID_BAD_INTR: case DID_ERROR: default: hdr->result = EIO; break; } if (dstat == DRIVER_SENSE) { bcopy(&csio->sense_data, hdr->sense_buffer, min(csio->sense_len, SG_MAX_SENSE)); #ifdef CAMDEBUG scsi_sense_print(csio); #endif } error = uiomove(&hdr->result, sizeof(*hdr) - offsetof(struct sg_header, result), uio); if ((error == 0) && (hdr->result == 0)) error = uiomove(rdwr->buf, rdwr->buf_len, uio); cam_periph_lock(periph); xpt_free_ccb(rdwr->ccb); cam_periph_unlock(periph); free(rdwr->buf, M_DEVBUF); free(rdwr, M_DEVBUF); return (error); } static int sgsendccb(struct cam_periph *periph, union ccb *ccb) { struct sg_softc *softc; struct cam_periph_map_info mapinfo; int error; softc = periph->softc; bzero(&mapinfo, sizeof(mapinfo)); /* * cam_periph_mapmem calls into proc and vm functions that can * sleep as well as trigger I/O, so we can't hold the lock. * Dropping it here is reasonably safe. * The only CCB opcode that is possible here is XPT_SCSI_IO, no * need for additional checks. 
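 *
 * (Condensed sketch of the bracket pattern used below, illustrative
 * only: the unlock window exists because faulting in user pages can
 * sleep, and the unmap must happen before the CCB is recycled.)
 */
#if 0
	cam_periph_unlock(periph);
	error = cam_periph_mapmem(ccb, &mapinfo, softc->maxio);
	cam_periph_lock(periph);
	if (error == 0) {
		error = cam_periph_runccb(ccb, sgerror, CAM_RETRY_SELTO,
		    SF_RETRY_UA, softc->device_stats);
		cam_periph_unmapmem(ccb, &mapinfo);
	}
#endif
/*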
*/ cam_periph_unlock(periph); error = cam_periph_mapmem(ccb, &mapinfo, softc->maxio); cam_periph_lock(periph); if (error) return (error); error = cam_periph_runccb(ccb, sgerror, CAM_RETRY_SELTO, SF_RETRY_UA, softc->device_stats); cam_periph_unmapmem(ccb, &mapinfo); return (error); } static int sgsendrdwr(struct cam_periph *periph, union ccb *ccb) { struct sg_softc *softc; softc = periph->softc; devstat_start_transaction(softc->device_stats, NULL); xpt_action(ccb); return (0); } static int sgerror(union ccb *ccb, uint32_t cam_flags, uint32_t sense_flags) { struct cam_periph *periph; struct sg_softc *softc; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct sg_softc *)periph->softc; return (cam_periph_error(ccb, cam_flags, sense_flags)); } static void sg_scsiio_status(struct ccb_scsiio *csio, u_short *hoststat, u_short *drvstat) { int status; status = csio->ccb_h.status; switch (status & CAM_STATUS_MASK) { case CAM_REQ_CMP: *hoststat = DID_OK; *drvstat = 0; break; case CAM_REQ_CMP_ERR: *hoststat = DID_ERROR; *drvstat = 0; break; case CAM_REQ_ABORTED: *hoststat = DID_ABORT; *drvstat = 0; break; case CAM_REQ_INVALID: *hoststat = DID_ERROR; *drvstat = DRIVER_INVALID; break; case CAM_DEV_NOT_THERE: *hoststat = DID_BAD_TARGET; *drvstat = 0; break; case CAM_SEL_TIMEOUT: *hoststat = DID_NO_CONNECT; *drvstat = 0; break; case CAM_CMD_TIMEOUT: *hoststat = DID_TIME_OUT; *drvstat = 0; break; case CAM_SCSI_STATUS_ERROR: *hoststat = DID_ERROR; *drvstat = 0; break; case CAM_SCSI_BUS_RESET: *hoststat = DID_RESET; *drvstat = 0; break; case CAM_UNCOR_PARITY: *hoststat = DID_PARITY; *drvstat = 0; break; case CAM_SCSI_BUSY: *hoststat = DID_BUS_BUSY; *drvstat = 0; break; default: *hoststat = DID_ERROR; *drvstat = DRIVER_ERROR; } if (status & CAM_AUTOSNS_VALID) *drvstat = DRIVER_SENSE; } static int scsi_group_len(u_char cmd) { int len[] = {6, 10, 10, 12, 12, 12, 10, 10}; int group; group = (cmd >> 5) & 0x7; return (len[group]); } Index: head/sys/cam/scsi/scsi_xpt.c =================================================================== --- head/sys/cam/scsi/scsi_xpt.c (revision 328917) +++ head/sys/cam/scsi/scsi_xpt.c (revision 328918) @@ -1,3236 +1,3234 @@ /*- * Implementation of the SCSI Transport * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for xpt_print below */ #include "opt_cam.h" struct scsi_quirk_entry { struct scsi_inquiry_pattern inq_pat; u_int8_t quirks; #define CAM_QUIRK_NOLUNS 0x01 #define CAM_QUIRK_NOVPDS 0x02 #define CAM_QUIRK_HILUNS 0x04 #define CAM_QUIRK_NOHILUNS 0x08 #define CAM_QUIRK_NORPTLUNS 0x10 u_int mintags; u_int maxtags; }; #define SCSI_QUIRK(dev) ((struct scsi_quirk_entry *)((dev)->quirk)) static int cam_srch_hi = 0; static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS); SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT | CTLFLAG_RWTUN, 0, 0, sysctl_cam_search_luns, "I", "allow search above LUN 7 for SCSI3 and greater devices"); #define CAM_SCSI2_MAXLUN 8 #define CAM_CAN_GET_SIMPLE_LUN(x, i) \ ((((x)->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) == \ RPL_LUNDATA_ATYP_PERIPH) || \ (((x)->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) == \ RPL_LUNDATA_ATYP_FLAT)) #define CAM_GET_SIMPLE_LUN(lp, i, lval) \ if (((lp)->luns[(i)].lundata[0] & RPL_LUNDATA_ATYP_MASK) == \ RPL_LUNDATA_ATYP_PERIPH) { \ (lval) = (lp)->luns[(i)].lundata[1]; \ } else { \ (lval) = (lp)->luns[(i)].lundata[0]; \ (lval) &= RPL_LUNDATA_FLAT_LUN_MASK; \ (lval) <<= 8; \ (lval) |= (lp)->luns[(i)].lundata[1]; \ } #define CAM_GET_LUN(lp, i, lval) \ (lval) = scsi_8btou64((lp)->luns[(i)].lundata); \ (lval) = CAM_EXTLUN_BYTE_SWIZZLE(lval); /* * If we're not quirked to search <= the first 8 luns * and we are either quirked to search above lun 8, * or we're > SCSI-2 and we've enabled hilun searching, * or we're > SCSI-2 and the last lun was a success, * we can look for luns above lun 8. 
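 *
 * (Illustrative sketch, not compiled: the CAN_SRCH_HI_SPARSE macro
 * below, rewritten as an if-chain for readability. The _DENSE variant
 * is identical except that it does not require the cam_srch_hi sysctl.)
 */
#if 0
static inline int
can_srch_hi_sparse(struct cam_ed *dv)
{
	if (SCSI_QUIRK(dv)->quirks & CAM_QUIRK_NOHILUNS)
		return (0);	/* quirked to stay at or below lun 7 */
	if (SCSI_QUIRK(dv)->quirks & CAM_QUIRK_HILUNS)
		return (1);	/* quirked to always search high luns */
	/* otherwise require SCSI-3 or better plus the sysctl */
	return (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi);
}
#endif
/*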
*/ #define CAN_SRCH_HI_SPARSE(dv) \ (((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_NOHILUNS) == 0) \ && ((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_HILUNS) \ || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi))) #define CAN_SRCH_HI_DENSE(dv) \ (((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_NOHILUNS) == 0) \ && ((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_HILUNS) \ || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2))) static periph_init_t probe_periph_init; static struct periph_driver probe_driver = { probe_periph_init, "probe", TAILQ_HEAD_INITIALIZER(probe_driver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(probe, probe_driver); typedef enum { PROBE_TUR, PROBE_INQUIRY, /* this counts as DV0 for Basic Domain Validation */ PROBE_FULL_INQUIRY, PROBE_REPORT_LUNS, PROBE_MODE_SENSE, PROBE_SUPPORTED_VPD_LIST, PROBE_DEVICE_ID, PROBE_EXTENDED_INQUIRY, PROBE_SERIAL_NUM, PROBE_TUR_FOR_NEGOTIATION, PROBE_INQUIRY_BASIC_DV1, PROBE_INQUIRY_BASIC_DV2, PROBE_DV_EXIT, PROBE_DONE, PROBE_INVALID } probe_action; static char *probe_action_text[] = { "PROBE_TUR", "PROBE_INQUIRY", "PROBE_FULL_INQUIRY", "PROBE_REPORT_LUNS", "PROBE_MODE_SENSE", "PROBE_SUPPORTED_VPD_LIST", "PROBE_DEVICE_ID", "PROBE_EXTENDED_INQUIRY", "PROBE_SERIAL_NUM", "PROBE_TUR_FOR_NEGOTIATION", "PROBE_INQUIRY_BASIC_DV1", "PROBE_INQUIRY_BASIC_DV2", "PROBE_DV_EXIT", "PROBE_DONE", "PROBE_INVALID" }; #define PROBE_SET_ACTION(softc, newaction) \ do { \ char **text; \ text = probe_action_text; \ CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE, \ ("Probe %s to %s\n", text[(softc)->action], \ text[(newaction)])); \ (softc)->action = (newaction); \ } while(0) typedef enum { PROBE_INQUIRY_CKSUM = 0x01, PROBE_SERIAL_CKSUM = 0x02, PROBE_NO_ANNOUNCE = 0x04, PROBE_EXTLUN = 0x08 } probe_flags; typedef struct { TAILQ_HEAD(, ccb_hdr) request_ccbs; probe_action action; union ccb saved_ccb; probe_flags flags; MD5_CTX context; u_int8_t digest[16]; struct cam_periph *periph; } probe_softc; static const char quantum[] = "QUANTUM"; static const char sony[] = "SONY"; static const char west_digital[] = "WDIGTL"; static const char samsung[] = "SAMSUNG"; static const char seagate[] = "SEAGATE"; static const char microp[] = "MICROP"; static struct scsi_quirk_entry scsi_quirk_table[] = { { /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* * Unfortunately, the Quantum Atlas III has the same * problem as the Atlas II drives above. * Reported by: "Johan Granlund" * * For future reference, the drive with the problem was: * QUANTUM QM39100TD-SW N1B0 * * It's possible that Quantum will fix the problem in later * firmware revisions. If that happens, the quirk entry * will need to be made specific to the firmware revisions * with the problem. 
* */ /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* * 18 Gig Atlas III, same problem as the 9G version. * Reported by: Andre Albsmeier * * * For future reference, the drive with the problem was: * QUANTUM QM318000TD-S N491 */ /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* * Broken tagged queuing drive * Reported by: Bret Ford * and: Martin Renters */ { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, /* * The Seagate Medalist Pro drives have very poor write * performance with anything more than 2 tags. * * Reported by: Paul van der Zwan * Drive: * * Reported by: Jeremy Lea * Drive: * * No one has actually reported that the 9G version * (ST39140*) of the Medalist Pro has the same problem, but * we're assuming that it does because the 4G and 6.5G * versions of the drive are broken. */ { { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"}, /*quirks*/0, /*mintags*/2, /*maxtags*/2 }, { { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"}, /*quirks*/0, /*mintags*/2, /*maxtags*/2 }, { { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"}, /*quirks*/0, /*mintags*/2, /*maxtags*/2 }, { /* * Experiences command timeouts under load with a * tag count higher than 55. */ { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST3146855LW", "*"}, /*quirks*/0, /*mintags*/2, /*maxtags*/55 }, { /* * Slow when tagged queueing is enabled. Write performance * steadily drops off with more and more concurrent * transactions. Best sequential write performance with * tagged queueing turned off and write caching turned on. * * PR: kern/10398 * Submitted by: Hideaki Okada * Drive: DCAS-34330 w/ "S65A" firmware. * * The drive with the problem had the "S65A" firmware * revision, and has also been reported (by Stephen J. * Roznowski ) for a drive with the "S61A" * firmware revision. * * Although no one has reported problems with the 2 gig * version of the DCAS drive, the assumption is that it * has the same problems as the 4 gig version. Therefore * this quirk entry disables tagged queueing for all * DCAS drives. */ { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* This does not support other than LUN 0 */ { T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255 }, { /* * Broken tagged queuing drive. * Submitted by: * NAKAJI Hiroyuki * in PR kern/9535 */ { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* * Slow when tagged queueing is enabled. (1.5MB/sec versus * 8MB/sec.) * Submitted by: Andrew Gallatin * Best performance with these drives is achieved with * tagged queueing turned off, and write caching turned on. */ { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* * Slow when tagged queueing is enabled. (1.5MB/sec versus * 8MB/sec.)
* Submitted by: Andrew Gallatin * Best performance with these drives is achieved with * tagged queueing turned off, and write caching turned on. */ { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* * Doesn't handle queue full condition correctly, * so we need to limit maxtags to what the device * can handle instead of determining this automatically. */ { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" }, /*quirks*/0, /*mintags*/2, /*maxtags*/32 }, { /* Really only one LUN */ { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* I can't believe we need a quirk for DPT volumes. */ { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/255 }, { /* * Many Sony CDROM drives don't like multi-LUN probing. */ { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * This drive doesn't like multiple LUN probing. * Submitted by: Parag Patel */ { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * The 8200 doesn't like multi-lun probing, and probably * doesn't like serial number requests either. */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE", "EXB-8200*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * Let's try the same as above, but for a drive that says * it's an IPL-6860 but is actually an EXB 8200. */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE", "IPL-6860*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * These Hitachi drives don't like multi-lun probing. * The PR submitter has a DK319H, but says that the Linux * kernel has a similar work-around for the DK312 and DK314, * so all DK31* drives are quirked here. * PR: misc/18793 * Submitted by: Paul Haddad */ { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255 }, { /* * The Hitachi CJ series with J8A8 firmware apparently has * problems with tagged commands. * PR: 23536 * Reported by: amagai@nue.org */ { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * These are the large storage arrays. * Submitted by: William Carrel */ { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" }, CAM_QUIRK_HILUNS, 2, 1024 }, { /* * This old revision of the TDC3600 is also SCSI-1, and * hangs upon serial number probing. */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 3600", "U07:" }, CAM_QUIRK_NOVPDS, /*mintags*/0, /*maxtags*/0 }, { /* * Would respond to all LUNs if asked for. */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER", "CP150", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * Would respond to all LUNs if asked for.
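 *
 * (Note, not part of the original comment: each entry in this table
 * pairs an inquiry match pattern, i.e. peripheral type, media class and
 * vendor/product/revision globs, with the quirk bits and tag limits to
 * apply. A hypothetical new entry, with a made-up vendor and product,
 * would follow the same shape, e.g.
 * { { T_DIRECT, SIP_MEDIA_FIXED, "ACME", "ROADRUNNER*", "*" },
 * CAM_QUIRK_NOLUNS, 0, 0 } for quirks, mintags and maxtags.)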
*/ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY", "96X2*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* Submitted by: Matthew Dodd */ { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* Submitted by: Matthew Dodd */ { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* TeraSolutions special settings for TRC-22 RAID */ { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" }, /*quirks*/0, /*mintags*/55, /*maxtags*/255 }, { /* Veritas Storage Appliance */ { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" }, CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024 }, { /* * Would respond to all LUNs. Device type and removable * flag are jumper-selectable. */ { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix", "Tahiti 1", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* EasyRAID E5A aka. areca ARC-6010 */ { T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" }, CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255 }, { { T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { { T_DIRECT, SIP_MEDIA_REMOVABLE, "Garmin", "*", "*" }, CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255 }, { { T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic", "STORAGE DEVICE*", "120?" }, CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255 }, { { T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic", "MassStorageClass", "1533" }, CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255 }, { /* Default tagged queuing parameters for all devices */ { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, /*vendor*/"*", /*product*/"*", /*revision*/"*" }, /*quirks*/0, /*mintags*/2, /*maxtags*/255 }, }; static cam_status proberegister(struct cam_periph *periph, void *arg); static void probeschedule(struct cam_periph *probe_periph); static void probestart(struct cam_periph *periph, union ccb *start_ccb); static void proberequestdefaultnegotiation(struct cam_periph *periph); static int proberequestbackoff(struct cam_periph *periph, struct cam_ed *device); static void probedone(struct cam_periph *periph, union ccb *done_ccb); static void probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new, probe_flags flags); static void probecleanup(struct cam_periph *periph); static void scsi_find_quirk(struct cam_ed *device); static void scsi_scan_bus(struct cam_periph *periph, union ccb *ccb); static void scsi_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *ccb); static void xptscandone(struct cam_periph *periph, union ccb *done_ccb); static struct cam_ed * scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); static void scsi_devise_transport(struct cam_path *path); static void scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path, int async_update); static void scsi_toggle_tags(struct cam_path *path); static void scsi_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg); static void scsi_action(union ccb *start_ccb); static void scsi_announce_periph(struct cam_periph *periph); static void scsi_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb); static void scsi_proto_announce(struct cam_ed *device); static void scsi_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb); static void scsi_proto_denounce(struct cam_ed *device); static void scsi_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb); static 
void scsi_proto_debug_out(union ccb *ccb); static void _scsi_announce_periph(struct cam_periph *, u_int *, u_int *, struct ccb_trans_settings *); static struct xpt_xport_ops scsi_xport_ops = { .alloc_device = scsi_alloc_device, .action = scsi_action, .async = scsi_dev_async, .announce = scsi_announce_periph, .announce_sbuf = scsi_announce_periph_sbuf, }; #define SCSI_XPT_XPORT(x, X) \ static struct xpt_xport scsi_xport_ ## x = { \ .xport = XPORT_ ## X, \ .name = #x, \ .ops = &scsi_xport_ops, \ }; \ CAM_XPT_XPORT(scsi_xport_ ## x); SCSI_XPT_XPORT(spi, SPI); SCSI_XPT_XPORT(sas, SAS); SCSI_XPT_XPORT(fc, FC); SCSI_XPT_XPORT(usb, USB); SCSI_XPT_XPORT(iscsi, ISCSI); SCSI_XPT_XPORT(srp, SRP); SCSI_XPT_XPORT(ppb, PPB); #undef SCSI_XPT_XPORT static struct xpt_proto_ops scsi_proto_ops = { .announce = scsi_proto_announce, .announce_sbuf = scsi_proto_announce_sbuf, .denounce = scsi_proto_denounce, .denounce_sbuf = scsi_proto_denounce_sbuf, .debug_out = scsi_proto_debug_out, }; static struct xpt_proto scsi_proto = { .proto = PROTO_SCSI, .name = "scsi", .ops = &scsi_proto_ops, }; CAM_XPT_PROTO(scsi_proto); static void probe_periph_init(void) { } static cam_status proberegister(struct cam_periph *periph, void *arg) { union ccb *request_ccb; /* CCB representing the probe request */ - cam_status status; probe_softc *softc; request_ccb = (union ccb *)arg; if (request_ccb == NULL) { printf("proberegister: no probe CCB, " "can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_NOWAIT); if (softc == NULL) { printf("proberegister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } TAILQ_INIT(&softc->request_ccbs); TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); softc->flags = 0; periph->softc = softc; softc->periph = periph; softc->action = PROBE_INVALID; - status = cam_periph_acquire(periph); - if (status != CAM_REQ_CMP) { - return (status); - } + if (cam_periph_acquire(periph) != 0) + return (CAM_REQ_CMP_ERR); + CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n")); scsi_devise_transport(periph->path); /* * Ensure we've waited at least a bus settle * delay before attempting to probe the device. * For HBAs that don't do bus resets, this won't make a difference. */ cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset, scsi_delay); probeschedule(periph); return(CAM_REQ_CMP); } static void probeschedule(struct cam_periph *periph) { struct ccb_pathinq cpi; union ccb *ccb; probe_softc *softc; softc = (probe_softc *)periph->softc; ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); xpt_path_inq(&cpi, periph->path); /* * If a device has gone away and another device, or the same one, * is back in the same place, it should have a unit attention * condition pending. It will not report the unit attention in * response to an inquiry, which may leave invalid transfer * negotiations in effect. The TUR will reveal the unit attention * condition. Only send the TUR for lun 0, since some devices * will get confused by commands other than inquiry to non-existent * luns. If you think a device has gone away start your scan from * lun 0. This will ensure that any bogus transfer settings are * invalidated. * * If we haven't seen the device before and the controller supports * some kind of transfer negotiation, negotiate with the first * sent command if no bus reset was performed at startup.
This * ensures that the device is not confused by transfer negotiation * settings left over by loader or BIOS action. */ if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0) && (ccb->ccb_h.target_lun == 0)) { PROBE_SET_ACTION(softc, PROBE_TUR); } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0 && (cpi.hba_misc & PIM_NOBUSRESET) != 0) { proberequestdefaultnegotiation(periph); PROBE_SET_ACTION(softc, PROBE_INQUIRY); } else { PROBE_SET_ACTION(softc, PROBE_INQUIRY); } if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE) softc->flags |= PROBE_NO_ANNOUNCE; else softc->flags &= ~PROBE_NO_ANNOUNCE; if (cpi.hba_misc & PIM_EXTLUNS) softc->flags |= PROBE_EXTLUN; else softc->flags &= ~PROBE_EXTLUN; xpt_schedule(periph, CAM_PRIORITY_XPT); } static void probestart(struct cam_periph *periph, union ccb *start_ccb) { /* Probe the device that our peripheral driver points to */ struct ccb_scsiio *csio; probe_softc *softc; CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n")); softc = (probe_softc *)periph->softc; csio = &start_ccb->csio; again: switch (softc->action) { case PROBE_TUR: case PROBE_TUR_FOR_NEGOTIATION: case PROBE_DV_EXIT: { scsi_test_unit_ready(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, /*timeout*/60000); break; } case PROBE_INQUIRY: case PROBE_FULL_INQUIRY: case PROBE_INQUIRY_BASIC_DV1: case PROBE_INQUIRY_BASIC_DV2: { u_int inquiry_len; struct scsi_inquiry_data *inq_buf; inq_buf = &periph->path->device->inq_data; /* * If the device is currently configured, we calculate an * MD5 checksum of the inquiry data, and if the serial number * length is greater than 0, add the serial number data * into the checksum as well. Once the inquiry and the * serial number check finish, we attempt to figure out * whether we still have the same device. */ if (((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) && ((softc->flags & PROBE_INQUIRY_CKSUM) == 0)) { MD5Init(&softc->context); MD5Update(&softc->context, (unsigned char *)inq_buf, sizeof(struct scsi_inquiry_data)); softc->flags |= PROBE_INQUIRY_CKSUM; if (periph->path->device->serial_num_len > 0) { MD5Update(&softc->context, periph->path->device->serial_num, periph->path->device->serial_num_len); softc->flags |= PROBE_SERIAL_CKSUM; } MD5Final(softc->digest, &softc->context); } if (softc->action == PROBE_INQUIRY) inquiry_len = SHORT_INQUIRY_LENGTH; else inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf); /* * Some parallel SCSI devices fail to send an * ignore wide residue message when dealing with * odd length inquiry requests. Round up to be * safe. 
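 *
 * (Worked example, not part of the original comment: roundup2(x, 2)
 * rounds x up to the next multiple of two, so an odd request such as
 * 37 bytes becomes 38 below, while an even length passes through
 * unchanged.)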
*/ inquiry_len = roundup2(inquiry_len, 2); if (softc->action == PROBE_INQUIRY_BASIC_DV1 || softc->action == PROBE_INQUIRY_BASIC_DV2) { inq_buf = malloc(inquiry_len, M_CAMXPT, M_NOWAIT); } if (inq_buf == NULL) { xpt_print(periph->path, "malloc failure- skipping Basic " "Domain Validation\n"); PROBE_SET_ACTION(softc, PROBE_DV_EXIT); scsi_test_unit_ready(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, /*timeout*/60000); break; } scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (u_int8_t *)inq_buf, inquiry_len, /*evpd*/FALSE, /*page_code*/0, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } case PROBE_REPORT_LUNS: { void *rp; rp = malloc(periph->path->target->rpl_size, M_CAMXPT, M_NOWAIT | M_ZERO); if (rp == NULL) { struct scsi_inquiry_data *inq_buf; inq_buf = &periph->path->device->inq_data; xpt_print(periph->path, "Unable to alloc report luns storage\n"); if (INQ_DATA_TQ_ENABLED(inq_buf)) PROBE_SET_ACTION(softc, PROBE_MODE_SENSE); else PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); goto again; } scsi_report_luns(csio, 5, probedone, MSG_SIMPLE_Q_TAG, RPL_REPORT_DEFAULT, rp, periph->path->target->rpl_size, SSD_FULL_SIZE, 60000); break; } case PROBE_MODE_SENSE: { void *mode_buf; int mode_buf_len; mode_buf_len = sizeof(struct scsi_mode_header_6) + sizeof(struct scsi_mode_blk_desc) + sizeof(struct scsi_control_page); mode_buf = malloc(mode_buf_len, M_CAMXPT, M_NOWAIT); if (mode_buf != NULL) { scsi_mode_sense(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, /*dbd*/FALSE, SMS_PAGE_CTRL_CURRENT, SMS_CONTROL_MODE_PAGE, mode_buf, mode_buf_len, SSD_FULL_SIZE, /*timeout*/60000); break; } xpt_print(periph->path, "Unable to mode sense control page - " "malloc failure\n"); PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); } /* FALLTHROUGH */ case PROBE_SUPPORTED_VPD_LIST: { struct scsi_vpd_supported_page_list *vpd_list; struct cam_ed *device; vpd_list = NULL; device = periph->path->device; if ((SCSI_QUIRK(device)->quirks & CAM_QUIRK_NOVPDS) == 0) vpd_list = malloc(sizeof(*vpd_list), M_CAMXPT, M_NOWAIT | M_ZERO); if (vpd_list != NULL) { scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (u_int8_t *)vpd_list, sizeof(*vpd_list), /*evpd*/TRUE, SVPD_SUPPORTED_PAGE_LIST, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } done: /* * We'll have to do without, let our probedone * routine finish up for us. */ start_ccb->csio.data_ptr = NULL; cam_freeze_devq(periph->path); cam_periph_doacquire(periph); probedone(periph, start_ccb); return; } case PROBE_DEVICE_ID: { struct scsi_vpd_device_id *devid; devid = NULL; if (scsi_vpd_supported_page(periph, SVPD_DEVICE_ID)) devid = malloc(SVPD_DEVICE_ID_MAX_SIZE, M_CAMXPT, M_NOWAIT | M_ZERO); if (devid != NULL) { scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (uint8_t *)devid, SVPD_DEVICE_ID_MAX_SIZE, /*evpd*/TRUE, SVPD_DEVICE_ID, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } goto done; } case PROBE_EXTENDED_INQUIRY: { struct scsi_vpd_extended_inquiry_data *ext_inq; ext_inq = NULL; if (scsi_vpd_supported_page(periph, SVPD_EXTENDED_INQUIRY_DATA)) ext_inq = malloc(sizeof(*ext_inq), M_CAMXPT, M_NOWAIT | M_ZERO); if (ext_inq != NULL) { scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (uint8_t *)ext_inq, sizeof(*ext_inq), /*evpd*/TRUE, SVPD_EXTENDED_INQUIRY_DATA, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } /* * We'll have to do without, let our probedone * routine finish up for us.
*/ goto done; } case PROBE_SERIAL_NUM: { struct scsi_vpd_unit_serial_number *serial_buf; struct cam_ed* device; serial_buf = NULL; device = periph->path->device; if (device->serial_num != NULL) { free(device->serial_num, M_CAMXPT); device->serial_num = NULL; device->serial_num_len = 0; } if (scsi_vpd_supported_page(periph, SVPD_UNIT_SERIAL_NUMBER)) serial_buf = (struct scsi_vpd_unit_serial_number *) malloc(sizeof(*serial_buf), M_CAMXPT, M_NOWAIT|M_ZERO); if (serial_buf != NULL) { scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (u_int8_t *)serial_buf, sizeof(*serial_buf), /*evpd*/TRUE, SVPD_UNIT_SERIAL_NUMBER, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } goto done; } default: panic("probestart: invalid action state 0x%x\n", softc->action); } start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; cam_periph_doacquire(periph); xpt_action(start_ccb); } static void proberequestdefaultnegotiation(struct cam_periph *periph) { struct ccb_trans_settings cts; xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) { return; } cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); } /* * Backoff Negotiation Code- only pertinent for SPI devices. */ static int proberequestbackoff(struct cam_periph *periph, struct cam_ed *device) { struct ccb_trans_settings cts; struct ccb_trans_settings_spi *spi; memset(&cts, 0, sizeof (cts)); xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) { if (bootverbose) { xpt_print(periph->path, "failed to get current device settings\n"); } return (0); } if (cts.transport != XPORT_SPI) { if (bootverbose) { xpt_print(periph->path, "not SPI transport\n"); } return (0); } spi = &cts.xport_specific.spi; /* * We cannot renegotiate sync rate if we don't have one. */ if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) { if (bootverbose) { xpt_print(periph->path, "no sync rate known\n"); } return (0); } /* * We'll assert that we don't have to touch PPR options- the * SIM will see what we do with period and offset and adjust * the PPR options as appropriate. */ /* * A sync rate with unknown or zero offset is nonsensical. * A sync period of zero means Async. */ if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0 || spi->sync_offset == 0 || spi->sync_period == 0) { if (bootverbose) { xpt_print(periph->path, "no sync rate available\n"); } return (0); } if (device->flags & CAM_DEV_DV_HIT_BOTTOM) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("hit async: giving up on DV\n")); return (0); } /* * Jump sync_period up by one, but stop at 5MHz and fall back to Async. * We don't try to remember 'last' settings to see if the SIM actually * gets into the speed we want to set. We check on the SIM telling * us that a requested speed is bad, but otherwise don't try and * check the speed due to the asynchronous and handshake nature * of speed setting. */ spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; for (;;) { spi->sync_period++; if (spi->sync_period >= 0xf) { spi->sync_period = 0; spi->sync_offset = 0; CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("setting to async for DV\n")); /* * Once we hit async, we don't want to try * any more settings. 
*/ device->flags |= CAM_DEV_DV_HIT_BOTTOM; } else if (bootverbose) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("DV: period 0x%x\n", spi->sync_period)); printf("setting period to 0x%x\n", spi->sync_period); } cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) { break; } CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("DV: failed to set period 0x%x\n", spi->sync_period)); if (spi->sync_period == 0) { return (0); } } return (1); } #define CCB_COMPLETED_OK(ccb) (((ccb).status & CAM_STATUS_MASK) == CAM_REQ_CMP) static void probedone(struct cam_periph *periph, union ccb *done_ccb) { probe_softc *softc; struct cam_path *path; struct scsi_inquiry_data *inq_buf; u_int32_t priority; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n")); softc = (probe_softc *)periph->softc; path = done_ccb->ccb_h.path; priority = done_ccb->ccb_h.pinfo.priority; cam_periph_assert(periph, MA_OWNED); switch (softc->action) { case PROBE_TUR: { if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) { if (cam_periph_error(done_ccb, 0, SF_NO_PRINT) == ERESTART) { outr: /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); return; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } PROBE_SET_ACTION(softc, PROBE_INQUIRY); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); out: /* Drop freeze taken due to CAM_DEV_QFREEZE and release. */ cam_release_devq(path, 0, 0, 0, FALSE); cam_periph_release_locked(periph); return; } case PROBE_INQUIRY: case PROBE_FULL_INQUIRY: { if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) { u_int8_t periph_qual; path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID; scsi_find_quirk(path->device); inq_buf = &path->device->inq_data; periph_qual = SID_QUAL(inq_buf); if (periph_qual == SID_QUAL_LU_CONNECTED || periph_qual == SID_QUAL_LU_OFFLINE) { u_int8_t len; /* * We conservatively request only * SHORT_INQUIRY_LEN bytes of inquiry * information during our first try * at sending an INQUIRY. If the device * has more information to give, * perform a second request specifying * the amount of information the device * is willing to give. */ len = inq_buf->additional_length + offsetof(struct scsi_inquiry_data, additional_length) + 1; if (softc->action == PROBE_INQUIRY && len > SHORT_INQUIRY_LENGTH) { PROBE_SET_ACTION(softc, PROBE_FULL_INQUIRY); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } scsi_devise_transport(path); if (path->device->lun_id == 0 && SID_ANSI_REV(inq_buf) > SCSI_REV_SPC2 && (SCSI_QUIRK(path->device)->quirks & CAM_QUIRK_NORPTLUNS) == 0) { PROBE_SET_ACTION(softc, PROBE_REPORT_LUNS); /* * Start with room for *one* lun. 
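 *
 * (Arithmetic note, not part of the original comment: a REPORT LUNS
 * response is an 8-byte header followed by 8 bytes per lun, so room for
 * exactly one lun is 8 + 8 = 16 bytes; the reallocation path below sizes
 * the retry buffer as (nlun << 3) + 8 for the same reason.)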
*/ periph->path->target->rpl_size = 16; } else if (INQ_DATA_TQ_ENABLED(inq_buf)) PROBE_SET_ACTION(softc, PROBE_MODE_SENSE); else PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); if (path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); } xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } else if (path->device->lun_id == 0 && SID_ANSI_REV(inq_buf) >= SCSI_REV_SPC2 && (SCSI_QUIRK(path->device)->quirks & CAM_QUIRK_NORPTLUNS) == 0) { PROBE_SET_ACTION(softc, PROBE_REPORT_LUNS); periph->path->target->rpl_size = 16; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } } else if (cam_periph_error(done_ccb, 0, done_ccb->ccb_h.target_lun > 0 ? SF_RETRY_UA|SF_QUIET_IR : SF_RETRY_UA) == ERESTART) { goto outr; } else { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } path->device->flags &= ~CAM_DEV_INQUIRY_DATA_VALID; } /* * If we get to this point, we got an error status back * from the inquiry and the error status doesn't require * automatically retrying the command. Therefore, the * inquiry failed. If we had inquiry information before * for this device, but this latest inquiry command failed, * the device has probably gone away. If this device isn't * already marked unconfigured, notify the peripheral * drivers that this device is no more. */ if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) /* Send the async notification. */ xpt_async(AC_LOST_DEVICE, path, NULL); PROBE_SET_ACTION(softc, PROBE_INVALID); xpt_release_ccb(done_ccb); break; } case PROBE_REPORT_LUNS: { struct ccb_scsiio *csio; struct scsi_report_luns_data *lp; u_int nlun, maxlun; csio = &done_ccb->csio; lp = (struct scsi_report_luns_data *)csio->data_ptr; nlun = scsi_4btoul(lp->length) / 8; maxlun = (csio->dxfer_len / 8) - 1; if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) { if (cam_periph_error(done_ccb, 0, done_ccb->ccb_h.target_lun > 0 ? SF_RETRY_UA|SF_QUIET_IR : SF_RETRY_UA) == ERESTART) { goto outr; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { xpt_release_devq(done_ccb->ccb_h.path, 1, TRUE); } free(lp, M_CAMXPT); lp = NULL; } else if (nlun > maxlun) { /* * Reallocate and retry to cover all luns */ CAM_DEBUG(path, CAM_DEBUG_PROBE, ("Probe: reallocating REPORT_LUNS for %u luns\n", nlun)); free(lp, M_CAMXPT); path->target->rpl_size = (nlun << 3) + 8; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } else if (nlun == 0) { /* * If there don't appear to be any luns, bail. */ free(lp, M_CAMXPT); lp = NULL; } else { lun_id_t lun; int idx; CAM_DEBUG(path, CAM_DEBUG_PROBE, ("Probe: %u lun(s) reported\n", nlun)); CAM_GET_LUN(lp, 0, lun); /* * If the first lun is not lun 0, then either there * is no lun 0 in the list, or the list is unsorted. */ if (lun != 0) { for (idx = 0; idx < nlun; idx++) { CAM_GET_LUN(lp, idx, lun); if (lun == 0) { break; } } if (idx != nlun) { uint8_t tlun[8]; memcpy(tlun, lp->luns[0].lundata, 8); memcpy(lp->luns[0].lundata, lp->luns[idx].lundata, 8); memcpy(lp->luns[idx].lundata, tlun, 8); CAM_DEBUG(path, CAM_DEBUG_PROBE, ("lun 0 in position %u\n", idx)); } } /* * If we have an old lun list, we can either * retest luns that appear to have been dropped, * or just nuke them. We'll opt for the latter. * This function will also install the new list * in the target structure.
*/ probe_purge_old(path, lp, softc->flags); lp = NULL; } inq_buf = &path->device->inq_data; if (path->device->flags & CAM_DEV_INQUIRY_DATA_VALID && (SID_QUAL(inq_buf) == SID_QUAL_LU_CONNECTED || SID_QUAL(inq_buf) == SID_QUAL_LU_OFFLINE)) { if (INQ_DATA_TQ_ENABLED(inq_buf)) PROBE_SET_ACTION(softc, PROBE_MODE_SENSE); else PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } if (lp) { free(lp, M_CAMXPT); } PROBE_SET_ACTION(softc, PROBE_INVALID); xpt_release_ccb(done_ccb); break; } case PROBE_MODE_SENSE: { struct ccb_scsiio *csio; struct scsi_mode_header_6 *mode_hdr; csio = &done_ccb->csio; mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr; if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) { struct scsi_control_page *page; u_int8_t *offset; offset = ((u_int8_t *)&mode_hdr[1]) + mode_hdr->blk_desc_len; page = (struct scsi_control_page *)offset; path->device->queue_flags = page->queue_flags; } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } xpt_release_ccb(done_ccb); free(mode_hdr, M_CAMXPT); PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); xpt_schedule(periph, priority); goto out; } case PROBE_SUPPORTED_VPD_LIST: { struct ccb_scsiio *csio; struct scsi_vpd_supported_page_list *page_list; csio = &done_ccb->csio; page_list = (struct scsi_vpd_supported_page_list *)csio->data_ptr; if (path->device->supported_vpds != NULL) { free(path->device->supported_vpds, M_CAMXPT); path->device->supported_vpds = NULL; path->device->supported_vpds_len = 0; } if (page_list == NULL) { /* * Don't process the command as it was never sent */ } else if (CCB_COMPLETED_OK(csio->ccb_h)) { /* Got vpd list */ path->device->supported_vpds_len = page_list->length + SVPD_SUPPORTED_PAGES_HDR_LEN; path->device->supported_vpds = (uint8_t *)page_list; xpt_release_ccb(done_ccb); PROBE_SET_ACTION(softc, PROBE_DEVICE_ID); xpt_schedule(periph, priority); goto out; } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } if (page_list) free(page_list, M_CAMXPT); /* No VPDs available, skip to device check. */ csio->data_ptr = NULL; goto probe_device_check; } case PROBE_DEVICE_ID: { struct scsi_vpd_device_id *devid; struct ccb_scsiio *csio; uint32_t length = 0; csio = &done_ccb->csio; devid = (struct scsi_vpd_device_id *)csio->data_ptr; /* Clean up from previous instance of this device */ if (path->device->device_id != NULL) { path->device->device_id_len = 0; free(path->device->device_id, M_CAMXPT); path->device->device_id = NULL; } if (devid == NULL) { /* Don't process the command as it was never sent */ } else if (CCB_COMPLETED_OK(csio->ccb_h)) { length = scsi_2btoul(devid->length); if (length != 0) { /* * NB: device_id_len is actual response * size, not buffer size. 
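 * The stored size is therefore scsi_2btoul(devid->length), the
 * descriptor payload, plus SVPD_DEVICE_ID_HDR_LEN for the fixed page
 * header (peripheral byte, page code and two length bytes), so the
 * page is kept exactly as the device returned it.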
*/ path->device->device_id_len = length + SVPD_DEVICE_ID_HDR_LEN; path->device->device_id = (uint8_t *)devid; } } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } /* Free the device id space if we don't use it */ if (devid && length == 0) free(devid, M_CAMXPT); xpt_release_ccb(done_ccb); PROBE_SET_ACTION(softc, PROBE_EXTENDED_INQUIRY); xpt_schedule(periph, priority); goto out; } case PROBE_EXTENDED_INQUIRY: { struct scsi_vpd_extended_inquiry_data *ext_inq; struct ccb_scsiio *csio; int32_t length = 0; csio = &done_ccb->csio; ext_inq = (struct scsi_vpd_extended_inquiry_data *) csio->data_ptr; if (path->device->ext_inq != NULL) { path->device->ext_inq_len = 0; free(path->device->ext_inq, M_CAMXPT); path->device->ext_inq = NULL; } if (ext_inq == NULL) { /* Don't process the command as it was never sent */ } else if (CCB_COMPLETED_OK(csio->ccb_h)) { length = scsi_2btoul(ext_inq->page_length) + __offsetof(struct scsi_vpd_extended_inquiry_data, flags1); length = min(length, sizeof(*ext_inq)); length -= csio->resid; if (length > 0) { path->device->ext_inq_len = length; path->device->ext_inq = (uint8_t *)ext_inq; } } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } /* Free the device id space if we don't use it */ if (ext_inq && length <= 0) free(ext_inq, M_CAMXPT); xpt_release_ccb(done_ccb); PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM); xpt_schedule(periph, priority); goto out; } probe_device_check: case PROBE_SERIAL_NUM: { struct ccb_scsiio *csio; struct scsi_vpd_unit_serial_number *serial_buf; u_int32_t priority; int changed; int have_serialnum; changed = 1; have_serialnum = 0; csio = &done_ccb->csio; priority = done_ccb->ccb_h.pinfo.priority; serial_buf = (struct scsi_vpd_unit_serial_number *)csio->data_ptr; if (serial_buf == NULL) { /* * Don't process the command as it was never sent */ } else if (cam_ccb_status(done_ccb) == CAM_REQ_CMP && (serial_buf->length > 0)) { have_serialnum = 1; path->device->serial_num = (u_int8_t *)malloc((serial_buf->length + 1), M_CAMXPT, M_NOWAIT); if (path->device->serial_num != NULL) { int start, slen; start = strspn(serial_buf->serial_num, " "); slen = serial_buf->length - start; if (slen <= 0) { /* * SPC5r05 says that an all-space serial * number means no product serial number * is available */ slen = 0; } memcpy(path->device->serial_num, &serial_buf->serial_num[start], slen); path->device->serial_num_len = slen; path->device->serial_num[slen] = '\0'; } } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } /* * Let's see if we have seen this device before. 
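 * The comparison below is a fingerprint rather than a field-by-field
 * check: an MD5 digest over the raw inquiry data (plus the serial
 * number when one was returned) is matched against the digest saved
 * when the probe began, and only a mismatch is treated as a changed
 * device worth an AC_LOST_DEVICE announcement.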
*/ if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) { MD5_CTX context; u_int8_t digest[16]; MD5Init(&context); MD5Update(&context, (unsigned char *)&path->device->inq_data, sizeof(struct scsi_inquiry_data)); if (have_serialnum) MD5Update(&context, path->device->serial_num, path->device->serial_num_len); MD5Final(digest, &context); if (bcmp(softc->digest, digest, 16) == 0) changed = 0; /* * XXX Do we need to do a TUR in order to ensure * that the device really hasn't changed??? */ if ((changed != 0) && ((softc->flags & PROBE_NO_ANNOUNCE) == 0)) xpt_async(AC_LOST_DEVICE, path, NULL); } if (serial_buf != NULL) free(serial_buf, M_CAMXPT); if (changed != 0) { /* * Now that we have all the necessary * information to safely perform transfer * negotiations... Controllers don't perform * any negotiation or tagged queuing until * after the first XPT_SET_TRAN_SETTINGS ccb is * received. So, on a new device, just retrieve * the user settings, and set them as the current * settings to set the device up. */ proberequestdefaultnegotiation(periph); xpt_release_ccb(done_ccb); /* * Perform a TUR to allow the controller to * perform any necessary transfer negotiation. */ PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION); xpt_schedule(periph, priority); goto out; } xpt_release_ccb(done_ccb); break; } case PROBE_TUR_FOR_NEGOTIATION: case PROBE_DV_EXIT: if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) { cam_periph_error(done_ccb, 0, SF_NO_PRINT | SF_NO_RECOVERY | SF_NO_RETRY); } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } /* * Do Domain Validation for lun 0 on devices that claim * to support Synchronous Transfer modes. */ if (softc->action == PROBE_TUR_FOR_NEGOTIATION && done_ccb->ccb_h.target_lun == 0 && (path->device->inq_data.flags & SID_Sync) != 0 && (path->device->flags & CAM_DEV_IN_DV) == 0) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Begin Domain Validation\n")); path->device->flags |= CAM_DEV_IN_DV; xpt_release_ccb(done_ccb); PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV1); xpt_schedule(periph, priority); goto out; } if (softc->action == PROBE_DV_EXIT) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Leave Domain Validation\n")); } if (path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); } path->device->flags &= ~(CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM); if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) { /* Inform the XPT that a new device has been found */ done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path, done_ccb); } PROBE_SET_ACTION(softc, PROBE_DONE); xpt_release_ccb(done_ccb); break; case PROBE_INQUIRY_BASIC_DV1: case PROBE_INQUIRY_BASIC_DV2: { struct scsi_inquiry_data *nbuf; struct ccb_scsiio *csio; if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) { cam_periph_error(done_ccb, 0, SF_NO_PRINT | SF_NO_RECOVERY | SF_NO_RETRY); } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } csio = &done_ccb->csio; nbuf = (struct scsi_inquiry_data *)csio->data_ptr; if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) { xpt_print(path, "inquiry data fails comparison at DV%d step\n", softc->action == PROBE_INQUIRY_BASIC_DV1 ? 
1 : 2); if (proberequestbackoff(periph, path->device)) { path->device->flags &= ~CAM_DEV_IN_DV; PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION); } else { /* give up */ PROBE_SET_ACTION(softc, PROBE_DV_EXIT); } free(nbuf, M_CAMXPT); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } free(nbuf, M_CAMXPT); if (softc->action == PROBE_INQUIRY_BASIC_DV1) { PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV2); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } if (softc->action == PROBE_INQUIRY_BASIC_DV2) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Leave Domain Validation Successfully\n")); } if (path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); } path->device->flags &= ~(CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM); if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) { /* Inform the XPT that a new device has been found */ done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path, done_ccb); } PROBE_SET_ACTION(softc, PROBE_DONE); xpt_release_ccb(done_ccb); break; } default: panic("probedone: invalid action state 0x%x\n", softc->action); } done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe); done_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(done_ccb); if (TAILQ_FIRST(&softc->request_ccbs) == NULL) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n")); /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */ cam_release_devq(path, 0, 0, 0, FALSE); cam_periph_release_locked(periph); cam_periph_invalidate(periph); cam_periph_release_locked(periph); } else { probeschedule(periph); goto out; } } static void probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new, probe_flags flags) { struct cam_path *tp; struct scsi_report_luns_data *old; u_int idx1, idx2, nlun_old, nlun_new; lun_id_t this_lun; u_int8_t *ol, *nl; if (path->target == NULL) { return; } mtx_lock(&path->target->luns_mtx); old = path->target->luns; path->target->luns = new; mtx_unlock(&path->target->luns_mtx); if (old == NULL) return; nlun_old = scsi_4btoul(old->length) / 8; nlun_new = scsi_4btoul(new->length) / 8; /* * We are not going to assume sorted lists. Deal. */ for (idx1 = 0; idx1 < nlun_old; idx1++) { ol = old->luns[idx1].lundata; for (idx2 = 0; idx2 < nlun_new; idx2++) { nl = new->luns[idx2].lundata; if (memcmp(nl, ol, 8) == 0) { break; } } if (idx2 < nlun_new) { continue; } /* * An 'old' item not in the 'new' list. * Nuke it. Except that if it is lun 0, * that would be what the probe state * machine is currently working on, * so we won't do that. */ CAM_GET_LUN(old, idx1, this_lun); if (this_lun == 0) { continue; } /* * We also cannot nuke it if it is * not in a lun format we understand * and replace the LUN with a "simple" LUN * if that is all the HBA supports. 
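 * That is: when the HBA did not advertise extended LUN support
 * (PROBE_EXTLUN clear), an old entry is only announced as lost if it
 * can be rendered as a "simple" single-level LUN; anything else is
 * skipped rather than matched against a path it could never have had.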
*/ if (!(flags & PROBE_EXTLUN)) { if (!CAM_CAN_GET_SIMPLE_LUN(old, idx1)) continue; CAM_GET_SIMPLE_LUN(old, idx1, this_lun); } if (xpt_create_path(&tp, NULL, xpt_path_path_id(path), xpt_path_target_id(path), this_lun) == CAM_REQ_CMP) { xpt_async(AC_LOST_DEVICE, tp, NULL); xpt_free_path(tp); } } free(old, M_CAMXPT); } static void probecleanup(struct cam_periph *periph) { free(periph->softc, M_CAMXPT); } static void scsi_find_quirk(struct cam_ed *device) { struct scsi_quirk_entry *quirk; caddr_t match; match = cam_quirkmatch((caddr_t)&device->inq_data, (caddr_t)scsi_quirk_table, nitems(scsi_quirk_table), sizeof(*scsi_quirk_table), scsi_inquiry_match); if (match == NULL) panic("xpt_find_quirk: device didn't match wildcard entry!!"); quirk = (struct scsi_quirk_entry *)match; device->quirk = quirk; device->mintags = quirk->mintags; device->maxtags = quirk->maxtags; } static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS) { int error, val; val = cam_srch_hi; error = sysctl_handle_int(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (val == 0 || val == 1) { cam_srch_hi = val; return (0); } else { return (EINVAL); } } typedef struct { union ccb *request_ccb; struct ccb_pathinq *cpi; int counter; int lunindex[0]; } scsi_scan_bus_info; /* * To start a scan, request_ccb is an XPT_SCAN_BUS ccb. * As the scan progresses, scsi_scan_bus is used as the * callback on completion function. */ static void scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) { struct mtx *mtx; CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("scsi_scan_bus\n")); switch (request_ccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_SCAN_TGT: { scsi_scan_bus_info *scan_info; union ccb *work_ccb, *reset_ccb; struct cam_path *path; u_int i; u_int low_target, max_target; u_int initiator_id; /* Find out the characteristics of the bus */ work_ccb = xpt_alloc_ccb_nowait(); if (work_ccb == NULL) { request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(request_ccb); return; } xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path, request_ccb->ccb_h.pinfo.priority); work_ccb->ccb_h.func_code = XPT_PATH_INQ; xpt_action(work_ccb); if (work_ccb->ccb_h.status != CAM_REQ_CMP) { request_ccb->ccb_h.status = work_ccb->ccb_h.status; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) { /* * Can't scan the bus on an adapter that * cannot perform the initiator role. */ request_ccb->ccb_h.status = CAM_REQ_CMP; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } /* We may need to reset bus first, if we haven't done it yet. 
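 * The reset below is only attempted for HBAs that can actually
 * negotiate (wide or sync capable per hba_inquiry), that did not set
 * PIM_NOBUSRESET, and whose bus has no last_reset timestamp recorded
 * yet; it puts every target back into a known negotiation state
 * before the first probe commands go out.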
*/ if ((work_ccb->cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) && !(work_ccb->cpi.hba_misc & PIM_NOBUSRESET) && !timevalisset(&request_ccb->ccb_h.path->bus->last_reset) && (reset_ccb = xpt_alloc_ccb_nowait()) != NULL) { xpt_setup_ccb(&reset_ccb->ccb_h, request_ccb->ccb_h.path, CAM_PRIORITY_NONE); reset_ccb->ccb_h.func_code = XPT_RESET_BUS; xpt_action(reset_ccb); if (reset_ccb->ccb_h.status != CAM_REQ_CMP) { request_ccb->ccb_h.status = reset_ccb->ccb_h.status; xpt_free_ccb(reset_ccb); xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } xpt_free_ccb(reset_ccb); } /* Save some state for use while we probe for devices */ scan_info = (scsi_scan_bus_info *) malloc(sizeof(scsi_scan_bus_info) + (work_ccb->cpi.max_target * sizeof (u_int)), M_CAMXPT, M_ZERO|M_NOWAIT); if (scan_info == NULL) { request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("SCAN start for %p\n", scan_info)); scan_info->request_ccb = request_ccb; scan_info->cpi = &work_ccb->cpi; /* Cache on our stack so we can work asynchronously */ max_target = scan_info->cpi->max_target; low_target = 0; initiator_id = scan_info->cpi->initiator_id; /* * We can scan all targets in parallel, or do it sequentially. */ if (request_ccb->ccb_h.func_code == XPT_SCAN_TGT) { max_target = low_target = request_ccb->ccb_h.target_id; scan_info->counter = 0; } else if (scan_info->cpi->hba_misc & PIM_SEQSCAN) { max_target = 0; scan_info->counter = 0; } else { scan_info->counter = scan_info->cpi->max_target + 1; if (scan_info->cpi->initiator_id < scan_info->counter) { scan_info->counter--; } } mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path); mtx_unlock(mtx); for (i = low_target; i <= max_target; i++) { cam_status status; if (i == initiator_id) continue; status = xpt_create_path(&path, NULL, request_ccb->ccb_h.path_id, i, 0); if (status != CAM_REQ_CMP) { printf("scsi_scan_bus: xpt_create_path failed" " with status %#x, bus scan halted\n", status); free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = status; xpt_free_ccb(work_ccb); xpt_done(request_ccb); break; } work_ccb = xpt_alloc_ccb_nowait(); if (work_ccb == NULL) { xpt_free_ccb((union ccb *)scan_info->cpi); free(scan_info, M_CAMXPT); xpt_free_path(path); request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(request_ccb); break; } xpt_setup_ccb(&work_ccb->ccb_h, path, request_ccb->ccb_h.pinfo.priority); work_ccb->ccb_h.func_code = XPT_SCAN_LUN; work_ccb->ccb_h.cbfcnp = scsi_scan_bus; work_ccb->ccb_h.flags |= CAM_UNLOCKED; work_ccb->ccb_h.ppriv_ptr0 = scan_info; work_ccb->crcn.flags = request_ccb->crcn.flags; xpt_action(work_ccb); } mtx_lock(mtx); break; } case XPT_SCAN_LUN: { cam_status status; struct cam_path *path, *oldpath; scsi_scan_bus_info *scan_info; struct cam_et *target; struct cam_ed *device, *nextdev; int next_target; path_id_t path_id; target_id_t target_id; lun_id_t lun_id; oldpath = request_ccb->ccb_h.path; status = cam_ccb_status(request_ccb); scan_info = (scsi_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0; path_id = request_ccb->ccb_h.path_id; target_id = request_ccb->ccb_h.target_id; lun_id = request_ccb->ccb_h.target_lun; target = request_ccb->ccb_h.path->target; next_target = 1; mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path); mtx_lock(mtx); mtx_lock(&target->luns_mtx); if (target->luns) { lun_id_t first; u_int nluns = scsi_4btoul(target->luns->length) / 8; /* * Make sure we skip over lun 0 if it's the first member * of the list as we've actually just 
finished probing * it. */ CAM_GET_LUN(target->luns, 0, first); if (first == 0 && scan_info->lunindex[target_id] == 0) { scan_info->lunindex[target_id]++; } /* * Skip any LUNs that the HBA can't deal with. */ while (scan_info->lunindex[target_id] < nluns) { if (scan_info->cpi->hba_misc & PIM_EXTLUNS) { CAM_GET_LUN(target->luns, scan_info->lunindex[target_id], lun_id); break; } if (CAM_CAN_GET_SIMPLE_LUN(target->luns, scan_info->lunindex[target_id])) { CAM_GET_SIMPLE_LUN(target->luns, scan_info->lunindex[target_id], lun_id); break; } scan_info->lunindex[target_id]++; } if (scan_info->lunindex[target_id] < nluns) { mtx_unlock(&target->luns_mtx); next_target = 0; CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("next lun to try at index %u is %jx\n", scan_info->lunindex[target_id], (uintmax_t)lun_id)); scan_info->lunindex[target_id]++; } else { mtx_unlock(&target->luns_mtx); /* We're done with scanning all luns. */ } } else { mtx_unlock(&target->luns_mtx); device = request_ccb->ccb_h.path->device; /* Continue sequential LUN scan if: */ /* -- we have more LUNs that need recheck */ mtx_lock(&target->bus->eb_mtx); nextdev = device; while ((nextdev = TAILQ_NEXT(nextdev, links)) != NULL) if ((nextdev->flags & CAM_DEV_UNCONFIGURED) == 0) break; mtx_unlock(&target->bus->eb_mtx); if (nextdev != NULL) { next_target = 0; /* -- stop if CAM_QUIRK_NOLUNS is set. */ } else if (SCSI_QUIRK(device)->quirks & CAM_QUIRK_NOLUNS) { next_target = 1; /* -- this LUN is connected and its SCSI version * allows more LUNs. */ } else if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) { if (lun_id < (CAM_SCSI2_MAXLUN-1) || CAN_SRCH_HI_DENSE(device)) next_target = 0; /* -- this LUN is disconnected, its SCSI version * allows more LUNs and we guess they may be. */ } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) { if (lun_id < (CAM_SCSI2_MAXLUN-1) || CAN_SRCH_HI_SPARSE(device)) next_target = 0; } if (next_target == 0) { lun_id++; if (lun_id > scan_info->cpi->max_lun) next_target = 1; } } /* * Check to see if we scan any further luns. */ if (next_target) { int done; /* * Free the current request path- we're done with it. 
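 * Target bookkeeping from here (hop_again): an XPT_SCAN_TGT request
 * is done after its single target; PIM_SEQSCAN buses walk targets
 * upward, incrementing scan_info->counter and hopping over the
 * initiator id; fully parallel scans start the counter at
 * max_target + 1 (less one if the initiator id is in range) and are
 * done when it drains to zero.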
*/ xpt_free_path(oldpath); hop_again: done = 0; if (scan_info->request_ccb->ccb_h.func_code == XPT_SCAN_TGT) { done = 1; } else if (scan_info->cpi->hba_misc & PIM_SEQSCAN) { scan_info->counter++; if (scan_info->counter == scan_info->cpi->initiator_id) { scan_info->counter++; } if (scan_info->counter >= scan_info->cpi->max_target+1) { done = 1; } } else { scan_info->counter--; if (scan_info->counter == 0) { done = 1; } } if (done) { mtx_unlock(mtx); xpt_free_ccb(request_ccb); xpt_free_ccb((union ccb *)scan_info->cpi); request_ccb = scan_info->request_ccb; CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("SCAN done for %p\n", scan_info)); free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(request_ccb); break; } if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) { mtx_unlock(mtx); xpt_free_ccb(request_ccb); break; } status = xpt_create_path(&path, NULL, scan_info->request_ccb->ccb_h.path_id, scan_info->counter, 0); if (status != CAM_REQ_CMP) { mtx_unlock(mtx); printf("scsi_scan_bus: xpt_create_path failed" " with status %#x, bus scan halted\n", status); xpt_free_ccb(request_ccb); xpt_free_ccb((union ccb *)scan_info->cpi); request_ccb = scan_info->request_ccb; free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = status; xpt_done(request_ccb); break; } xpt_setup_ccb(&request_ccb->ccb_h, path, request_ccb->ccb_h.pinfo.priority); request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->ccb_h.cbfcnp = scsi_scan_bus; request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->ccb_h.ppriv_ptr0 = scan_info; request_ccb->crcn.flags = scan_info->request_ccb->crcn.flags; } else { status = xpt_create_path(&path, NULL, path_id, target_id, lun_id); /* * Free the old request path- we're done with it. We * do this *after* creating the new path so that * we don't remove a target that has our lun list * in the case that lun 0 is not present. */ xpt_free_path(oldpath); if (status != CAM_REQ_CMP) { printf("scsi_scan_bus: xpt_create_path failed " "with status %#x, halting LUN scan\n", status); goto hop_again; } xpt_setup_ccb(&request_ccb->ccb_h, path, request_ccb->ccb_h.pinfo.priority); request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->ccb_h.cbfcnp = scsi_scan_bus; request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->ccb_h.ppriv_ptr0 = scan_info; request_ccb->crcn.flags = scan_info->request_ccb->crcn.flags; } mtx_unlock(mtx); xpt_action(request_ccb); break; } default: break; } } static void scsi_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *request_ccb) { struct ccb_pathinq cpi; cam_status status; struct cam_path *new_path; struct cam_periph *old_periph; int lock; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("scsi_scan_lun\n")); xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.ccb_h.status != CAM_REQ_CMP) { if (request_ccb != NULL) { request_ccb->ccb_h.status = cpi.ccb_h.status; xpt_done(request_ccb); } return; } if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) { /* * Can't scan the bus on an adapter that * cannot perform the initiator role. 
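 * Note that this still completes the request as CAM_REQ_CMP: a
 * target-mode-only SIM is not an error, there is simply nothing for
 * the initiator-side probe machinery to do here.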
*/ if (request_ccb != NULL) { request_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(request_ccb); } return; } if (request_ccb == NULL) { request_ccb = xpt_alloc_ccb_nowait(); if (request_ccb == NULL) { xpt_print(path, "scsi_scan_lun: can't allocate CCB, " "can't continue\n"); return; } status = xpt_create_path(&new_path, NULL, path->bus->path_id, path->target->target_id, path->device->lun_id); if (status != CAM_REQ_CMP) { xpt_print(path, "scsi_scan_lun: can't create path, " "can't continue\n"); xpt_free_ccb(request_ccb); return; } xpt_setup_ccb(&request_ccb->ccb_h, new_path, CAM_PRIORITY_XPT); request_ccb->ccb_h.cbfcnp = xptscandone; request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->crcn.flags = flags; } lock = (xpt_path_owned(path) == 0); if (lock) xpt_path_lock(path); if ((old_periph = cam_periph_find(path, "probe")) != NULL) { if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) { probe_softc *softc; softc = (probe_softc *)old_periph->softc; TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); } else { request_ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(request_ccb); } } else { status = cam_periph_alloc(proberegister, NULL, probecleanup, probestart, "probe", CAM_PERIPH_BIO, request_ccb->ccb_h.path, NULL, 0, request_ccb); if (status != CAM_REQ_CMP) { xpt_print(path, "scsi_scan_lun: cam_alloc_periph " "returned an error, can't continue probe\n"); request_ccb->ccb_h.status = status; xpt_done(request_ccb); } } if (lock) xpt_path_unlock(path); } static void xptscandone(struct cam_periph *periph, union ccb *done_ccb) { xpt_free_path(done_ccb->ccb_h.path); xpt_free_ccb(done_ccb); } static struct cam_ed * scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct scsi_quirk_entry *quirk; struct cam_ed *device; device = xpt_alloc_device(bus, target, lun_id); if (device == NULL) return (NULL); /* * Take the default quirk entry until we have inquiry * data and can determine a better quirk to use. */ quirk = &scsi_quirk_table[nitems(scsi_quirk_table) - 1]; device->quirk = (void *)quirk; device->mintags = quirk->mintags; device->maxtags = quirk->maxtags; bzero(&device->inq_data, sizeof(device->inq_data)); device->inq_flags = 0; device->queue_flags = 0; device->serial_num = NULL; device->serial_num_len = 0; device->device_id = NULL; device->device_id_len = 0; device->supported_vpds = NULL; device->supported_vpds_len = 0; return (device); } static void scsi_devise_transport(struct cam_path *path) { struct ccb_pathinq cpi; struct ccb_trans_settings cts; struct scsi_inquiry_data *inq_buf; /* Get transport information from the SIM */ xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); inq_buf = NULL; if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) inq_buf = &path->device->inq_data; path->device->protocol = PROTO_SCSI; path->device->protocol_version = inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version; path->device->transport = cpi.transport; path->device->transport_version = cpi.transport_version; /* * Any device not using SPI3 features should * be considered SPI2 or lower. 
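 * Concretely: with valid inquiry data, an SPI device whose spi3data
 * byte has none of the SID_SPI_MASK feature bits set gets its
 * transport_version clamped to 2, whatever revision it claimed.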
*/ if (inq_buf != NULL) { if (path->device->transport == XPORT_SPI && (inq_buf->spi3data & SID_SPI_MASK) == 0 && path->device->transport_version > 2) path->device->transport_version = 2; } else { struct cam_ed* otherdev; for (otherdev = TAILQ_FIRST(&path->target->ed_entries); otherdev != NULL; otherdev = TAILQ_NEXT(otherdev, links)) { if (otherdev != path->device) break; } if (otherdev != NULL) { /* * Initially assume the same versioning as * prior luns for this target. */ path->device->protocol_version = otherdev->protocol_version; path->device->transport_version = otherdev->transport_version; } else { /* Until we know better, opt for safety */ path->device->protocol_version = 2; if (path->device->transport == XPORT_SPI) path->device->transport_version = 2; else path->device->transport_version = 0; } } /* * XXX * For a device compliant with SPC-2 we should be able * to determine the transport version supported by * scrutinizing the version descriptors in the * inquiry buffer. */ /* Tell the controller what we think */ xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.transport = path->device->transport; cts.transport_version = path->device->transport_version; cts.protocol = path->device->protocol; cts.protocol_version = path->device->protocol_version; cts.proto_specific.valid = 0; cts.xport_specific.valid = 0; xpt_action((union ccb *)&cts); } static void scsi_dev_advinfo(union ccb *start_ccb) { struct cam_ed *device; struct ccb_dev_advinfo *cdai; off_t amt; start_ccb->ccb_h.status = CAM_REQ_INVALID; device = start_ccb->ccb_h.path->device; cdai = &start_ccb->cdai; switch(cdai->buftype) { case CDAI_TYPE_SCSI_DEVID: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->device_id_len; if (device->device_id_len == 0) break; amt = device->device_id_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->device_id, amt); break; case CDAI_TYPE_SERIAL_NUM: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->serial_num_len; if (device->serial_num_len == 0) break; amt = device->serial_num_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->serial_num, amt); break; case CDAI_TYPE_PHYS_PATH: if (cdai->flags & CDAI_FLAG_STORE) { if (device->physpath != NULL) { free(device->physpath, M_CAMXPT); device->physpath = NULL; device->physpath_len = 0; } /* Clear existing buffer if zero length */ if (cdai->bufsiz == 0) break; device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT); if (device->physpath == NULL) { start_ccb->ccb_h.status = CAM_REQ_ABORTED; return; } device->physpath_len = cdai->bufsiz; memcpy(device->physpath, cdai->buf, cdai->bufsiz); } else { cdai->provsiz = device->physpath_len; if (device->physpath_len == 0) break; amt = device->physpath_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->physpath, amt); } break; case CDAI_TYPE_RCAPLONG: if (cdai->flags & CDAI_FLAG_STORE) { if (device->rcap_buf != NULL) { free(device->rcap_buf, M_CAMXPT); device->rcap_buf = NULL; } device->rcap_len = cdai->bufsiz; /* Clear existing buffer if zero length */ if (cdai->bufsiz == 0) break; device->rcap_buf = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT); if (device->rcap_buf == NULL) { start_ccb->ccb_h.status = CAM_REQ_ABORTED; return; } memcpy(device->rcap_buf, cdai->buf, cdai->bufsiz); } else { cdai->provsiz = device->rcap_len; if (device->rcap_len == 0) break; amt = device->rcap_len; if (cdai->provsiz 
> cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->rcap_buf, amt); } break; case CDAI_TYPE_EXT_INQ: /* * We fetch extended inquiry data during probe, if * available. We don't allow changing it. */ if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->ext_inq_len; if (device->ext_inq_len == 0) break; amt = device->ext_inq_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->ext_inq, amt); break; default: return; } start_ccb->ccb_h.status = CAM_REQ_CMP; if (cdai->flags & CDAI_FLAG_STORE) { xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path, (void *)(uintptr_t)cdai->buftype); } } static void scsi_action(union ccb *start_ccb) { switch (start_ccb->ccb_h.func_code) { case XPT_SET_TRAN_SETTINGS: { scsi_set_transfer_settings(&start_ccb->cts, start_ccb->ccb_h.path, /*async_update*/FALSE); break; } case XPT_SCAN_BUS: case XPT_SCAN_TGT: scsi_scan_bus(start_ccb->ccb_h.path->periph, start_ccb); break; case XPT_SCAN_LUN: scsi_scan_lun(start_ccb->ccb_h.path->periph, start_ccb->ccb_h.path, start_ccb->crcn.flags, start_ccb); break; case XPT_DEV_ADVINFO: { scsi_dev_advinfo(start_ccb); break; } default: xpt_action_default(start_ccb); break; } } static void scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path, int async_update) { struct ccb_pathinq cpi; struct ccb_trans_settings cur_cts; struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_scsi *cur_scsi; struct scsi_inquiry_data *inq_data; struct cam_ed *device; if (path == NULL || (device = path->device) == NULL) { cts->ccb_h.status = CAM_PATH_INVALID; xpt_done((union ccb *)cts); return; } if (cts->protocol == PROTO_UNKNOWN || cts->protocol == PROTO_UNSPECIFIED) { cts->protocol = device->protocol; cts->protocol_version = device->protocol_version; } if (cts->protocol_version == PROTO_VERSION_UNKNOWN || cts->protocol_version == PROTO_VERSION_UNSPECIFIED) cts->protocol_version = device->protocol_version; if (cts->protocol != device->protocol) { xpt_print(path, "Uninitialized Protocol %x:%x?\n", cts->protocol, device->protocol); cts->protocol = device->protocol; } if (cts->protocol_version > device->protocol_version) { if (bootverbose) { xpt_print(path, "Down reving Protocol " "Version from %d to %d?\n", cts->protocol_version, device->protocol_version); } cts->protocol_version = device->protocol_version; } if (cts->transport == XPORT_UNKNOWN || cts->transport == XPORT_UNSPECIFIED) { cts->transport = device->transport; cts->transport_version = device->transport_version; } if (cts->transport_version == XPORT_VERSION_UNKNOWN || cts->transport_version == XPORT_VERSION_UNSPECIFIED) cts->transport_version = device->transport_version; if (cts->transport != device->transport) { xpt_print(path, "Uninitialized Transport %x:%x?\n", cts->transport, device->transport); cts->transport = device->transport; } if (cts->transport_version > device->transport_version) { if (bootverbose) { xpt_print(path, "Down reving Transport " "Version from %d to %d?\n", cts->transport_version, device->transport_version); } cts->transport_version = device->transport_version; } /* * Nothing more of interest to do unless * this is a device connected via the * SCSI protocol. 
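 * (Every other protocol just has the settings handed through to
 * xpt_action_default() untouched; the tag, SPI and bus-width sanity
 * checks that follow only make sense against SCSI inquiry data.)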
*/ if (cts->protocol != PROTO_SCSI) { if (async_update == FALSE) xpt_action_default((union ccb *)cts); return; } inq_data = &device->inq_data; scsi = &cts->proto_specific.scsi; xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* SCSI specific sanity checking */ if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0 || (device->queue_flags & SCP_QUEUE_DQUE) != 0 || (device->mintags == 0)) { /* * Can't tag on hardware that doesn't support tags, * doesn't have it enabled, or has broken tag support. */ scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } if (async_update == FALSE) { /* * Perform sanity checking against what the * controller and device can do. */ xpt_setup_ccb(&cur_cts.ccb_h, path, CAM_PRIORITY_NONE); cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cur_cts.type = cts->type; xpt_action((union ccb *)&cur_cts); if (cam_ccb_status((union ccb *)&cur_cts) != CAM_REQ_CMP) { return; } cur_scsi = &cur_cts.proto_specific.scsi; if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) { scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB; } if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0) scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } /* SPI specific sanity checking */ if (cts->transport == XPORT_SPI && async_update == FALSE) { u_int spi3caps; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings_spi *cur_spi; spi = &cts->xport_specific.spi; cur_spi = &cur_cts.xport_specific.spi; /* Fill in any gaps in what the user gave us */ if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) spi->sync_period = cur_spi->sync_period; if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) spi->sync_period = 0; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) spi->sync_offset = cur_spi->sync_offset; if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) spi->sync_offset = 0; if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) spi->ppr_options = cur_spi->ppr_options; if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) spi->ppr_options = 0; if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0) spi->bus_width = cur_spi->bus_width; if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0) spi->bus_width = 0; if ((spi->valid & CTS_SPI_VALID_DISC) == 0) { spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB; } if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0) spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0 && (inq_data->flags & SID_Sync) == 0 && cts->type == CTS_TYPE_CURRENT_SETTINGS) || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)) { /* Force async */ spi->sync_period = 0; spi->sync_offset = 0; } switch (spi->bus_width) { case MSG_EXT_WDTR_BUS_32_BIT: if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0 || (inq_data->flags & SID_WBus32) != 0 || cts->type == CTS_TYPE_USER_SETTINGS) && (cpi.hba_inquiry & PI_WIDE_32) != 0) break; /* Fall Through to 16-bit */ case MSG_EXT_WDTR_BUS_16_BIT: if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0 || (inq_data->flags & SID_WBus16) != 0 || cts->type == CTS_TYPE_USER_SETTINGS) && (cpi.hba_inquiry & PI_WIDE_16) != 0) { spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; } /* Fall Through to 8-bit */ default: /* New bus width?? 
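 * -- fall back to 8 bits, the one width every target must support.
 * The switch is a step-down ladder: a 32-bit request the controller
 * or device cannot honor falls through to the 16-bit test, and
 * failing that lands here.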
*/ case MSG_EXT_WDTR_BUS_8_BIT: /* All targets can do this */ spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; } spi3caps = cpi.xport_specific.spi.ppr_options; if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0 && cts->type == CTS_TYPE_CURRENT_SETTINGS) spi3caps &= inq_data->spi3data; if ((spi3caps & SID_SPI_CLOCK_DT) == 0) spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ; if ((spi3caps & SID_SPI_IUS) == 0) spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ; if ((spi3caps & SID_SPI_QAS) == 0) spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ; /* No SPI Transfer settings are allowed unless we are wide */ if (spi->bus_width == 0) spi->ppr_options = 0; if ((spi->valid & CTS_SPI_VALID_DISC) && ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0)) { /* * Can't tag queue without disconnection. */ scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; scsi->valid |= CTS_SCSI_VALID_TQ; } /* * If we are currently performing tagged transactions to * this device and want to change its negotiation parameters, * go non-tagged for a bit to give the controller a chance to * negotiate unhampered by tag messages. */ if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (device->inq_flags & SID_CmdQue) != 0 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE| CTS_SPI_VALID_SYNC_OFFSET| CTS_SPI_VALID_BUS_WIDTH)) != 0) scsi_toggle_tags(path); } if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) { int device_tagenb; /* * If we are transitioning from tags to no-tags or * vice-versa, we need to carefully freeze and restart * the queue so that we don't overlap tagged and non-tagged * commands. We also temporarily stop tags if there is * a change in transfer negotiation settings to allow * "tag-less" negotiation. */ if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || (device->inq_flags & SID_CmdQue) != 0) device_tagenb = TRUE; else device_tagenb = FALSE; if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0 && device_tagenb == FALSE) || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0 && device_tagenb == TRUE)) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) { /* * Delay change to use tags until after a * few commands have gone to this device so * the controller has time to perform transfer * negotiations without tagged messages getting * in the way. */ device->tag_delay_count = CAM_TAG_DELAY_COUNT; device->flags |= CAM_DEV_TAG_AFTER_COUNT; } else { xpt_stop_tags(path); } } } if (async_update == FALSE) xpt_action_default((union ccb *)cts); } static void scsi_toggle_tags(struct cam_path *path) { struct cam_ed *dev; /* * Give controllers a chance to renegotiate * before starting tag operations. We * "toggle" tagged queuing off then on * which causes the tag enable command delay * counter to come into effect. */ dev = path->device; if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || ((dev->inq_flags & SID_CmdQue) != 0 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) { struct ccb_trans_settings cts; xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.protocol = PROTO_SCSI; cts.protocol_version = PROTO_VERSION_UNSPECIFIED; cts.transport = XPORT_UNSPECIFIED; cts.transport_version = XPORT_VERSION_UNSPECIFIED; cts.proto_specific.scsi.flags = 0; cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ; scsi_set_transfer_settings(&cts, path, /*async_update*/TRUE); cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB; scsi_set_transfer_settings(&cts, path, /*async_update*/TRUE); } } /* * Handle any per-device event notifications that require action by the XPT. 
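 * Four codes get real work: AC_SENT_BDR and AC_BUS_RESET freeze the
 * device queue, release it after the scsi_delay settle timeout and
 * toggle tagged queueing so renegotiation happens in a tag-free
 * window; AC_INQ_CHANGED rescans the lun to refresh inquiry data;
 * AC_LOST_DEVICE marks a configured device unconfigured and drops its
 * reference; AC_TRANSFER_NEG applies the new settings asynchronously.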
*/ static void scsi_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg) { cam_status status; struct cam_path newpath; /* * We only need to handle events for real devices. */ if (target->target_id == CAM_TARGET_WILDCARD || device->lun_id == CAM_LUN_WILDCARD) return; /* * We need our own path with wildcards expanded to * handle certain types of events. */ if ((async_code == AC_SENT_BDR) || (async_code == AC_BUS_RESET) || (async_code == AC_INQ_CHANGED)) status = xpt_compile_path(&newpath, NULL, bus->path_id, target->target_id, device->lun_id); else status = CAM_REQ_CMP_ERR; if (status == CAM_REQ_CMP) { /* * Allow transfer negotiation to occur in a * tag free environment and after settle delay. */ if (async_code == AC_SENT_BDR || async_code == AC_BUS_RESET) { cam_freeze_devq(&newpath); cam_release_devq(&newpath, RELSIM_RELEASE_AFTER_TIMEOUT, /*reduction*/0, /*timeout*/scsi_delay, /*getcount_only*/0); scsi_toggle_tags(&newpath); } if (async_code == AC_INQ_CHANGED) { /* * We've sent a start unit command, or * something similar to a device that * may have caused its inquiry data to * change. So we re-scan the device to * refresh the inquiry data for it. */ scsi_scan_lun(newpath.periph, &newpath, CAM_EXPECT_INQ_CHANGE, NULL); } xpt_release_path(&newpath); } else if (async_code == AC_LOST_DEVICE && (device->flags & CAM_DEV_UNCONFIGURED) == 0) { device->flags |= CAM_DEV_UNCONFIGURED; xpt_release_device(device); } else if (async_code == AC_TRANSFER_NEG) { struct ccb_trans_settings *settings; struct cam_path path; settings = (struct ccb_trans_settings *)async_arg; xpt_compile_path(&path, NULL, bus->path_id, target->target_id, device->lun_id); scsi_set_transfer_settings(settings, &path, /*async_update*/TRUE); xpt_release_path(&path); } } static void _scsi_announce_periph(struct cam_periph *periph, u_int *speed, u_int *freq, struct ccb_trans_settings *cts) { struct ccb_pathinq cpi; struct cam_path *path = periph->path; cam_periph_assert(periph, MA_OWNED); xpt_setup_ccb(&cts->ccb_h, path, CAM_PRIORITY_NORMAL); cts->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts->type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb*)cts); if (cam_ccb_status((union ccb *)cts) != CAM_REQ_CMP) return; /* Ask the SIM for its base transfer speed */ xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* Report connection speed */ *speed = cpi.base_transfer_speed; *freq = 0; if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0 && spi->sync_offset != 0) { *freq = scsi_calc_syncsrate(spi->sync_period); *speed = *freq; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) *speed *= (0x01 << spi->bus_width); } if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; if (fc->valid & CTS_FC_VALID_SPEED) *speed = fc->bitrate; } if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_SAS) { struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; if (sas->valid & CTS_SAS_VALID_SPEED) *speed = sas->bitrate; } } static void scsi_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb) { struct ccb_trans_settings cts; u_int speed, freq, mb; _scsi_announce_periph(periph, &speed, &freq, &cts); if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) return; mb = speed / 1000; if (mb > 0) 
sbuf_printf(sb, "%s%d: %d.%03dMB/s transfers", periph->periph_name, periph->unit_number, mb, speed % 1000); else sbuf_printf(sb, "%s%d: %dKB/s transfers", periph->periph_name, periph->unit_number, speed); /* Report additional information about SPI connections */ if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi; spi = &cts.xport_specific.spi; if (freq != 0) { sbuf_printf(sb, " (%d.%03dMHz%s, offset %d", freq / 1000, freq % 1000, (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0 ? " DT" : "", spi->sync_offset); } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0 && spi->bus_width > 0) { if (freq != 0) { sbuf_printf(sb, ", "); } else { sbuf_printf(sb, " ("); } sbuf_printf(sb, "%dbit)", 8 * (0x01 << spi->bus_width)); } else if (freq != 0) { sbuf_printf(sb, ")"); } } if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc; fc = &cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWNN) sbuf_printf(sb, " WWNN 0x%llx", (long long) fc->wwnn); if (fc->valid & CTS_FC_VALID_WWPN) sbuf_printf(sb, " WWPN 0x%llx", (long long) fc->wwpn); if (fc->valid & CTS_FC_VALID_PORT) sbuf_printf(sb, " PortID 0x%x", fc->port); } sbuf_printf(sb, "\n"); } static void scsi_announce_periph(struct cam_periph *periph) { struct ccb_trans_settings cts; u_int speed, freq, mb; _scsi_announce_periph(periph, &speed, &freq, &cts); if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) return; mb = speed / 1000; if (mb > 0) printf("%s%d: %d.%03dMB/s transfers", periph->periph_name, periph->unit_number, mb, speed % 1000); else printf("%s%d: %dKB/s transfers", periph->periph_name, periph->unit_number, speed); /* Report additional information about SPI connections */ if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi; spi = &cts.xport_specific.spi; if (freq != 0) { printf(" (%d.%03dMHz%s, offset %d", freq / 1000, freq % 1000, (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0 ? " DT" : "", spi->sync_offset); } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0 && spi->bus_width > 0) { if (freq != 0) { printf(", "); } else { printf(" ("); } printf("%dbit)", 8 * (0x01 << spi->bus_width)); } else if (freq != 0) { printf(")"); } } if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc; fc = &cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWNN) printf(" WWNN 0x%llx", (long long) fc->wwnn); if (fc->valid & CTS_FC_VALID_WWPN) printf(" WWPN 0x%llx", (long long) fc->wwpn); if (fc->valid & CTS_FC_VALID_PORT) printf(" PortID 0x%x", fc->port); } printf("\n"); } static void scsi_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb) { scsi_print_inquiry_sbuf(sb, &device->inq_data); } static void scsi_proto_announce(struct cam_ed *device) { scsi_print_inquiry(&device->inq_data); } static void scsi_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb) { scsi_print_inquiry_short_sbuf(sb, &device->inq_data); } static void scsi_proto_denounce(struct cam_ed *device) { scsi_print_inquiry_short(&device->inq_data); } static void scsi_proto_debug_out(union ccb *ccb) { char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; struct cam_ed *device; if (ccb->ccb_h.func_code != XPT_SCSI_IO) return; device = ccb->ccb_h.path->device; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_CDB,("%s. CDB: %s\n", scsi_op_desc(scsiio_cdb_ptr(&ccb->csio)[0], &device->inq_data), scsi_cdb_string(scsiio_cdb_ptr(&ccb->csio), cdb_str, sizeof(cdb_str)))); }
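/*
 * A worked example of the announcement math above, with illustrative
 * numbers rather than any particular controller's: scsi_calc_syncsrate()
 * turns a sync period factor into a frequency in kHz, so a Fast-20
 * period factor of 0x0c yields freq = 20000, reported on a narrow bus
 * as "20.000MB/s transfers".  With bus_width = MSG_EXT_WDTR_BUS_16_BIT
 * the speed doubles to 40000 and, at sync offset 15, prints as
 * "40.000MB/s transfers (20.000MHz, offset 15, 16bit)".
 */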