Index: head/sys/cam/ata/ata_da.c =================================================================== --- head/sys/cam/ata/ata_da.c (revision 355600) +++ head/sys/cam/ata/ata_da.c (revision 355601) @@ -1,3627 +1,3627 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ada.h" #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif /* _KERNEL */ #ifndef _KERNEL #include #include #endif /* _KERNEL */ #include #include #include #include #include #include #include #include #include #include /* geometry translation */ #ifdef _KERNEL #define ATA_MAX_28BIT_LBA 268435455UL extern int iosched_debug; typedef enum { ADA_STATE_RAHEAD, ADA_STATE_WCACHE, ADA_STATE_LOGDIR, ADA_STATE_IDDIR, ADA_STATE_SUP_CAP, ADA_STATE_ZONE, ADA_STATE_NORMAL } ada_state; typedef enum { ADA_FLAG_CAN_48BIT = 0x00000002, ADA_FLAG_CAN_FLUSHCACHE = 0x00000004, ADA_FLAG_CAN_NCQ = 0x00000008, ADA_FLAG_CAN_DMA = 0x00000010, ADA_FLAG_NEED_OTAG = 0x00000020, ADA_FLAG_WAS_OTAG = 0x00000040, ADA_FLAG_CAN_TRIM = 0x00000080, ADA_FLAG_OPEN = 0x00000100, ADA_FLAG_SCTX_INIT = 0x00000200, ADA_FLAG_CAN_CFA = 0x00000400, ADA_FLAG_CAN_POWERMGT = 0x00000800, ADA_FLAG_CAN_DMA48 = 0x00001000, ADA_FLAG_CAN_LOG = 0x00002000, ADA_FLAG_CAN_IDLOG = 0x00004000, ADA_FLAG_CAN_SUPCAP = 0x00008000, ADA_FLAG_CAN_ZONE = 0x00010000, ADA_FLAG_CAN_WCACHE = 0x00020000, ADA_FLAG_CAN_RAHEAD = 0x00040000, ADA_FLAG_PROBED = 0x00080000, ADA_FLAG_ANNOUNCED = 0x00100000, ADA_FLAG_DIRTY = 0x00200000, ADA_FLAG_CAN_NCQ_TRIM = 0x00400000, /* CAN_TRIM also set */ ADA_FLAG_PIM_ATA_EXT = 0x00800000 } ada_flags; typedef enum { ADA_Q_NONE = 0x00, ADA_Q_4K = 0x01, ADA_Q_NCQ_TRIM_BROKEN = 0x02, ADA_Q_LOG_BROKEN = 0x04, ADA_Q_SMR_DM = 0x08, ADA_Q_NO_TRIM = 0x10, ADA_Q_128KB = 0x20 } ada_quirks; #define ADA_Q_BIT_STRING \ "\020" \ "\0014K" \ "\002NCQ_TRIM_BROKEN" \ "\003LOG_BROKEN" \ "\004SMR_DM" \ "\005NO_TRIM" \ "\006128KB" typedef enum { ADA_CCB_RAHEAD = 0x01, ADA_CCB_WCACHE = 0x02, ADA_CCB_BUFFER_IO = 0x03, ADA_CCB_DUMP = 0x05, ADA_CCB_TRIM = 0x06, ADA_CCB_LOGDIR = 0x07, 
ADA_CCB_IDDIR = 0x08, ADA_CCB_SUP_CAP = 0x09, ADA_CCB_ZONE = 0x0a, ADA_CCB_TYPE_MASK = 0x0F, } ada_ccb_state; typedef enum { ADA_ZONE_NONE = 0x00, ADA_ZONE_DRIVE_MANAGED = 0x01, ADA_ZONE_HOST_AWARE = 0x02, ADA_ZONE_HOST_MANAGED = 0x03 } ada_zone_mode; typedef enum { ADA_ZONE_FLAG_RZ_SUP = 0x0001, ADA_ZONE_FLAG_OPEN_SUP = 0x0002, ADA_ZONE_FLAG_CLOSE_SUP = 0x0004, ADA_ZONE_FLAG_FINISH_SUP = 0x0008, ADA_ZONE_FLAG_RWP_SUP = 0x0010, ADA_ZONE_FLAG_SUP_MASK = (ADA_ZONE_FLAG_RZ_SUP | ADA_ZONE_FLAG_OPEN_SUP | ADA_ZONE_FLAG_CLOSE_SUP | ADA_ZONE_FLAG_FINISH_SUP | ADA_ZONE_FLAG_RWP_SUP), ADA_ZONE_FLAG_URSWRZ = 0x0020, ADA_ZONE_FLAG_OPT_SEQ_SET = 0x0040, ADA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080, ADA_ZONE_FLAG_MAX_SEQ_SET = 0x0100, ADA_ZONE_FLAG_SET_MASK = (ADA_ZONE_FLAG_OPT_SEQ_SET | ADA_ZONE_FLAG_OPT_NONSEQ_SET | ADA_ZONE_FLAG_MAX_SEQ_SET) } ada_zone_flags; static struct ada_zone_desc { ada_zone_flags value; const char *desc; } ada_zone_desc_table[] = { {ADA_ZONE_FLAG_RZ_SUP, "Report Zones" }, {ADA_ZONE_FLAG_OPEN_SUP, "Open" }, {ADA_ZONE_FLAG_CLOSE_SUP, "Close" }, {ADA_ZONE_FLAG_FINISH_SUP, "Finish" }, {ADA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" }, }; /* Offsets into our private area for storing information */ #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 typedef enum { ADA_DELETE_NONE, ADA_DELETE_DISABLE, ADA_DELETE_CFA_ERASE, ADA_DELETE_DSM_TRIM, ADA_DELETE_NCQ_DSM_TRIM, ADA_DELETE_MIN = ADA_DELETE_CFA_ERASE, ADA_DELETE_MAX = ADA_DELETE_NCQ_DSM_TRIM, } ada_delete_methods; static const char *ada_delete_method_names[] = { "NONE", "DISABLE", "CFA_ERASE", "DSM_TRIM", "NCQ_DSM_TRIM" }; #if 0 static const char *ada_delete_method_desc[] = { "NONE", "DISABLED", "CFA Erase", "DSM Trim", "DSM Trim via NCQ" }; #endif struct disk_params { u_int8_t heads; u_int8_t secs_per_track; u_int32_t cylinders; u_int32_t secsize; /* Number of bytes/logical sector */ u_int64_t sectors; /* Total number sectors */ }; #define TRIM_MAX_BLOCKS 8 #define TRIM_MAX_RANGES (TRIM_MAX_BLOCKS * ATA_DSM_BLK_RANGES) struct trim_request { uint8_t data[TRIM_MAX_RANGES * ATA_DSM_RANGE_SIZE]; TAILQ_HEAD(, bio) bps; }; struct ada_softc { struct cam_iosched_softc *cam_iosched; int outstanding_cmds; /* Number of active commands */ int refcount; /* Active xpt_action() calls */ ada_state state; ada_flags flags; ada_zone_mode zone_mode; ada_zone_flags zone_flags; struct ata_gp_log_dir ata_logdir; int valid_logdir_len; struct ata_identify_log_pages ata_iddir; int valid_iddir_len; uint64_t optimal_seq_zones; uint64_t optimal_nonseq_zones; uint64_t max_seq_zones; ada_quirks quirks; ada_delete_methods delete_method; int trim_max_ranges; int read_ahead; int write_cache; int unmappedio; int rotating; #ifdef CAM_TEST_FAILURE int force_read_error; int force_write_error; int periodic_read_error; int periodic_read_count; #endif struct ccb_pathinq cpi; struct disk_params params; struct disk *disk; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; struct callout sendordered_c; struct trim_request trim_req; uint64_t trim_count; uint64_t trim_ranges; uint64_t trim_lbas; #ifdef CAM_IO_STATS struct sysctl_ctx_list sysctl_stats_ctx; struct sysctl_oid *sysctl_stats_tree; u_int timeouts; u_int errors; u_int invalidations; #endif #define ADA_ANNOUNCETMP_SZ 80 char announce_temp[ADA_ANNOUNCETMP_SZ]; #define ADA_ANNOUNCE_SZ 400 char announce_buffer[ADA_ANNOUNCE_SZ]; }; struct ada_quirk_entry { struct scsi_inquiry_pattern inq_pat; ada_quirks quirks; }; static struct ada_quirk_entry ada_quirk_table[] = { { /* Sandisk 
X400 */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SanDisk?SD8SB8U1T00*", "X4162000*" }, /*quirks*/ADA_Q_128KB }, { /* Hitachi Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Hitachi H??????????E3*", "*" }, /*quirks*/ADA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD155UI*", "*" }, /*quirks*/ADA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD204UI*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DL*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Barracuda Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???DM*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Barracuda Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DM*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500423AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500424AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640423AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640424AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750420AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750422AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750423AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???LT*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Red Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????CX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RS*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Green/Red Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Red Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????CX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????AZEX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????FZEX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RS*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PKT*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PKT*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PVT*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PVT*", "*" }, /*quirks*/ADA_Q_4K 
}, /* SSDs */ { /* * Corsair Force 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair CSSD-F*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Corsair Force 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force 3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Corsair Neutron GTX SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Corsair Force GT & GS SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force G*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Crucial M4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "M4-CT???M4SSD2*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Crucial M500 SSDs MU07 firmware * NCQ Trim works */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M500*", "MU07" }, /*quirks*/0 }, { /* * Crucial M500 SSDs all other firmware * NCQ Trim doesn't work */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M500*", "*" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Crucial M550 SSDs * NCQ Trim doesn't work, but only on MU01 firmware */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M550*", "MU01" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Crucial MX100 SSDs * NCQ Trim doesn't work, but only on MU01 firmware */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*MX100*", "MU01" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Crucial RealSSD C300 SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "C300-CTFDDAC???MAG*", "*" }, /*quirks*/ADA_Q_4K }, { /* * FCCT M500 SSDs * NCQ Trim doesn't work */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "FCCT*M500*", "*" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Intel 320 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSA2CW*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel 330 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2CT*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel 510 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2MH*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel 520 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2BW*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel S3610 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2BX*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel X25-M Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSA2M*", "*" }, /*quirks*/ADA_Q_4K }, { /* * KingDian S200 60GB P0921B * Trimming crash the SSD */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "KingDian S200 *", "*" }, /*quirks*/ADA_Q_NO_TRIM }, { /* * Kingston E100 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SE100S3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Kingston HyperX 3k SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SH103S3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Marvell SSDs (entry taken from OpenSolaris) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "MARVELL 
SD88SA02*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Micron M500 SSDs firmware MU07 * NCQ Trim works? */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M500*", "MU07" }, /*quirks*/0 }, { /* * Micron M500 SSDs all other firmware * NCQ Trim doesn't work */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M500*", "*" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Micron M5[15]0 SSDs * NCQ Trim doesn't work, but only MU01 firmware */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M5[15]0*", "MU01" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Micron 5100 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron 5100 MTFDDAK*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Agility 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Agility 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Deneva R Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "DENRSTE251M45*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Vertex 2 SSDs (inc pro series) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ?VERTEX2*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Vertex 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Vertex 4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX4*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Samsung 750 SSDs * 4k optimised, NCQ TRIM seems to work */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 750*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Samsung 830 Series SSDs * 4k optimised, NCQ TRIM Broken (normal TRIM is fine) */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG SSD 830 Series*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Samsung 840 SSDs * 4k optimised, NCQ TRIM Broken (normal TRIM is fine) */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 840*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Samsung 845 SSDs * 4k optimised, NCQ TRIM Broken (normal TRIM is fine) */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 845*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Samsung 850 SSDs * 4k optimised, NCQ TRIM broken (normal TRIM fine) */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 850*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Samsung SM863 Series SSDs (MZ7KM*) * 4k optimised, NCQ believed to be working */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7KM*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Samsung 843T Series SSDs (MZ7WD*) * Samsung PM851 Series SSDs (MZ7TE*) * Samsung PM853T Series SSDs (MZ7GE*) * 4k optimised, NCQ believed to be broken since these are * appear to be built with the same controllers as the 840/850. 
*/ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Same as for SAMSUNG MZ7* but enable the quirks for SSD * starting with MZ7* too */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "MZ7*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Samsung PM851 Series SSDs Dell OEM * device model "SAMSUNG SSD PM851 mSATA 256GB" * 4k optimised, NCQ broken */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG SSD PM851*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * SuperTalent TeraDrive CT SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "FTM??CT25H*", "*" }, /*quirks*/ADA_Q_4K }, { /* * XceedIOPS SATA SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SG9XCS2D*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Samsung drive that doesn't support READ LOG EXT or * READ LOG DMA EXT, despite reporting that it does in * ATA identify data: * SAMSUNG HD200HJ KF100-06 */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD200*", "*" }, /*quirks*/ADA_Q_LOG_BROKEN }, { /* * Samsung drive that doesn't support READ LOG EXT or * READ LOG DMA EXT, despite reporting that it does in * ATA identify data: * SAMSUNG HD501LJ CR100-10 */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD501*", "*" }, /*quirks*/ADA_Q_LOG_BROKEN }, { /* * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR) * Drive Managed SATA hard drive. This drive doesn't report * in firmware that it is a drive managed SMR drive. */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST8000AS000[23]*", "*" }, /*quirks*/ADA_Q_SMR_DM }, { /* Default */ { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, /*vendor*/"*", /*product*/"*", /*revision*/"*" }, /*quirks*/0 }, }; static disk_strategy_t adastrategy; static dumper_t adadump; static periph_init_t adainit; static void adadiskgonecb(struct disk *dp); static periph_oninv_t adaoninvalidate; static periph_dtor_t adacleanup; static void adaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static int adazonemodesysctl(SYSCTL_HANDLER_ARGS); static int adazonesupsysctl(SYSCTL_HANDLER_ARGS); static void adasysctlinit(void *context, int pending); static int adagetattr(struct bio *bp); static void adasetflags(struct ada_softc *softc, struct ccb_getdev *cgd); static void adasetgeom(struct ada_softc *softc, struct ccb_getdev *cgd); static periph_ctor_t adaregister; static void ada_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio); static void ada_cfaerase(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio); static int ada_zone_bio_to_ata(int disk_zone_cmd); static int ada_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp, int *queue_ccb); static periph_start_t adastart; static void adaprobedone(struct cam_periph *periph, union ccb *ccb); static void adazonedone(struct cam_periph *periph, union ccb *ccb); static void adadone(struct cam_periph *periph, union ccb *done_ccb); static int adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); -static timeout_t adasendorderedtag; +static callout_func_t adasendorderedtag; static void adashutdown(void *arg, int howto); static void adasuspend(void *arg); static void adaresume(void *arg); #ifndef ADA_DEFAULT_TIMEOUT #define ADA_DEFAULT_TIMEOUT 30 /* Timeout in seconds */ #endif #ifndef ADA_DEFAULT_RETRY #define ADA_DEFAULT_RETRY 4 #endif #ifndef ADA_DEFAULT_SEND_ORDERED #define ADA_DEFAULT_SEND_ORDERED 1 #endif #ifndef ADA_DEFAULT_SPINDOWN_SHUTDOWN #define ADA_DEFAULT_SPINDOWN_SHUTDOWN 1 #endif 
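The one functional change in this diff appears just above, among the forward declarations: adasendorderedtag is re-declared as a callout_func_t rather than a timeout_t, matching its use as the callout_reset() handler for softc->sendordered_c in adaregister() later in this file. Both typedefs name a void function taking a single void * argument, so the handler body itself does not need to change. Below is a minimal sketch of that pattern under assumed, hypothetical names (example_tick, example_c are for illustration only and are not part of ata_da.c):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>

static callout_func_t	example_tick;	/* same shape as adasendorderedtag */
static struct callout	example_c;	/* assumed initialized with callout_init_mtx() at attach */

static void
example_tick(void *arg)
{
	/* Periodic work goes here; ada(4) uses its handler to flag that an
	 * ordered tag should be sent with the next command. */

	/* Re-arm the callout so the handler keeps firing, here once per second. */
	callout_reset(&example_c, hz, example_tick, arg);
}
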
#ifndef ADA_DEFAULT_SPINDOWN_SUSPEND #define ADA_DEFAULT_SPINDOWN_SUSPEND 1 #endif #ifndef ADA_DEFAULT_READ_AHEAD #define ADA_DEFAULT_READ_AHEAD 1 #endif #ifndef ADA_DEFAULT_WRITE_CACHE #define ADA_DEFAULT_WRITE_CACHE 1 #endif #define ADA_RA (softc->read_ahead >= 0 ? \ softc->read_ahead : ada_read_ahead) #define ADA_WC (softc->write_cache >= 0 ? \ softc->write_cache : ada_write_cache) /* * Most platforms map firmware geometry to actual, but some don't. If * not overridden, default to nothing. */ #ifndef ata_disk_firmware_geom_adjust #define ata_disk_firmware_geom_adjust(disk) #endif static int ada_retry_count = ADA_DEFAULT_RETRY; static int ada_default_timeout = ADA_DEFAULT_TIMEOUT; static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED; static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN; static int ada_spindown_suspend = ADA_DEFAULT_SPINDOWN_SUSPEND; static int ada_read_ahead = ADA_DEFAULT_READ_AHEAD; static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE; static SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0, "CAM Direct Access Disk driver"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RWTUN, &ada_retry_count, 0, "Normal I/O retry count"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RWTUN, &ada_default_timeout, 0, "Normal I/O timeout (in seconds)"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, send_ordered, CTLFLAG_RWTUN, &ada_send_ordered, 0, "Send Ordered Tags"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RWTUN, &ada_spindown_shutdown, 0, "Spin down upon shutdown"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RWTUN, &ada_spindown_suspend, 0, "Spin down upon suspend"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RWTUN, &ada_read_ahead, 0, "Enable disk read-ahead"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RWTUN, &ada_write_cache, 0, "Enable disk write cache"); /* * ADA_ORDEREDTAG_INTERVAL determines how often, relative * to the default timeout, we check to see whether an ordered * tagged transaction is appropriate to prevent simple tag * starvation. Since we'd like to ensure that there is at least * 1/2 of the timeout length left for a starved transaction to * complete after we've sent an ordered tag, we must poll at least * four times in every timeout period. This takes care of the worst * case where a starved transaction starts during an interval that * meets the requirement "don't send an ordered tag" test so it takes * us two intervals to determine that a tag must be sent. 
*/ #ifndef ADA_ORDEREDTAG_INTERVAL #define ADA_ORDEREDTAG_INTERVAL 4 #endif static struct periph_driver adadriver = { adainit, "ada", TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0 }; static int adadeletemethodsysctl(SYSCTL_HANDLER_ARGS); PERIPHDRIVER_DECLARE(ada, adadriver); static MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers"); static int adaopen(struct disk *dp) { struct cam_periph *periph; struct ada_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; if (cam_periph_acquire(periph) != 0) { return(ENXIO); } cam_periph_lock(periph); if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("adaopen\n")); softc = (struct ada_softc *)periph->softc; softc->flags |= ADA_FLAG_OPEN; cam_periph_unhold(periph); cam_periph_unlock(periph); return (0); } static int adaclose(struct disk *dp) { struct cam_periph *periph; struct ada_softc *softc; union ccb *ccb; int error; periph = (struct cam_periph *)dp->d_drv1; softc = (struct ada_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("adaclose\n")); /* We only sync the cache if the drive is capable of it. */ if ((softc->flags & ADA_FLAG_DIRTY) != 0 && (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) != 0 && (periph->flags & CAM_PERIPH_INVALID) == 0 && cam_periph_hold(periph, PRIBIO) == 0) { ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); cam_fill_ataio(&ccb->ataio, 1, NULL, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (softc->flags & ADA_FLAG_CAN_48BIT) ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0); else ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0); error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0, /*sense_flags*/0, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); softc->flags &= ~ADA_FLAG_DIRTY; xpt_release_ccb(ccb); cam_periph_unhold(periph); } softc->flags &= ~ADA_FLAG_OPEN; while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "adaclose", 1); cam_periph_unlock(periph); cam_periph_release(periph); return (0); } static void adaschedule(struct cam_periph *periph) { struct ada_softc *softc = (struct ada_softc *)periph->softc; if (softc->state != ADA_STATE_NORMAL) return; cam_iosched_schedule(softc->cam_iosched, periph); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void adastrategy(struct bio *bp) { struct cam_periph *periph; struct ada_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; softc = (struct ada_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastrategy(%p)\n", bp)); /* * If the device has been made invalid, error out */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } /* * Zone commands must be ordered, because they can depend on the * effects of previously issued commands, and they may affect * commands after them. */ if (bp->bio_cmd == BIO_ZONE) bp->bio_flags |= BIO_ORDERED; /* * Place it in the queue of disk activities for this disk */ cam_iosched_queue_work(softc->cam_iosched, bp); /* * Schedule ourselves for performing the work. 
*/ adaschedule(periph); cam_periph_unlock(periph); return; } static int adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct cam_periph *periph; struct ada_softc *softc; u_int secsize; struct ccb_ataio ataio; struct disk *dp; uint64_t lba; uint16_t count; int error = 0; dp = arg; periph = dp->d_drv1; softc = (struct ada_softc *)periph->softc; secsize = softc->params.secsize; lba = offset / secsize; count = length / secsize; if ((periph->flags & CAM_PERIPH_INVALID) != 0) return (ENXIO); memset(&ataio, 0, sizeof(ataio)); if (length > 0) { xpt_setup_ccb(&ataio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); ataio.ccb_h.ccb_state = ADA_CCB_DUMP; cam_fill_ataio(&ataio, 0, NULL, CAM_DIR_OUT, 0, (u_int8_t *) virtual, length, ada_default_timeout*1000); if ((softc->flags & ADA_FLAG_CAN_48BIT) && (lba + count >= ATA_MAX_28BIT_LBA || count >= 256)) { ata_48bit_cmd(&ataio, ATA_WRITE_DMA48, 0, lba, count); } else { ata_28bit_cmd(&ataio, ATA_WRITE_DMA, 0, lba, count); } error = cam_periph_runccb((union ccb *)&ataio, adaerror, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if (error != 0) printf("Aborting dump due to I/O error.\n"); return (error); } if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) { xpt_setup_ccb(&ataio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); /* * Tell the drive to flush its internal cache. if we * can't flush in 5s we have big problems. No need to * wait the default 60s to detect problems. */ ataio.ccb_h.ccb_state = ADA_CCB_DUMP; cam_fill_ataio(&ataio, 0, NULL, CAM_DIR_NONE, 0, NULL, 0, 5*1000); if (softc->flags & ADA_FLAG_CAN_48BIT) ata_48bit_cmd(&ataio, ATA_FLUSHCACHE48, 0, 0, 0); else ata_28bit_cmd(&ataio, ATA_FLUSHCACHE, 0, 0, 0); error = cam_periph_runccb((union ccb *)&ataio, adaerror, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); } return (error); } static void adainit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("ada: Failed to attach master async callback " "due to status 0x%x!\n", status); } else if (ada_send_ordered) { /* Register our event handlers */ if ((EVENTHANDLER_REGISTER(power_suspend, adasuspend, NULL, EVENTHANDLER_PRI_LAST)) == NULL) printf("adainit: power event registration failed!\n"); if ((EVENTHANDLER_REGISTER(power_resume, adaresume, NULL, EVENTHANDLER_PRI_LAST)) == NULL) printf("adainit: power event registration failed!\n"); if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown, NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) printf("adainit: shutdown event registration failed!\n"); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void adadiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; cam_periph_release(periph); } static void adaoninvalidate(struct cam_periph *periph) { struct ada_softc *softc; softc = (struct ada_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, adaasync, periph, periph->path); #ifdef CAM_IO_STATS softc->invalidations++; #endif /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. 
*/ cam_iosched_flush(softc->cam_iosched, NULL, ENXIO); disk_gone(softc->disk); } static void adacleanup(struct cam_periph *periph) { struct ada_softc *softc; softc = (struct ada_softc *)periph->softc; cam_periph_unlock(periph); cam_iosched_fini(softc->cam_iosched); /* * If we can't free the sysctl tree, oh well... */ if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0) { #ifdef CAM_IO_STATS if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0) xpt_print(periph->path, "can't remove sysctl stats context\n"); #endif if (sysctl_ctx_free(&softc->sysctl_ctx) != 0) xpt_print(periph->path, "can't remove sysctl context\n"); } disk_destroy(softc->disk); callout_drain(&softc->sendordered_c); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void adasetdeletemethod(struct ada_softc *softc) { if (softc->flags & ADA_FLAG_CAN_NCQ_TRIM) softc->delete_method = ADA_DELETE_NCQ_DSM_TRIM; else if (softc->flags & ADA_FLAG_CAN_TRIM) softc->delete_method = ADA_DELETE_DSM_TRIM; else if ((softc->flags & ADA_FLAG_CAN_CFA) && !(softc->flags & ADA_FLAG_CAN_48BIT)) softc->delete_method = ADA_DELETE_CFA_ERASE; else softc->delete_method = ADA_DELETE_NONE; } static void adaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct ccb_getdev cgd; struct cam_periph *periph; struct ada_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_ATA) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(adaregister, adaoninvalidate, adacleanup, adastart, "ada", CAM_PERIPH_BIO, path, adaasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("adaasync: Unable to attach to new device " "due to status 0x%x\n", status); break; } case AC_GETDEV_CHANGED: { softc = (struct ada_softc *)periph->softc; xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); /* * Update our information based on the new Identify data. 
*/ adasetflags(softc, &cgd); adasetgeom(softc, &cgd); disk_resize(softc->disk, M_NOWAIT); cam_periph_async(periph, code, path, arg); break; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct ada_softc *softc; softc = periph->softc; disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); } break; } case AC_SENT_BDR: case AC_BUS_RESET: { softc = (struct ada_softc *)periph->softc; cam_periph_async(periph, code, path, arg); if (softc->state != ADA_STATE_NORMAL) break; xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if (ADA_RA >= 0 && softc->flags & ADA_FLAG_CAN_RAHEAD) softc->state = ADA_STATE_RAHEAD; else if (ADA_WC >= 0 && softc->flags & ADA_FLAG_CAN_WCACHE) softc->state = ADA_STATE_WCACHE; else if ((softc->flags & ADA_FLAG_CAN_LOG) && (softc->zone_mode != ADA_ZONE_NONE)) softc->state = ADA_STATE_LOGDIR; else break; if (cam_periph_acquire(periph) != 0) softc->state = ADA_STATE_NORMAL; else xpt_schedule(periph, CAM_PRIORITY_DEV); } default: cam_periph_async(periph, code, path, arg); break; } } static int adazonemodesysctl(SYSCTL_HANDLER_ARGS) { char tmpbuf[40]; struct ada_softc *softc; int error; softc = (struct ada_softc *)arg1; switch (softc->zone_mode) { case ADA_ZONE_DRIVE_MANAGED: snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed"); break; case ADA_ZONE_HOST_AWARE: snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware"); break; case ADA_ZONE_HOST_MANAGED: snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed"); break; case ADA_ZONE_NONE: default: snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned"); break; } error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req); return (error); } static int adazonesupsysctl(SYSCTL_HANDLER_ARGS) { char tmpbuf[180]; struct ada_softc *softc; struct sbuf sb; int error, first; unsigned int i; softc = (struct ada_softc *)arg1; error = 0; first = 1; sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0); for (i = 0; i < sizeof(ada_zone_desc_table) / sizeof(ada_zone_desc_table[0]); i++) { if (softc->zone_flags & ada_zone_desc_table[i].value) { if (first == 0) sbuf_printf(&sb, ", "); else first = 0; sbuf_cat(&sb, ada_zone_desc_table[i].desc); } } if (first == 1) sbuf_printf(&sb, "None"); sbuf_finish(&sb); error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); return (error); } static void adasysctlinit(void *context, int pending) { struct cam_periph *periph; struct ada_softc *softc; char tmpstr[32], tmpstr2[16]; periph = (struct cam_periph *)context; /* periph was held for us when this task was enqueued */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_release(periph); return; } softc = (struct ada_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d",periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= ADA_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) { printf("adasysctlinit: unable to allocate sysctl tree\n"); cam_periph_release(periph); return; } SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RW, softc, 0, adadeletemethodsysctl, "A", "BIO_DELETE execution method"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "trim_count", 
CTLFLAG_RD, &softc->trim_count, "Total number of dsm commands sent"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "trim_ranges", CTLFLAG_RD, &softc->trim_ranges, "Total number of ranges in dsm commands"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "trim_lbas", CTLFLAG_RD, &softc->trim_lbas, "Total lbas in the dsm commands sent"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "read_ahead", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->read_ahead, 0, "Enable disk read ahead."); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "write_cache", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->write_cache, 0, "Enable disk write cache."); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "unmapped_io", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->unmappedio, 0, "Unmapped I/O leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "rotating", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->rotating, 0, "Rotating media"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD, softc, 0, adazonemodesysctl, "A", "Zone Mode"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD, softc, 0, adazonesupsysctl, "A", "Zone Support"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones, "Optimal Number of Open Sequential Write Preferred Zones"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "optimal_nonseq_zones", CTLFLAG_RD, &softc->optimal_nonseq_zones, "Optimal Number of Non-Sequentially Written Sequential Write " "Preferred Zones"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones, "Maximum Number of Open Sequential Write Required Zones"); #ifdef CAM_TEST_FAILURE /* * Add a 'door bell' sysctl which allows one to set it from userland * and cause something bad to happen. For the moment, we only allow * whacking the next read or write. 
*/ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "force_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->force_read_error, 0, "Force a read error for the next N reads."); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "force_write_error", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->force_write_error, 0, "Force a write error for the next N writes."); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "periodic_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->periodic_read_error, 0, "Force a read error every N reads (don't set too low)."); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "invalidate", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, periph, 0, cam_periph_invalidate_sysctl, "I", "Write 1 to invalidate the drive immediately"); #endif #ifdef CAM_IO_STATS softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats", CTLFLAG_RD, 0, "Statistics"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "timeouts", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->timeouts, 0, "Device timeouts reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "errors", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->errors, 0, "Transport errors reported by the SIM."); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "pack_invalidations", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->invalidations, 0, "Device pack invalidations."); #endif cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx, softc->sysctl_tree); cam_periph_release(periph); } static int adagetattr(struct bio *bp) { int ret; struct cam_periph *periph; periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, periph->path); cam_periph_unlock(periph); if (ret == 0) bp->bio_completed = bp->bio_length; return ret; } static int adadeletemethodsysctl(SYSCTL_HANDLER_ARGS) { char buf[16]; const char *p; struct ada_softc *softc; int i, error, value, methods; softc = (struct ada_softc *)arg1; value = softc->delete_method; if (value < 0 || value > ADA_DELETE_MAX) p = "UNKNOWN"; else p = ada_delete_method_names[value]; strncpy(buf, p, sizeof(buf)); error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); methods = 1 << ADA_DELETE_DISABLE; if ((softc->flags & ADA_FLAG_CAN_CFA) && !(softc->flags & ADA_FLAG_CAN_48BIT)) methods |= 1 << ADA_DELETE_CFA_ERASE; if (softc->flags & ADA_FLAG_CAN_TRIM) methods |= 1 << ADA_DELETE_DSM_TRIM; if (softc->flags & ADA_FLAG_CAN_NCQ_TRIM) methods |= 1 << ADA_DELETE_NCQ_DSM_TRIM; for (i = 0; i <= ADA_DELETE_MAX; i++) { if (!(methods & (1 << i)) || strcmp(buf, ada_delete_method_names[i]) != 0) continue; softc->delete_method = i; return (0); } return (EINVAL); } static void adasetflags(struct ada_softc *softc, struct ccb_getdev *cgd) { if ((cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA) && (cgd->inq_flags & SID_DMA)) softc->flags |= ADA_FLAG_CAN_DMA; else softc->flags &= ~ADA_FLAG_CAN_DMA; if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) { softc->flags |= ADA_FLAG_CAN_48BIT; if (cgd->inq_flags & SID_DMA48) softc->flags |= ADA_FLAG_CAN_DMA48; else softc->flags &= ~ADA_FLAG_CAN_DMA48; } else softc->flags &= ~(ADA_FLAG_CAN_48BIT | ADA_FLAG_CAN_DMA48); if 
(cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE) softc->flags |= ADA_FLAG_CAN_FLUSHCACHE; else softc->flags &= ~ADA_FLAG_CAN_FLUSHCACHE; if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT) softc->flags |= ADA_FLAG_CAN_POWERMGT; else softc->flags &= ~ADA_FLAG_CAN_POWERMGT; if ((cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ) && (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue)) softc->flags |= ADA_FLAG_CAN_NCQ; else softc->flags &= ~ADA_FLAG_CAN_NCQ; if ((cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) && (cgd->inq_flags & SID_DMA) && (softc->quirks & ADA_Q_NO_TRIM) == 0) { softc->flags |= ADA_FLAG_CAN_TRIM; softc->trim_max_ranges = TRIM_MAX_RANGES; if (cgd->ident_data.max_dsm_blocks != 0) { softc->trim_max_ranges = min(cgd->ident_data.max_dsm_blocks * ATA_DSM_BLK_RANGES, softc->trim_max_ranges); } /* * If we can do RCVSND_FPDMA_QUEUED commands, we may be able * to do NCQ trims, if we support trims at all. We also need * support from the SIM to do things properly. Perhaps we * should look at log 13 dword 0 bit 0 and dword 1 bit 0 are * set too... */ if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 && (softc->flags & ADA_FLAG_PIM_ATA_EXT) != 0 && (cgd->ident_data.satacapabilities2 & ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 && (softc->flags & ADA_FLAG_CAN_TRIM) != 0) softc->flags |= ADA_FLAG_CAN_NCQ_TRIM; else softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM; } else softc->flags &= ~(ADA_FLAG_CAN_TRIM | ADA_FLAG_CAN_NCQ_TRIM); if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA) softc->flags |= ADA_FLAG_CAN_CFA; else softc->flags &= ~ADA_FLAG_CAN_CFA; /* * Now that we've set the appropriate flags, setup the delete * method. */ adasetdeletemethod(softc); if ((cgd->ident_data.support.extension & ATA_SUPPORT_GENLOG) && ((softc->quirks & ADA_Q_LOG_BROKEN) == 0)) softc->flags |= ADA_FLAG_CAN_LOG; else softc->flags &= ~ADA_FLAG_CAN_LOG; if ((cgd->ident_data.support3 & ATA_SUPPORT_ZONE_MASK) == ATA_SUPPORT_ZONE_HOST_AWARE) softc->zone_mode = ADA_ZONE_HOST_AWARE; else if (((cgd->ident_data.support3 & ATA_SUPPORT_ZONE_MASK) == ATA_SUPPORT_ZONE_DEV_MANAGED) || (softc->quirks & ADA_Q_SMR_DM)) softc->zone_mode = ADA_ZONE_DRIVE_MANAGED; else softc->zone_mode = ADA_ZONE_NONE; if (cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD) softc->flags |= ADA_FLAG_CAN_RAHEAD; else softc->flags &= ~ADA_FLAG_CAN_RAHEAD; if (cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) softc->flags |= ADA_FLAG_CAN_WCACHE; else softc->flags &= ~ADA_FLAG_CAN_WCACHE; } static cam_status adaregister(struct cam_periph *periph, void *arg) { struct ada_softc *softc; struct ccb_getdev *cgd; struct disk_params *dp; struct sbuf sb; char *announce_buf; caddr_t match; int quirks; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("adaregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc == NULL) { printf("adaregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } announce_buf = softc->announce_temp; bzero(announce_buf, ADA_ANNOUNCETMP_SZ); if (cam_iosched_init(&softc->cam_iosched, periph) != 0) { printf("adaregister: Unable to probe new device. " "Unable to allocate iosched memory\n"); free(softc, M_DEVBUF); return(CAM_REQ_CMP_ERR); } periph->softc = softc; xpt_path_inq(&softc->cpi, periph->path); /* * See if this device has any quirks. 
*/ match = cam_quirkmatch((caddr_t)&cgd->ident_data, (caddr_t)ada_quirk_table, nitems(ada_quirk_table), sizeof(*ada_quirk_table), ata_identify_match); if (match != NULL) softc->quirks = ((struct ada_quirk_entry *)match)->quirks; else softc->quirks = ADA_Q_NONE; TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph); /* * Register this media as a disk */ (void)cam_periph_hold(periph, PRIBIO); cam_periph_unlock(periph); snprintf(announce_buf, ADA_ANNOUNCETMP_SZ, "kern.cam.ada.%d.quirks", periph->unit_number); quirks = softc->quirks; TUNABLE_INT_FETCH(announce_buf, &quirks); softc->quirks = quirks; softc->read_ahead = -1; snprintf(announce_buf, ADA_ANNOUNCETMP_SZ, "kern.cam.ada.%d.read_ahead", periph->unit_number); TUNABLE_INT_FETCH(announce_buf, &softc->read_ahead); softc->write_cache = -1; snprintf(announce_buf, ADA_ANNOUNCETMP_SZ, "kern.cam.ada.%d.write_cache", periph->unit_number); TUNABLE_INT_FETCH(announce_buf, &softc->write_cache); /* * Set support flags based on the Identify data and quirks. */ adasetflags(softc, cgd); if (softc->cpi.hba_misc & PIM_ATA_EXT) softc->flags |= ADA_FLAG_PIM_ATA_EXT; /* Disable queue sorting for non-rotational media by default. */ if (cgd->ident_data.media_rotation_rate == ATA_RATE_NON_ROTATING) { softc->rotating = 0; } else { softc->rotating = 1; } cam_iosched_set_sort_queue(softc->cam_iosched, softc->rotating ? -1 : 0); softc->disk = disk_alloc(); adasetgeom(softc, cgd); softc->disk->d_devstat = devstat_new_entry(periph->periph_name, periph->unit_number, softc->params.secsize, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT | XPORT_DEVSTAT_TYPE(softc->cpi.transport), DEVSTAT_PRIORITY_DISK); softc->disk->d_open = adaopen; softc->disk->d_close = adaclose; softc->disk->d_strategy = adastrategy; softc->disk->d_getattr = adagetattr; softc->disk->d_dump = adadump; softc->disk->d_gone = adadiskgonecb; softc->disk->d_name = "ada"; softc->disk->d_drv1 = periph; softc->disk->d_unit = periph->unit_number; /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * adadiskgonecb()) telling us that our provider has been freed. */ if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); dp = &softc->params; snprintf(announce_buf, ADA_ANNOUNCETMP_SZ, "%juMB (%ju %u byte sectors)", ((uintmax_t)dp->secsize * dp->sectors) / (1024 * 1024), (uintmax_t)dp->sectors, dp->secsize); sbuf_new(&sb, softc->announce_buffer, ADA_ANNOUNCE_SZ, SBUF_FIXEDLEN); xpt_announce_periph_sbuf(periph, &sb, announce_buf); xpt_announce_quirks_sbuf(periph, &sb, softc->quirks, ADA_Q_BIT_STRING); sbuf_finish(&sb); sbuf_putbuf(&sb); /* * Create our sysctl variables, now that we know * we have successfully attached. */ if (cam_periph_acquire(periph) == 0) taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); /* * Add async callbacks for bus reset and * bus device reset calls. I don't bother * checking if this fails as, in most cases, * the system will function just fine without * them and the only alternative would be to * not attach the device on failure. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_GETDEV_CHANGED | AC_ADVINFO_CHANGED, adaasync, periph, periph->path); /* * Schedule a periodic event to occasionally send an * ordered tag to a device. 
*/ callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0); callout_reset(&softc->sendordered_c, (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL, adasendorderedtag, softc); if (ADA_RA >= 0 && softc->flags & ADA_FLAG_CAN_RAHEAD) { softc->state = ADA_STATE_RAHEAD; } else if (ADA_WC >= 0 && softc->flags & ADA_FLAG_CAN_WCACHE) { softc->state = ADA_STATE_WCACHE; } else if ((softc->flags & ADA_FLAG_CAN_LOG) && (softc->zone_mode != ADA_ZONE_NONE)) { softc->state = ADA_STATE_LOGDIR; } else { /* * Nothing to probe, so we can just transition to the * normal state. */ adaprobedone(periph, NULL); return(CAM_REQ_CMP); } xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static int ada_dsmtrim_req_create(struct ada_softc *softc, struct bio *bp, struct trim_request *req) { uint64_t lastlba = (uint64_t)-1, lbas = 0; int c, lastcount = 0, off, ranges = 0; bzero(req, sizeof(*req)); TAILQ_INIT(&req->bps); do { uint64_t lba = bp->bio_pblkno; int count = bp->bio_bcount / softc->params.secsize; /* Try to extend the previous range. */ if (lba == lastlba) { c = min(count, ATA_DSM_RANGE_MAX - lastcount); lastcount += c; off = (ranges - 1) * ATA_DSM_RANGE_SIZE; req->data[off + 6] = lastcount & 0xff; req->data[off + 7] = (lastcount >> 8) & 0xff; count -= c; lba += c; lbas += c; } while (count > 0) { c = min(count, ATA_DSM_RANGE_MAX); off = ranges * ATA_DSM_RANGE_SIZE; req->data[off + 0] = lba & 0xff; req->data[off + 1] = (lba >> 8) & 0xff; req->data[off + 2] = (lba >> 16) & 0xff; req->data[off + 3] = (lba >> 24) & 0xff; req->data[off + 4] = (lba >> 32) & 0xff; req->data[off + 5] = (lba >> 40) & 0xff; req->data[off + 6] = c & 0xff; req->data[off + 7] = (c >> 8) & 0xff; lba += c; lbas += c; count -= c; lastcount = c; ranges++; /* * Its the caller's responsibility to ensure the * request will fit so we don't need to check for * overrun here */ } lastlba = lba; TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue); bp = cam_iosched_next_trim(softc->cam_iosched); if (bp == NULL) break; if (bp->bio_bcount / softc->params.secsize > (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) { cam_iosched_put_back_trim(softc->cam_iosched, bp); break; } } while (1); softc->trim_count++; softc->trim_ranges += ranges; softc->trim_lbas += lbas; return (ranges); } static void ada_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio) { struct trim_request *req = &softc->trim_req; int ranges; ranges = ada_dsmtrim_req_create(softc, bp, req); cam_fill_ataio(ataio, ada_retry_count, adadone, CAM_DIR_OUT, 0, req->data, howmany(ranges, ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE, ada_default_timeout * 1000); ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT, ATA_DSM_TRIM, 0, howmany(ranges, ATA_DSM_BLK_RANGES)); } static void ada_ncq_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio) { struct trim_request *req = &softc->trim_req; int ranges; ranges = ada_dsmtrim_req_create(softc, bp, req); cam_fill_ataio(ataio, ada_retry_count, adadone, CAM_DIR_OUT, 0, req->data, howmany(ranges, ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE, ada_default_timeout * 1000); ata_ncq_cmd(ataio, ATA_SEND_FPDMA_QUEUED, 0, howmany(ranges, ATA_DSM_BLK_RANGES)); ataio->cmd.sector_count_exp = ATA_SFPDMA_DSM; ataio->ata_flags |= ATA_FLAG_AUX; ataio->aux = 1; } static void ada_cfaerase(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio) { struct trim_request *req = &softc->trim_req; uint64_t lba = bp->bio_pblkno; uint16_t count = bp->bio_bcount / softc->params.secsize; bzero(req, sizeof(*req)); 
TAILQ_INIT(&req->bps); TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue); cam_fill_ataio(ataio, ada_retry_count, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (count >= 256) count = 0; ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count); } static int ada_zone_bio_to_ata(int disk_zone_cmd) { switch (disk_zone_cmd) { case DISK_ZONE_OPEN: return ATA_ZM_OPEN_ZONE; case DISK_ZONE_CLOSE: return ATA_ZM_CLOSE_ZONE; case DISK_ZONE_FINISH: return ATA_ZM_FINISH_ZONE; case DISK_ZONE_RWP: return ATA_ZM_RWP; } return -1; } static int ada_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp, int *queue_ccb) { struct ada_softc *softc; int error; error = 0; if (bp->bio_cmd != BIO_ZONE) { error = EINVAL; goto bailout; } softc = periph->softc; switch (bp->bio_zone.zone_cmd) { case DISK_ZONE_OPEN: case DISK_ZONE_CLOSE: case DISK_ZONE_FINISH: case DISK_ZONE_RWP: { int zone_flags; int zone_sa; uint64_t lba; zone_sa = ada_zone_bio_to_ata(bp->bio_zone.zone_cmd); if (zone_sa == -1) { xpt_print(periph->path, "Cannot translate zone " "cmd %#x to ATA\n", bp->bio_zone.zone_cmd); error = EINVAL; goto bailout; } zone_flags = 0; lba = bp->bio_zone.zone_params.rwp.id; if (bp->bio_zone.zone_params.rwp.flags & DISK_ZONE_RWP_FLAG_ALL) zone_flags |= ZBC_OUT_ALL; ata_zac_mgmt_out(&ccb->ataio, /*retries*/ ada_retry_count, /*cbfcnp*/ adadone, /*use_ncq*/ (softc->flags & ADA_FLAG_PIM_ATA_EXT) ? 1 : 0, /*zm_action*/ zone_sa, /*zone_id*/ lba, /*zone_flags*/ zone_flags, /*sector_count*/ 0, /*data_ptr*/ NULL, /*dxfer_len*/ 0, /*timeout*/ ada_default_timeout * 1000); *queue_ccb = 1; break; } case DISK_ZONE_REPORT_ZONES: { uint8_t *rz_ptr; uint32_t num_entries, alloc_size; struct disk_zone_report *rep; rep = &bp->bio_zone.zone_params.report; num_entries = rep->entries_allocated; if (num_entries == 0) { xpt_print(periph->path, "No entries allocated for " "Report Zones request\n"); error = EINVAL; goto bailout; } alloc_size = sizeof(struct scsi_report_zones_hdr) + (sizeof(struct scsi_report_zones_desc) * num_entries); alloc_size = min(alloc_size, softc->disk->d_maxsize); rz_ptr = malloc(alloc_size, M_ATADA, M_NOWAIT | M_ZERO); if (rz_ptr == NULL) { xpt_print(periph->path, "Unable to allocate memory " "for Report Zones request\n"); error = ENOMEM; goto bailout; } ata_zac_mgmt_in(&ccb->ataio, /*retries*/ ada_retry_count, /*cbcfnp*/ adadone, /*use_ncq*/ (softc->flags & ADA_FLAG_PIM_ATA_EXT) ? 1 : 0, /*zm_action*/ ATA_ZM_REPORT_ZONES, /*zone_id*/ rep->starting_id, /*zone_flags*/ rep->rep_options, /*data_ptr*/ rz_ptr, /*dxfer_len*/ alloc_size, /*timeout*/ ada_default_timeout * 1000); /* * For BIO_ZONE, this isn't normally needed. However, it * is used by devstat_end_transaction_bio() to determine * how much data was transferred. */ /* * XXX KDM we have a problem. But I'm not sure how to fix * it. devstat uses bio_bcount - bio_resid to calculate * the amount of data transferred. The GEOM disk code * uses bio_length - bio_resid to calculate the amount of * data in bio_completed. We have different structure * sizes above and below the ada(4) driver. So, if we * use the sizes above, the amount transferred won't be * quite accurate for devstat. If we use different sizes * for bio_bcount and bio_length (above and below * respectively), then the residual needs to match one or * the other. Everything is calculated after the bio * leaves the driver, so changing the values around isn't * really an option. For now, just set the count to the * passed in length. This means that the calculations * above (e.g. 
bio_completed) will be correct, but the * amount of data reported to devstat will be slightly * under or overstated. */ bp->bio_bcount = bp->bio_length; *queue_ccb = 1; break; } case DISK_ZONE_GET_PARAMS: { struct disk_zone_disk_params *params; params = &bp->bio_zone.zone_params.disk_params; bzero(params, sizeof(*params)); switch (softc->zone_mode) { case ADA_ZONE_DRIVE_MANAGED: params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED; break; case ADA_ZONE_HOST_AWARE: params->zone_mode = DISK_ZONE_MODE_HOST_AWARE; break; case ADA_ZONE_HOST_MANAGED: params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED; break; default: case ADA_ZONE_NONE: params->zone_mode = DISK_ZONE_MODE_NONE; break; } if (softc->zone_flags & ADA_ZONE_FLAG_URSWRZ) params->flags |= DISK_ZONE_DISK_URSWRZ; if (softc->zone_flags & ADA_ZONE_FLAG_OPT_SEQ_SET) { params->optimal_seq_zones = softc->optimal_seq_zones; params->flags |= DISK_ZONE_OPT_SEQ_SET; } if (softc->zone_flags & ADA_ZONE_FLAG_OPT_NONSEQ_SET) { params->optimal_nonseq_zones = softc->optimal_nonseq_zones; params->flags |= DISK_ZONE_OPT_NONSEQ_SET; } if (softc->zone_flags & ADA_ZONE_FLAG_MAX_SEQ_SET) { params->max_seq_zones = softc->max_seq_zones; params->flags |= DISK_ZONE_MAX_SEQ_SET; } if (softc->zone_flags & ADA_ZONE_FLAG_RZ_SUP) params->flags |= DISK_ZONE_RZ_SUP; if (softc->zone_flags & ADA_ZONE_FLAG_OPEN_SUP) params->flags |= DISK_ZONE_OPEN_SUP; if (softc->zone_flags & ADA_ZONE_FLAG_CLOSE_SUP) params->flags |= DISK_ZONE_CLOSE_SUP; if (softc->zone_flags & ADA_ZONE_FLAG_FINISH_SUP) params->flags |= DISK_ZONE_FINISH_SUP; if (softc->zone_flags & ADA_ZONE_FLAG_RWP_SUP) params->flags |= DISK_ZONE_RWP_SUP; break; } default: break; } bailout: return (error); } static void adastart(struct cam_periph *periph, union ccb *start_ccb) { struct ada_softc *softc = (struct ada_softc *)periph->softc; struct ccb_ataio *ataio = &start_ccb->ataio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastart\n")); switch (softc->state) { case ADA_STATE_NORMAL: { struct bio *bp; u_int8_t tag_code; bp = cam_iosched_next_bio(softc->cam_iosched); if (bp == NULL) { xpt_release_ccb(start_ccb); break; } if ((bp->bio_flags & BIO_ORDERED) != 0 || (bp->bio_cmd != BIO_DELETE && (softc->flags & ADA_FLAG_NEED_OTAG) != 0)) { softc->flags &= ~ADA_FLAG_NEED_OTAG; softc->flags |= ADA_FLAG_WAS_OTAG; tag_code = 0; } else { tag_code = 1; } switch (bp->bio_cmd) { case BIO_WRITE: case BIO_READ: { uint64_t lba = bp->bio_pblkno; uint16_t count = bp->bio_bcount / softc->params.secsize; void *data_ptr; int rw_op; if (bp->bio_cmd == BIO_WRITE) { softc->flags |= ADA_FLAG_DIRTY; rw_op = CAM_DIR_OUT; } else { rw_op = CAM_DIR_IN; } data_ptr = bp->bio_data; if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) { rw_op |= CAM_DATA_BIO; data_ptr = bp; } #ifdef CAM_TEST_FAILURE int fail = 0; /* * Support the failure ioctls. If the command is a * read, and there are pending forced read errors, or * if a write and pending write errors, then fail this * operation with EIO. This is useful for testing * purposes. Also, support having every Nth read fail. * * This is a rather blunt tool. 
*/ if (bp->bio_cmd == BIO_READ) { if (softc->force_read_error) { softc->force_read_error--; fail = 1; } if (softc->periodic_read_error > 0) { if (++softc->periodic_read_count >= softc->periodic_read_error) { softc->periodic_read_count = 0; fail = 1; } } } else { if (softc->force_write_error) { softc->force_write_error--; fail = 1; } } if (fail) { biofinish(bp, NULL, EIO); xpt_release_ccb(start_ccb); adaschedule(periph); return; } #endif KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 || round_page(bp->bio_bcount + bp->bio_ma_offset) / PAGE_SIZE == bp->bio_ma_n, ("Short bio %p", bp)); cam_fill_ataio(ataio, ada_retry_count, adadone, rw_op, 0, data_ptr, bp->bio_bcount, ada_default_timeout*1000); if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) { if (bp->bio_cmd == BIO_READ) { ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED, lba, count); } else { ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED, lba, count); } } else if ((softc->flags & ADA_FLAG_CAN_48BIT) && (lba + count >= ATA_MAX_28BIT_LBA || count > 256)) { if (softc->flags & ADA_FLAG_CAN_DMA48) { if (bp->bio_cmd == BIO_READ) { ata_48bit_cmd(ataio, ATA_READ_DMA48, 0, lba, count); } else { ata_48bit_cmd(ataio, ATA_WRITE_DMA48, 0, lba, count); } } else { if (bp->bio_cmd == BIO_READ) { ata_48bit_cmd(ataio, ATA_READ_MUL48, 0, lba, count); } else { ata_48bit_cmd(ataio, ATA_WRITE_MUL48, 0, lba, count); } } } else { if (count == 256) count = 0; if (softc->flags & ADA_FLAG_CAN_DMA) { if (bp->bio_cmd == BIO_READ) { ata_28bit_cmd(ataio, ATA_READ_DMA, 0, lba, count); } else { ata_28bit_cmd(ataio, ATA_WRITE_DMA, 0, lba, count); } } else { if (bp->bio_cmd == BIO_READ) { ata_28bit_cmd(ataio, ATA_READ_MUL, 0, lba, count); } else { ata_28bit_cmd(ataio, ATA_WRITE_MUL, 0, lba, count); } } } break; } case BIO_DELETE: switch (softc->delete_method) { case ADA_DELETE_NCQ_DSM_TRIM: ada_ncq_dsmtrim(softc, bp, ataio); break; case ADA_DELETE_DSM_TRIM: ada_dsmtrim(softc, bp, ataio); break; case ADA_DELETE_CFA_ERASE: ada_cfaerase(softc, bp, ataio); break; default: biofinish(bp, NULL, EOPNOTSUPP); xpt_release_ccb(start_ccb); adaschedule(periph); return; } start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM; start_ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); goto out; case BIO_FLUSH: cam_fill_ataio(ataio, 1, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (softc->flags & ADA_FLAG_CAN_48BIT) ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0); else ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0); break; case BIO_ZONE: { int error, queue_ccb; queue_ccb = 0; error = ada_zone_cmd(periph, start_ccb, bp, &queue_ccb); if ((error != 0) || (queue_ccb == 0)) { biofinish(bp, NULL, error); xpt_release_ccb(start_ccb); return; } break; } } start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO; start_ccb->ccb_h.flags |= CAM_UNLOCKED; out: start_ccb->ccb_h.ccb_bp = bp; softc->outstanding_cmds++; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); /* May have more work to do, so ensure we stay scheduled */ adaschedule(periph); break; } case ADA_STATE_RAHEAD: case ADA_STATE_WCACHE: { cam_fill_ataio(ataio, 1, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (softc->state == ADA_STATE_RAHEAD) { ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_RA ? ATA_SF_ENAB_RCACHE : ATA_SF_DIS_RCACHE, 0, 0); start_ccb->ccb_h.ccb_state = ADA_CCB_RAHEAD; } else { ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_WC ? 
ATA_SF_ENAB_WCACHE : ATA_SF_DIS_WCACHE, 0, 0); start_ccb->ccb_h.ccb_state = ADA_CCB_WCACHE; } start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; xpt_action(start_ccb); break; } case ADA_STATE_LOGDIR: { struct ata_gp_log_dir *log_dir; if ((softc->flags & ADA_FLAG_CAN_LOG) == 0) { adaprobedone(periph, start_ccb); break; } log_dir = malloc(sizeof(*log_dir), M_ATADA, M_NOWAIT|M_ZERO); if (log_dir == NULL) { xpt_print(periph->path, "Couldn't malloc log_dir " "data\n"); softc->state = ADA_STATE_NORMAL; xpt_release_ccb(start_ccb); break; } ata_read_log(ataio, /*retries*/1, /*cbfcnp*/adadone, /*log_address*/ ATA_LOG_DIRECTORY, /*page_number*/ 0, /*block_count*/ 1, /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ? CAM_ATAIO_DMA : 0, /*data_ptr*/ (uint8_t *)log_dir, /*dxfer_len*/sizeof(*log_dir), /*timeout*/ada_default_timeout*1000); start_ccb->ccb_h.ccb_state = ADA_CCB_LOGDIR; xpt_action(start_ccb); break; } case ADA_STATE_IDDIR: { struct ata_identify_log_pages *id_dir; id_dir = malloc(sizeof(*id_dir), M_ATADA, M_NOWAIT | M_ZERO); if (id_dir == NULL) { xpt_print(periph->path, "Couldn't malloc id_dir " "data\n"); adaprobedone(periph, start_ccb); break; } ata_read_log(ataio, /*retries*/1, /*cbfcnp*/adadone, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_PAGE_LIST, /*block_count*/ 1, /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ? CAM_ATAIO_DMA : 0, /*data_ptr*/ (uint8_t *)id_dir, /*dxfer_len*/ sizeof(*id_dir), /*timeout*/ada_default_timeout*1000); start_ccb->ccb_h.ccb_state = ADA_CCB_IDDIR; xpt_action(start_ccb); break; } case ADA_STATE_SUP_CAP: { struct ata_identify_log_sup_cap *sup_cap; sup_cap = malloc(sizeof(*sup_cap), M_ATADA, M_NOWAIT|M_ZERO); if (sup_cap == NULL) { xpt_print(periph->path, "Couldn't malloc sup_cap " "data\n"); adaprobedone(periph, start_ccb); break; } ata_read_log(ataio, /*retries*/1, /*cbfcnp*/adadone, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_SUP_CAP, /*block_count*/ 1, /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ? CAM_ATAIO_DMA : 0, /*data_ptr*/ (uint8_t *)sup_cap, /*dxfer_len*/ sizeof(*sup_cap), /*timeout*/ada_default_timeout*1000); start_ccb->ccb_h.ccb_state = ADA_CCB_SUP_CAP; xpt_action(start_ccb); break; } case ADA_STATE_ZONE: { struct ata_zoned_info_log *ata_zone; ata_zone = malloc(sizeof(*ata_zone), M_ATADA, M_NOWAIT|M_ZERO); if (ata_zone == NULL) { xpt_print(periph->path, "Couldn't malloc ata_zone " "data\n"); adaprobedone(periph, start_ccb); break; } ata_read_log(ataio, /*retries*/1, /*cbfcnp*/adadone, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_ZDI, /*block_count*/ 1, /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ? 
CAM_ATAIO_DMA : 0, /*data_ptr*/ (uint8_t *)ata_zone, /*dxfer_len*/ sizeof(*ata_zone), /*timeout*/ada_default_timeout*1000); start_ccb->ccb_h.ccb_state = ADA_CCB_ZONE; xpt_action(start_ccb); break; } } } static void adaprobedone(struct cam_periph *periph, union ccb *ccb) { struct ada_softc *softc; softc = (struct ada_softc *)periph->softc; if (ccb != NULL) xpt_release_ccb(ccb); softc->state = ADA_STATE_NORMAL; softc->flags |= ADA_FLAG_PROBED; adaschedule(periph); if ((softc->flags & ADA_FLAG_ANNOUNCED) == 0) { softc->flags |= ADA_FLAG_ANNOUNCED; cam_periph_unhold(periph); } else { cam_periph_release_locked(periph); } } static void adazonedone(struct cam_periph *periph, union ccb *ccb) { struct bio *bp; bp = (struct bio *)ccb->ccb_h.ccb_bp; switch (bp->bio_zone.zone_cmd) { case DISK_ZONE_OPEN: case DISK_ZONE_CLOSE: case DISK_ZONE_FINISH: case DISK_ZONE_RWP: break; case DISK_ZONE_REPORT_ZONES: { uint32_t avail_len; struct disk_zone_report *rep; struct scsi_report_zones_hdr *hdr; struct scsi_report_zones_desc *desc; struct disk_zone_rep_entry *entry; uint32_t hdr_len, num_avail; uint32_t num_to_fill, i; rep = &bp->bio_zone.zone_params.report; avail_len = ccb->ataio.dxfer_len - ccb->ataio.resid; /* * Note that bio_resid isn't normally used for zone * commands, but it is used by devstat_end_transaction_bio() * to determine how much data was transferred. Because * the size of the SCSI/ATA data structures is different * than the size of the BIO interface structures, the * amount of data actually transferred from the drive will * be different than the amount of data transferred to * the user. */ hdr = (struct scsi_report_zones_hdr *)ccb->ataio.data_ptr; if (avail_len < sizeof(*hdr)) { /* * Is there a better error than EIO here? We asked * for at least the header, and we got less than * that. */ bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; break; } hdr_len = le32dec(hdr->length); if (hdr_len > 0) rep->entries_available = hdr_len / sizeof(*desc); else rep->entries_available = 0; /* * NOTE: using the same values for the BIO version of the * same field as the SCSI/ATA values. This means we could * get some additional values that aren't defined in bio.h * if more values of the same field are defined later. */ rep->header.same = hdr->byte4 & SRZ_SAME_MASK; rep->header.maximum_lba = le64dec(hdr->maximum_lba); /* * If the drive reports no entries that match the query, * we're done. */ if (hdr_len == 0) { rep->entries_filled = 0; bp->bio_resid = bp->bio_bcount; break; } num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc), hdr_len / sizeof(*desc)); /* * If the drive didn't return any data, then we're done. */ if (num_avail == 0) { rep->entries_filled = 0; bp->bio_resid = bp->bio_bcount; break; } num_to_fill = min(num_avail, rep->entries_allocated); /* * If the user didn't allocate any entries for us to fill, * we're done. */ if (num_to_fill == 0) { rep->entries_filled = 0; bp->bio_resid = bp->bio_bcount; break; } for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0]; i < num_to_fill; i++, desc++, entry++) { /* * NOTE: we're mapping the values here directly * from the SCSI/ATA bit definitions to the bio.h * definitions. There is also a warning in * disk_zone.h, but the impact is that if * additional values are added in the SCSI/ATA * specs these will be visible to consumers of * this interface. 
*/ entry->zone_type = desc->zone_type & SRZ_TYPE_MASK; entry->zone_condition = (desc->zone_flags & SRZ_ZONE_COND_MASK) >> SRZ_ZONE_COND_SHIFT; entry->zone_flags |= desc->zone_flags & (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET); entry->zone_length = le64dec(desc->zone_length); entry->zone_start_lba = le64dec(desc->zone_start_lba); entry->write_pointer_lba = le64dec(desc->write_pointer_lba); } rep->entries_filled = num_to_fill; /* * Note that this residual is accurate from the user's * standpoint, but the amount transferred isn't accurate * from the standpoint of what actually came back from the * drive. */ bp->bio_resid = bp->bio_bcount - (num_to_fill * sizeof(*entry)); break; } case DISK_ZONE_GET_PARAMS: default: /* * In theory we should not get a GET_PARAMS bio, since it * should be handled without queueing the command to the * drive. */ panic("%s: Invalid zone command %d", __func__, bp->bio_zone.zone_cmd); break; } if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) free(ccb->ataio.data_ptr, M_ATADA); } static void adadone(struct cam_periph *periph, union ccb *done_ccb) { struct ada_softc *softc; struct ccb_ataio *ataio; struct cam_path *path; uint32_t priority; int state; softc = (struct ada_softc *)periph->softc; ataio = &done_ccb->ataio; path = done_ccb->ccb_h.path; priority = done_ccb->ccb_h.pinfo.priority; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("adadone\n")); state = ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK; switch (state) { case ADA_CCB_BUFFER_IO: case ADA_CCB_TRIM: { struct bio *bp; int error; cam_periph_lock(periph); bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = adaerror(done_ccb, 0, 0); if (error == ERESTART) { /* A retry was scheduled, so just return. */ cam_periph_unlock(periph); return; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); /* * If we get an error on an NCQ DSM TRIM, fall back * to a non-NCQ DSM TRIM forever. Please note that if * CAN_NCQ_TRIM is set, CAN_TRIM is necessarily set too. * However, for this one trim, we treat it as advisory * and return success up the stack. */ if (state == ADA_CCB_TRIM && error != 0 && (softc->flags & ADA_FLAG_CAN_NCQ_TRIM) != 0) { softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM; error = 0; adasetdeletemethod(softc); } } else { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) panic("REQ_CMP with QFRZN"); error = 0; } bp->bio_error = error; if (error != 0) { bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } else { if (bp->bio_cmd == BIO_ZONE) adazonedone(periph, done_ccb); else if (state == ADA_CCB_TRIM) bp->bio_resid = 0; else bp->bio_resid = ataio->resid; if ((bp->bio_resid > 0) && (bp->bio_cmd != BIO_ZONE)) bp->bio_flags |= BIO_ERROR; } softc->outstanding_cmds--; if (softc->outstanding_cmds == 0) softc->flags |= ADA_FLAG_WAS_OTAG; /* * We need to call cam_iosched before we call biodone so that we * don't measure any activity that happens in the completion * routine, which in the case of sendfile can be quite * extensive. Release the periph refcount taken in adastart() * for each CCB. 
*/ cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb); xpt_release_ccb(done_ccb); KASSERT(softc->refcount >= 1, ("adadone softc %p refcount %d", softc, softc->refcount)); softc->refcount--; if (state == ADA_CCB_TRIM) { TAILQ_HEAD(, bio) queue; struct bio *bp1; TAILQ_INIT(&queue); TAILQ_CONCAT(&queue, &softc->trim_req.bps, bio_queue); /* * Normally, the xpt_release_ccb() above would make sure * that when we have more work to do, that work would * get kicked off. However, the I/O scheduler was still * tracking this trim as in progress at that point, which * allows other I/O to make progress when many BIO_DELETE * requests are pushed down. So mark the trim completed and * call adaschedule again here so that we don't stall if * there are no other I/Os pending apart from BIO_DELETEs. */ cam_iosched_trim_done(softc->cam_iosched); adaschedule(periph); cam_periph_unlock(periph); while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, bp1, bio_queue); bp1->bio_error = error; if (error != 0) { bp1->bio_flags |= BIO_ERROR; bp1->bio_resid = bp1->bio_bcount; } else bp1->bio_resid = 0; biodone(bp1); } } else { adaschedule(periph); cam_periph_unlock(periph); biodone(bp); } return; } case ADA_CCB_RAHEAD: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (adaerror(done_ccb, 0, 0) == ERESTART) { /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); return; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the reference on the peripheral. * The peripheral will only go away once the last reference * is removed, and we need it around for the CCB release * operation. */ xpt_release_ccb(done_ccb); softc->state = ADA_STATE_WCACHE; xpt_schedule(periph, priority); /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); return; } case ADA_CCB_WCACHE: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (adaerror(done_ccb, 0, 0) == ERESTART) { /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); return; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); if ((softc->flags & ADA_FLAG_CAN_LOG) && (softc->zone_mode != ADA_ZONE_NONE)) { xpt_release_ccb(done_ccb); softc->state = ADA_STATE_LOGDIR; xpt_schedule(periph, priority); } else { adaprobedone(periph, done_ccb); } return; } case ADA_CCB_LOGDIR: { int error; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { error = 0; softc->valid_logdir_len = 0; bzero(&softc->ata_logdir, sizeof(softc->ata_logdir)); softc->valid_logdir_len = ataio->dxfer_len - ataio->resid; if (softc->valid_logdir_len > 0) bcopy(ataio->data_ptr, &softc->ata_logdir, min(softc->valid_logdir_len, sizeof(softc->ata_logdir))); /* * Figure out whether the Identify Device log is * supported. The General Purpose log directory * has a header, and lists the number of pages * available for each GP log identified by the * offset into the list.
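	 * The data read above is the GP Log Directory (log address 0): a
	 * 16-bit version word followed by one little-endian 16-bit page
	 * count per log address. num_pages[] begins at log address 1,
	 * which is why the index below is pulled back by sizeof(uint16_t)
	 * when looking up the page count for ATA_IDENTIFY_DATA_LOG.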
*/ if ((softc->valid_logdir_len >= ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t))) && (le16dec(softc->ata_logdir.header) == ATA_GP_LOG_DIR_VERSION) && (le16dec(&softc->ata_logdir.num_pages[ (ATA_IDENTIFY_DATA_LOG * sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){ softc->flags |= ADA_FLAG_CAN_IDLOG; } else { softc->flags &= ~ADA_FLAG_CAN_IDLOG; } } else { error = adaerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA log directory, * then ATA logs are effectively not * supported even if the bit is set in the * identify data. */ softc->flags &= ~(ADA_FLAG_CAN_LOG | ADA_FLAG_CAN_IDLOG); if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(ataio->data_ptr, M_ATADA); if ((error == 0) && (softc->flags & ADA_FLAG_CAN_IDLOG)) { softc->state = ADA_STATE_IDDIR; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); } else adaprobedone(periph, done_ccb); return; } case ADA_CCB_IDDIR: { int error; if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { off_t entries_offset, max_entries; error = 0; softc->valid_iddir_len = 0; bzero(&softc->ata_iddir, sizeof(softc->ata_iddir)); softc->flags &= ~(ADA_FLAG_CAN_SUPCAP | ADA_FLAG_CAN_ZONE); softc->valid_iddir_len = ataio->dxfer_len - ataio->resid; if (softc->valid_iddir_len > 0) bcopy(ataio->data_ptr, &softc->ata_iddir, min(softc->valid_iddir_len, sizeof(softc->ata_iddir))); entries_offset = __offsetof(struct ata_identify_log_pages,entries); max_entries = softc->valid_iddir_len - entries_offset; if ((softc->valid_iddir_len > (entries_offset + 1)) && (le64dec(softc->ata_iddir.header) == ATA_IDLOG_REVISION) && (softc->ata_iddir.entry_count > 0)) { int num_entries, i; num_entries = softc->ata_iddir.entry_count; num_entries = min(num_entries, softc->valid_iddir_len - entries_offset); for (i = 0; i < num_entries && i < max_entries; i++) { if (softc->ata_iddir.entries[i] == ATA_IDL_SUP_CAP) softc->flags |= ADA_FLAG_CAN_SUPCAP; else if (softc->ata_iddir.entries[i]== ATA_IDL_ZDI) softc->flags |= ADA_FLAG_CAN_ZONE; if ((softc->flags & ADA_FLAG_CAN_SUPCAP) && (softc->flags & ADA_FLAG_CAN_ZONE)) break; } } } else { error = adaerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA Identify Data log * directory, then it effectively isn't * supported even if the ATA Log directory * a non-zero number of pages present for * this log. 
*/ softc->flags &= ~ADA_FLAG_CAN_IDLOG; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(ataio->data_ptr, M_ATADA); if ((error == 0) && (softc->flags & ADA_FLAG_CAN_SUPCAP)) { softc->state = ADA_STATE_SUP_CAP; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); } else adaprobedone(periph, done_ccb); return; } case ADA_CCB_SUP_CAP: { int error; if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; size_t needed_size; struct ata_identify_log_sup_cap *sup_cap; error = 0; sup_cap = (struct ata_identify_log_sup_cap *) ataio->data_ptr; valid_len = ataio->dxfer_len - ataio->resid; needed_size = __offsetof(struct ata_identify_log_sup_cap, sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap); if (valid_len >= needed_size) { uint64_t zoned, zac_cap; zoned = le64dec(sup_cap->zoned_cap); if (zoned & ATA_ZONED_VALID) { /* * This should have already been * set, because this is also in the * ATA identify data. */ if ((zoned & ATA_ZONED_MASK) == ATA_SUPPORT_ZONE_HOST_AWARE) softc->zone_mode = ADA_ZONE_HOST_AWARE; else if ((zoned & ATA_ZONED_MASK) == ATA_SUPPORT_ZONE_DEV_MANAGED) softc->zone_mode = ADA_ZONE_DRIVE_MANAGED; } zac_cap = le64dec(sup_cap->sup_zac_cap); if (zac_cap & ATA_SUP_ZAC_CAP_VALID) { if (zac_cap & ATA_REPORT_ZONES_SUP) softc->zone_flags |= ADA_ZONE_FLAG_RZ_SUP; if (zac_cap & ATA_ND_OPEN_ZONE_SUP) softc->zone_flags |= ADA_ZONE_FLAG_OPEN_SUP; if (zac_cap & ATA_ND_CLOSE_ZONE_SUP) softc->zone_flags |= ADA_ZONE_FLAG_CLOSE_SUP; if (zac_cap & ATA_ND_FINISH_ZONE_SUP) softc->zone_flags |= ADA_ZONE_FLAG_FINISH_SUP; if (zac_cap & ATA_ND_RWP_SUP) softc->zone_flags |= ADA_ZONE_FLAG_RWP_SUP; } else { /* * This field was introduced in * ACS-4, r08 on April 28th, 2015. * If the drive firmware was written * to an earlier spec, it won't have * the field. So, assume all * commands are supported. */ softc->zone_flags |= ADA_ZONE_FLAG_SUP_MASK; } } } else { error = adaerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA Identify Data * Supported Capabilities page, clear the * flag... */ softc->flags &= ~ADA_FLAG_CAN_SUPCAP; /* * And clear zone capabilities. 
*/ softc->zone_flags &= ~ADA_ZONE_FLAG_SUP_MASK; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(ataio->data_ptr, M_ATADA); if ((error == 0) && (softc->flags & ADA_FLAG_CAN_ZONE)) { softc->state = ADA_STATE_ZONE; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); } else adaprobedone(periph, done_ccb); return; } case ADA_CCB_ZONE: { int error; if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { struct ata_zoned_info_log *zi_log; uint32_t valid_len; size_t needed_size; zi_log = (struct ata_zoned_info_log *)ataio->data_ptr; valid_len = ataio->dxfer_len - ataio->resid; needed_size = __offsetof(struct ata_zoned_info_log, version_info) + 1 + sizeof(zi_log->version_info); if (valid_len >= needed_size) { uint64_t tmpvar; tmpvar = le64dec(zi_log->zoned_cap); if (tmpvar & ATA_ZDI_CAP_VALID) { if (tmpvar & ATA_ZDI_CAP_URSWRZ) softc->zone_flags |= ADA_ZONE_FLAG_URSWRZ; else softc->zone_flags &= ~ADA_ZONE_FLAG_URSWRZ; } tmpvar = le64dec(zi_log->optimal_seq_zones); if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) { softc->zone_flags |= ADA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_seq_zones = (tmpvar & ATA_ZDI_OPT_SEQ_MASK); } else { softc->zone_flags &= ~ADA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_seq_zones = 0; } tmpvar =le64dec(zi_log->optimal_nonseq_zones); if (tmpvar & ATA_ZDI_OPT_NS_VALID) { softc->zone_flags |= ADA_ZONE_FLAG_OPT_NONSEQ_SET; softc->optimal_nonseq_zones = (tmpvar & ATA_ZDI_OPT_NS_MASK); } else { softc->zone_flags &= ~ADA_ZONE_FLAG_OPT_NONSEQ_SET; softc->optimal_nonseq_zones = 0; } tmpvar = le64dec(zi_log->max_seq_req_zones); if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) { softc->zone_flags |= ADA_ZONE_FLAG_MAX_SEQ_SET; softc->max_seq_zones = (tmpvar & ATA_ZDI_MAX_SEQ_MASK); } else { softc->zone_flags &= ~ADA_ZONE_FLAG_MAX_SEQ_SET; softc->max_seq_zones = 0; } } } else { error = adaerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { softc->flags &= ~ADA_FLAG_CAN_ZONE; softc->flags &= ~ADA_ZONE_FLAG_SET_MASK; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(ataio->data_ptr, M_ATADA); adaprobedone(periph, done_ccb); return; } case ADA_CCB_DUMP: /* No-op. 
We're polling */ return; default: break; } xpt_release_ccb(done_ccb); } static int adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { #ifdef CAM_IO_STATS struct ada_softc *softc; struct cam_periph *periph; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct ada_softc *)periph->softc; switch (ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_CMD_TIMEOUT: softc->timeouts++; break; case CAM_REQ_ABORTED: case CAM_REQ_CMP_ERR: case CAM_REQ_TERMIO: case CAM_UNREC_HBA_ERROR: case CAM_DATA_RUN_ERR: case CAM_ATA_STATUS_ERROR: softc->errors++; break; default: break; } #endif return(cam_periph_error(ccb, cam_flags, sense_flags)); } static void adasetgeom(struct ada_softc *softc, struct ccb_getdev *cgd) { struct disk_params *dp = &softc->params; u_int64_t lbasize48; u_int32_t lbasize; u_int maxio, d_flags; dp->secsize = ata_logical_sector_size(&cgd->ident_data); if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) && cgd->ident_data.current_heads != 0 && cgd->ident_data.current_sectors != 0) { dp->heads = cgd->ident_data.current_heads; dp->secs_per_track = cgd->ident_data.current_sectors; dp->cylinders = cgd->ident_data.cylinders; dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 | ((u_int32_t)cgd->ident_data.current_size_2 << 16); } else { dp->heads = cgd->ident_data.heads; dp->secs_per_track = cgd->ident_data.sectors; dp->cylinders = cgd->ident_data.cylinders; dp->sectors = cgd->ident_data.cylinders * (u_int32_t)(dp->heads * dp->secs_per_track); } lbasize = (u_int32_t)cgd->ident_data.lba_size_1 | ((u_int32_t)cgd->ident_data.lba_size_2 << 16); /* use the 28bit LBA size if valid or bigger than the CHS mapping */ if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize) dp->sectors = lbasize; /* use the 48bit LBA size if valid */ lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) | ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) | ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) | ((u_int64_t)cgd->ident_data.lba_size48_4 << 48); if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) && lbasize48 > ATA_MAX_28BIT_LBA) dp->sectors = lbasize48; maxio = softc->cpi.maxio; /* Honor max I/O size of SIM */ if (maxio == 0) maxio = DFLTPHYS; /* traditional default */ else if (maxio > MAXPHYS) maxio = MAXPHYS; /* for safety */ if (softc->flags & ADA_FLAG_CAN_48BIT) maxio = min(maxio, 65536 * softc->params.secsize); else /* 28bit ATA command limit */ maxio = min(maxio, 256 * softc->params.secsize); if (softc->quirks & ADA_Q_128KB) maxio = min(maxio, 128 * 1024); softc->disk->d_maxsize = maxio; d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE; if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) d_flags |= DISKFLAG_CANFLUSHCACHE; if (softc->flags & ADA_FLAG_CAN_TRIM) { d_flags |= DISKFLAG_CANDELETE; softc->disk->d_delmaxsize = softc->params.secsize * ATA_DSM_RANGE_MAX * softc->trim_max_ranges; } else if ((softc->flags & ADA_FLAG_CAN_CFA) && !(softc->flags & ADA_FLAG_CAN_48BIT)) { d_flags |= DISKFLAG_CANDELETE; softc->disk->d_delmaxsize = 256 * softc->params.secsize; } else softc->disk->d_delmaxsize = maxio; if ((softc->cpi.hba_misc & PIM_UNMAPPED) != 0) { d_flags |= DISKFLAG_UNMAPPED_BIO; softc->unmappedio = 1; } softc->disk->d_flags = d_flags; strlcpy(softc->disk->d_descr, cgd->ident_data.model, MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model))); strlcpy(softc->disk->d_ident, cgd->ident_data.serial, MIN(sizeof(softc->disk->d_ident), sizeof(cgd->ident_data.serial))); softc->disk->d_sectorsize = softc->params.secsize; softc->disk->d_mediasize = 
(off_t)softc->params.sectors * softc->params.secsize; if (ata_physical_sector_size(&cgd->ident_data) != softc->params.secsize) { softc->disk->d_stripesize = ata_physical_sector_size(&cgd->ident_data); softc->disk->d_stripeoffset = (softc->disk->d_stripesize - ata_logical_sector_offset(&cgd->ident_data)) % softc->disk->d_stripesize; } else if (softc->quirks & ADA_Q_4K) { softc->disk->d_stripesize = 4096; softc->disk->d_stripeoffset = 0; } softc->disk->d_fwsectors = softc->params.secs_per_track; softc->disk->d_fwheads = softc->params.heads; ata_disk_firmware_geom_adjust(softc->disk); softc->disk->d_rotation_rate = cgd->ident_data.media_rotation_rate; snprintf(softc->disk->d_attachment, sizeof(softc->disk->d_attachment), "%s%d", softc->cpi.dev_name, softc->cpi.unit_number); } static void adasendorderedtag(void *arg) { struct ada_softc *softc = arg; if (ada_send_ordered) { if (softc->outstanding_cmds > 0) { if ((softc->flags & ADA_FLAG_WAS_OTAG) == 0) softc->flags |= ADA_FLAG_NEED_OTAG; softc->flags &= ~ADA_FLAG_WAS_OTAG; } } /* Queue us up again */ callout_reset(&softc->sendordered_c, (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL, adasendorderedtag, softc); } /* * Step through all ADA peripheral drivers, and if the device is still open, * sync the disk cache to physical media. */ static void adaflush(void) { struct cam_periph *periph; struct ada_softc *softc; union ccb *ccb; int error; CAM_PERIPH_FOREACH(periph, &adadriver) { softc = (struct ada_softc *)periph->softc; if (SCHEDULER_STOPPED()) { /* If we paniced with the lock held, do not recurse. */ if (!cam_periph_owned(periph) && (softc->flags & ADA_FLAG_OPEN)) { adadump(softc->disk, NULL, 0, 0, 0); } continue; } cam_periph_lock(periph); /* * We only sync the cache if the drive is still open, and * if the drive is capable of it.. */ if (((softc->flags & ADA_FLAG_OPEN) == 0) || (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) { cam_periph_unlock(periph); continue; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); cam_fill_ataio(&ccb->ataio, 0, NULL, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (softc->flags & ADA_FLAG_CAN_48BIT) ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0); else ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0); error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); xpt_release_ccb(ccb); cam_periph_unlock(periph); } } static void adaspindown(uint8_t cmd, int flags) { struct cam_periph *periph; struct ada_softc *softc; struct ccb_ataio local_ccb; int error; CAM_PERIPH_FOREACH(periph, &adadriver) { /* If we paniced with lock held - not recurse here. */ if (cam_periph_owned(periph)) continue; cam_periph_lock(periph); softc = (struct ada_softc *)periph->softc; /* * We only spin-down the drive if it is capable of it.. 
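	 * (ADA_FLAG_CAN_POWERMGT is set at attach time from the drive's
	 * IDENTIFY power management support bit, so drives that do not
	 * implement the feature set are simply skipped here.)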
*/ if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) { cam_periph_unlock(periph); continue; } if (bootverbose) xpt_print(periph->path, "spin-down\n"); memset(&local_ccb, 0, sizeof(local_ccb)); xpt_setup_ccb(&local_ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); local_ccb.ccb_h.ccb_state = ADA_CCB_DUMP; cam_fill_ataio(&local_ccb, 0, NULL, CAM_DIR_NONE | flags, 0, NULL, 0, ada_default_timeout*1000); ata_28bit_cmd(&local_ccb, cmd, 0, 0, 0); error = cam_periph_runccb((union ccb *)&local_ccb, adaerror, /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Spin-down disk failed\n"); cam_periph_unlock(periph); } } static void adashutdown(void *arg, int howto) { int how; adaflush(); /* * STANDBY IMMEDIATE saves any volatile data to the drive. It also spins * down hard drives. IDLE IMMEDIATE also saves the volatile data without * a spindown. We send the former when we expect to lose power soon. For * a warm boot, we send the latter to avoid a thundering herd of spinups * just after the kernel loads while probing. We have to do something to * flush the data because the BIOS in many systems resets the HBA * causing a COMINIT/COMRESET negotiation, which some drives interpret * as license to toss the volatile data, and others count as unclean * shutdown when in the Active PM state in SMART attributes. * * adaspindown will ensure that we don't send this to a drive that * doesn't support it. */ if (ada_spindown_shutdown != 0) { how = (howto & (RB_HALT | RB_POWEROFF | RB_POWERCYCLE)) ? ATA_STANDBY_IMMEDIATE : ATA_IDLE_IMMEDIATE; adaspindown(how, 0); } } static void adasuspend(void *arg) { adaflush(); /* * SLEEP also flushes any volatile data, like STANDBY IMMEDIATE, * so we don't need to send it as well. */ if (ada_spindown_suspend != 0) adaspindown(ATA_SLEEP, CAM_DEV_QFREEZE); } static void adaresume(void *arg) { struct cam_periph *periph; struct ada_softc *softc; if (ada_spindown_suspend == 0) return; CAM_PERIPH_FOREACH(periph, &adadriver) { cam_periph_lock(periph); softc = (struct ada_softc *)periph->softc; /* * We only spin-down the drive if it is capable of it. */ if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) { cam_periph_unlock(periph); continue; } if (bootverbose) xpt_print(periph->path, "resume\n"); /* * Drop freeze taken due to CAM_DEV_QFREEZE flag set on * sleep request. */ cam_release_devq(periph->path, /*relsim_flags*/0, /*openings*/0, /*timeout*/0, /*getcount_only*/0); cam_periph_unlock(periph); } } #endif /* _KERNEL */ Index: head/sys/cam/cam_xpt.c =================================================================== --- head/sys/cam/cam_xpt.c (revision 355600) +++ head/sys/cam/cam_xpt.c (revision 355601) @@ -1,5633 +1,5633 @@ /*- * Implementation of the Common Access Method Transport (XPT) layer. * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_printf.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* geometry translation */ #include /* for xpt_print below */ #include "opt_cam.h" /* Wild guess based on not wanting to grow the stack too much */ #define XPT_PRINT_MAXLEN 512 #ifdef PRINTF_BUFR_SIZE #define XPT_PRINT_LEN PRINTF_BUFR_SIZE #else #define XPT_PRINT_LEN 128 #endif _Static_assert(XPT_PRINT_LEN <= XPT_PRINT_MAXLEN, "XPT_PRINT_LEN is too large"); /* * This is the maximum number of high powered commands (e.g. start unit) * that can be outstanding at a particular time. */ #ifndef CAM_MAX_HIGHPOWER #define CAM_MAX_HIGHPOWER 4 #endif /* Datastructures internal to the xpt layer */ MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers"); MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices"); MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs"); MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths"); struct xpt_softc { uint32_t xpt_generation; /* number of high powered commands that can go through right now */ struct mtx xpt_highpower_lock; STAILQ_HEAD(highpowerlist, cam_ed) highpowerq; int num_highpower; /* queue for handling async rescan requests. */ TAILQ_HEAD(, ccb_hdr) ccb_scanq; int buses_to_config; int buses_config_done; int announce_nosbuf; /* * Registered buses * * N.B., "busses" is an archaic spelling of "buses". In new code * "buses" is preferred. 
*/ TAILQ_HEAD(,cam_eb) xpt_busses; u_int bus_generation; int boot_delay; struct callout boot_callout; struct task boot_task; struct root_hold_token xpt_rootmount; struct mtx xpt_topo_lock; struct taskqueue *xpt_taskq; }; typedef enum { DM_RET_COPY = 0x01, DM_RET_FLAG_MASK = 0x0f, DM_RET_NONE = 0x00, DM_RET_STOP = 0x10, DM_RET_DESCEND = 0x20, DM_RET_ERROR = 0x30, DM_RET_ACTION_MASK = 0xf0 } dev_match_ret; typedef enum { XPT_DEPTH_BUS, XPT_DEPTH_TARGET, XPT_DEPTH_DEVICE, XPT_DEPTH_PERIPH } xpt_traverse_depth; struct xpt_traverse_config { xpt_traverse_depth depth; void *tr_func; void *tr_arg; }; typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg); typedef int xpt_targetfunc_t (struct cam_et *target, void *arg); typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg); typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg); typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg); /* Transport layer configuration information */ static struct xpt_softc xsoftc; MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF); SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN, &xsoftc.boot_delay, 0, "Bus registration wait time"); SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD, &xsoftc.xpt_generation, 0, "CAM peripheral generation count"); SYSCTL_INT(_kern_cam, OID_AUTO, announce_nosbuf, CTLFLAG_RWTUN, &xsoftc.announce_nosbuf, 0, "Don't use sbuf for announcements"); struct cam_doneq { struct mtx_padalign cam_doneq_mtx; STAILQ_HEAD(, ccb_hdr) cam_doneq; int cam_doneq_sleep; }; static struct cam_doneq cam_doneqs[MAXCPU]; static int cam_num_doneqs; static struct proc *cam_proc; SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN, &cam_num_doneqs, 0, "Number of completion queues/threads"); struct cam_periph *xpt_periph; static periph_init_t xpt_periph_init; static struct periph_driver xpt_driver = { xpt_periph_init, "xpt", TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(xpt, xpt_driver); static d_open_t xptopen; static d_close_t xptclose; static d_ioctl_t xptioctl; static d_ioctl_t xptdoioctl; static struct cdevsw xpt_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = xptopen, .d_close = xptclose, .d_ioctl = xptioctl, .d_name = "xpt", }; /* Storage for debugging datastructures */ struct cam_path *cam_dpath; u_int32_t __read_mostly cam_dflags = CAM_DEBUG_FLAGS; SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN, &cam_dflags, 0, "Enabled debug flags"); u_int32_t cam_debug_delay = CAM_DEBUG_DELAY; SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN, &cam_debug_delay, 0, "Delay in us after each debug message"); /* Our boot-time initialization hook */ static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *); static moduledata_t cam_moduledata = { "cam", cam_module_event_handler, NULL }; static int xpt_init(void *); DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND); MODULE_VERSION(cam, 1); static void xpt_async_bcast(struct async_list *async_head, u_int32_t async_code, struct cam_path *path, void *async_arg); static path_id_t xptnextfreepathid(void); static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus); static union ccb *xpt_get_ccb(struct cam_periph *periph); static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph); static void xpt_run_allocq(struct cam_periph *periph, int sleep); static void xpt_run_allocq_task(void *context, int pending); static void xpt_run_devq(struct cam_devq 
*devq); -static timeout_t xpt_release_devq_timeout; +static callout_func_t xpt_release_devq_timeout; static void xpt_release_simq_timeout(void *arg) __unused; static void xpt_acquire_bus(struct cam_eb *bus); static void xpt_release_bus(struct cam_eb *bus); static uint32_t xpt_freeze_devq_device(struct cam_ed *dev, u_int count); static int xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue); static struct cam_et* xpt_alloc_target(struct cam_eb *bus, target_id_t target_id); static void xpt_acquire_target(struct cam_et *target); static void xpt_release_target(struct cam_et *target); static struct cam_eb* xpt_find_bus(path_id_t path_id); static struct cam_et* xpt_find_target(struct cam_eb *bus, target_id_t target_id); static struct cam_ed* xpt_find_device(struct cam_et *target, lun_id_t lun_id); static void xpt_config(void *arg); static void xpt_hold_boot_locked(void); static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo, u_int32_t new_priority); static xpt_devicefunc_t xptpassannouncefunc; static void xptaction(struct cam_sim *sim, union ccb *work_ccb); static void xptpoll(struct cam_sim *sim); static void camisr_runqueue(void); static void xpt_done_process(struct ccb_hdr *ccb_h); static void xpt_done_td(void *); static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_eb *bus); static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_ed *device); static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_periph *periph); static xpt_busfunc_t xptedtbusfunc; static xpt_targetfunc_t xptedttargetfunc; static xpt_devicefunc_t xptedtdevicefunc; static xpt_periphfunc_t xptedtperiphfunc; static xpt_pdrvfunc_t xptplistpdrvfunc; static xpt_periphfunc_t xptplistperiphfunc; static int xptedtmatch(struct ccb_dev_match *cdm); static int xptperiphlistmatch(struct ccb_dev_match *cdm); static int xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg); static int xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, xpt_targetfunc_t *tr_func, void *arg); static int xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, xpt_devicefunc_t *tr_func, void *arg); static int xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg); static int xptpdrvtraverse(struct periph_driver **start_pdrv, xpt_pdrvfunc_t *tr_func, void *arg); static int xptpdperiphtraverse(struct periph_driver **pdrv, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg); static xpt_busfunc_t xptdefbusfunc; static xpt_targetfunc_t xptdeftargetfunc; static xpt_devicefunc_t xptdefdevicefunc; static xpt_periphfunc_t xptdefperiphfunc; static void xpt_finishconfig_task(void *context, int pending); static void xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg); static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); static xpt_devicefunc_t xptsetasyncfunc; static xpt_busfunc_t xptsetasyncbusfunc; static cam_status xptregister(struct cam_periph *periph, void *arg); static __inline int device_is_queued(struct cam_ed *device); static __inline int xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev) { int retval; mtx_assert(&devq->send_mtx, MA_OWNED); if ((dev->ccbq.queue.entries > 0) && (dev->ccbq.dev_openings > 0) && 
(dev->ccbq.queue.qfrozen_cnt == 0)) { /* * The priority of a device waiting for controller * resources is that of the highest priority CCB * enqueued. */ retval = xpt_schedule_dev(&devq->send_queue, &dev->devq_entry, CAMQ_GET_PRIO(&dev->ccbq.queue)); } else { retval = 0; } return (retval); } static __inline int device_is_queued(struct cam_ed *device) { return (device->devq_entry.index != CAM_UNQUEUED_INDEX); } static void xpt_periph_init() { make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0"); } static int xptopen(struct cdev *dev, int flags, int fmt, struct thread *td) { /* * Only allow read-write access. */ if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) return(EPERM); /* * We don't allow nonblocking access. */ if ((flags & O_NONBLOCK) != 0) { printf("%s: can't do nonblocking access\n", devtoname(dev)); return(ENODEV); } return(0); } static int xptclose(struct cdev *dev, int flag, int fmt, struct thread *td) { return(0); } /* * Don't automatically grab the xpt softc lock here even though this is going * through the xpt device. The xpt device is really just a back door for * accessing other devices and SIMs, so the right thing to do is to grab * the appropriate SIM lock once the bus/SIM is located. */ static int xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { int error; if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) { error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl); } return (error); } static int xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { int error; error = 0; switch(cmd) { /* * For the transport layer CAMIOCOMMAND ioctl, we really only want * to accept CCB types that don't quite make sense to send through a * passthrough driver. XPT_PATH_INQ is an exception to this, as stated * in the CAM spec. */ case CAMIOCOMMAND: { union ccb *ccb; union ccb *inccb; struct cam_eb *bus; inccb = (union ccb *)addr; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (inccb->ccb_h.func_code == XPT_SCSI_IO) inccb->csio.bio = NULL; #endif if (inccb->ccb_h.flags & CAM_UNLOCKED) return (EINVAL); bus = xpt_find_bus(inccb->ccb_h.path_id); if (bus == NULL) return (EINVAL); switch (inccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_RESET_BUS: if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD || inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { xpt_release_bus(bus); return (EINVAL); } break; case XPT_SCAN_TGT: if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD || inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { xpt_release_bus(bus); return (EINVAL); } break; default: break; } switch(inccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_RESET_BUS: case XPT_PATH_INQ: case XPT_ENG_INQ: case XPT_SCAN_LUN: case XPT_SCAN_TGT: ccb = xpt_alloc_ccb(); /* * Create a path using the bus, target, and lun the * user passed in. */ if (xpt_create_path(&ccb->ccb_h.path, NULL, inccb->ccb_h.path_id, inccb->ccb_h.target_id, inccb->ccb_h.target_lun) != CAM_REQ_CMP){ error = EINVAL; xpt_free_ccb(ccb); break; } /* Ensure all of our fields are correct */ xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, inccb->ccb_h.pinfo.priority); xpt_merge_ccb(ccb, inccb); xpt_path_lock(ccb->ccb_h.path); cam_periph_runccb(ccb, NULL, 0, 0, NULL); xpt_path_unlock(ccb->ccb_h.path); bcopy(ccb, inccb, sizeof(union ccb)); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); break; case XPT_DEBUG: { union ccb ccb; /* * This is an immediate CCB, so it's okay to * allocate it on the stack. 
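	 * Immediate CCBs like XPT_DEBUG are completed synchronously inside
	 * xpt_action(), so the storage only needs to live for the duration
	 * of the call and does not have to come from the CCB allocator.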
*/ /* * Create a path using the bus, target, and lun the * user passed in. */ if (xpt_create_path(&ccb.ccb_h.path, NULL, inccb->ccb_h.path_id, inccb->ccb_h.target_id, inccb->ccb_h.target_lun) != CAM_REQ_CMP){ error = EINVAL; break; } /* Ensure all of our fields are correct */ xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, inccb->ccb_h.pinfo.priority); xpt_merge_ccb(&ccb, inccb); xpt_action(&ccb); bcopy(&ccb, inccb, sizeof(union ccb)); xpt_free_path(ccb.ccb_h.path); break; } case XPT_DEV_MATCH: { struct cam_periph_map_info mapinfo; struct cam_path *old_path; /* * We can't deal with physical addresses for this * type of transaction. */ if ((inccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) { error = EINVAL; break; } /* * Save this in case the caller had it set to * something in particular. */ old_path = inccb->ccb_h.path; /* * We really don't need a path for the matching * code. The path is needed because of the * debugging statements in xpt_action(). They * assume that the CCB has a valid path. */ inccb->ccb_h.path = xpt_periph->path; bzero(&mapinfo, sizeof(mapinfo)); /* * Map the pattern and match buffers into kernel * virtual address space. */ error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS); if (error) { inccb->ccb_h.path = old_path; break; } /* * This is an immediate CCB, we can send it on directly. */ xpt_action(inccb); /* * Map the buffers back into user space. */ cam_periph_unmapmem(inccb, &mapinfo); inccb->ccb_h.path = old_path; error = 0; break; } default: error = ENOTSUP; break; } xpt_release_bus(bus); break; } /* * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input, * with the periphal driver name and unit name filled in. The other * fields don't really matter as input. The passthrough driver name * ("pass"), and unit number are passed back in the ccb. The current * device generation number, and the index into the device peripheral * driver list, and the status are also passed back. Note that * since we do everything in one pass, unlike the XPT_GDEVLIST ccb, * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is * (or rather should be) impossible for the device peripheral driver * list to change since we look at the whole thing in one pass, and * we do it with lock protection. * */ case CAMGETPASSTHRU: { union ccb *ccb; struct cam_periph *periph; struct periph_driver **p_drv; char *name; u_int unit; int base_periph_found; ccb = (union ccb *)addr; unit = ccb->cgdl.unit_number; name = ccb->cgdl.periph_name; base_periph_found = 0; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (ccb->ccb_h.func_code == XPT_SCSI_IO) ccb->csio.bio = NULL; #endif /* * Sanity check -- make sure we don't get a null peripheral * driver name. */ if (*ccb->cgdl.periph_name == '\0') { error = EINVAL; break; } /* Keep the list from changing while we traverse it */ xpt_lock_buses(); /* first find our driver in the list of drivers */ for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) if (strcmp((*p_drv)->driver_name, name) == 0) break; if (*p_drv == NULL) { xpt_unlock_buses(); ccb->ccb_h.status = CAM_REQ_CMP_ERR; ccb->cgdl.status = CAM_GDEVLIST_ERROR; *ccb->cgdl.periph_name = '\0'; ccb->cgdl.unit_number = 0; error = ENOENT; break; } /* * Run through every peripheral instance of this driver * and check to see whether it matches the unit passed * in by the user. If it does, get out of the loops and * find the passthrough driver associated with that * peripheral driver. 
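	 * The lookup is therefore done in two passes: first find the periph
	 * instance with the matching unit number on the named driver's unit
	 * list, then walk the periph instances attached to that same device
	 * looking for one named "pass".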
*/ for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL; periph = TAILQ_NEXT(periph, unit_links)) { if (periph->unit_number == unit) break; } /* * If we found the peripheral driver that the user passed * in, go through all of the peripheral drivers for that * particular device and look for a passthrough driver. */ if (periph != NULL) { struct cam_ed *device; int i; base_periph_found = 1; device = periph->path->device; for (i = 0, periph = SLIST_FIRST(&device->periphs); periph != NULL; periph = SLIST_NEXT(periph, periph_links), i++) { /* * Check to see whether we have a * passthrough device or not. */ if (strcmp(periph->periph_name, "pass") == 0) { /* * Fill in the getdevlist fields. */ strlcpy(ccb->cgdl.periph_name, periph->periph_name, sizeof(ccb->cgdl.periph_name)); ccb->cgdl.unit_number = periph->unit_number; if (SLIST_NEXT(periph, periph_links)) ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS; else ccb->cgdl.status = CAM_GDEVLIST_LAST_DEVICE; ccb->cgdl.generation = device->generation; ccb->cgdl.index = i; /* * Fill in some CCB header fields * that the user may want. */ ccb->ccb_h.path_id = periph->path->bus->path_id; ccb->ccb_h.target_id = periph->path->target->target_id; ccb->ccb_h.target_lun = periph->path->device->lun_id; ccb->ccb_h.status = CAM_REQ_CMP; break; } } } /* * If the periph is null here, one of two things has * happened. The first possibility is that we couldn't * find the unit number of the particular peripheral driver * that the user is asking about. e.g. the user asks for * the passthrough driver for "da11". We find the list of * "da" peripherals all right, but there is no unit 11. * The other possibility is that we went through the list * of peripheral drivers attached to the device structure, * but didn't find one with the name "pass". Either way, * we return ENOENT, since we couldn't find something. */ if (periph == NULL) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; ccb->cgdl.status = CAM_GDEVLIST_ERROR; *ccb->cgdl.periph_name = '\0'; ccb->cgdl.unit_number = 0; error = ENOENT; /* * It is unfortunate that this is even necessary, * but there are many, many clueless users out there. * If this is true, the user is looking for the * passthrough driver, but doesn't have one in his * kernel. 
*/ if (base_periph_found == 1) { printf("xptioctl: pass driver is not in the " "kernel\n"); printf("xptioctl: put \"device pass\" in " "your kernel config file\n"); } } xpt_unlock_buses(); break; } default: error = ENOTTY; break; } return(error); } static int cam_module_event_handler(module_t mod, int what, void *arg) { int error; switch (what) { case MOD_LOAD: if ((error = xpt_init(NULL)) != 0) return (error); break; case MOD_UNLOAD: return EBUSY; default: return EOPNOTSUPP; } return 0; } static struct xpt_proto * xpt_proto_find(cam_proto proto) { struct xpt_proto **pp; SET_FOREACH(pp, cam_xpt_proto_set) { if ((*pp)->proto == proto) return *pp; } return NULL; } static void xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb) { if (done_ccb->ccb_h.ppriv_ptr1 == NULL) { xpt_free_path(done_ccb->ccb_h.path); xpt_free_ccb(done_ccb); } else { done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1; (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb); } xpt_release_boot(); } /* thread to handle bus rescans */ static void xpt_scanner_thread(void *dummy) { union ccb *ccb; struct cam_path path; xpt_lock_buses(); for (;;) { if (TAILQ_EMPTY(&xsoftc.ccb_scanq)) msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO, "-", 0); if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) { TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); xpt_unlock_buses(); /* * Since lock can be dropped inside and path freed * by completion callback even before return here, * take our own path copy for reference. */ xpt_copy_path(&path, ccb->ccb_h.path); xpt_path_lock(&path); xpt_action(ccb); xpt_path_unlock(&path); xpt_release_path(&path); xpt_lock_buses(); } } } void xpt_rescan(union ccb *ccb) { struct ccb_hdr *hdr; /* Prepare request */ if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD && ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_BUS; else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_TGT; else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_LUN; else { xpt_print(ccb->ccb_h.path, "illegal scan path\n"); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code, xpt_action_name(ccb->ccb_h.func_code))); ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp; ccb->ccb_h.cbfcnp = xpt_rescan_done; xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT); /* Don't make duplicate entries for the same paths. 
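	 * When the caller did not supply a completion callback (ppriv_ptr1
	 * is NULL), a request whose path matches one already on ccb_scanq
	 * is dropped: its path and CCB are freed and the scanner thread is
	 * simply woken to service the entry that is already queued.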
*/ xpt_lock_buses(); if (ccb->ccb_h.ppriv_ptr1 == NULL) { TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) { if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) { wakeup(&xsoftc.ccb_scanq); xpt_unlock_buses(); xpt_print(ccb->ccb_h.path, "rescan already queued\n"); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } } } TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); xpt_hold_boot_locked(); wakeup(&xsoftc.ccb_scanq); xpt_unlock_buses(); } /* Functions accessed by the peripheral drivers */ static int xpt_init(void *dummy) { struct cam_sim *xpt_sim; struct cam_path *path; struct cam_devq *devq; cam_status status; int error, i; TAILQ_INIT(&xsoftc.xpt_busses); TAILQ_INIT(&xsoftc.ccb_scanq); STAILQ_INIT(&xsoftc.highpowerq); xsoftc.num_highpower = CAM_MAX_HIGHPOWER; mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF); xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK, taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq); #ifdef CAM_BOOT_DELAY /* * Override this value at compile time to assist our users * who don't use loader to boot a kernel. */ xsoftc.boot_delay = CAM_BOOT_DELAY; #endif /* * The xpt layer is, itself, the equivalent of a SIM. * Allow 16 ccbs in the ccb pool for it. This should * give decent parallelism when we probe buses and * perform other XPT functions. */ devq = cam_simq_alloc(16); xpt_sim = cam_sim_alloc(xptaction, xptpoll, "xpt", /*softc*/NULL, /*unit*/0, /*mtx*/NULL, /*max_dev_transactions*/0, /*max_tagged_dev_transactions*/0, devq); if (xpt_sim == NULL) return (ENOMEM); if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) { printf("xpt_init: xpt_bus_register failed with status %#x," " failing attach\n", status); return (EINVAL); } /* * Looking at the XPT from the SIM layer, the XPT is * the equivalent of a peripheral driver. Allocate * a peripheral driver entry for us. */ if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)) != CAM_REQ_CMP) { printf("xpt_init: xpt_create_path failed with status %#x," " failing attach\n", status); return (EINVAL); } xpt_path_lock(path); cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO, path, NULL, 0, xpt_sim); xpt_path_unlock(path); xpt_free_path(path); if (cam_num_doneqs < 1) cam_num_doneqs = 1 + mp_ncpus / 6; else if (cam_num_doneqs > MAXCPU) cam_num_doneqs = MAXCPU; for (i = 0; i < cam_num_doneqs; i++) { mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL, MTX_DEF); STAILQ_INIT(&cam_doneqs[i].cam_doneq); error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i], &cam_proc, NULL, 0, 0, "cam", "doneq%d", i); if (error != 0) { cam_num_doneqs = i; break; } } if (cam_num_doneqs < 1) { printf("xpt_init: Cannot init completion queues " "- failing attach\n"); return (ENOMEM); } /* * Register a callback for when interrupts are enabled. 
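	 * config_intrhook_oneshot() runs xpt_config() once interrupts are
	 * available and then removes the hook automatically; bus probing
	 * relies on interrupt-driven command completion, so it is deferred
	 * until then.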
*/ config_intrhook_oneshot(xpt_config, NULL); return (0); } static cam_status xptregister(struct cam_periph *periph, void *arg) { struct cam_sim *xpt_sim; if (periph == NULL) { printf("xptregister: periph was NULL!!\n"); return(CAM_REQ_CMP_ERR); } xpt_sim = (struct cam_sim *)arg; xpt_sim->softc = periph; xpt_periph = periph; periph->softc = NULL; return(CAM_REQ_CMP); } int32_t xpt_add_periph(struct cam_periph *periph) { struct cam_ed *device; int32_t status; TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph); device = periph->path->device; status = CAM_REQ_CMP; if (device != NULL) { mtx_lock(&device->target->bus->eb_mtx); device->generation++; SLIST_INSERT_HEAD(&device->periphs, periph, periph_links); mtx_unlock(&device->target->bus->eb_mtx); atomic_add_32(&xsoftc.xpt_generation, 1); } return (status); } void xpt_remove_periph(struct cam_periph *periph) { struct cam_ed *device; device = periph->path->device; if (device != NULL) { mtx_lock(&device->target->bus->eb_mtx); device->generation++; SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links); mtx_unlock(&device->target->bus->eb_mtx); atomic_add_32(&xsoftc.xpt_generation, 1); } } void xpt_announce_periph(struct cam_periph *periph, char *announce_string) { struct cam_path *path = periph->path; struct xpt_proto *proto; cam_periph_assert(periph, MA_OWNED); periph->flags |= CAM_PERIPH_ANNOUNCED; printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n", periph->periph_name, periph->unit_number, path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id, path->bus->path_id, path->target->target_id, (uintmax_t)path->device->lun_id); printf("%s%d: ", periph->periph_name, periph->unit_number); proto = xpt_proto_find(path->device->protocol); if (proto) proto->ops->announce(path->device); else printf("%s%d: Unknown protocol device %d\n", periph->periph_name, periph->unit_number, path->device->protocol); if (path->device->serial_num_len > 0) { /* Don't wrap the screen - print only the first 60 chars */ printf("%s%d: Serial Number %.60s\n", periph->periph_name, periph->unit_number, path->device->serial_num); } /* Announce transport details. */ path->bus->xport->ops->announce(periph); /* Announce command queueing. */ if (path->device->inq_flags & SID_CmdQue || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { printf("%s%d: Command Queueing enabled\n", periph->periph_name, periph->unit_number); } /* Announce caller's details if they've passed in. 
*/ if (announce_string != NULL) printf("%s%d: %s\n", periph->periph_name, periph->unit_number, announce_string); } void xpt_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb, char *announce_string) { struct cam_path *path = periph->path; struct xpt_proto *proto; cam_periph_assert(periph, MA_OWNED); periph->flags |= CAM_PERIPH_ANNOUNCED; /* Fall back to the non-sbuf method if necessary */ if (xsoftc.announce_nosbuf != 0) { xpt_announce_periph(periph, announce_string); return; } proto = xpt_proto_find(path->device->protocol); if (((proto != NULL) && (proto->ops->announce_sbuf == NULL)) || (path->bus->xport->ops->announce_sbuf == NULL)) { xpt_announce_periph(periph, announce_string); return; } sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n", periph->periph_name, periph->unit_number, path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id, path->bus->path_id, path->target->target_id, (uintmax_t)path->device->lun_id); sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number); if (proto) proto->ops->announce_sbuf(path->device, sb); else sbuf_printf(sb, "%s%d: Unknown protocol device %d\n", periph->periph_name, periph->unit_number, path->device->protocol); if (path->device->serial_num_len > 0) { /* Don't wrap the screen - print only the first 60 chars */ sbuf_printf(sb, "%s%d: Serial Number %.60s\n", periph->periph_name, periph->unit_number, path->device->serial_num); } /* Announce transport details. */ path->bus->xport->ops->announce_sbuf(periph, sb); /* Announce command queueing. */ if (path->device->inq_flags & SID_CmdQue || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { sbuf_printf(sb, "%s%d: Command Queueing enabled\n", periph->periph_name, periph->unit_number); } /* Announce caller's details if they've passed in. 
*/ if (announce_string != NULL) sbuf_printf(sb, "%s%d: %s\n", periph->periph_name, periph->unit_number, announce_string); } void xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string) { if (quirks != 0) { printf("%s%d: quirks=0x%b\n", periph->periph_name, periph->unit_number, quirks, bit_string); } } void xpt_announce_quirks_sbuf(struct cam_periph *periph, struct sbuf *sb, int quirks, char *bit_string) { if (xsoftc.announce_nosbuf != 0) { xpt_announce_quirks(periph, quirks, bit_string); return; } if (quirks != 0) { sbuf_printf(sb, "%s%d: quirks=0x%b\n", periph->periph_name, periph->unit_number, quirks, bit_string); } } void xpt_denounce_periph(struct cam_periph *periph) { struct cam_path *path = periph->path; struct xpt_proto *proto; cam_periph_assert(periph, MA_OWNED); printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n", periph->periph_name, periph->unit_number, path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id, path->bus->path_id, path->target->target_id, (uintmax_t)path->device->lun_id); printf("%s%d: ", periph->periph_name, periph->unit_number); proto = xpt_proto_find(path->device->protocol); if (proto) proto->ops->denounce(path->device); else printf("%s%d: Unknown protocol device %d\n", periph->periph_name, periph->unit_number, path->device->protocol); if (path->device->serial_num_len > 0) printf(" s/n %.60s", path->device->serial_num); printf(" detached\n"); } void xpt_denounce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb) { struct cam_path *path = periph->path; struct xpt_proto *proto; cam_periph_assert(periph, MA_OWNED); /* Fall back to the non-sbuf method if necessary */ if (xsoftc.announce_nosbuf != 0) { xpt_denounce_periph(periph); return; } proto = xpt_proto_find(path->device->protocol); if ((proto != NULL) && (proto->ops->denounce_sbuf == NULL)) { xpt_denounce_periph(periph); return; } sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n", periph->periph_name, periph->unit_number, path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id, path->bus->path_id, path->target->target_id, (uintmax_t)path->device->lun_id); sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number); if (proto) proto->ops->denounce_sbuf(path->device, sb); else sbuf_printf(sb, "%s%d: Unknown protocol device %d\n", periph->periph_name, periph->unit_number, path->device->protocol); if (path->device->serial_num_len > 0) sbuf_printf(sb, " s/n %.60s", path->device->serial_num); sbuf_printf(sb, " detached\n"); } int xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path) { int ret = -1, l, o; struct ccb_dev_advinfo cdai; struct scsi_vpd_device_id *did; struct scsi_vpd_id_descriptor *idd; xpt_path_assert(path, MA_OWNED); memset(&cdai, 0, sizeof(cdai)); xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.flags = CDAI_FLAG_NONE; cdai.bufsiz = len; cdai.buf = buf; if (!strcmp(attr, "GEOM::ident")) cdai.buftype = CDAI_TYPE_SERIAL_NUM; else if (!strcmp(attr, "GEOM::physpath")) cdai.buftype = CDAI_TYPE_PHYS_PATH; else if (strcmp(attr, "GEOM::lunid") == 0 || strcmp(attr, "GEOM::lunname") == 0) { cdai.buftype = CDAI_TYPE_SCSI_DEVID; cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN; cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT); if (cdai.buf == NULL) { ret = ENOMEM; goto out; } } else goto out; xpt_action((union ccb *)&cdai); /* can only be synchronous */ if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if 
(cdai.provsiz == 0) goto out; switch(cdai.buftype) { case CDAI_TYPE_SCSI_DEVID: did = (struct scsi_vpd_device_id *)cdai.buf; if (strcmp(attr, "GEOM::lunid") == 0) { idd = scsi_get_devid(did, cdai.provsiz, scsi_devid_is_lun_naa); if (idd == NULL) idd = scsi_get_devid(did, cdai.provsiz, scsi_devid_is_lun_eui64); if (idd == NULL) idd = scsi_get_devid(did, cdai.provsiz, scsi_devid_is_lun_uuid); if (idd == NULL) idd = scsi_get_devid(did, cdai.provsiz, scsi_devid_is_lun_md5); } else idd = NULL; if (idd == NULL) idd = scsi_get_devid(did, cdai.provsiz, scsi_devid_is_lun_t10); if (idd == NULL) idd = scsi_get_devid(did, cdai.provsiz, scsi_devid_is_lun_name); if (idd == NULL) break; ret = 0; if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) { if (idd->length < len) { for (l = 0; l < idd->length; l++) buf[l] = idd->identifier[l] ? idd->identifier[l] : ' '; buf[l] = 0; } else ret = EFAULT; break; } if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) { l = strnlen(idd->identifier, idd->length); if (l < len) { bcopy(idd->identifier, buf, l); buf[l] = 0; } else ret = EFAULT; break; } if ((idd->id_type & SVPD_ID_TYPE_MASK) == SVPD_ID_TYPE_UUID && idd->identifier[0] == 0x10) { if ((idd->length - 2) * 2 + 4 >= len) { ret = EFAULT; break; } for (l = 2, o = 0; l < idd->length; l++) { if (l == 6 || l == 8 || l == 10 || l == 12) o += sprintf(buf + o, "-"); o += sprintf(buf + o, "%02x", idd->identifier[l]); } break; } if (idd->length * 2 < len) { for (l = 0; l < idd->length; l++) sprintf(buf + l * 2, "%02x", idd->identifier[l]); } else ret = EFAULT; break; default: if (cdai.provsiz < len) { cdai.buf[cdai.provsiz] = 0; ret = 0; } else ret = EFAULT; break; } out: if ((char *)cdai.buf != buf) free(cdai.buf, M_CAMXPT); return ret; } static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_eb *bus) { dev_match_ret retval; u_int i; retval = DM_RET_NONE; /* * If we aren't given something to match against, that's an error. */ if (bus == NULL) return(DM_RET_ERROR); /* * If there are no match entries, then this bus matches no * matter what. */ if ((patterns == NULL) || (num_patterns == 0)) return(DM_RET_DESCEND | DM_RET_COPY); for (i = 0; i < num_patterns; i++) { struct bus_match_pattern *cur_pattern; /* * If the pattern in question isn't for a bus node, we * aren't interested. However, we do indicate to the * calling routine that we should continue descending the * tree, since the user wants to match against lower-level * EDT elements. */ if (patterns[i].type != DEV_MATCH_BUS) { if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_DESCEND; continue; } cur_pattern = &patterns[i].pattern.bus_pattern; /* * If they want to match any bus node, we give them any * device node. */ if (cur_pattern->flags == BUS_MATCH_ANY) { /* set the copy flag */ retval |= DM_RET_COPY; /* * If we've already decided on an action, go ahead * and return. */ if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE) return(retval); } /* * Not sure why someone would do this... 
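 * A BUS_MATCH_NONE pattern can match nothing, so it is simply skipped
 * below instead of being treated as an error.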
*/ if (cur_pattern->flags == BUS_MATCH_NONE) continue; if (((cur_pattern->flags & BUS_MATCH_PATH) != 0) && (cur_pattern->path_id != bus->path_id)) continue; if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0) && (cur_pattern->bus_id != bus->sim->bus_id)) continue; if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0) && (cur_pattern->unit_number != bus->sim->unit_number)) continue; if (((cur_pattern->flags & BUS_MATCH_NAME) != 0) && (strncmp(cur_pattern->dev_name, bus->sim->sim_name, DEV_IDLEN) != 0)) continue; /* * If we get to this point, the user definitely wants * information on this bus. So tell the caller to copy the * data out. */ retval |= DM_RET_COPY; /* * If the return action has been set to descend, then we * know that we've already seen a non-bus matching * expression, therefore we need to further descend the tree. * This won't change by continuing around the loop, so we * go ahead and return. If we haven't seen a non-bus * matching expression, we keep going around the loop until * we exhaust the matching expressions. We'll set the stop * flag once we fall out of the loop. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) return(retval); } /* * If the return action hasn't been set to descend yet, that means * we haven't seen anything other than bus matching patterns. So * tell the caller to stop descending the tree -- the user doesn't * want to match against lower level tree elements. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_STOP; return(retval); } static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_ed *device) { dev_match_ret retval; u_int i; retval = DM_RET_NONE; /* * If we aren't given something to match against, that's an error. */ if (device == NULL) return(DM_RET_ERROR); /* * If there are no match entries, then this device matches no * matter what. */ if ((patterns == NULL) || (num_patterns == 0)) return(DM_RET_DESCEND | DM_RET_COPY); for (i = 0; i < num_patterns; i++) { struct device_match_pattern *cur_pattern; struct scsi_vpd_device_id *device_id_page; /* * If the pattern in question isn't for a device node, we * aren't interested. */ if (patterns[i].type != DEV_MATCH_DEVICE) { if ((patterns[i].type == DEV_MATCH_PERIPH) && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)) retval |= DM_RET_DESCEND; continue; } cur_pattern = &patterns[i].pattern.device_pattern; /* Error out if mutually exclusive options are specified. */ if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) return(DM_RET_ERROR); /* * If they want to match any device node, we give them any * device node. */ if (cur_pattern->flags == DEV_MATCH_ANY) goto copy_dev_node; /* * Not sure why someone would do this... 
*/ if (cur_pattern->flags == DEV_MATCH_NONE) continue; if (((cur_pattern->flags & DEV_MATCH_PATH) != 0) && (cur_pattern->path_id != device->target->bus->path_id)) continue; if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0) && (cur_pattern->target_id != device->target->target_id)) continue; if (((cur_pattern->flags & DEV_MATCH_LUN) != 0) && (cur_pattern->target_lun != device->lun_id)) continue; if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0) && (cam_quirkmatch((caddr_t)&device->inq_data, (caddr_t)&cur_pattern->data.inq_pat, 1, sizeof(cur_pattern->data.inq_pat), scsi_static_inquiry_match) == NULL)) continue; device_id_page = (struct scsi_vpd_device_id *)device->device_id; if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0) && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN || scsi_devid_match((uint8_t *)device_id_page->desc_list, device->device_id_len - SVPD_DEVICE_ID_HDR_LEN, cur_pattern->data.devid_pat.id, cur_pattern->data.devid_pat.id_len) != 0)) continue; copy_dev_node: /* * If we get to this point, the user definitely wants * information on this device. So tell the caller to copy * the data out. */ retval |= DM_RET_COPY; /* * If the return action has been set to descend, then we * know that we've already seen a peripheral matching * expression, therefore we need to further descend the tree. * This won't change by continuing around the loop, so we * go ahead and return. If we haven't seen a peripheral * matching expression, we keep going around the loop until * we exhaust the matching expressions. We'll set the stop * flag once we fall out of the loop. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) return(retval); } /* * If the return action hasn't been set to descend yet, that means * we haven't seen any peripheral matching patterns. So tell the * caller to stop descending the tree -- the user doesn't want to * match against lower level tree elements. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_STOP; return(retval); } /* * Match a single peripheral against any number of match patterns. */ static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_periph *periph) { dev_match_ret retval; u_int i; /* * If we aren't given something to match against, that's an error. */ if (periph == NULL) return(DM_RET_ERROR); /* * If there are no match entries, then this peripheral matches no * matter what. */ if ((patterns == NULL) || (num_patterns == 0)) return(DM_RET_STOP | DM_RET_COPY); /* * There aren't any nodes below a peripheral node, so there's no * reason to descend the tree any further. */ retval = DM_RET_STOP; for (i = 0; i < num_patterns; i++) { struct periph_match_pattern *cur_pattern; /* * If the pattern in question isn't for a peripheral, we * aren't interested. */ if (patterns[i].type != DEV_MATCH_PERIPH) continue; cur_pattern = &patterns[i].pattern.periph_pattern; /* * If they want to match on anything, then we will do so. */ if (cur_pattern->flags == PERIPH_MATCH_ANY) { /* set the copy flag */ retval |= DM_RET_COPY; /* * We've already set the return action to stop, * since there are no nodes below peripherals in * the tree. */ return(retval); } /* * Not sure why someone would do this... */ if (cur_pattern->flags == PERIPH_MATCH_NONE) continue; if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0) && (cur_pattern->path_id != periph->path->bus->path_id)) continue; /* * For the target and lun id's, we have to make sure the * target and lun pointers aren't NULL. 
The xpt peripheral * has a wildcard target and device. */ if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0) && ((periph->path->target == NULL) ||(cur_pattern->target_id != periph->path->target->target_id))) continue; if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0) && ((periph->path->device == NULL) || (cur_pattern->target_lun != periph->path->device->lun_id))) continue; if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0) && (cur_pattern->unit_number != periph->unit_number)) continue; if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0) && (strncmp(cur_pattern->periph_name, periph->periph_name, DEV_IDLEN) != 0)) continue; /* * If we get to this point, the user definitely wants * information on this peripheral. So tell the caller to * copy the data out. */ retval |= DM_RET_COPY; /* * The return action has already been set to stop, since * peripherals don't have any nodes below them in the EDT. */ return(retval); } /* * If we get to this point, the peripheral that was passed in * doesn't match any of the patterns. */ return(retval); } static int xptedtbusfunc(struct cam_eb *bus, void *arg) { struct ccb_dev_match *cdm; struct cam_et *target; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; /* * If our position is for something deeper in the tree, that means * that we've already seen this node. So, we keep going down. */ if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target != NULL)) retval = DM_RET_DESCEND; else retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus); /* * If we got an error, bail out of the search. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this bus out. */ if (retval & DM_RET_COPY) { int spaceleft, j; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. */ if (spaceleft < sizeof(struct dev_match_result)) { bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_EDT | CAM_DEV_POS_BUS; cdm->pos.cookie.bus = bus; cdm->pos.generations[CAM_BUS_GENERATION]= xsoftc.bus_generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_BUS; cdm->matches[j].result.bus_result.path_id = bus->path_id; cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id; cdm->matches[j].result.bus_result.unit_number = bus->sim->unit_number; strlcpy(cdm->matches[j].result.bus_result.dev_name, bus->sim->sim_name, sizeof(cdm->matches[j].result.bus_result.dev_name)); } /* * If the user is only interested in buses, there's no * reason to descend to the next level in the tree. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) return(1); /* * If there is a target generation recorded, check it to * make sure the target list hasn't changed. 
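 * The generation counters stored in the saved position are how a
 * resumed XPT_DEV_MATCH traversal detects that the EDT changed between
 * calls; a mismatch below reports CAM_DEV_MATCH_LIST_CHANGED so the
 * consumer knows to restart from scratch.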
*/ mtx_lock(&bus->eb_mtx); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target != NULL)) { if ((cdm->pos.generations[CAM_TARGET_GENERATION] != bus->generation)) { mtx_unlock(&bus->eb_mtx); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return (0); } target = (struct cam_et *)cdm->pos.cookie.target; target->refcount++; } else target = NULL; mtx_unlock(&bus->eb_mtx); return (xpttargettraverse(bus, target, xptedttargetfunc, arg)); } static int xptedttargetfunc(struct cam_et *target, void *arg) { struct ccb_dev_match *cdm; struct cam_eb *bus; struct cam_ed *device; cdm = (struct ccb_dev_match *)arg; bus = target->bus; /* * If there is a device list generation recorded, check it to * make sure the device list hasn't changed. */ mtx_lock(&bus->eb_mtx); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target == target) && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.cookie.device != NULL)) { if (cdm->pos.generations[CAM_DEV_GENERATION] != target->generation) { mtx_unlock(&bus->eb_mtx); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } device = (struct cam_ed *)cdm->pos.cookie.device; device->refcount++; } else device = NULL; mtx_unlock(&bus->eb_mtx); return (xptdevicetraverse(target, device, xptedtdevicefunc, arg)); } static int xptedtdevicefunc(struct cam_ed *device, void *arg) { struct cam_eb *bus; struct cam_periph *periph; struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; bus = device->target->bus; /* * If our position is for something deeper in the tree, that means * that we've already seen this node. So, we keep going down. */ if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.cookie.device == device) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.cookie.periph != NULL)) retval = DM_RET_DESCEND; else retval = xptdevicematch(cdm->patterns, cdm->num_patterns, device); if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this device out. */ if (retval & DM_RET_COPY) { int spaceleft, j; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. 
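 * The saved position records the bus/target/device cookies along with
 * their generation counts, so a follow-up XPT_DEV_MATCH call can
 * resume the walk exactly where this one stopped.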
*/ if (spaceleft < sizeof(struct dev_match_result)) { bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; cdm->pos.cookie.bus = device->target->bus; cdm->pos.generations[CAM_BUS_GENERATION]= xsoftc.bus_generation; cdm->pos.cookie.target = device->target; cdm->pos.generations[CAM_TARGET_GENERATION] = device->target->bus->generation; cdm->pos.cookie.device = device; cdm->pos.generations[CAM_DEV_GENERATION] = device->target->generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_DEVICE; cdm->matches[j].result.device_result.path_id = device->target->bus->path_id; cdm->matches[j].result.device_result.target_id = device->target->target_id; cdm->matches[j].result.device_result.target_lun = device->lun_id; cdm->matches[j].result.device_result.protocol = device->protocol; bcopy(&device->inq_data, &cdm->matches[j].result.device_result.inq_data, sizeof(struct scsi_inquiry_data)); bcopy(&device->ident_data, &cdm->matches[j].result.device_result.ident_data, sizeof(struct ata_params)); /* Let the user know whether this device is unconfigured */ if (device->flags & CAM_DEV_UNCONFIGURED) cdm->matches[j].result.device_result.flags = DEV_RESULT_UNCONFIGURED; else cdm->matches[j].result.device_result.flags = DEV_RESULT_NOFLAG; } /* * If the user isn't interested in peripherals, don't descend * the tree any further. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) return(1); /* * If there is a peripheral list generation recorded, make sure * it hasn't changed. */ xpt_lock_buses(); mtx_lock(&bus->eb_mtx); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target == device->target) && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.cookie.device == device) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.cookie.periph != NULL)) { if (cdm->pos.generations[CAM_PERIPH_GENERATION] != device->generation) { mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } periph = (struct cam_periph *)cdm->pos.cookie.periph; periph->refcount++; } else periph = NULL; mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg)); } static int xptedtperiphfunc(struct cam_periph *periph, void *arg) { struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this peripheral out. */ if (retval & DM_RET_COPY) { int spaceleft, j; size_t l; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. 
*/ if (spaceleft < sizeof(struct dev_match_result)) { bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | CAM_DEV_POS_PERIPH; cdm->pos.cookie.bus = periph->path->bus; cdm->pos.generations[CAM_BUS_GENERATION]= xsoftc.bus_generation; cdm->pos.cookie.target = periph->path->target; cdm->pos.generations[CAM_TARGET_GENERATION] = periph->path->bus->generation; cdm->pos.cookie.device = periph->path->device; cdm->pos.generations[CAM_DEV_GENERATION] = periph->path->target->generation; cdm->pos.cookie.periph = periph; cdm->pos.generations[CAM_PERIPH_GENERATION] = periph->path->device->generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_PERIPH; cdm->matches[j].result.periph_result.path_id = periph->path->bus->path_id; cdm->matches[j].result.periph_result.target_id = periph->path->target->target_id; cdm->matches[j].result.periph_result.target_lun = periph->path->device->lun_id; cdm->matches[j].result.periph_result.unit_number = periph->unit_number; l = sizeof(cdm->matches[j].result.periph_result.periph_name); strlcpy(cdm->matches[j].result.periph_result.periph_name, periph->periph_name, l); } return(1); } static int xptedtmatch(struct ccb_dev_match *cdm) { struct cam_eb *bus; int ret; cdm->num_matches = 0; /* * Check the bus list generation. If it has changed, the user * needs to reset everything and start over. */ xpt_lock_buses(); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus != NULL)) { if (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation) { xpt_unlock_buses(); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } bus = (struct cam_eb *)cdm->pos.cookie.bus; bus->refcount++; } else bus = NULL; xpt_unlock_buses(); ret = xptbustraverse(bus, xptedtbusfunc, cdm); /* * If we get back 0, that means that we had to stop before fully * traversing the EDT. It also means that one of the subroutines * has set the status field to the proper value. If we get back 1, * we've fully traversed the EDT and copied out any matching entries. */ if (ret == 1) cdm->status = CAM_DEV_MATCH_LAST; return(ret); } static int xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) { struct cam_periph *periph; struct ccb_dev_match *cdm; cdm = (struct ccb_dev_match *)arg; xpt_lock_buses(); if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) && (cdm->pos.cookie.pdrv == pdrv) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.cookie.periph != NULL)) { if (cdm->pos.generations[CAM_PERIPH_GENERATION] != (*pdrv)->generation) { xpt_unlock_buses(); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } periph = (struct cam_periph *)cdm->pos.cookie.periph; periph->refcount++; } else periph = NULL; xpt_unlock_buses(); return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg)); } static int xptplistperiphfunc(struct cam_periph *periph, void *arg) { struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this peripheral out. 
*/ if (retval & DM_RET_COPY) { int spaceleft, j; size_t l; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. */ if (spaceleft < sizeof(struct dev_match_result)) { struct periph_driver **pdrv; pdrv = NULL; bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | CAM_DEV_POS_PERIPH; /* * This may look a bit non-sensical, but it is * actually quite logical. There are very few * peripheral drivers, and bloating every peripheral * structure with a pointer back to its parent * peripheral driver linker set entry would cost * more in the long run than doing this quick lookup. */ for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { if (strcmp((*pdrv)->driver_name, periph->periph_name) == 0) break; } if (*pdrv == NULL) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } cdm->pos.cookie.pdrv = pdrv; /* * The periph generation slot does double duty, as * does the periph pointer slot. They are used for * both edt and pdrv lookups and positioning. */ cdm->pos.cookie.periph = periph; cdm->pos.generations[CAM_PERIPH_GENERATION] = (*pdrv)->generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_PERIPH; cdm->matches[j].result.periph_result.path_id = periph->path->bus->path_id; /* * The transport layer peripheral doesn't have a target or * lun. */ if (periph->path->target) cdm->matches[j].result.periph_result.target_id = periph->path->target->target_id; else cdm->matches[j].result.periph_result.target_id = CAM_TARGET_WILDCARD; if (periph->path->device) cdm->matches[j].result.periph_result.target_lun = periph->path->device->lun_id; else cdm->matches[j].result.periph_result.target_lun = CAM_LUN_WILDCARD; cdm->matches[j].result.periph_result.unit_number = periph->unit_number; l = sizeof(cdm->matches[j].result.periph_result.periph_name); strlcpy(cdm->matches[j].result.periph_result.periph_name, periph->periph_name, l); } return(1); } static int xptperiphlistmatch(struct ccb_dev_match *cdm) { int ret; cdm->num_matches = 0; /* * At this point in the edt traversal function, we check the bus * list generation to make sure that no buses have been added or * removed since the user last sent a XPT_DEV_MATCH ccb through. * For the peripheral driver list traversal function, however, we * don't have to worry about new peripheral driver types coming or * going; they're in a linker set, and therefore can't change * without a recompile. */ if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) && (cdm->pos.cookie.pdrv != NULL)) ret = xptpdrvtraverse( (struct periph_driver **)cdm->pos.cookie.pdrv, xptplistpdrvfunc, cdm); else ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); /* * If we get back 0, that means that we had to stop before fully * traversing the peripheral driver tree. It also means that one of * the subroutines has set the status field to the proper value. If * we get back 1, we've fully traversed the EDT and copied out any * matching entries. 
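 * The consumer is expected to keep resubmitting the CCB while the
 * match status is CAM_DEV_MATCH_MORE, reusing the position saved
 * above.  Rough sketch of the userland side (illustrative only; this
 * is approximately what camcontrol(8) does through /dev/xpt, and the
 * buffer sizing here is hypothetical):
 *
 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
 *	ccb.cdm.match_buf_len = bufsize;
 *	ccb.cdm.matches = (struct dev_match_result *)buf;
 *	do {
 *		if (ioctl(xpt_fd, CAMIOCOMMAND, &ccb) == -1)
 *			err(1, "CAMIOCOMMAND");
 *		... consume ccb.cdm.num_matches results ...
 *	} while ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
 *	    ccb.cdm.status == CAM_DEV_MATCH_MORE);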
*/ if (ret == 1) cdm->status = CAM_DEV_MATCH_LAST; return(ret); } static int xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) { struct cam_eb *bus, *next_bus; int retval; retval = 1; if (start_bus) bus = start_bus; else { xpt_lock_buses(); bus = TAILQ_FIRST(&xsoftc.xpt_busses); if (bus == NULL) { xpt_unlock_buses(); return (retval); } bus->refcount++; xpt_unlock_buses(); } for (; bus != NULL; bus = next_bus) { retval = tr_func(bus, arg); if (retval == 0) { xpt_release_bus(bus); break; } xpt_lock_buses(); next_bus = TAILQ_NEXT(bus, links); if (next_bus) next_bus->refcount++; xpt_unlock_buses(); xpt_release_bus(bus); } return(retval); } static int xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, xpt_targetfunc_t *tr_func, void *arg) { struct cam_et *target, *next_target; int retval; retval = 1; if (start_target) target = start_target; else { mtx_lock(&bus->eb_mtx); target = TAILQ_FIRST(&bus->et_entries); if (target == NULL) { mtx_unlock(&bus->eb_mtx); return (retval); } target->refcount++; mtx_unlock(&bus->eb_mtx); } for (; target != NULL; target = next_target) { retval = tr_func(target, arg); if (retval == 0) { xpt_release_target(target); break; } mtx_lock(&bus->eb_mtx); next_target = TAILQ_NEXT(target, links); if (next_target) next_target->refcount++; mtx_unlock(&bus->eb_mtx); xpt_release_target(target); } return(retval); } static int xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, xpt_devicefunc_t *tr_func, void *arg) { struct cam_eb *bus; struct cam_ed *device, *next_device; int retval; retval = 1; bus = target->bus; if (start_device) device = start_device; else { mtx_lock(&bus->eb_mtx); device = TAILQ_FIRST(&target->ed_entries); if (device == NULL) { mtx_unlock(&bus->eb_mtx); return (retval); } device->refcount++; mtx_unlock(&bus->eb_mtx); } for (; device != NULL; device = next_device) { mtx_lock(&device->device_mtx); retval = tr_func(device, arg); mtx_unlock(&device->device_mtx); if (retval == 0) { xpt_release_device(device); break; } mtx_lock(&bus->eb_mtx); next_device = TAILQ_NEXT(device, links); if (next_device) next_device->refcount++; mtx_unlock(&bus->eb_mtx); xpt_release_device(device); } return(retval); } static int xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg) { struct cam_eb *bus; struct cam_periph *periph, *next_periph; int retval; retval = 1; bus = device->target->bus; if (start_periph) periph = start_periph; else { xpt_lock_buses(); mtx_lock(&bus->eb_mtx); periph = SLIST_FIRST(&device->periphs); while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) periph = SLIST_NEXT(periph, periph_links); if (periph == NULL) { mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); return (retval); } periph->refcount++; mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); } for (; periph != NULL; periph = next_periph) { retval = tr_func(periph, arg); if (retval == 0) { cam_periph_release_locked(periph); break; } xpt_lock_buses(); mtx_lock(&bus->eb_mtx); next_periph = SLIST_NEXT(periph, periph_links); while (next_periph != NULL && (next_periph->flags & CAM_PERIPH_FREE) != 0) next_periph = SLIST_NEXT(next_periph, periph_links); if (next_periph) next_periph->refcount++; mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); cam_periph_release_locked(periph); } return(retval); } static int xptpdrvtraverse(struct periph_driver **start_pdrv, xpt_pdrvfunc_t *tr_func, void *arg) { struct periph_driver **pdrv; int retval; retval = 1; /* * We don't traverse the peripheral driver 
list like we do the * other lists, because it is a linker set, and therefore cannot be * changed during runtime. If the peripheral driver list is ever * re-done to be something other than a linker set (i.e. it can * change while the system is running), the list traversal should * be modified to work like the other traversal functions. */ for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); *pdrv != NULL; pdrv++) { retval = tr_func(pdrv, arg); if (retval == 0) return(retval); } return(retval); } static int xptpdperiphtraverse(struct periph_driver **pdrv, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg) { struct cam_periph *periph, *next_periph; int retval; retval = 1; if (start_periph) periph = start_periph; else { xpt_lock_buses(); periph = TAILQ_FIRST(&(*pdrv)->units); while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) periph = TAILQ_NEXT(periph, unit_links); if (periph == NULL) { xpt_unlock_buses(); return (retval); } periph->refcount++; xpt_unlock_buses(); } for (; periph != NULL; periph = next_periph) { cam_periph_lock(periph); retval = tr_func(periph, arg); cam_periph_unlock(periph); if (retval == 0) { cam_periph_release(periph); break; } xpt_lock_buses(); next_periph = TAILQ_NEXT(periph, unit_links); while (next_periph != NULL && (next_periph->flags & CAM_PERIPH_FREE) != 0) next_periph = TAILQ_NEXT(next_periph, unit_links); if (next_periph) next_periph->refcount++; xpt_unlock_buses(); cam_periph_release(periph); } return(retval); } static int xptdefbusfunc(struct cam_eb *bus, void *arg) { struct xpt_traverse_config *tr_config; tr_config = (struct xpt_traverse_config *)arg; if (tr_config->depth == XPT_DEPTH_BUS) { xpt_busfunc_t *tr_func; tr_func = (xpt_busfunc_t *)tr_config->tr_func; return(tr_func(bus, tr_config->tr_arg)); } else return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); } static int xptdeftargetfunc(struct cam_et *target, void *arg) { struct xpt_traverse_config *tr_config; tr_config = (struct xpt_traverse_config *)arg; if (tr_config->depth == XPT_DEPTH_TARGET) { xpt_targetfunc_t *tr_func; tr_func = (xpt_targetfunc_t *)tr_config->tr_func; return(tr_func(target, tr_config->tr_arg)); } else return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); } static int xptdefdevicefunc(struct cam_ed *device, void *arg) { struct xpt_traverse_config *tr_config; tr_config = (struct xpt_traverse_config *)arg; if (tr_config->depth == XPT_DEPTH_DEVICE) { xpt_devicefunc_t *tr_func; tr_func = (xpt_devicefunc_t *)tr_config->tr_func; return(tr_func(device, tr_config->tr_arg)); } else return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); } static int xptdefperiphfunc(struct cam_periph *periph, void *arg) { struct xpt_traverse_config *tr_config; xpt_periphfunc_t *tr_func; tr_config = (struct xpt_traverse_config *)arg; tr_func = (xpt_periphfunc_t *)tr_config->tr_func; /* * Unlike the other default functions, we don't check for depth * here. The peripheral driver level is the last level in the EDT, * so if we're here, we should execute the function in question. */ return(tr_func(periph, tr_config->tr_arg)); } /* * Execute the given function for every bus in the EDT. */ static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) { struct xpt_traverse_config tr_config; tr_config.depth = XPT_DEPTH_BUS; tr_config.tr_func = tr_func; tr_config.tr_arg = arg; return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); } /* * Execute the given function for every device in the EDT. 
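 * Traversal callbacks follow the tr_func convention used throughout
 * this file: return nonzero to keep walking, 0 to stop early.  A
 * minimal sketch (hypothetical callback, not part of this file):
 *
 *	static int
 *	count_devices(struct cam_ed *device, void *arg)
 *	{
 *		int *count = arg;
 *
 *		(*count)++;
 *		return (1);
 *	}
 *
 *	int n = 0;
 *	xpt_for_all_devices(count_devices, &n);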
*/ static int xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) { struct xpt_traverse_config tr_config; tr_config.depth = XPT_DEPTH_DEVICE; tr_config.tr_func = tr_func; tr_config.tr_arg = arg; return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); } static int xptsetasyncfunc(struct cam_ed *device, void *arg) { struct cam_path path; struct ccb_getdev cgd; struct ccb_setasync *csa = (struct ccb_setasync *)arg; /* * Don't report unconfigured devices (Wildcard devs, * devices only for target mode, device instances * that have been invalidated but are waiting for * their last reference count to be released). */ if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) return (1); xpt_compile_path(&path, NULL, device->target->bus->path_id, device->target->target_id, device->lun_id); xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); csa->callback(csa->callback_arg, AC_FOUND_DEVICE, &path, &cgd); xpt_release_path(&path); return(1); } static int xptsetasyncbusfunc(struct cam_eb *bus, void *arg) { struct cam_path path; struct ccb_pathinq cpi; struct ccb_setasync *csa = (struct ccb_setasync *)arg; xpt_compile_path(&path, /*periph*/NULL, bus->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); xpt_path_lock(&path); xpt_path_inq(&cpi, &path); csa->callback(csa->callback_arg, AC_PATH_REGISTERED, &path, &cpi); xpt_path_unlock(&path); xpt_release_path(&path); return(1); } void xpt_action(union ccb *start_ccb) { CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code, xpt_action_name(start_ccb->ccb_h.func_code))); start_ccb->ccb_h.status = CAM_REQ_INPROG; (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb); } void xpt_action_default(union ccb *start_ccb) { struct cam_path *path; struct cam_sim *sim; struct mtx *mtx; path = start_ccb->ccb_h.path; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code, xpt_action_name(start_ccb->ccb_h.func_code))); switch (start_ccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct cam_ed *device; /* * For the sake of compatibility with SCSI-1 * devices that may not understand the identify * message, we include lun information in the * second byte of all commands. SCSI-1 specifies * that luns are a 3 bit value and reserves only 3 * bits for lun information in the CDB. Later * revisions of the SCSI spec allow for more than 8 * luns, but have deprecated lun information in the * CDB. So, if the lun won't fit, we must omit it. * * Also be aware that during initial probing for devices, * the inquiry information is unknown but initialized to 0. * This means that this code will be exercised while probing * devices with an ANSI revision greater than 2. */ device = path->device; if (device->protocol_version <= SCSI_REV_2 && start_ccb->ccb_h.target_lun < 8 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { start_ccb->csio.cdb_io.cdb_bytes[1] |= start_ccb->ccb_h.target_lun << 5; } start_ccb->csio.scsi_status = SCSI_STATUS_OK; } /* FALLTHROUGH */ case XPT_TARGET_IO: case XPT_CONT_TARGET_IO: start_ccb->csio.sense_resid = 0; start_ccb->csio.resid = 0; /* FALLTHROUGH */ case XPT_ATA_IO: if (start_ccb->ccb_h.func_code == XPT_ATA_IO) start_ccb->ataio.resid = 0; /* FALLTHROUGH */ case XPT_NVME_IO: /* FALLTHROUGH */ case XPT_NVME_ADMIN: /* FALLTHROUGH */ case XPT_MMC_IO: /* XXX just like nvme_io?
*/ case XPT_RESET_DEV: case XPT_ENG_EXEC: case XPT_SMP_IO: { struct cam_devq *devq; devq = path->bus->sim->devq; mtx_lock(&devq->send_mtx); cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); if (xpt_schedule_devq(devq, path->device) != 0) xpt_run_devq(devq); mtx_unlock(&devq->send_mtx); break; } case XPT_CALC_GEOMETRY: /* Filter out garbage */ if (start_ccb->ccg.block_size == 0 || start_ccb->ccg.volume_size == 0) { start_ccb->ccg.cylinders = 0; start_ccb->ccg.heads = 0; start_ccb->ccg.secs_per_track = 0; start_ccb->ccb_h.status = CAM_REQ_CMP; break; } #if defined(__sparc64__) /* * For sparc64, we may need adjust the geometry of large * disks in order to fit the limitations of the 16-bit * fields of the VTOC8 disk label. */ if (scsi_da_bios_params(&start_ccb->ccg) != 0) { start_ccb->ccb_h.status = CAM_REQ_CMP; break; } #endif goto call_sim; case XPT_ABORT: { union ccb* abort_ccb; abort_ccb = start_ccb->cab.abort_ccb; if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) { struct cam_ed *device; struct cam_devq *devq; device = abort_ccb->ccb_h.path->device; devq = device->sim->devq; mtx_lock(&devq->send_mtx); if (abort_ccb->ccb_h.pinfo.index > 0) { cam_ccbq_remove_ccb(&device->ccbq, abort_ccb); abort_ccb->ccb_h.status = CAM_REQ_ABORTED|CAM_DEV_QFRZN; xpt_freeze_devq_device(device, 1); mtx_unlock(&devq->send_mtx); xpt_done(abort_ccb); start_ccb->ccb_h.status = CAM_REQ_CMP; break; } mtx_unlock(&devq->send_mtx); if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { /* * We've caught this ccb en route to * the SIM. Flag it for abort and the * SIM will do so just before starting * real work on the CCB. */ abort_ccb->ccb_h.status = CAM_REQ_ABORTED|CAM_DEV_QFRZN; xpt_freeze_devq(abort_ccb->ccb_h.path, 1); start_ccb->ccb_h.status = CAM_REQ_CMP; break; } } if (XPT_FC_IS_QUEUED(abort_ccb) && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { /* * It's already completed but waiting * for our SWI to get to it. */ start_ccb->ccb_h.status = CAM_UA_ABORT; break; } /* * If we weren't able to take care of the abort request * in the XPT, pass the request down to the SIM for processing. 
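 * The fall-through below lands in the call_sim block, where the CCB is
 * handed to sim_action with the SIM lock held (if the SIM has one).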
*/ } /* FALLTHROUGH */ case XPT_ACCEPT_TARGET_IO: case XPT_EN_LUN: case XPT_IMMED_NOTIFY: case XPT_NOTIFY_ACK: case XPT_RESET_BUS: case XPT_IMMEDIATE_NOTIFY: case XPT_NOTIFY_ACKNOWLEDGE: case XPT_GET_SIM_KNOB_OLD: case XPT_GET_SIM_KNOB: case XPT_SET_SIM_KNOB: case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: case XPT_PATH_INQ: call_sim: sim = path->bus->sim; mtx = sim->mtx; if (mtx && !mtx_owned(mtx)) mtx_lock(mtx); else mtx = NULL; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Calling sim->sim_action(): func=%#x\n", start_ccb->ccb_h.func_code)); (*(sim->sim_action))(sim, start_ccb); CAM_DEBUG(path, CAM_DEBUG_TRACE, ("sim->sim_action returned: status=%#x\n", start_ccb->ccb_h.status)); if (mtx) mtx_unlock(mtx); break; case XPT_PATH_STATS: start_ccb->cpis.last_reset = path->bus->last_reset; start_ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_GDEV_TYPE: { struct cam_ed *dev; dev = path->device; if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; } else { struct ccb_getdev *cgd; cgd = &start_ccb->cgd; cgd->protocol = dev->protocol; cgd->inq_data = dev->inq_data; cgd->ident_data = dev->ident_data; cgd->inq_flags = dev->inq_flags; cgd->ccb_h.status = CAM_REQ_CMP; cgd->serial_num_len = dev->serial_num_len; if ((dev->serial_num_len > 0) && (dev->serial_num != NULL)) bcopy(dev->serial_num, cgd->serial_num, dev->serial_num_len); } break; } case XPT_GDEV_STATS: { struct ccb_getdevstats *cgds = &start_ccb->cgds; struct cam_ed *dev = path->device; struct cam_eb *bus = path->bus; struct cam_et *tar = path->target; struct cam_devq *devq = bus->sim->devq; mtx_lock(&devq->send_mtx); cgds->dev_openings = dev->ccbq.dev_openings; cgds->dev_active = dev->ccbq.dev_active; cgds->allocated = dev->ccbq.allocated; cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq); cgds->held = cgds->allocated - cgds->dev_active - cgds->queued; cgds->last_reset = tar->last_reset; cgds->maxtags = dev->maxtags; cgds->mintags = dev->mintags; if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) cgds->last_reset = bus->last_reset; mtx_unlock(&devq->send_mtx); cgds->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GDEVLIST: { struct cam_periph *nperiph; struct periph_list *periph_head; struct ccb_getdevlist *cgdl; u_int i; struct cam_ed *device; int found; found = 0; /* * Don't want anyone mucking with our data. */ device = path->device; periph_head = &device->periphs; cgdl = &start_ccb->cgdl; /* * Check and see if the list has changed since the user * last requested a list member. If so, tell them that the * list has changed, and therefore they need to start over * from the beginning. */ if ((cgdl->index != 0) && (cgdl->generation != device->generation)) { cgdl->status = CAM_GDEVLIST_LIST_CHANGED; break; } /* * Traverse the list of peripherals and attempt to find * the requested peripheral. 
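 * cgdl->index and cgdl->generation act as a cursor: each XPT_GDEVLIST
 * call returns one peripheral and advances the index, and the
 * generation check above forces the caller to restart if the list has
 * changed in between.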
*/ for (nperiph = SLIST_FIRST(periph_head), i = 0; (nperiph != NULL) && (i <= cgdl->index); nperiph = SLIST_NEXT(nperiph, periph_links), i++) { if (i == cgdl->index) { strlcpy(cgdl->periph_name, nperiph->periph_name, sizeof(cgdl->periph_name)); cgdl->unit_number = nperiph->unit_number; found = 1; } } if (found == 0) { cgdl->status = CAM_GDEVLIST_ERROR; break; } if (nperiph == NULL) cgdl->status = CAM_GDEVLIST_LAST_DEVICE; else cgdl->status = CAM_GDEVLIST_MORE_DEVS; cgdl->index++; cgdl->generation = device->generation; cgdl->ccb_h.status = CAM_REQ_CMP; break; } case XPT_DEV_MATCH: { dev_pos_type position_type; struct ccb_dev_match *cdm; cdm = &start_ccb->cdm; /* * There are two ways of getting at information in the EDT. * The first way is via the primary EDT tree. It starts * with a list of buses, then a list of targets on a bus, * then devices/luns on a target, and then peripherals on a * device/lun. The "other" way is by the peripheral driver * lists. The peripheral driver lists are organized by * peripheral driver. (obviously) So it makes sense to * use the peripheral driver list if the user is looking * for something like "da1", or all "da" devices. If the * user is looking for something on a particular bus/target * or lun, it's generally better to go through the EDT tree. */ if (cdm->pos.position_type != CAM_DEV_POS_NONE) position_type = cdm->pos.position_type; else { u_int i; position_type = CAM_DEV_POS_NONE; for (i = 0; i < cdm->num_patterns; i++) { if ((cdm->patterns[i].type == DEV_MATCH_BUS) ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ position_type = CAM_DEV_POS_EDT; break; } } if (cdm->num_patterns == 0) position_type = CAM_DEV_POS_EDT; else if (position_type == CAM_DEV_POS_NONE) position_type = CAM_DEV_POS_PDRV; } switch(position_type & CAM_DEV_POS_TYPEMASK) { case CAM_DEV_POS_EDT: xptedtmatch(cdm); break; case CAM_DEV_POS_PDRV: xptperiphlistmatch(cdm); break; default: cdm->status = CAM_DEV_MATCH_ERROR; break; } if (cdm->status == CAM_DEV_MATCH_ERROR) start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; else start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_SASYNC_CB: { struct ccb_setasync *csa; struct async_node *cur_entry; struct async_list *async_head; u_int32_t added; csa = &start_ccb->csa; added = csa->event_enable; async_head = &path->device->asyncs; /* * If there is already an entry for us, simply * update it. */ cur_entry = SLIST_FIRST(async_head); while (cur_entry != NULL) { if ((cur_entry->callback_arg == csa->callback_arg) && (cur_entry->callback == csa->callback)) break; cur_entry = SLIST_NEXT(cur_entry, links); } if (cur_entry != NULL) { /* * If the request has no flags set, * remove the entry. */ added &= ~cur_entry->event_enable; if (csa->event_enable == 0) { SLIST_REMOVE(async_head, cur_entry, async_node, links); xpt_release_device(path->device); free(cur_entry, M_CAMXPT); } else { cur_entry->event_enable = csa->event_enable; } csa->event_enable = added; } else { cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, M_NOWAIT); if (cur_entry == NULL) { csa->ccb_h.status = CAM_RESRC_UNAVAIL; break; } cur_entry->event_enable = csa->event_enable; cur_entry->event_lock = (path->bus->sim->mtx && mtx_owned(path->bus->sim->mtx)) ? 
1 : 0; cur_entry->callback_arg = csa->callback_arg; cur_entry->callback = csa->callback; SLIST_INSERT_HEAD(async_head, cur_entry, links); xpt_acquire_device(path->device); } start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_REL_SIMQ: { struct ccb_relsim *crs; struct cam_ed *dev; crs = &start_ccb->crs; dev = path->device; if (dev == NULL) { crs->ccb_h.status = CAM_DEV_NOT_THERE; break; } if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { /* Don't ever go below one opening */ if (crs->openings > 0) { xpt_dev_ccbq_resize(path, crs->openings); if (bootverbose) { xpt_print(path, "number of openings is now %d\n", crs->openings); } } } mtx_lock(&dev->sim->devq->send_mtx); if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { /* * Just extend the old timeout and decrement * the freeze count so that a single timeout * is sufficient for releasing the queue. */ start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; callout_stop(&dev->callout); } else { start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } callout_reset_sbt(&dev->callout, SBT_1MS * crs->release_timeout, 0, xpt_release_devq_timeout, dev, 0); dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; } if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { /* * Decrement the freeze count so that a single * completion is still sufficient to unfreeze * the queue. */ start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; } else { dev->flags |= CAM_DEV_REL_ON_COMPLETE; start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } } if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 || (dev->ccbq.dev_active == 0)) { start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; } else { dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } } mtx_unlock(&dev->sim->devq->send_mtx); if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt; start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_DEBUG: { struct cam_path *oldpath; /* Check that all request bits are supported. 
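 * CAM_DEBUG_COMPILE is the set of debug flags actually compiled into
 * this kernel; asking for anything outside it fails with
 * CAM_FUNC_NOTAVAIL rather than being silently ignored.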
*/ if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) { start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; } cam_dflags = CAM_DEBUG_NONE; if (cam_dpath != NULL) { oldpath = cam_dpath; cam_dpath = NULL; xpt_free_path(oldpath); } if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) { if (xpt_create_path(&cam_dpath, NULL, start_ccb->ccb_h.path_id, start_ccb->ccb_h.target_id, start_ccb->ccb_h.target_lun) != CAM_REQ_CMP) { start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; } else { cam_dflags = start_ccb->cdbg.flags; start_ccb->ccb_h.status = CAM_REQ_CMP; xpt_print(cam_dpath, "debugging flags now %x\n", cam_dflags); } } else start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_NOOP: if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) xpt_freeze_devq(path, 1); start_ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_REPROBE_LUN: xpt_async(AC_INQ_CHANGED, path, NULL); start_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(start_ccb); break; default: case XPT_SDEV_TYPE: case XPT_TERM_IO: case XPT_ENG_INQ: /* XXX Implement */ xpt_print(start_ccb->ccb_h.path, "%s: CCB type %#x %s not supported\n", __func__, start_ccb->ccb_h.func_code, xpt_action_name(start_ccb->ccb_h.func_code)); start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) { xpt_done(start_ccb); } break; } CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default: func= %#x %s status %#x\n", start_ccb->ccb_h.func_code, xpt_action_name(start_ccb->ccb_h.func_code), start_ccb->ccb_h.status)); } /* * Call the sim poll routine to allow the sim to complete * any inflight requests, then call camisr_runqueue to * complete any CCB that the polling completed. */ void xpt_sim_poll(struct cam_sim *sim) { struct mtx *mtx; mtx = sim->mtx; if (mtx) mtx_lock(mtx); (*(sim->sim_poll))(sim); if (mtx) mtx_unlock(mtx); camisr_runqueue(); } uint32_t xpt_poll_setup(union ccb *start_ccb) { u_int32_t timeout; struct cam_sim *sim; struct cam_devq *devq; struct cam_ed *dev; timeout = start_ccb->ccb_h.timeout * 10; sim = start_ccb->ccb_h.path->bus->sim; devq = sim->devq; dev = start_ccb->ccb_h.path->device; /* * Steal an opening so that no other queued requests * can get it before us while we simulate interrupts. */ mtx_lock(&devq->send_mtx); dev->ccbq.dev_openings--; while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) && (--timeout > 0)) { mtx_unlock(&devq->send_mtx); DELAY(100); xpt_sim_poll(sim); mtx_lock(&devq->send_mtx); } dev->ccbq.dev_openings++; mtx_unlock(&devq->send_mtx); return (timeout); } void xpt_pollwait(union ccb *start_ccb, uint32_t timeout) { while (--timeout > 0) { xpt_sim_poll(start_ccb->ccb_h.path->bus->sim); if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) break; DELAY(100); } if (timeout == 0) { /* * XXX Is it worth adding a sim_timeout entry * point so we can attempt recovery? If * this is only used for dumps, I don't think * it is. */ start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; } } void xpt_polled_action(union ccb *start_ccb) { uint32_t timeout; struct cam_ed *dev; timeout = start_ccb->ccb_h.timeout * 10; dev = start_ccb->ccb_h.path->device; mtx_unlock(&dev->device_mtx); timeout = xpt_poll_setup(start_ccb); if (timeout > 0) { xpt_action(start_ccb); xpt_pollwait(start_ccb, timeout); } else { start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; } mtx_lock(&dev->device_mtx); } /* * Schedule a peripheral driver to receive a ccb when its * target device has space for more transactions. 
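 * Minimal sketch of how a peripheral driver typically uses this
 * (illustrative only; "foostart" is a hypothetical periph_start
 * routine, not part of this file): the driver schedules itself when it
 * has work queued, and later receives a CCB through its periph_start
 * entry point, fills it in, and submits it with xpt_action().
 *
 *	cam_periph_lock(periph);
 *	xpt_schedule(periph, CAM_PRIORITY_NORMAL);
 *	cam_periph_unlock(periph);
 *
 *	static void
 *	foostart(struct cam_periph *periph, union ccb *start_ccb)
 *	{
 *		... fill in start_ccb for the queued work ...
 *		xpt_action(start_ccb);
 *	}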
*/ void xpt_schedule(struct cam_periph *periph, u_int32_t new_priority) { CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); cam_periph_assert(periph, MA_OWNED); if (new_priority < periph->scheduled_priority) { periph->scheduled_priority = new_priority; xpt_run_allocq(periph, 0); } } /* * Schedule a device to run on a given queue. * If the device was inserted as a new entry on the queue, * return 1 meaning the device queue should be run. If we * were already queued, implying someone else has already * started the queue, return 0 so the caller doesn't attempt * to run the queue. */ static int xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, u_int32_t new_priority) { int retval; u_int32_t old_priority; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); old_priority = pinfo->priority; /* * Are we already queued? */ if (pinfo->index != CAM_UNQUEUED_INDEX) { /* Simply reorder based on new priority */ if (new_priority < old_priority) { camq_change_priority(queue, pinfo->index, new_priority); CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("changed priority to %d\n", new_priority)); retval = 1; } else retval = 0; } else { /* New entry on the queue */ if (new_priority < old_priority) pinfo->priority = new_priority; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("Inserting onto queue\n")); pinfo->generation = ++queue->generation; camq_insert(queue, pinfo); retval = 1; } return (retval); } static void xpt_run_allocq_task(void *context, int pending) { struct cam_periph *periph = context; cam_periph_lock(periph); periph->flags &= ~CAM_PERIPH_RUN_TASK; xpt_run_allocq(periph, 1); cam_periph_unlock(periph); cam_periph_release(periph); } static void xpt_run_allocq(struct cam_periph *periph, int sleep) { struct cam_ed *device; union ccb *ccb; uint32_t prio; cam_periph_assert(periph, MA_OWNED); if (periph->periph_allocating) return; cam_periph_doacquire(periph); periph->periph_allocating = 1; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph)); device = periph->path->device; ccb = NULL; restart: while ((prio = min(periph->scheduled_priority, periph->immediate_priority)) != CAM_PRIORITY_NONE && (periph->periph_allocated - (ccb != NULL ? 
1 : 0) < device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) { if (ccb == NULL && (ccb = xpt_get_ccb_nowait(periph)) == NULL) { if (sleep) { ccb = xpt_get_ccb(periph); goto restart; } if (periph->flags & CAM_PERIPH_RUN_TASK) break; cam_periph_doacquire(periph); periph->flags |= CAM_PERIPH_RUN_TASK; taskqueue_enqueue(xsoftc.xpt_taskq, &periph->periph_run_task); break; } xpt_setup_ccb(&ccb->ccb_h, periph->path, prio); if (prio == periph->immediate_priority) { periph->immediate_priority = CAM_PRIORITY_NONE; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("waking cam_periph_getccb()\n")); SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h, periph_links.sle); wakeup(&periph->ccb_list); } else { periph->scheduled_priority = CAM_PRIORITY_NONE; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("calling periph_start()\n")); periph->periph_start(periph, ccb); } ccb = NULL; } if (ccb != NULL) xpt_release_ccb(ccb); periph->periph_allocating = 0; cam_periph_release_locked(periph); } static void xpt_run_devq(struct cam_devq *devq) { struct mtx *mtx; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n")); devq->send_queue.qfrozen_cnt++; while ((devq->send_queue.entries > 0) && (devq->send_openings > 0) && (devq->send_queue.qfrozen_cnt <= 1)) { struct cam_ed *device; union ccb *work_ccb; struct cam_sim *sim; struct xpt_proto *proto; device = (struct cam_ed *)camq_remove(&devq->send_queue, CAMQ_HEAD); CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("running device %p\n", device)); work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); if (work_ccb == NULL) { printf("device on run queue with no ccbs???\n"); continue; } if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { mtx_lock(&xsoftc.xpt_highpower_lock); if (xsoftc.num_highpower <= 0) { /* * We got a high power command, but we * don't have any available slots. Freeze * the device queue until we have a slot * available. */ xpt_freeze_devq_device(device, 1); STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device, highpowerq_entry); mtx_unlock(&xsoftc.xpt_highpower_lock); continue; } else { /* * Consume a high power slot while * this ccb runs. */ xsoftc.num_highpower--; } mtx_unlock(&xsoftc.xpt_highpower_lock); } cam_ccbq_remove_ccb(&device->ccbq, work_ccb); cam_ccbq_send_ccb(&device->ccbq, work_ccb); devq->send_openings--; devq->send_active++; xpt_schedule_devq(devq, device); mtx_unlock(&devq->send_mtx); if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) { /* * The client wants to freeze the queue * after this CCB is sent. */ xpt_freeze_devq(work_ccb->ccb_h.path, 1); } /* In Target mode, the peripheral driver knows best... */ if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { if ((device->inq_flags & SID_CmdQue) != 0 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; else /* * Clear this in case of a retried CCB that * failed due to a rejected tag. */ work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; } KASSERT(device == work_ccb->ccb_h.path->device, ("device (%p) / path->device (%p) mismatch", device, work_ccb->ccb_h.path->device)); proto = xpt_proto_find(device->protocol); if (proto && proto->ops->debug_out) proto->ops->debug_out(work_ccb); /* * Device queues can be shared among multiple SIM instances * that reside on different buses. Use the SIM from the * queued device, rather than the one from the calling bus. 
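 *
 * The dispatch below also honors the SIM's locking model: a SIM
 * registered with a mutex has that mutex acquired around the
 * sim_action call (unless the caller already holds it), while a SIM
 * registered with a NULL mutex is called without any lock held.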
*/ sim = device->sim; mtx = sim->mtx; if (mtx && !mtx_owned(mtx)) mtx_lock(mtx); else mtx = NULL; work_ccb->ccb_h.qos.periph_data = cam_iosched_now(); (*(sim->sim_action))(sim, work_ccb); if (mtx) mtx_unlock(mtx); mtx_lock(&devq->send_mtx); } devq->send_queue.qfrozen_cnt--; } /* * This function merges stuff from the slave ccb into the master ccb, while * keeping important fields in the master ccb constant. */ void xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb) { /* * Pull fields that are valid for peripheral drivers to set * into the master CCB along with the CCB "payload". */ master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); } void xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority, u_int32_t flags) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); ccb_h->pinfo.priority = priority; ccb_h->path = path; ccb_h->path_id = path->bus->path_id; if (path->target) ccb_h->target_id = path->target->target_id; else ccb_h->target_id = CAM_TARGET_WILDCARD; if (path->device) { ccb_h->target_lun = path->device->lun_id; ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; } else { ccb_h->target_lun = CAM_TARGET_WILDCARD; } ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; ccb_h->flags = flags; ccb_h->xflags = 0; } void xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) { xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0); } /* Path manipulation functions */ cam_status xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { struct cam_path *path; cam_status status; path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); if (path == NULL) { status = CAM_RESRC_UNAVAIL; return(status); } status = xpt_compile_path(path, perph, path_id, target_id, lun_id); if (status != CAM_REQ_CMP) { free(path, M_CAMPATH); path = NULL; } *new_path_ptr = path; return (status); } cam_status xpt_create_path_unlocked(struct cam_path **new_path_ptr, struct cam_periph *periph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { return (xpt_create_path(new_path_ptr, periph, path_id, target_id, lun_id)); } cam_status xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { struct cam_eb *bus; struct cam_et *target; struct cam_ed *device; cam_status status; status = CAM_REQ_CMP; /* Completed without error */ target = NULL; /* Wildcarded */ device = NULL; /* Wildcarded */ /* * We will potentially modify the EDT, so block interrupts * that may attempt to create cam paths. 
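 *
 * Callers normally reach this through xpt_create_path() rather than
 * invoking it directly.  A minimal, illustrative lookup of a fully
 * specified device path looks like:
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id, lun_id) ==
 *	    CAM_REQ_CMP) {
 *		...
 *		xpt_free_path(path);
 *	}
 *
 * Missing target and device nodes are created on demand below, and
 * the references taken here are dropped again by xpt_release_path().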
*/ bus = xpt_find_bus(path_id); if (bus == NULL) { status = CAM_PATH_INVALID; } else { xpt_lock_buses(); mtx_lock(&bus->eb_mtx); target = xpt_find_target(bus, target_id); if (target == NULL) { /* Create one */ struct cam_et *new_target; new_target = xpt_alloc_target(bus, target_id); if (new_target == NULL) { status = CAM_RESRC_UNAVAIL; } else { target = new_target; } } xpt_unlock_buses(); if (target != NULL) { device = xpt_find_device(target, lun_id); if (device == NULL) { /* Create one */ struct cam_ed *new_device; new_device = (*(bus->xport->ops->alloc_device))(bus, target, lun_id); if (new_device == NULL) { status = CAM_RESRC_UNAVAIL; } else { device = new_device; } } } mtx_unlock(&bus->eb_mtx); } /* * Only touch the user's data if we are successful. */ if (status == CAM_REQ_CMP) { new_path->periph = perph; new_path->bus = bus; new_path->target = target; new_path->device = device; CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); } else { if (device != NULL) xpt_release_device(device); if (target != NULL) xpt_release_target(target); if (bus != NULL) xpt_release_bus(bus); } return (status); } cam_status xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path) { struct cam_path *new_path; new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); if (new_path == NULL) return(CAM_RESRC_UNAVAIL); xpt_copy_path(new_path, path); *new_path_ptr = new_path; return (CAM_REQ_CMP); } void xpt_copy_path(struct cam_path *new_path, struct cam_path *path) { *new_path = *path; if (path->bus != NULL) xpt_acquire_bus(path->bus); if (path->target != NULL) xpt_acquire_target(path->target); if (path->device != NULL) xpt_acquire_device(path->device); } void xpt_release_path(struct cam_path *path) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); if (path->device != NULL) { xpt_release_device(path->device); path->device = NULL; } if (path->target != NULL) { xpt_release_target(path->target); path->target = NULL; } if (path->bus != NULL) { xpt_release_bus(path->bus); path->bus = NULL; } } void xpt_free_path(struct cam_path *path) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); xpt_release_path(path); free(path, M_CAMPATH); } void xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref) { xpt_lock_buses(); if (bus_ref) { if (path->bus) *bus_ref = path->bus->refcount; else *bus_ref = 0; } if (periph_ref) { if (path->periph) *periph_ref = path->periph->refcount; else *periph_ref = 0; } xpt_unlock_buses(); if (target_ref) { if (path->target) *target_ref = path->target->refcount; else *target_ref = 0; } if (device_ref) { if (path->device) *device_ref = path->device->refcount; else *device_ref = 0; } } /* * Return -1 for failure, 0 for exact match, 1 for match with wildcards * in path1, 2 for match with wildcards in path2. 
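 *
 * For example, comparing a path whose target and lun are wildcards
 * against a fully specified device path on the same bus yields 1;
 * swapping the two arguments yields 2.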
*/ int xpt_path_comp(struct cam_path *path1, struct cam_path *path2) { int retval = 0; if (path1->bus != path2->bus) { if (path1->bus->path_id == CAM_BUS_WILDCARD) retval = 1; else if (path2->bus->path_id == CAM_BUS_WILDCARD) retval = 2; else return (-1); } if (path1->target != path2->target) { if (path1->target->target_id == CAM_TARGET_WILDCARD) { if (retval == 0) retval = 1; } else if (path2->target->target_id == CAM_TARGET_WILDCARD) retval = 2; else return (-1); } if (path1->device != path2->device) { if (path1->device->lun_id == CAM_LUN_WILDCARD) { if (retval == 0) retval = 1; } else if (path2->device->lun_id == CAM_LUN_WILDCARD) retval = 2; else return (-1); } return (retval); } int xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev) { int retval = 0; if (path->bus != dev->target->bus) { if (path->bus->path_id == CAM_BUS_WILDCARD) retval = 1; else if (dev->target->bus->path_id == CAM_BUS_WILDCARD) retval = 2; else return (-1); } if (path->target != dev->target) { if (path->target->target_id == CAM_TARGET_WILDCARD) { if (retval == 0) retval = 1; } else if (dev->target->target_id == CAM_TARGET_WILDCARD) retval = 2; else return (-1); } if (path->device != dev) { if (path->device->lun_id == CAM_LUN_WILDCARD) { if (retval == 0) retval = 1; } else if (dev->lun_id == CAM_LUN_WILDCARD) retval = 2; else return (-1); } return (retval); } void xpt_print_path(struct cam_path *path) { struct sbuf sb; char buffer[XPT_PRINT_LEN]; sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN); xpt_path_sbuf(path, &sb); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); sbuf_delete(&sb); } void xpt_print_device(struct cam_ed *device) { if (device == NULL) printf("(nopath): "); else { printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name, device->sim->unit_number, device->sim->bus_id, device->target->target_id, (uintmax_t)device->lun_id); } } void xpt_print(struct cam_path *path, const char *fmt, ...) 
{ va_list ap; struct sbuf sb; char buffer[XPT_PRINT_LEN]; sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN); xpt_path_sbuf(path, &sb); va_start(ap, fmt); sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); sbuf_delete(&sb); } int xpt_path_string(struct cam_path *path, char *str, size_t str_len) { struct sbuf sb; int len; sbuf_new(&sb, str, str_len, 0); len = xpt_path_sbuf(path, &sb); sbuf_finish(&sb); return (len); } int xpt_path_sbuf(struct cam_path *path, struct sbuf *sb) { if (path == NULL) sbuf_printf(sb, "(nopath): "); else { if (path->periph != NULL) sbuf_printf(sb, "(%s%d:", path->periph->periph_name, path->periph->unit_number); else sbuf_printf(sb, "(noperiph:"); if (path->bus != NULL) sbuf_printf(sb, "%s%d:%d:", path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id); else sbuf_printf(sb, "nobus:"); if (path->target != NULL) sbuf_printf(sb, "%d:", path->target->target_id); else sbuf_printf(sb, "X:"); if (path->device != NULL) sbuf_printf(sb, "%jx): ", (uintmax_t)path->device->lun_id); else sbuf_printf(sb, "X): "); } return(sbuf_len(sb)); } path_id_t xpt_path_path_id(struct cam_path *path) { return(path->bus->path_id); } target_id_t xpt_path_target_id(struct cam_path *path) { if (path->target != NULL) return (path->target->target_id); else return (CAM_TARGET_WILDCARD); } lun_id_t xpt_path_lun_id(struct cam_path *path) { if (path->device != NULL) return (path->device->lun_id); else return (CAM_LUN_WILDCARD); } struct cam_sim * xpt_path_sim(struct cam_path *path) { return (path->bus->sim); } struct cam_periph* xpt_path_periph(struct cam_path *path) { return (path->periph); } /* * Release a CAM control block for the caller. Remit the cost of the structure * to the device referenced by the path. If the this device had no 'credits' * and peripheral drivers have registered async callbacks for this notification * call them now. */ void xpt_release_ccb(union ccb *free_ccb) { struct cam_ed *device; struct cam_periph *periph; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED); device = free_ccb->ccb_h.path->device; periph = free_ccb->ccb_h.path->periph; xpt_free_ccb(free_ccb); periph->periph_allocated--; cam_ccbq_release_opening(&device->ccbq); xpt_run_allocq(periph, 0); } /* Functions accessed by SIM drivers */ static struct xpt_xport_ops xport_default_ops = { .alloc_device = xpt_alloc_device_default, .action = xpt_action_default, .async = xpt_dev_async_default, }; static struct xpt_xport xport_default = { .xport = XPORT_UNKNOWN, .name = "unknown", .ops = &xport_default_ops, }; CAM_XPT_XPORT(xport_default); /* * A sim structure, listing the SIM entry points and instance * identification info is passed to xpt_bus_register to hook the SIM * into the CAM framework. xpt_bus_register creates a cam_eb entry * for this new bus and places it in the array of buses and assigns * it a path_id. The path_id may be influenced by "hard wiring" * information specified by the user. Once interrupt services are * available, the bus will be probed. 
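 *
 * A minimal, illustrative attach sequence for a SIM driver (the
 * driver names, softc fields, and queue sizes below are placeholders,
 * and error handling is elided):
 *
 *	devq = cam_simq_alloc(MAX_TRANSACTIONS);
 *	sim = cam_sim_alloc(mydriver_action, mydriver_poll, "mydriver",
 *	    softc, device_get_unit(dev), &softc->mtx,
 *	    MAX_TRANSACTIONS, MAX_TRANSACTIONS, devq);
 *	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS)
 *		... cam_sim_free(sim, TRUE) and fail the attach ...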
*/ int32_t xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus) { struct cam_eb *new_bus; struct cam_eb *old_bus; struct ccb_pathinq cpi; struct cam_path *path; cam_status status; sim->bus_id = bus; new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), M_CAMXPT, M_NOWAIT|M_ZERO); if (new_bus == NULL) { /* Couldn't satisfy request */ return (CAM_RESRC_UNAVAIL); } mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF); TAILQ_INIT(&new_bus->et_entries); cam_sim_hold(sim); new_bus->sim = sim; timevalclear(&new_bus->last_reset); new_bus->flags = 0; new_bus->refcount = 1; /* Held until a bus_deregister event */ new_bus->generation = 0; xpt_lock_buses(); sim->path_id = new_bus->path_id = xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); while (old_bus != NULL && old_bus->path_id < new_bus->path_id) old_bus = TAILQ_NEXT(old_bus, links); if (old_bus != NULL) TAILQ_INSERT_BEFORE(old_bus, new_bus, links); else TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); xsoftc.bus_generation++; xpt_unlock_buses(); /* * Set a default transport so that a PATH_INQ can be issued to * the SIM. This will then allow for probing and attaching of * a more appropriate transport. */ new_bus->xport = &xport_default; status = xpt_create_path(&path, /*periph*/NULL, sim->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { xpt_release_bus(new_bus); return (CAM_RESRC_UNAVAIL); } xpt_path_inq(&cpi, path); if (cpi.ccb_h.status == CAM_REQ_CMP) { struct xpt_xport **xpt; SET_FOREACH(xpt, cam_xpt_xport_set) { if ((*xpt)->xport == cpi.transport) { new_bus->xport = *xpt; break; } } if (new_bus->xport == NULL) { xpt_print(path, "No transport found for %d\n", cpi.transport); xpt_release_bus(new_bus); free(path, M_CAMXPT); return (CAM_RESRC_UNAVAIL); } } /* Notify interested parties */ if (sim->path_id != CAM_XPT_PATH_ID) { xpt_async(AC_PATH_REGISTERED, path, &cpi); if ((cpi.hba_misc & PIM_NOSCAN) == 0) { union ccb *scan_ccb; /* Initiate bus rescan. */ scan_ccb = xpt_alloc_ccb_nowait(); if (scan_ccb != NULL) { scan_ccb->ccb_h.path = path; scan_ccb->ccb_h.func_code = XPT_SCAN_BUS; scan_ccb->crcn.flags = 0; xpt_rescan(scan_ccb); } else { xpt_print(path, "Can't allocate CCB to scan bus\n"); xpt_free_path(path); } } else xpt_free_path(path); } else xpt_free_path(path); return (CAM_SUCCESS); } int32_t xpt_bus_deregister(path_id_t pathid) { struct cam_path bus_path; cam_status status; status = xpt_compile_path(&bus_path, NULL, pathid, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) return (status); xpt_async(AC_LOST_DEVICE, &bus_path, NULL); xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); /* Release the reference count held while registered. */ xpt_release_bus(bus_path.bus); xpt_release_path(&bus_path); return (CAM_REQ_CMP); } static path_id_t xptnextfreepathid(void) { struct cam_eb *bus; path_id_t pathid; const char *strval; mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); pathid = 0; bus = TAILQ_FIRST(&xsoftc.xpt_busses); retry: /* Find an unoccupied pathid */ while (bus != NULL && bus->path_id <= pathid) { if (bus->path_id == pathid) pathid++; bus = TAILQ_NEXT(bus, links); } /* * Ensure that this pathid is not reserved for * a bus that may be registered in the future. 
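 *
 * Such reservations come from "scbus" wiring hints; for example, a
 * kernel config or /boot/device.hints entry along the lines of
 *
 *	hint.scbus.0.at="ahc0"
 *
 * pins path id 0 to that controller, so dynamically assigned buses
 * must be allocated around it.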
*/ if (resource_string_value("scbus", pathid, "at", &strval) == 0) { ++pathid; /* Start the search over */ goto retry; } return (pathid); } static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus) { path_id_t pathid; int i, dunit, val; char buf[32]; const char *dname; pathid = CAM_XPT_PATH_ID; snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); if (strcmp(buf, "xpt0") == 0 && sim_bus == 0) return (pathid); i = 0; while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { if (strcmp(dname, "scbus")) { /* Avoid a bit of foot shooting. */ continue; } if (dunit < 0) /* unwired?! */ continue; if (resource_int_value("scbus", dunit, "bus", &val) == 0) { if (sim_bus == val) { pathid = dunit; break; } } else if (sim_bus == 0) { /* Unspecified matches bus 0 */ pathid = dunit; break; } else { printf("Ambiguous scbus configuration for %s%d " "bus %d, cannot wire down. The kernel " "config entry for scbus%d should " "specify a controller bus.\n" "Scbus will be assigned dynamically.\n", sim_name, sim_unit, sim_bus, dunit); break; } } if (pathid == CAM_XPT_PATH_ID) pathid = xptnextfreepathid(); return (pathid); } static const char * xpt_async_string(u_int32_t async_code) { switch (async_code) { case AC_BUS_RESET: return ("AC_BUS_RESET"); case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL"); case AC_SCSI_AEN: return ("AC_SCSI_AEN"); case AC_SENT_BDR: return ("AC_SENT_BDR"); case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED"); case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED"); case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE"); case AC_LOST_DEVICE: return ("AC_LOST_DEVICE"); case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG"); case AC_INQ_CHANGED: return ("AC_INQ_CHANGED"); case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED"); case AC_CONTRACT: return ("AC_CONTRACT"); case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED"); case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION"); } return ("AC_UNKNOWN"); } static int xpt_async_size(u_int32_t async_code) { switch (async_code) { case AC_BUS_RESET: return (0); case AC_UNSOL_RESEL: return (0); case AC_SCSI_AEN: return (0); case AC_SENT_BDR: return (0); case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq)); case AC_PATH_DEREGISTERED: return (0); case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev)); case AC_LOST_DEVICE: return (0); case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings)); case AC_INQ_CHANGED: return (0); case AC_GETDEV_CHANGED: return (0); case AC_CONTRACT: return (sizeof(struct ac_contract)); case AC_ADVINFO_CHANGED: return (-1); case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio)); } return (0); } static int xpt_async_process_dev(struct cam_ed *device, void *arg) { union ccb *ccb = arg; struct cam_path *path = ccb->ccb_h.path; void *async_arg = ccb->casync.async_arg_ptr; u_int32_t async_code = ccb->casync.async_code; int relock; if (path->device != device && path->device->lun_id != CAM_LUN_WILDCARD && device->lun_id != CAM_LUN_WILDCARD) return (1); /* * The async callback could free the device. * If it is a broadcast async, it doesn't hold * device reference, so take our own reference. */ xpt_acquire_device(device); /* * If async for specific device is to be delivered to * the wildcard client, take the specific device lock. * XXX: We may need a way for client to specify it. 
*/ if ((device->lun_id == CAM_LUN_WILDCARD && path->device->lun_id != CAM_LUN_WILDCARD) || (device->target->target_id == CAM_TARGET_WILDCARD && path->target->target_id != CAM_TARGET_WILDCARD) || (device->target->bus->path_id == CAM_BUS_WILDCARD && path->target->bus->path_id != CAM_BUS_WILDCARD)) { mtx_unlock(&device->device_mtx); xpt_path_lock(path); relock = 1; } else relock = 0; (*(device->target->bus->xport->ops->async))(async_code, device->target->bus, device->target, device, async_arg); xpt_async_bcast(&device->asyncs, async_code, path, async_arg); if (relock) { xpt_path_unlock(path); mtx_lock(&device->device_mtx); } xpt_release_device(device); return (1); } static int xpt_async_process_tgt(struct cam_et *target, void *arg) { union ccb *ccb = arg; struct cam_path *path = ccb->ccb_h.path; if (path->target != target && path->target->target_id != CAM_TARGET_WILDCARD && target->target_id != CAM_TARGET_WILDCARD) return (1); if (ccb->casync.async_code == AC_SENT_BDR) { /* Update our notion of when the last reset occurred */ microtime(&target->last_reset); } return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb)); } static void xpt_async_process(struct cam_periph *periph, union ccb *ccb) { struct cam_eb *bus; struct cam_path *path; void *async_arg; u_int32_t async_code; path = ccb->ccb_h.path; async_code = ccb->casync.async_code; async_arg = ccb->casync.async_arg_ptr; CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, ("xpt_async(%s)\n", xpt_async_string(async_code))); bus = path->bus; if (async_code == AC_BUS_RESET) { /* Update our notion of when the last reset occurred */ microtime(&bus->last_reset); } xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb); /* * If this wasn't a fully wildcarded async, tell all * clients that want all async events. */ if (bus != xpt_periph->path->bus) { xpt_path_lock(xpt_periph->path); xpt_async_process_dev(xpt_periph->path->device, ccb); xpt_path_unlock(xpt_periph->path); } if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) xpt_release_devq(path, 1, TRUE); else xpt_release_simq(path->bus->sim, TRUE); if (ccb->casync.async_arg_size > 0) free(async_arg, M_CAMXPT); xpt_free_path(path); xpt_free_ccb(ccb); } static void xpt_async_bcast(struct async_list *async_head, u_int32_t async_code, struct cam_path *path, void *async_arg) { struct async_node *cur_entry; struct mtx *mtx; cur_entry = SLIST_FIRST(async_head); while (cur_entry != NULL) { struct async_node *next_entry; /* * Grab the next list entry before we call the current * entry's callback. This is because the callback function * can delete its async callback entry. */ next_entry = SLIST_NEXT(cur_entry, links); if ((cur_entry->event_enable & async_code) != 0) { mtx = cur_entry->event_lock ? 
path->device->sim->mtx : NULL; if (mtx) mtx_lock(mtx); cur_entry->callback(cur_entry->callback_arg, async_code, path, async_arg); if (mtx) mtx_unlock(mtx); } cur_entry = next_entry; } } void xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) { union ccb *ccb; int size; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { xpt_print(path, "Can't allocate CCB to send %s\n", xpt_async_string(async_code)); return; } if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) { xpt_print(path, "Can't allocate path to send %s\n", xpt_async_string(async_code)); xpt_free_ccb(ccb); return; } ccb->ccb_h.path->periph = NULL; ccb->ccb_h.func_code = XPT_ASYNC; ccb->ccb_h.cbfcnp = xpt_async_process; ccb->ccb_h.flags |= CAM_UNLOCKED; ccb->casync.async_code = async_code; ccb->casync.async_arg_size = 0; size = xpt_async_size(async_code); CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_async: func %#x %s aync_code %d %s\n", ccb->ccb_h.func_code, xpt_action_name(ccb->ccb_h.func_code), async_code, xpt_async_string(async_code))); if (size > 0 && async_arg != NULL) { ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT); if (ccb->casync.async_arg_ptr == NULL) { xpt_print(path, "Can't allocate argument to send %s\n", xpt_async_string(async_code)); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } memcpy(ccb->casync.async_arg_ptr, async_arg, size); ccb->casync.async_arg_size = size; } else if (size < 0) { ccb->casync.async_arg_ptr = async_arg; ccb->casync.async_arg_size = size; } if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) xpt_freeze_devq(path, 1); else xpt_freeze_simq(path->bus->sim, 1); xpt_done(ccb); } static void xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg) { /* * We only need to handle events for real devices. */ if (target->target_id == CAM_TARGET_WILDCARD || device->lun_id == CAM_LUN_WILDCARD) return; printf("%s called\n", __func__); } static uint32_t xpt_freeze_devq_device(struct cam_ed *dev, u_int count) { struct cam_devq *devq; uint32_t freeze; devq = dev->sim->devq; mtx_assert(&devq->send_mtx, MA_OWNED); CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_freeze_devq_device(%d) %u->%u\n", count, dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count)); freeze = (dev->ccbq.queue.qfrozen_cnt += count); /* Remove frozen device from sendq. 
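 * The freeze count is cumulative: every xpt_freeze_devq() call, and
 * every CCB completed with CAM_DEV_QFRZN set, must eventually be
 * balanced by a matching xpt_release_devq() before the device can be
 * scheduled again.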
*/ if (device_is_queued(dev)) camq_remove(&devq->send_queue, dev->devq_entry.index); return (freeze); } u_int32_t xpt_freeze_devq(struct cam_path *path, u_int count) { struct cam_ed *dev = path->device; struct cam_devq *devq; uint32_t freeze; devq = dev->sim->devq; mtx_lock(&devq->send_mtx); CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count)); freeze = xpt_freeze_devq_device(dev, count); mtx_unlock(&devq->send_mtx); return (freeze); } u_int32_t xpt_freeze_simq(struct cam_sim *sim, u_int count) { struct cam_devq *devq; uint32_t freeze; devq = sim->devq; mtx_lock(&devq->send_mtx); freeze = (devq->send_queue.qfrozen_cnt += count); mtx_unlock(&devq->send_mtx); return (freeze); } static void xpt_release_devq_timeout(void *arg) { struct cam_ed *dev; struct cam_devq *devq; dev = (struct cam_ed *)arg; CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n")); devq = dev->sim->devq; mtx_assert(&devq->send_mtx, MA_OWNED); if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE)) xpt_run_devq(devq); } void xpt_release_devq(struct cam_path *path, u_int count, int run_queue) { struct cam_ed *dev; struct cam_devq *devq; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n", count, run_queue)); dev = path->device; devq = dev->sim->devq; mtx_lock(&devq->send_mtx); if (xpt_release_devq_device(dev, count, run_queue)) xpt_run_devq(dev->sim->devq); mtx_unlock(&devq->send_mtx); } static int xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) { mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED); CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue, dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count)); if (count > dev->ccbq.queue.qfrozen_cnt) { #ifdef INVARIANTS printf("xpt_release_devq(): requested %u > present %u\n", count, dev->ccbq.queue.qfrozen_cnt); #endif count = dev->ccbq.queue.qfrozen_cnt; } dev->ccbq.queue.qfrozen_cnt -= count; if (dev->ccbq.queue.qfrozen_cnt == 0) { /* * No longer need to wait for a successful * command completion. */ dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; /* * Remove any timeouts that might be scheduled * to release this queue. */ if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { callout_stop(&dev->callout); dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; } /* * Now that we are unfrozen schedule the * device so any pending transactions are * run. */ xpt_schedule_devq(dev->sim->devq, dev); } else run_queue = 0; return (run_queue); } void xpt_release_simq(struct cam_sim *sim, int run_queue) { struct cam_devq *devq; devq = sim->devq; mtx_lock(&devq->send_mtx); if (devq->send_queue.qfrozen_cnt <= 0) { #ifdef INVARIANTS printf("xpt_release_simq: requested 1 > present %u\n", devq->send_queue.qfrozen_cnt); #endif } else devq->send_queue.qfrozen_cnt--; if (devq->send_queue.qfrozen_cnt == 0) { /* * If there is a timeout scheduled to release this * sim queue, remove it. The queue frozen count is * already at 0. */ if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){ callout_stop(&sim->callout); sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING; } if (run_queue) { /* * Now that we are unfrozen run the send queue. */ xpt_run_devq(sim->devq); } } mtx_unlock(&devq->send_mtx); } /* * XXX Appears to be unused. 
*/ static void xpt_release_simq_timeout(void *arg) { struct cam_sim *sim; sim = (struct cam_sim *)arg; xpt_release_simq(sim, /* run_queue */ TRUE); } void xpt_done(union ccb *done_ccb) { struct cam_doneq *queue; int run, hash; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (done_ccb->ccb_h.func_code == XPT_SCSI_IO && done_ccb->csio.bio != NULL) biotrack(done_ccb->csio.bio, __func__); #endif CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done: func= %#x %s status %#x\n", done_ccb->ccb_h.func_code, xpt_action_name(done_ccb->ccb_h.func_code), done_ccb->ccb_h.status)); if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) return; /* Store the time the ccb was in the sim */ done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data); hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id + done_ccb->ccb_h.target_lun) % cam_num_doneqs; queue = &cam_doneqs[hash]; mtx_lock(&queue->cam_doneq_mtx); run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq)); STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe); done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; mtx_unlock(&queue->cam_doneq_mtx); if (run) wakeup(&queue->cam_doneq); } void xpt_done_direct(union ccb *done_ccb) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status)); if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) return; /* Store the time the ccb was in the sim */ done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data); xpt_done_process(&done_ccb->ccb_h); } union ccb * xpt_alloc_ccb() { union ccb *new_ccb; new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); return (new_ccb); } union ccb * xpt_alloc_ccb_nowait() { union ccb *new_ccb; new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); return (new_ccb); } void xpt_free_ccb(union ccb *free_ccb) { free(free_ccb, M_CAMCCB); } /* Private XPT functions */ /* * Get a CAM control block for the caller. Charge the structure to the device * referenced by the path. If we don't have sufficient resources to allocate * more ccbs, we return NULL. 
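 *
 * These allocators are internal to the XPT.  Peripheral drivers get
 * their CCBs either from the periph_start() callback (by way of
 * xpt_schedule()) or synchronously, for example:
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *
 * and return them with xpt_release_ccb() when the I/O completes.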
*/ static union ccb * xpt_get_ccb_nowait(struct cam_periph *periph) { union ccb *new_ccb; new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); if (new_ccb == NULL) return (NULL); periph->periph_allocated++; cam_ccbq_take_opening(&periph->path->device->ccbq); return (new_ccb); } static union ccb * xpt_get_ccb(struct cam_periph *periph) { union ccb *new_ccb; cam_periph_unlock(periph); new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); cam_periph_lock(periph); periph->periph_allocated++; cam_ccbq_take_opening(&periph->path->device->ccbq); return (new_ccb); } union ccb * cam_periph_getccb(struct cam_periph *periph, u_int32_t priority) { struct ccb_hdr *ccb_h; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n")); cam_periph_assert(periph, MA_OWNED); while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL || ccb_h->pinfo.priority != priority) { if (priority < periph->immediate_priority) { periph->immediate_priority = priority; xpt_run_allocq(periph, 0); } else cam_periph_sleep(periph, &periph->ccb_list, PRIBIO, "cgticb", 0); } SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); return ((union ccb *)ccb_h); } static void xpt_acquire_bus(struct cam_eb *bus) { xpt_lock_buses(); bus->refcount++; xpt_unlock_buses(); } static void xpt_release_bus(struct cam_eb *bus) { xpt_lock_buses(); KASSERT(bus->refcount >= 1, ("bus->refcount >= 1")); if (--bus->refcount > 0) { xpt_unlock_buses(); return; } TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); xsoftc.bus_generation++; xpt_unlock_buses(); KASSERT(TAILQ_EMPTY(&bus->et_entries), ("destroying bus, but target list is not empty")); cam_sim_release(bus->sim); mtx_destroy(&bus->eb_mtx); free(bus, M_CAMXPT); } static struct cam_et * xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) { struct cam_et *cur_target, *target; mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); mtx_assert(&bus->eb_mtx, MA_OWNED); target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT|M_ZERO); if (target == NULL) return (NULL); TAILQ_INIT(&target->ed_entries); target->bus = bus; target->target_id = target_id; target->refcount = 1; target->generation = 0; target->luns = NULL; mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF); timevalclear(&target->last_reset); /* * Hold a reference to our parent bus so it * will not go away before we do. 
*/ bus->refcount++; /* Insertion sort into our bus's target list */ cur_target = TAILQ_FIRST(&bus->et_entries); while (cur_target != NULL && cur_target->target_id < target_id) cur_target = TAILQ_NEXT(cur_target, links); if (cur_target != NULL) { TAILQ_INSERT_BEFORE(cur_target, target, links); } else { TAILQ_INSERT_TAIL(&bus->et_entries, target, links); } bus->generation++; return (target); } static void xpt_acquire_target(struct cam_et *target) { struct cam_eb *bus = target->bus; mtx_lock(&bus->eb_mtx); target->refcount++; mtx_unlock(&bus->eb_mtx); } static void xpt_release_target(struct cam_et *target) { struct cam_eb *bus = target->bus; mtx_lock(&bus->eb_mtx); if (--target->refcount > 0) { mtx_unlock(&bus->eb_mtx); return; } TAILQ_REMOVE(&bus->et_entries, target, links); bus->generation++; mtx_unlock(&bus->eb_mtx); KASSERT(TAILQ_EMPTY(&target->ed_entries), ("destroying target, but device list is not empty")); xpt_release_bus(bus); mtx_destroy(&target->luns_mtx); if (target->luns) free(target->luns, M_CAMXPT); free(target, M_CAMXPT); } static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct cam_ed *device; device = xpt_alloc_device(bus, target, lun_id); if (device == NULL) return (NULL); device->mintags = 1; device->maxtags = 1; return (device); } static void xpt_destroy_device(void *context, int pending) { struct cam_ed *device = context; mtx_lock(&device->device_mtx); mtx_destroy(&device->device_mtx); free(device, M_CAMDEV); } struct cam_ed * xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct cam_ed *cur_device, *device; struct cam_devq *devq; cam_status status; mtx_assert(&bus->eb_mtx, MA_OWNED); /* Make space for us in the device queue on our bus */ devq = bus->sim->devq; mtx_lock(&devq->send_mtx); status = cam_devq_resize(devq, devq->send_queue.array_size + 1); mtx_unlock(&devq->send_mtx); if (status != CAM_REQ_CMP) return (NULL); device = (struct cam_ed *)malloc(sizeof(*device), M_CAMDEV, M_NOWAIT|M_ZERO); if (device == NULL) return (NULL); cam_init_pinfo(&device->devq_entry); device->target = target; device->lun_id = lun_id; device->sim = bus->sim; if (cam_ccbq_init(&device->ccbq, bus->sim->max_dev_openings) != 0) { free(device, M_CAMDEV); return (NULL); } SLIST_INIT(&device->asyncs); SLIST_INIT(&device->periphs); device->generation = 0; device->flags = CAM_DEV_UNCONFIGURED; device->tag_delay_count = 0; device->tag_saved_openings = 0; device->refcount = 1; mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF); callout_init_mtx(&device->callout, &devq->send_mtx, 0); TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device); /* * Hold a reference to our parent bus so it * will not go away before we do. 
*/ target->refcount++; cur_device = TAILQ_FIRST(&target->ed_entries); while (cur_device != NULL && cur_device->lun_id < lun_id) cur_device = TAILQ_NEXT(cur_device, links); if (cur_device != NULL) TAILQ_INSERT_BEFORE(cur_device, device, links); else TAILQ_INSERT_TAIL(&target->ed_entries, device, links); target->generation++; return (device); } void xpt_acquire_device(struct cam_ed *device) { struct cam_eb *bus = device->target->bus; mtx_lock(&bus->eb_mtx); device->refcount++; mtx_unlock(&bus->eb_mtx); } void xpt_release_device(struct cam_ed *device) { struct cam_eb *bus = device->target->bus; struct cam_devq *devq; mtx_lock(&bus->eb_mtx); if (--device->refcount > 0) { mtx_unlock(&bus->eb_mtx); return; } TAILQ_REMOVE(&device->target->ed_entries, device,links); device->target->generation++; mtx_unlock(&bus->eb_mtx); /* Release our slot in the devq */ devq = bus->sim->devq; mtx_lock(&devq->send_mtx); cam_devq_resize(devq, devq->send_queue.array_size - 1); mtx_unlock(&devq->send_mtx); KASSERT(SLIST_EMPTY(&device->periphs), ("destroying device, but periphs list is not empty")); KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX, ("destroying device while still queued for ccbs")); if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) callout_stop(&device->callout); xpt_release_target(device->target); cam_ccbq_fini(&device->ccbq); /* * Free allocated memory. free(9) does nothing if the * supplied pointer is NULL, so it is safe to call without * checking. */ free(device->supported_vpds, M_CAMXPT); free(device->device_id, M_CAMXPT); free(device->ext_inq, M_CAMXPT); free(device->physpath, M_CAMXPT); free(device->rcap_buf, M_CAMXPT); free(device->serial_num, M_CAMXPT); free(device->nvme_data, M_CAMXPT); free(device->nvme_cdata, M_CAMXPT); taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task); } u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) { int result; struct cam_ed *dev; dev = path->device; mtx_lock(&dev->sim->devq->send_mtx); result = cam_ccbq_resize(&dev->ccbq, newopenings); mtx_unlock(&dev->sim->devq->send_mtx); if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || (dev->inq_flags & SID_CmdQue) != 0) dev->tag_saved_openings = newopenings; return (result); } static struct cam_eb * xpt_find_bus(path_id_t path_id) { struct cam_eb *bus; xpt_lock_buses(); for (bus = TAILQ_FIRST(&xsoftc.xpt_busses); bus != NULL; bus = TAILQ_NEXT(bus, links)) { if (bus->path_id == path_id) { bus->refcount++; break; } } xpt_unlock_buses(); return (bus); } static struct cam_et * xpt_find_target(struct cam_eb *bus, target_id_t target_id) { struct cam_et *target; mtx_assert(&bus->eb_mtx, MA_OWNED); for (target = TAILQ_FIRST(&bus->et_entries); target != NULL; target = TAILQ_NEXT(target, links)) { if (target->target_id == target_id) { target->refcount++; break; } } return (target); } static struct cam_ed * xpt_find_device(struct cam_et *target, lun_id_t lun_id) { struct cam_ed *device; mtx_assert(&target->bus->eb_mtx, MA_OWNED); for (device = TAILQ_FIRST(&target->ed_entries); device != NULL; device = TAILQ_NEXT(device, links)) { if (device->lun_id == lun_id) { device->refcount++; break; } } return (device); } void xpt_start_tags(struct cam_path *path) { struct ccb_relsim crs; struct cam_ed *device; struct cam_sim *sim; int newopenings; device = path->device; sim = path->bus->sim; device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; xpt_freeze_devq(path, /*count*/1); device->inq_flags |= SID_CmdQue; if (device->tag_saved_openings != 0) newopenings = device->tag_saved_openings; else newopenings = 
min(device->maxtags, sim->max_tagged_dev_openings); xpt_dev_ccbq_resize(path, newopenings); xpt_async(AC_GETDEV_CHANGED, path, NULL); xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; crs.openings = crs.release_timeout = crs.qfrozen_cnt = 0; xpt_action((union ccb *)&crs); } void xpt_stop_tags(struct cam_path *path) { struct ccb_relsim crs; struct cam_ed *device; struct cam_sim *sim; device = path->device; sim = path->bus->sim; device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; device->tag_delay_count = 0; xpt_freeze_devq(path, /*count*/1); device->inq_flags &= ~SID_CmdQue; xpt_dev_ccbq_resize(path, sim->max_dev_openings); xpt_async(AC_GETDEV_CHANGED, path, NULL); xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; crs.openings = crs.release_timeout = crs.qfrozen_cnt = 0; xpt_action((union ccb *)&crs); } /* * Assume all possible buses are detected by this time, so allow boot * as soon as they all are scanned. */ static void xpt_boot_delay(void *arg) { xpt_release_boot(); } /* * Now that all config hooks have completed, start boot_delay timer, * waiting for possibly still undetected buses (USB) to appear. */ static void xpt_ch_done(void *arg) { callout_init(&xsoftc.boot_callout, 1); callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0, xpt_boot_delay, NULL, 0); } SYSINIT(xpt_hw_delay, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_ANY, xpt_ch_done, NULL); /* * Now that interrupts are enabled, go find our devices */ static void xpt_config(void *arg) { if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq")) printf("xpt_config: failed to create taskqueue thread.\n"); /* Setup debugging path */ if (cam_dflags != CAM_DEBUG_NONE) { if (xpt_create_path(&cam_dpath, NULL, CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN) != CAM_REQ_CMP) { printf("xpt_config: xpt_create_path() failed for debug" " target %d:%d:%d, debugging disabled\n", CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); cam_dflags = CAM_DEBUG_NONE; } } else cam_dpath = NULL; periphdriver_init(1); xpt_hold_boot(); /* Fire up rescan thread. */ if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0, "cam", "scanner")) { printf("xpt_config: failed to create rescan thread.\n"); } } void xpt_hold_boot_locked(void) { if (xsoftc.buses_to_config++ == 0) root_mount_hold_token("CAM", &xsoftc.xpt_rootmount); } void xpt_hold_boot(void) { xpt_lock_buses(); xpt_hold_boot_locked(); xpt_unlock_buses(); } void xpt_release_boot(void) { xpt_lock_buses(); if (--xsoftc.buses_to_config == 0) { if (xsoftc.buses_config_done == 0) { xsoftc.buses_config_done = 1; xsoftc.buses_to_config++; TASK_INIT(&xsoftc.boot_task, 0, xpt_finishconfig_task, NULL); taskqueue_enqueue(taskqueue_thread, &xsoftc.boot_task); } else root_mount_rel(&xsoftc.xpt_rootmount); } xpt_unlock_buses(); } /* * If the given device only has one peripheral attached to it, and if that * peripheral is the passthrough driver, announce it. This insures that the * user sees some sort of announcement for every peripheral in their system. 
*/ static int xptpassannouncefunc(struct cam_ed *device, void *arg) { struct cam_periph *periph; int i; for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; periph = SLIST_NEXT(periph, periph_links), i++); periph = SLIST_FIRST(&device->periphs); if ((i == 1) && (strncmp(periph->periph_name, "pass", 4) == 0)) xpt_announce_periph(periph, NULL); return(1); } static void xpt_finishconfig_task(void *context, int pending) { periphdriver_init(2); /* * Check for devices with no "standard" peripheral driver * attached. For any devices like that, announce the * passthrough driver so the user will see something. */ if (!bootverbose) xpt_for_all_devices(xptpassannouncefunc, NULL); xpt_release_boot(); } cam_status xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, struct cam_path *path) { struct ccb_setasync csa; cam_status status; int xptpath = 0; if (path == NULL) { status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) return (status); xpt_path_lock(path); xptpath = 1; } xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = event; csa.callback = cbfunc; csa.callback_arg = cbarg; xpt_action((union ccb *)&csa); status = csa.ccb_h.status; CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE, ("xpt_register_async: func %p\n", cbfunc)); if (xptpath) { xpt_path_unlock(path); xpt_free_path(path); } if ((status == CAM_REQ_CMP) && (csa.event_enable & AC_FOUND_DEVICE)) { /* * Get this peripheral up to date with all * the currently existing devices. */ xpt_for_all_devices(xptsetasyncfunc, &csa); } if ((status == CAM_REQ_CMP) && (csa.event_enable & AC_PATH_REGISTERED)) { /* * Get this peripheral up to date with all * the currently existing buses. */ xpt_for_all_busses(xptsetasyncbusfunc, &csa); } return (status); } static void xptaction(struct cam_sim *sim, union ccb *work_ccb) { CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); switch (work_ccb->ccb_h.func_code) { /* Common cases first */ case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi; cpi = &work_ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = 0; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 0; cpi->max_lun = 0; cpi->initiator_id = 0; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "", HBA_IDLEN); strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); cpi->unit_number = sim->unit_number; cpi->bus_id = sim->bus_id; cpi->base_transfer_speed = 0; cpi->protocol = PROTO_UNSPECIFIED; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; cpi->transport = XPORT_UNSPECIFIED; cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(work_ccb); break; } default: work_ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(work_ccb); break; } } /* * The xpt as a "controller" has no interrupt sources, so polling * is a no-op. 
*/ static void xptpoll(struct cam_sim *sim) { } void xpt_lock_buses(void) { mtx_lock(&xsoftc.xpt_topo_lock); } void xpt_unlock_buses(void) { mtx_unlock(&xsoftc.xpt_topo_lock); } struct mtx * xpt_path_mtx(struct cam_path *path) { return (&path->device->device_mtx); } static void xpt_done_process(struct ccb_hdr *ccb_h) { struct cam_sim *sim = NULL; struct cam_devq *devq = NULL; struct mtx *mtx = NULL; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) struct ccb_scsiio *csio; if (ccb_h->func_code == XPT_SCSI_IO) { csio = &((union ccb *)ccb_h)->csio; if (csio->bio != NULL) biotrack(csio->bio, __func__); } #endif if (ccb_h->flags & CAM_HIGH_POWER) { struct highpowerlist *hphead; struct cam_ed *device; mtx_lock(&xsoftc.xpt_highpower_lock); hphead = &xsoftc.highpowerq; device = STAILQ_FIRST(hphead); /* * Increment the count since this command is done. */ xsoftc.num_highpower++; /* * Any high powered commands queued up? */ if (device != NULL) { STAILQ_REMOVE_HEAD(hphead, highpowerq_entry); mtx_unlock(&xsoftc.xpt_highpower_lock); mtx_lock(&device->sim->devq->send_mtx); xpt_release_devq_device(device, /*count*/1, /*runqueue*/TRUE); mtx_unlock(&device->sim->devq->send_mtx); } else mtx_unlock(&xsoftc.xpt_highpower_lock); } /* * Insulate against a race where the periph is destroyed but CCBs are * still not all processed. This shouldn't happen, but allows us better * bug diagnostic when it does. */ if (ccb_h->path->bus) sim = ccb_h->path->bus->sim; if (ccb_h->status & CAM_RELEASE_SIMQ) { KASSERT(sim, ("sim missing for CAM_RELEASE_SIMQ request")); xpt_release_simq(sim, /*run_queue*/FALSE); ccb_h->status &= ~CAM_RELEASE_SIMQ; } if ((ccb_h->flags & CAM_DEV_QFRZDIS) && (ccb_h->status & CAM_DEV_QFRZN)) { xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); ccb_h->status &= ~CAM_DEV_QFRZN; } if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { struct cam_ed *dev = ccb_h->path->device; if (sim) devq = sim->devq; KASSERT(devq, ("Periph disappeared with request pending.")); mtx_lock(&devq->send_mtx); devq->send_active--; devq->send_openings++; cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 && (dev->ccbq.dev_active == 0))) { dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; xpt_release_devq_device(dev, /*count*/1, /*run_queue*/FALSE); } if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; xpt_release_devq_device(dev, /*count*/1, /*run_queue*/FALSE); } if (!device_is_queued(dev)) (void)xpt_schedule_devq(devq, dev); xpt_run_devq(devq); mtx_unlock(&devq->send_mtx); if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) { mtx = xpt_path_mtx(ccb_h->path); mtx_lock(mtx); if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 && (--dev->tag_delay_count == 0)) xpt_start_tags(ccb_h->path); } } if ((ccb_h->flags & CAM_UNLOCKED) == 0) { if (mtx == NULL) { mtx = xpt_path_mtx(ccb_h->path); mtx_lock(mtx); } } else { if (mtx != NULL) { mtx_unlock(mtx); mtx = NULL; } } /* Call the peripheral driver's callback */ ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); if (mtx != NULL) mtx_unlock(mtx); } void xpt_done_td(void *arg) { struct cam_doneq *queue = arg; struct ccb_hdr *ccb_h; STAILQ_HEAD(, ccb_hdr) doneq; STAILQ_INIT(&doneq); mtx_lock(&queue->cam_doneq_mtx); while (1) { while (STAILQ_EMPTY(&queue->cam_doneq)) { queue->cam_doneq_sleep = 1; msleep(&queue->cam_doneq, &queue->cam_doneq_mtx, PRIBIO, "-", 0); queue->cam_doneq_sleep = 0; } 
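		/*
		 * Move everything queued so far onto a local list in one
		 * step so the queue mutex can be dropped, then process the
		 * batch with sleeping disallowed.
		 */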
STAILQ_CONCAT(&doneq, &queue->cam_doneq); mtx_unlock(&queue->cam_doneq_mtx); THREAD_NO_SLEEPING(); while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) { STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe); xpt_done_process(ccb_h); } THREAD_SLEEPING_OK(); mtx_lock(&queue->cam_doneq_mtx); } } static void camisr_runqueue(void) { struct ccb_hdr *ccb_h; struct cam_doneq *queue; int i; /* Process global queues. */ for (i = 0; i < cam_num_doneqs; i++) { queue = &cam_doneqs[i]; mtx_lock(&queue->cam_doneq_mtx); while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) { STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe); mtx_unlock(&queue->cam_doneq_mtx); xpt_done_process(ccb_h); mtx_lock(&queue->cam_doneq_mtx); } mtx_unlock(&queue->cam_doneq_mtx); } } struct kv { uint32_t v; const char *name; }; static struct kv map[] = { { XPT_NOOP, "XPT_NOOP" }, { XPT_SCSI_IO, "XPT_SCSI_IO" }, { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" }, { XPT_GDEVLIST, "XPT_GDEVLIST" }, { XPT_PATH_INQ, "XPT_PATH_INQ" }, { XPT_REL_SIMQ, "XPT_REL_SIMQ" }, { XPT_SASYNC_CB, "XPT_SASYNC_CB" }, { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" }, { XPT_SCAN_BUS, "XPT_SCAN_BUS" }, { XPT_DEV_MATCH, "XPT_DEV_MATCH" }, { XPT_DEBUG, "XPT_DEBUG" }, { XPT_PATH_STATS, "XPT_PATH_STATS" }, { XPT_GDEV_STATS, "XPT_GDEV_STATS" }, { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" }, { XPT_ASYNC, "XPT_ASYNC" }, { XPT_ABORT, "XPT_ABORT" }, { XPT_RESET_BUS, "XPT_RESET_BUS" }, { XPT_RESET_DEV, "XPT_RESET_DEV" }, { XPT_TERM_IO, "XPT_TERM_IO" }, { XPT_SCAN_LUN, "XPT_SCAN_LUN" }, { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" }, { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" }, { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" }, { XPT_ATA_IO, "XPT_ATA_IO" }, { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" }, { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" }, { XPT_NVME_IO, "XPT_NVME_IO" }, { XPT_MMC_IO, "XPT_MMC_IO" }, { XPT_SMP_IO, "XPT_SMP_IO" }, { XPT_SCAN_TGT, "XPT_SCAN_TGT" }, { XPT_NVME_ADMIN, "XPT_NVME_ADMIN" }, { XPT_ENG_INQ, "XPT_ENG_INQ" }, { XPT_ENG_EXEC, "XPT_ENG_EXEC" }, { XPT_EN_LUN, "XPT_EN_LUN" }, { XPT_TARGET_IO, "XPT_TARGET_IO" }, { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" }, { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" }, { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" }, { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" }, { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" }, { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" }, { 0, 0 } }; const char * xpt_action_name(uint32_t action) { static char buffer[32]; /* Only for unknown messages -- racy */ struct kv *walker = map; while (walker->name != NULL) { if (walker->v == action) return (walker->name); walker++; } snprintf(buffer, sizeof(buffer), "%#x", action); return (buffer); } Index: head/sys/cam/scsi/scsi_cd.c =================================================================== --- head/sys/cam/scsi/scsi_cd.c (revision 355600) +++ head/sys/cam/scsi/scsi_cd.c (revision 355601) @@ -1,4248 +1,4248 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Portions of this driver taken from the original FreeBSD cd driver. * Written by Julian Elischer (julian@tfs.com) * for TRW Financial Systems for use under the MACH(2.5) operating system. * * TRW Financial Systems, in accordance with their agreement with Carnegie * Mellon University, makes this software available to CMU to distribute * or use in any manner that they see fit as long as this message is kept with * the software. For this reason TFS also grants any other persons or * organisations permission to use or modify this software. * * TFS supplies this software to be publicly redistributed * on the understanding that TFS is not responsible for the correct * functioning of this software in any circumstances. * * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992 * * from: cd.c,v 1.83 1997/05/04 15:24:22 joerg Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_cd.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define LEADOUT 0xaa /* leadout toc entry */ struct cd_params { u_int32_t blksize; u_long disksize; }; typedef enum { CD_Q_NONE = 0x00, CD_Q_NO_TOUCH = 0x01, CD_Q_BCD_TRACKS = 0x02, CD_Q_10_BYTE_ONLY = 0x10, CD_Q_RETRY_BUSY = 0x40 } cd_quirks; #define CD_Q_BIT_STRING \ "\020" \ "\001NO_TOUCH" \ "\002BCD_TRACKS" \ "\00510_BYTE_ONLY" \ "\007RETRY_BUSY" typedef enum { CD_FLAG_INVALID = 0x0001, CD_FLAG_NEW_DISC = 0x0002, CD_FLAG_DISC_LOCKED = 0x0004, CD_FLAG_DISC_REMOVABLE = 0x0008, CD_FLAG_SAW_MEDIA = 0x0010, CD_FLAG_ACTIVE = 0x0080, CD_FLAG_SCHED_ON_COMP = 0x0100, CD_FLAG_RETRY_UA = 0x0200, CD_FLAG_VALID_MEDIA = 0x0400, CD_FLAG_VALID_TOC = 0x0800, CD_FLAG_SCTX_INIT = 0x1000, CD_FLAG_MEDIA_WAIT = 0x2000, CD_FLAG_MEDIA_SCAN_ACT = 0x4000 } cd_flags; typedef enum { CD_CCB_PROBE = 0x01, CD_CCB_BUFFER_IO = 0x02, CD_CCB_TUR = 0x03, CD_CCB_MEDIA_PREVENT = 0x04, CD_CCB_MEDIA_ALLOW = 0x05, CD_CCB_MEDIA_SIZE = 0x06, CD_CCB_MEDIA_TOC_HDR = 0x07, CD_CCB_MEDIA_TOC_FULL = 0x08, CD_CCB_MEDIA_TOC_LEAD = 0x09, CD_CCB_TYPE_MASK = 0x0F, CD_CCB_RETRY_UA = 0x10 } cd_ccb_state; #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct cd_tocdata { struct ioc_toc_header header; struct cd_toc_entry entries[100]; }; struct cd_toc_single { struct ioc_toc_header header; struct cd_toc_entry entry; }; typedef enum { CD_STATE_PROBE, CD_STATE_NORMAL, CD_STATE_MEDIA_PREVENT, CD_STATE_MEDIA_ALLOW, CD_STATE_MEDIA_SIZE, CD_STATE_MEDIA_TOC_HDR, CD_STATE_MEDIA_TOC_FULL, CD_STATE_MEDIA_TOC_LEAD } cd_state; struct cd_softc { cam_pinfo pinfo; cd_state state; volatile cd_flags flags; struct bio_queue_head bio_queue; LIST_HEAD(, ccb_hdr) pending_ccbs; 
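	/* Block size and capacity of the currently loaded medium. */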
struct cd_params params; union ccb saved_ccb; cd_quirks quirks; struct cam_periph *periph; int minimum_command_size; int outstanding_cmds; int tur; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; STAILQ_HEAD(, cd_mode_params) mode_queue; struct cd_tocdata toc; int toc_read_len; struct cd_toc_single leadout; struct disk *disk; struct callout mediapoll_c; #define CD_ANNOUNCETMP_SZ 120 char announce_temp[CD_ANNOUNCETMP_SZ]; #define CD_ANNOUNCE_SZ 400 char announce_buf[CD_ANNOUNCE_SZ]; }; struct cd_page_sizes { int page; int page_size; }; static struct cd_page_sizes cd_page_size_table[] = { { AUDIO_PAGE, sizeof(struct cd_audio_page)} }; struct cd_quirk_entry { struct scsi_inquiry_pattern inq_pat; cd_quirks quirks; }; /* * NOTE ON 10_BYTE_ONLY quirks: Any 10_BYTE_ONLY quirks MUST be because * your device hangs when it gets a 10 byte command. Adding a quirk just * to get rid of the informative diagnostic message is not acceptable. All * 10_BYTE_ONLY quirks must be documented in full in a PR (which should be * referenced in a comment along with the quirk) , and must be approved by * ken@FreeBSD.org. Any quirks added that don't adhere to this policy may * be removed until the submitter can explain why they are needed. * 10_BYTE_ONLY quirks will be removed (as they will no longer be necessary) * when the CAM_NEW_TRAN_CODE work is done. */ static struct cd_quirk_entry cd_quirk_table[] = { { { T_CDROM, SIP_MEDIA_REMOVABLE, "CHINON", "CD-ROM CDS-535","*"}, /* quirks */ CD_Q_BCD_TRACKS }, { /* * VMware returns BUSY status when storage has transient * connectivity problems, so better wait. */ {T_CDROM, SIP_MEDIA_REMOVABLE, "NECVMWar", "VMware IDE CDR10", "*"}, /*quirks*/ CD_Q_RETRY_BUSY } }; #ifdef COMPAT_FREEBSD32 struct ioc_read_toc_entry32 { u_char address_format; u_char starting_track; u_short data_len; uint32_t data; /* (struct cd_toc_entry *) */ }; #define CDIOREADTOCENTRYS_32 \ _IOC_NEWTYPE(CDIOREADTOCENTRYS, struct ioc_read_toc_entry32) #endif static disk_open_t cdopen; static disk_close_t cdclose; static disk_ioctl_t cdioctl; static disk_strategy_t cdstrategy; static periph_init_t cdinit; static periph_ctor_t cdregister; static periph_dtor_t cdcleanup; static periph_start_t cdstart; static periph_oninv_t cdoninvalidate; static void cdasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static int cdcmdsizesysctl(SYSCTL_HANDLER_ARGS); static int cdrunccb(union ccb *ccb, int (*error_routine)(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags), u_int32_t cam_flags, u_int32_t sense_flags); static void cddone(struct cam_periph *periph, union ccb *start_ccb); static union cd_pages *cdgetpage(struct cd_mode_params *mode_params); static int cdgetpagesize(int page_num); static void cdprevent(struct cam_periph *periph, int action); static void cdmediaprobedone(struct cam_periph *periph); static int cdcheckmedia(struct cam_periph *periph, int do_wait); #if 0 static int cdsize(struct cam_periph *periph, u_int32_t *size); #endif static int cd6byteworkaround(union ccb *ccb); static int cderror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int cdreadtoc(struct cam_periph *periph, u_int32_t mode, u_int32_t start, u_int8_t *data, u_int32_t len, u_int32_t sense_flags); static int cdgetmode(struct cam_periph *periph, struct cd_mode_params *data, u_int32_t page); static int cdsetmode(struct cam_periph *periph, struct cd_mode_params *data); static int cdplay(struct cam_periph *periph, u_int32_t blk, u_int32_t len); 
static int cdreadsubchannel(struct cam_periph *periph, u_int32_t mode, u_int32_t format, int track, struct cd_sub_channel_info *data, u_int32_t len); static int cdplaymsf(struct cam_periph *periph, u_int32_t startm, u_int32_t starts, u_int32_t startf, u_int32_t endm, u_int32_t ends, u_int32_t endf); static int cdplaytracks(struct cam_periph *periph, u_int32_t strack, u_int32_t sindex, u_int32_t etrack, u_int32_t eindex); static int cdpause(struct cam_periph *periph, u_int32_t go); static int cdstopunit(struct cam_periph *periph, u_int32_t eject); static int cdstartunit(struct cam_periph *periph, int load); static int cdsetspeed(struct cam_periph *periph, u_int32_t rdspeed, u_int32_t wrspeed); static int cdreportkey(struct cam_periph *periph, struct dvd_authinfo *authinfo); static int cdsendkey(struct cam_periph *periph, struct dvd_authinfo *authinfo); static int cdreaddvdstructure(struct cam_periph *periph, struct dvd_struct *dvdstruct); -static timeout_t cdmediapoll; +static callout_func_t cdmediapoll; static struct periph_driver cddriver = { cdinit, "cd", TAILQ_HEAD_INITIALIZER(cddriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(cd, cddriver); #ifndef CD_DEFAULT_POLL_PERIOD #define CD_DEFAULT_POLL_PERIOD 3 #endif #ifndef CD_DEFAULT_RETRY #define CD_DEFAULT_RETRY 4 #endif #ifndef CD_DEFAULT_TIMEOUT #define CD_DEFAULT_TIMEOUT 30000 #endif static int cd_poll_period = CD_DEFAULT_POLL_PERIOD; static int cd_retry_count = CD_DEFAULT_RETRY; static int cd_timeout = CD_DEFAULT_TIMEOUT; static SYSCTL_NODE(_kern_cam, OID_AUTO, cd, CTLFLAG_RD, 0, "CAM CDROM driver"); SYSCTL_INT(_kern_cam_cd, OID_AUTO, poll_period, CTLFLAG_RWTUN, &cd_poll_period, 0, "Media polling period in seconds"); SYSCTL_INT(_kern_cam_cd, OID_AUTO, retry_count, CTLFLAG_RWTUN, &cd_retry_count, 0, "Normal I/O retry count"); SYSCTL_INT(_kern_cam_cd, OID_AUTO, timeout, CTLFLAG_RWTUN, &cd_timeout, 0, "Timeout, in us, for read operations"); static MALLOC_DEFINE(M_SCSICD, "scsi_cd", "scsi_cd buffers"); static void cdinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, cdasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("cd: Failed to attach master async callback " "due to status 0x%x!\n", status); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void cddiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; cam_periph_release(periph); } static void cdoninvalidate(struct cam_periph *periph) { struct cd_softc *softc; softc = (struct cd_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, cdasync, periph, periph->path); softc->flags |= CD_FLAG_INVALID; /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. 
*/ bioq_flush(&softc->bio_queue, NULL, ENXIO); disk_gone(softc->disk); } static void cdcleanup(struct cam_periph *periph) { struct cd_softc *softc; softc = (struct cd_softc *)periph->softc; cam_periph_unlock(periph); if ((softc->flags & CD_FLAG_SCTX_INIT) != 0 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) { xpt_print(periph->path, "can't remove sysctl context\n"); } callout_drain(&softc->mediapoll_c); disk_destroy(softc->disk); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void cdasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; struct cd_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data) != T_CDROM && SID_TYPE(&cgd->inq_data) != T_WORM) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(cdregister, cdoninvalidate, cdcleanup, cdstart, "cd", CAM_PERIPH_BIO, path, cdasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("cdasync: Unable to attach new device " "due to status 0x%x\n", status); break; } case AC_UNIT_ATTENTION: { union ccb *ccb; int error_code, sense_key, asc, ascq; softc = (struct cd_softc *)periph->softc; ccb = (union ccb *)arg; /* * Handle all media change UNIT ATTENTIONs except * our own, as they will be handled by cderror(). */ if (xpt_path_periph(ccb->ccb_h.path) != periph && scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (asc == 0x28 && ascq == 0x00) disk_media_changed(softc->disk, M_NOWAIT); } cam_periph_async(periph, code, path, arg); break; } case AC_SCSI_AEN: softc = (struct cd_softc *)periph->softc; if (softc->state == CD_STATE_NORMAL && !softc->tur) { if (cam_periph_acquire(periph) == 0) { softc->tur = 1; xpt_schedule(periph, CAM_PRIORITY_NORMAL); } } /* FALLTHROUGH */ case AC_SENT_BDR: case AC_BUS_RESET: { struct ccb_hdr *ccbh; softc = (struct cd_softc *)periph->softc; /* * Don't fail on the expected unit attention * that will occur. */ softc->flags |= CD_FLAG_RETRY_UA; LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) ccbh->ccb_state |= CD_CCB_RETRY_UA; /* FALLTHROUGH */ } default: cam_periph_async(periph, code, path, arg); break; } } static void cdsysctlinit(void *context, int pending) { struct cam_periph *periph; struct cd_softc *softc; char tmpstr[32], tmpstr2[16]; periph = (struct cam_periph *)context; if (cam_periph_acquire(periph) != 0) return; softc = (struct cd_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM CD unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= CD_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_cd), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) { printf("cdsysctlinit: unable to allocate sysctl tree\n"); cam_periph_release(periph); return; } /* * Now register the sysctl handler, so the user can change the value on * the fly.
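 *
 * A sketch of the resulting knob, assuming unit number 0: values written
 * via sysctl are clamped by cdcmdsizesysctl() below, so for example
 *
 *	sysctl kern.cam.cd.0.minimum_cmd_size=8   is stored as 10
 *	sysctl kern.cam.cd.0.minimum_cmd_size=4   is stored as 6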
*/ SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW, &softc->minimum_command_size, 0, cdcmdsizesysctl, "I", "Minimum CDB size"); cam_periph_release(periph); } /* * We have a handler function for this so we can check the values when the * user sets them, instead of every time we look at them. */ static int cdcmdsizesysctl(SYSCTL_HANDLER_ARGS) { int error, value; value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* * The only real values we can have here are 6 or 10. I don't * really forsee having 12 be an option at any time in the future. * So if the user sets something less than or equal to 6, we'll set * it to 6. If he sets something greater than 6, we'll set it to 10. * * I suppose we could just return an error here for the wrong values, * but I don't think it's necessary to do so, as long as we can * determine the user's intent without too much trouble. */ if (value < 6) value = 6; else if (value > 6) value = 10; *(int *)arg1 = value; return (0); } static cam_status cdregister(struct cam_periph *periph, void *arg) { struct cd_softc *softc; struct ccb_pathinq cpi; struct ccb_getdev *cgd; char tmpstr[80]; caddr_t match; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("cdregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct cd_softc *)malloc(sizeof(*softc),M_DEVBUF, M_NOWAIT | M_ZERO); if (softc == NULL) { printf("cdregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } LIST_INIT(&softc->pending_ccbs); STAILQ_INIT(&softc->mode_queue); softc->state = CD_STATE_PROBE; bioq_init(&softc->bio_queue); if (SID_IS_REMOVABLE(&cgd->inq_data)) softc->flags |= CD_FLAG_DISC_REMOVABLE; periph->softc = softc; softc->periph = periph; /* * See if this device has any quirks. */ match = cam_quirkmatch((caddr_t)&cgd->inq_data, (caddr_t)cd_quirk_table, nitems(cd_quirk_table), sizeof(*cd_quirk_table), scsi_inquiry_match); if (match != NULL) softc->quirks = ((struct cd_quirk_entry *)match)->quirks; else softc->quirks = CD_Q_NONE; /* Check if the SIM does not want 6 byte commands */ xpt_path_inq(&cpi, periph->path); if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE)) softc->quirks |= CD_Q_10_BYTE_ONLY; TASK_INIT(&softc->sysctl_task, 0, cdsysctlinit, periph); /* The default is 6 byte commands, unless quirked otherwise */ if (softc->quirks & CD_Q_10_BYTE_ONLY) softc->minimum_command_size = 10; else softc->minimum_command_size = 6; /* * Refcount and block open attempts until we are setup * Can't block */ (void)cam_periph_hold(periph, PRIBIO); cam_periph_unlock(periph); /* * Load the user's default, if any. */ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.cd.%d.minimum_cmd_size", periph->unit_number); TUNABLE_INT_FETCH(tmpstr, &softc->minimum_command_size); /* 6 and 10 are the only permissible values here. */ if (softc->minimum_command_size < 6) softc->minimum_command_size = 6; else if (softc->minimum_command_size > 6) softc->minimum_command_size = 10; /* * We need to register the statistics structure for this device, * but we don't have the blocksize yet for it. So, we register * the structure and indicate that we don't have the blocksize * yet. Unlike other SCSI peripheral drivers, we explicitly set * the device type here to be CDROM, rather than just ORing in * the device type. 
This is because this driver can attach to either * CDROM or WORM devices, and we want this peripheral driver to * show up in the devstat list as a CD peripheral driver, not a * WORM peripheral driver. WORM drives will also have the WORM * driver attached to them. */ softc->disk = disk_alloc(); softc->disk->d_devstat = devstat_new_entry("cd", periph->unit_number, 0, DEVSTAT_BS_UNAVAILABLE, DEVSTAT_TYPE_CDROM | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_CD); softc->disk->d_open = cdopen; softc->disk->d_close = cdclose; softc->disk->d_strategy = cdstrategy; softc->disk->d_gone = cddiskgonecb; softc->disk->d_ioctl = cdioctl; softc->disk->d_name = "cd"; cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor, sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr)); strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr)); cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)], cgd->inq_data.product, sizeof(cgd->inq_data.product), sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr)); softc->disk->d_unit = periph->unit_number; softc->disk->d_drv1 = periph; if (cpi.maxio == 0) softc->disk->d_maxsize = DFLTPHYS; /* traditional default */ else if (cpi.maxio > MAXPHYS) softc->disk->d_maxsize = MAXPHYS; /* for safety */ else softc->disk->d_maxsize = cpi.maxio; softc->disk->d_flags = 0; softc->disk->d_hba_vendor = cpi.hba_vendor; softc->disk->d_hba_device = cpi.hba_device; softc->disk->d_hba_subvendor = cpi.hba_subvendor; softc->disk->d_hba_subdevice = cpi.hba_subdevice; snprintf(softc->disk->d_attachment, sizeof(softc->disk->d_attachment), "%s%d", cpi.dev_name, cpi.unit_number); /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * cddiskgonecb()) telling us that our provider has been freed. */ if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); /* * Add an async callback so that we get * notified if this device goes away. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_SCSI_AEN | AC_UNIT_ATTENTION, cdasync, periph, periph->path); /* * Schedule periodic media polling events. */ callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); if ((softc->flags & CD_FLAG_DISC_REMOVABLE) && (cgd->inq_flags & SID_AEN) == 0 && cd_poll_period != 0) callout_reset(&softc->mediapoll_c, cd_poll_period * hz, cdmediapoll, periph); xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static int cdopen(struct disk *dp) { struct cam_periph *periph; struct cd_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; softc = (struct cd_softc *)periph->softc; if (cam_periph_acquire(periph) != 0) return(ENXIO); cam_periph_lock(periph); if (softc->flags & CD_FLAG_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(ENXIO); } if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("cdopen\n")); /* * Check for media, and set the appropriate flags. We don't bail * if we don't have media, but then we don't allow anything but the * CDIOCEJECT/CDIOCCLOSE ioctls if there is no media.
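 *
 * A rough sketch of what cdcheckmedia() kicks off here: the periph is
 * scheduled in CD_STATE_MEDIA_PREVENT and the completions in cddone()
 * then walk MEDIA_PREVENT -> MEDIA_SIZE -> MEDIA_TOC_HDR ->
 * MEDIA_TOC_FULL (-> MEDIA_TOC_LEAD if the drive did not return the
 * leadout) -> NORMAL, with cdmediaprobedone() waking up any do_wait
 * caller sleeping in cdcheckmedia().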
*/ cdcheckmedia(periph, /*do_wait*/ 1); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("leaving cdopen\n")); cam_periph_unhold(periph); cam_periph_unlock(periph); return (0); } static int cdclose(struct disk *dp) { struct cam_periph *periph; struct cd_softc *softc; periph = (struct cam_periph *)dp->d_drv1; softc = (struct cd_softc *)periph->softc; cam_periph_lock(periph); if (cam_periph_hold(periph, PRIBIO) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (0); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("cdclose\n")); if ((softc->flags & CD_FLAG_DISC_REMOVABLE) != 0) cdprevent(periph, PR_ALLOW); /* * Since we're closing this CD, mark the blocksize as unavailable. * It will be marked as available when the CD is opened again. */ softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE; /* * We'll check the media and toc again at the next open(). */ softc->flags &= ~(CD_FLAG_VALID_MEDIA|CD_FLAG_VALID_TOC); cam_periph_unhold(periph); cam_periph_release_locked(periph); cam_periph_unlock(periph); return (0); } static int cdrunccb(union ccb *ccb, int (*error_routine)(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags), u_int32_t cam_flags, u_int32_t sense_flags) { struct cd_softc *softc; struct cam_periph *periph; int error; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct cd_softc *)periph->softc; error = cam_periph_runccb(ccb, error_routine, cam_flags, sense_flags, softc->disk->d_devstat); return(error); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void cdstrategy(struct bio *bp) { struct cam_periph *periph; struct cd_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cdstrategy(%p)\n", bp)); softc = (struct cd_softc *)periph->softc; /* * If the device has been made invalid, error out */ if ((softc->flags & CD_FLAG_INVALID)) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } /* * Place it in the queue of disk activities for this disk */ bioq_disksort(&softc->bio_queue, bp); /* * If we don't know that we have valid media, schedule the media * check first. The I/O will get executed after the media check. */ if ((softc->flags & CD_FLAG_VALID_MEDIA) == 0) cdcheckmedia(periph, /*do_wait*/ 0); else xpt_schedule(periph, CAM_PRIORITY_NORMAL); cam_periph_unlock(periph); return; } static void cdstart(struct cam_periph *periph, union ccb *start_ccb) { struct cd_softc *softc; struct bio *bp; struct ccb_scsiio *csio; softc = (struct cd_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdstart\n")); switch (softc->state) { case CD_STATE_NORMAL: { bp = bioq_first(&softc->bio_queue); if (bp == NULL) { if (softc->tur) { softc->tur = 0; csio = &start_ccb->csio; scsi_test_unit_ready(csio, /*retries*/ cd_retry_count, cddone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, cd_timeout); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = CD_CCB_TUR; xpt_action(start_ccb); } else xpt_release_ccb(start_ccb); } else { if (softc->tur) { softc->tur = 0; cam_periph_release_locked(periph); } bioq_remove(&softc->bio_queue, bp); scsi_read_write(&start_ccb->csio, /*retries*/ cd_retry_count, /* cbfcnp */ cddone, MSG_SIMPLE_Q_TAG, /* read */bp->bio_cmd == BIO_READ ? 
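			/*
			 * A worked example with illustrative numbers: a 16 KiB
			 * read at byte offset 65536 on a 2048-byte-sector disc
			 * is issued below as lba 65536 / 2048 = 32 with a
			 * length of 16384 / 2048 = 8 blocks and a dxfer_len
			 * of 16384.
			 */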
SCSI_RW_READ : SCSI_RW_WRITE, /* byte2 */ 0, /* minimum_cmd_size */ 10, /* lba */ bp->bio_offset / softc->params.blksize, bp->bio_bcount / softc->params.blksize, /* data_ptr */ bp->bio_data, /* dxfer_len */ bp->bio_bcount, /* sense_len */ cd_retry_count ? SSD_FULL_SIZE : SF_NO_PRINT, /* timeout */ cd_timeout); /* Use READ CD command for audio tracks. */ if (softc->params.blksize == 2352) { start_ccb->csio.cdb_io.cdb_bytes[0] = READ_CD; start_ccb->csio.cdb_io.cdb_bytes[9] = 0xf8; start_ccb->csio.cdb_io.cdb_bytes[10] = 0; start_ccb->csio.cdb_io.cdb_bytes[11] = 0; start_ccb->csio.cdb_len = 12; } start_ccb->ccb_h.ccb_state = CD_CCB_BUFFER_IO; LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h, periph_links.le); softc->outstanding_cmds++; /* We expect a unit attention from this device */ if ((softc->flags & CD_FLAG_RETRY_UA) != 0) { start_ccb->ccb_h.ccb_state |= CD_CCB_RETRY_UA; softc->flags &= ~CD_FLAG_RETRY_UA; } start_ccb->ccb_h.ccb_bp = bp; bp = bioq_first(&softc->bio_queue); xpt_action(start_ccb); } if (bp != NULL || softc->tur) { /* Have more work to do, so ensure we stay scheduled */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); } break; } case CD_STATE_PROBE: case CD_STATE_MEDIA_SIZE: { struct scsi_read_capacity_data *rcap; rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap), M_SCSICD, M_NOWAIT | M_ZERO); if (rcap == NULL) { xpt_print(periph->path, "%s: Couldn't malloc read_capacity data\n", __func__); xpt_release_ccb(start_ccb); /* * We can't probe because we can't allocate memory, * so invalidate the peripheral. The system probably * has larger problems at this stage. If we've * already probed (and are re-probing capacity), we * don't need to invalidate. * * XXX KDM need to reset probe state and kick out * pending I/O. */ if (softc->state == CD_STATE_PROBE) cam_periph_invalidate(periph); break; } /* * Set the default capacity and sector size to something that * GEOM can handle. This will get reset when a read capacity * completes successfully. */ softc->disk->d_sectorsize = 2048; softc->disk->d_mediasize = 0; csio = &start_ccb->csio; scsi_read_capacity(csio, /*retries*/ cd_retry_count, cddone, MSG_SIMPLE_Q_TAG, rcap, SSD_FULL_SIZE, /*timeout*/20000); start_ccb->ccb_h.ccb_bp = NULL; if (softc->state == CD_STATE_PROBE) start_ccb->ccb_h.ccb_state = CD_CCB_PROBE; else start_ccb->ccb_h.ccb_state = CD_CCB_MEDIA_SIZE; xpt_action(start_ccb); break; } case CD_STATE_MEDIA_ALLOW: case CD_STATE_MEDIA_PREVENT: { /* * If the CD is already locked, we don't need to do this. * Move on to the capacity check. */ if (softc->state == CD_STATE_MEDIA_PREVENT && (softc->flags & CD_FLAG_DISC_LOCKED) != 0) { softc->state = CD_STATE_MEDIA_SIZE; xpt_release_ccb(start_ccb); xpt_schedule(periph, CAM_PRIORITY_NORMAL); break; } scsi_prevent(&start_ccb->csio, /*retries*/ cd_retry_count, /*cbfcnp*/ cddone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*action*/ (softc->state == CD_STATE_MEDIA_ALLOW) ? 
PR_ALLOW : PR_PREVENT, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ 60000); start_ccb->ccb_h.ccb_bp = NULL; if (softc->state == CD_STATE_MEDIA_ALLOW) start_ccb->ccb_h.ccb_state = CD_CCB_MEDIA_ALLOW; else start_ccb->ccb_h.ccb_state = CD_CCB_MEDIA_PREVENT; xpt_action(start_ccb); break; } case CD_STATE_MEDIA_TOC_HDR: { struct ioc_toc_header *toch; bzero(&softc->toc, sizeof(softc->toc)); toch = &softc->toc.header; scsi_read_toc(&start_ccb->csio, /*retries*/ cd_retry_count, /*cbfcnp*/ cddone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*byte1_flags*/ 0, /*format*/ SRTOC_FORMAT_TOC, /*track*/ 0, /*data_ptr*/ (uint8_t *)toch, /*dxfer_len*/ sizeof(*toch), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ 50000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = CD_CCB_MEDIA_TOC_HDR; xpt_action(start_ccb); break; } case CD_STATE_MEDIA_TOC_FULL: { bzero(&softc->toc, sizeof(softc->toc)); scsi_read_toc(&start_ccb->csio, /*retries*/ cd_retry_count, /*cbfcnp*/ cddone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*byte1_flags*/ 0, /*format*/ SRTOC_FORMAT_TOC, /*track*/ 0, /*data_ptr*/ (uint8_t *)&softc->toc, /*dxfer_len*/ softc->toc_read_len ? softc->toc_read_len : sizeof(softc->toc), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ 50000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = CD_CCB_MEDIA_TOC_FULL; xpt_action(start_ccb); break; } case CD_STATE_MEDIA_TOC_LEAD: { struct cd_toc_single *leadout; leadout = &softc->leadout; bzero(leadout, sizeof(*leadout)); scsi_read_toc(&start_ccb->csio, /*retries*/ cd_retry_count, /*cbfcnp*/ cddone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*byte1_flags*/ CD_MSF, /*format*/ SRTOC_FORMAT_TOC, /*track*/ LEADOUT, /*data_ptr*/ (uint8_t *)leadout, /*dxfer_len*/ sizeof(*leadout), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ 50000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = CD_CCB_MEDIA_TOC_LEAD; xpt_action(start_ccb); break; } } } static void cddone(struct cam_periph *periph, union ccb *done_ccb) { struct cd_softc *softc; struct ccb_scsiio *csio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cddone\n")); softc = (struct cd_softc *)periph->softc; csio = &done_ccb->csio; switch (csio->ccb_h.ccb_state & CD_CCB_TYPE_MASK) { case CD_CCB_BUFFER_IO: { struct bio *bp; int error; bp = (struct bio *)done_ccb->ccb_h.ccb_bp; error = 0; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { int sf; if ((done_ccb->ccb_h.ccb_state & CD_CCB_RETRY_UA) != 0) sf = SF_RETRY_UA; else sf = 0; error = cderror(done_ccb, CAM_RETRY_SELTO, sf); if (error == ERESTART) { /* * A retry was scheuled, so * just return. */ return; } } if (error != 0) { xpt_print(periph->path, "cddone: got error %#x back\n", error); bioq_flush(&softc->bio_queue, NULL, EIO); bp->bio_resid = bp->bio_bcount; bp->bio_error = error; bp->bio_flags |= BIO_ERROR; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } else { bp->bio_resid = csio->resid; bp->bio_error = 0; if (bp->bio_resid != 0) { /* * Short transfer ??? 
* XXX: not sure this is correct for partial * transfers at EOM */ bp->bio_flags |= BIO_ERROR; } } LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); softc->outstanding_cmds--; biofinish(bp, NULL, 0); break; } case CD_CCB_PROBE: { struct scsi_read_capacity_data *rdcap; char *announce_buf; struct cd_params *cdp; int error; cdp = &softc->params; announce_buf = softc->announce_temp; bzero(announce_buf, CD_ANNOUNCETMP_SZ); rdcap = (struct scsi_read_capacity_data *)csio->data_ptr; cdp->disksize = scsi_4btoul (rdcap->addr) + 1; cdp->blksize = scsi_4btoul (rdcap->length); /* * Retry any UNIT ATTENTION type errors. They * are expected at boot. */ if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP || (error = cderror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT)) == 0) { snprintf(announce_buf, CD_ANNOUNCETMP_SZ, "%juMB (%ju %u byte sectors)", ((uintmax_t)cdp->disksize * cdp->blksize) / (1024 * 1024), (uintmax_t)cdp->disksize, cdp->blksize); } else { if (error == ERESTART) { /* * A retry was scheuled, so * just return. */ return; } else { int asc, ascq; int sense_key, error_code; int have_sense; cam_status status; struct ccb_getdev cgd; /* Don't wedge this device's queue */ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); status = done_ccb->ccb_h.status; xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if (scsi_extract_sense_ccb(done_ccb, &error_code, &sense_key, &asc, &ascq)) have_sense = TRUE; else have_sense = FALSE; /* * Attach to anything that claims to be a * CDROM or WORM device, as long as it * doesn't return a "Logical unit not * supported" (0x25) error. */ if ((have_sense) && (asc != 0x25) && (error_code == SSD_CURRENT_ERROR || error_code == SSD_DESC_CURRENT_ERROR)) { const char *sense_key_desc; const char *asc_desc; scsi_sense_desc(sense_key, asc, ascq, &cgd.inq_data, &sense_key_desc, &asc_desc); snprintf(announce_buf, CD_ANNOUNCETMP_SZ, "Attempt to query device " "size failed: %s, %s", sense_key_desc, asc_desc); } else if ((have_sense == 0) && ((status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) && (csio->scsi_status == SCSI_STATUS_BUSY)) { snprintf(announce_buf, CD_ANNOUNCETMP_SZ, "Attempt to query device " "size failed: SCSI Status: %s", scsi_status_string(csio)); } else if (SID_TYPE(&cgd.inq_data) == T_CDROM) { /* * We only print out an error for * CDROM type devices. For WORM * devices, we don't print out an * error since a few WORM devices * don't support CDROM commands. * If we have sense information, go * ahead and print it out. * Otherwise, just say that we * couldn't attach. */ /* * Just print out the error, not * the full probe message, when we * don't attach. */ if (have_sense) scsi_sense_print( &done_ccb->csio); else { xpt_print(periph->path, "got CAM status %#x\n", done_ccb->ccb_h.status); } xpt_print(periph->path, "fatal error, " "failed to attach to device\n"); /* * Invalidate this peripheral. */ cam_periph_invalidate(periph); announce_buf = NULL; } else { /* * Invalidate this peripheral. 
*/ cam_periph_invalidate(periph); announce_buf = NULL; } } } free(rdcap, M_SCSICD); if (announce_buf != NULL) { struct sbuf sb; sbuf_new(&sb, softc->announce_buf, CD_ANNOUNCE_SZ, SBUF_FIXEDLEN); xpt_announce_periph_sbuf(periph, &sb, announce_buf); xpt_announce_quirks_sbuf(periph, &sb, softc->quirks, CD_Q_BIT_STRING); sbuf_finish(&sb); sbuf_putbuf(&sb); /* * Create our sysctl variables, now that we know * we have successfully attached. */ taskqueue_enqueue(taskqueue_thread,&softc->sysctl_task); } softc->state = CD_STATE_NORMAL; /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the probe lock on the peripheral. * The peripheral will only go away once the last lock * is removed, and we need it around for the CCB release * operation. */ xpt_release_ccb(done_ccb); cam_periph_unhold(periph); return; } case CD_CCB_TUR: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (cderror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART) return; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } xpt_release_ccb(done_ccb); cam_periph_release_locked(periph); return; } case CD_CCB_MEDIA_ALLOW: case CD_CCB_MEDIA_PREVENT: { int error; int is_prevent; error = 0; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = cderror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT); } if (error == ERESTART) return; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); /* * Note that just like the original cdcheckmedia(), we do * a prevent without failing the whole operation if the * prevent fails. We try, but keep going if it doesn't * work. */ if ((done_ccb->ccb_h.ccb_state & CD_CCB_TYPE_MASK) == CD_CCB_MEDIA_PREVENT) is_prevent = 1; else is_prevent = 0; xpt_release_ccb(done_ccb); if (is_prevent != 0) { if (error == 0) softc->flags |= CD_FLAG_DISC_LOCKED; else softc->flags &= ~CD_FLAG_DISC_LOCKED; softc->state = CD_STATE_MEDIA_SIZE; xpt_schedule(periph, CAM_PRIORITY_NORMAL); } else { if (error == 0) softc->flags &= ~CD_FLAG_DISC_LOCKED; softc->state = CD_STATE_NORMAL; if (bioq_first(&softc->bio_queue) != NULL) xpt_schedule(periph, CAM_PRIORITY_NORMAL); } return; } case CD_CCB_MEDIA_SIZE: { struct scsi_read_capacity_data *rdcap; int error; error = 0; if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = cderror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT); } if (error == ERESTART) return; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); rdcap = (struct scsi_read_capacity_data *)csio->data_ptr; if (error == 0) { softc->params.disksize =scsi_4btoul(rdcap->addr) + 1; softc->params.blksize = scsi_4btoul(rdcap->length); /* Make sure we got at least some block size. */ if (softc->params.blksize == 0) error = EIO; /* * SCSI-3 mandates that the reported blocksize shall be * 2048. Older drives sometimes report funny values, * trim it down to 2048, or other parts of the kernel * will get confused. * * XXX we leave drives alone that might report 512 * bytes, as well as drives reporting more weird * sizes like perhaps 4K. 
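		/*
		 * A worked example with illustrative numbers: a drive
		 * reporting 2336-byte sectors for a 333000-sector disc is
		 * clamped to 2048 here, so d_mediasize below becomes
		 * 2048 * 333000 = 681984000 bytes instead of a size derived
		 * from the odd sector length.
		 */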
*/ if (softc->params.blksize > 2048 && softc->params.blksize <= 2352) softc->params.blksize = 2048; } free(rdcap, M_SCSICD); if (error == 0) { softc->disk->d_sectorsize = softc->params.blksize; softc->disk->d_mediasize = (off_t)softc->params.blksize * softc->params.disksize; softc->flags |= CD_FLAG_SAW_MEDIA | CD_FLAG_VALID_MEDIA; softc->state = CD_STATE_MEDIA_TOC_HDR; } else { softc->flags &= ~(CD_FLAG_VALID_MEDIA | CD_FLAG_VALID_TOC); bioq_flush(&softc->bio_queue, NULL, EINVAL); softc->state = CD_STATE_MEDIA_ALLOW; cdmediaprobedone(periph); } xpt_release_ccb(done_ccb); xpt_schedule(periph, CAM_PRIORITY_NORMAL); return; } case CD_CCB_MEDIA_TOC_HDR: case CD_CCB_MEDIA_TOC_FULL: case CD_CCB_MEDIA_TOC_LEAD: { int error; struct ioc_toc_header *toch; int num_entries; int cdindex; error = 0; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = cderror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT); } if (error == ERESTART) return; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); /* * We will get errors here for media that doesn't have a table * of contents. According to the MMC-3 spec: "When a Read * TOC/PMA/ATIP command is presented for a DDCD/CD-R/RW media, * where the first TOC has not been recorded (no complete * session) and the Format codes 0000b, 0001b, or 0010b are * specified, this command shall be rejected with an INVALID * FIELD IN CDB. Devices that are not capable of reading an * incomplete session on DDC/CD-R/RW media shall report * CANNOT READ MEDIUM - INCOMPATIBLE FORMAT." * * So this isn't fatal if we can't read the table of contents, * it just means that the user won't be able to issue the * play tracks ioctl, and likely lots of other stuff won't * work either. They need to burn the CD before we can do * a whole lot with it. So we don't print anything here if * we get an error back. * * We also bail out if the drive doesn't at least give us * the full TOC header. */ if ((error != 0) || ((csio->dxfer_len - csio->resid) < sizeof(struct ioc_toc_header))) { softc->flags &= ~CD_FLAG_VALID_TOC; bzero(&softc->toc, sizeof(softc->toc)); /* * Failing the TOC read is not an error. */ softc->state = CD_STATE_NORMAL; xpt_release_ccb(done_ccb); cdmediaprobedone(periph); /* * Go ahead and schedule I/O execution if there is * anything in the queue. It'll probably get * kicked out with an error. */ if (bioq_first(&softc->bio_queue) != NULL) xpt_schedule(periph, CAM_PRIORITY_NORMAL); return; } /* * Note that this is NOT the storage location used for the * leadout! */ toch = &softc->toc.header; if (softc->quirks & CD_Q_BCD_TRACKS) { toch->starting_track = bcd2bin(toch->starting_track); toch->ending_track = bcd2bin(toch->ending_track); } /* Number of TOC entries, plus leadout */ num_entries = (toch->ending_track - toch->starting_track) + 2; cdindex = toch->starting_track + num_entries -1; if ((done_ccb->ccb_h.ccb_state & CD_CCB_TYPE_MASK) == CD_CCB_MEDIA_TOC_HDR) { if (num_entries <= 0) { softc->flags &= ~CD_FLAG_VALID_TOC; bzero(&softc->toc, sizeof(softc->toc)); /* * Failing the TOC read is not an error. */ softc->state = CD_STATE_NORMAL; xpt_release_ccb(done_ccb); cdmediaprobedone(periph); /* * Go ahead and schedule I/O execution if * there is anything in the queue. It'll * probably get kicked out with an error. 
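		 *
		 * For reference, the arithmetic above with illustrative
		 * numbers: a disc with tracks 1 through 12 gives
		 * num_entries = (12 - 1) + 2 = 13 (the twelve tracks plus
		 * the leadout) and cdindex = 1 + 13 - 1 = 13, one past the
		 * last track, which is the slot the leadout entry ends up in.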
*/ if (bioq_first(&softc->bio_queue) != NULL) xpt_schedule(periph, CAM_PRIORITY_NORMAL); } else { softc->toc_read_len = num_entries * sizeof(struct cd_toc_entry); softc->toc_read_len += sizeof(*toch); softc->state = CD_STATE_MEDIA_TOC_FULL; xpt_release_ccb(done_ccb); xpt_schedule(periph, CAM_PRIORITY_NORMAL); } return; } else if ((done_ccb->ccb_h.ccb_state & CD_CCB_TYPE_MASK) == CD_CCB_MEDIA_TOC_LEAD) { struct cd_toc_single *leadout; leadout = (struct cd_toc_single *)csio->data_ptr; softc->toc.entries[cdindex - toch->starting_track] = leadout->entry; } else if (((done_ccb->ccb_h.ccb_state & CD_CCB_TYPE_MASK) == CD_CCB_MEDIA_TOC_FULL) && (cdindex == toch->ending_track + 1)) { /* * XXX KDM is this necessary? Probably only if the * drive doesn't return leadout information with the * table of contents. */ softc->state = CD_STATE_MEDIA_TOC_LEAD; xpt_release_ccb(done_ccb); xpt_schedule(periph, CAM_PRIORITY_NORMAL); return; } if (softc->quirks & CD_Q_BCD_TRACKS) { for (cdindex = 0; cdindex < num_entries - 1; cdindex++){ softc->toc.entries[cdindex].track = bcd2bin(softc->toc.entries[cdindex].track); } } softc->flags |= CD_FLAG_VALID_TOC; /* If the first track is audio, correct sector size. */ if ((softc->toc.entries[0].control & 4) == 0) { softc->disk->d_sectorsize =softc->params.blksize = 2352; softc->disk->d_mediasize = (off_t)softc->params.blksize * softc->params.disksize; } softc->state = CD_STATE_NORMAL; /* * We unconditionally (re)set the blocksize each time the * CD device is opened. This is because the CD can change, * and therefore the blocksize might change. * XXX problems here if some slice or partition is still * open with the old size? */ if ((softc->disk->d_devstat->flags & DEVSTAT_BS_UNAVAILABLE)!=0) softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE; softc->disk->d_devstat->block_size = softc->params.blksize; xpt_release_ccb(done_ccb); cdmediaprobedone(periph); if (bioq_first(&softc->bio_queue) != NULL) xpt_schedule(periph, CAM_PRIORITY_NORMAL); return; } default: break; } xpt_release_ccb(done_ccb); } static union cd_pages * cdgetpage(struct cd_mode_params *mode_params) { union cd_pages *page; if (mode_params->cdb_size == 10) page = (union cd_pages *)find_mode_page_10( (struct scsi_mode_header_10 *)mode_params->mode_buf); else page = (union cd_pages *)find_mode_page_6( (struct scsi_mode_header_6 *)mode_params->mode_buf); return (page); } static int cdgetpagesize(int page_num) { u_int i; for (i = 0; i < nitems(cd_page_size_table); i++) { if (cd_page_size_table[i].page == page_num) return (cd_page_size_table[i].page_size); } return (-1); } static struct cd_toc_entry * te_data_get_ptr(void *irtep, u_long cmd) { union { struct ioc_read_toc_entry irte; #ifdef COMPAT_FREEBSD32 struct ioc_read_toc_entry32 irte32; #endif } *irteup; irteup = irtep; switch (IOCPARM_LEN(cmd)) { case sizeof(irteup->irte): return (irteup->irte.data); #ifdef COMPAT_FREEBSD32 case sizeof(irteup->irte32): return ((struct cd_toc_entry *)(uintptr_t)irteup->irte32.data); #endif default: panic("Unhandled ioctl command %ld", cmd); } } static int cdioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td) { struct cam_periph *periph; struct cd_softc *softc; int error = 0; periph = (struct cam_periph *)dp->d_drv1; cam_periph_lock(periph); softc = (struct cd_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cdioctl(%#lx)\n", cmd)); if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } /* * If we don't 
have media loaded, check for it. If still don't * have media loaded, we can only do a load or eject. * * We only care whether media is loaded if this is a cd-specific ioctl * (thus the IOCGROUP check below). Note that this will break if * anyone adds any ioctls into the switch statement below that don't * have their ioctl group set to 'c'. */ if (((softc->flags & CD_FLAG_VALID_MEDIA) == 0) && ((cmd != CDIOCCLOSE) && (cmd != CDIOCEJECT)) && (IOCGROUP(cmd) == 'c')) { error = cdcheckmedia(periph, /*do_wait*/ 1); if (error != 0) { cam_periph_unhold(periph); cam_periph_unlock(periph); return (error); } } /* * Drop the lock here so later mallocs can use WAITOK. The periph * is essentially locked still with the cam_periph_hold call above. */ cam_periph_unlock(periph); switch (cmd) { case CDIOCPLAYTRACKS: { struct ioc_play_track *args = (struct ioc_play_track *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCPLAYTRACKS\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.flags &= ~CD_PA_SOTC; page->audio.flags |= CD_PA_IMMED; error = cdsetmode(periph, ¶ms); free(params.mode_buf, M_SCSICD); if (error) { cam_periph_unlock(periph); break; } /* * This was originally implemented with the PLAY * AUDIO TRACK INDEX command, but that command was * deprecated after SCSI-2. Most (all?) SCSI CDROM * drives support it but ATAPI and ATAPI-derivative * drives don't seem to support it. So we keep a * cache of the table of contents and translate * track numbers to MSF format. */ if (softc->flags & CD_FLAG_VALID_TOC) { union msf_lba *sentry, *eentry; int st, et; if (args->end_track < softc->toc.header.ending_track + 1) args->end_track++; if (args->end_track > softc->toc.header.ending_track + 1) args->end_track = softc->toc.header.ending_track + 1; st = args->start_track - softc->toc.header.starting_track; et = args->end_track - softc->toc.header.starting_track; if ((st < 0) || (et < 0) || (st > (softc->toc.header.ending_track - softc->toc.header.starting_track))) { error = EINVAL; cam_periph_unlock(periph); break; } sentry = &softc->toc.entries[st].addr; eentry = &softc->toc.entries[et].addr; error = cdplaymsf(periph, sentry->msf.minute, sentry->msf.second, sentry->msf.frame, eentry->msf.minute, eentry->msf.second, eentry->msf.frame); } else { /* * If we don't have a valid TOC, try the * play track index command. It is part of * the SCSI-2 spec, but was removed in the * MMC specs. ATAPI and ATAPI-derived * drives don't support it. 
*/ if (softc->quirks & CD_Q_BCD_TRACKS) { args->start_track = bin2bcd(args->start_track); args->end_track = bin2bcd(args->end_track); } error = cdplaytracks(periph, args->start_track, args->start_index, args->end_track, args->end_index); } cam_periph_unlock(periph); } break; case CDIOCPLAYMSF: { struct ioc_play_msf *args = (struct ioc_play_msf *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCPLAYMSF\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.flags &= ~CD_PA_SOTC; page->audio.flags |= CD_PA_IMMED; error = cdsetmode(periph, ¶ms); free(params.mode_buf, M_SCSICD); if (error) { cam_periph_unlock(periph); break; } error = cdplaymsf(periph, args->start_m, args->start_s, args->start_f, args->end_m, args->end_s, args->end_f); cam_periph_unlock(periph); } break; case CDIOCPLAYBLOCKS: { struct ioc_play_blocks *args = (struct ioc_play_blocks *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCPLAYBLOCKS\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.flags &= ~CD_PA_SOTC; page->audio.flags |= CD_PA_IMMED; error = cdsetmode(periph, ¶ms); free(params.mode_buf, M_SCSICD); if (error) { cam_periph_unlock(periph); break; } error = cdplay(periph, args->blk, args->len); cam_periph_unlock(periph); } break; case CDIOCREADSUBCHANNEL: { struct ioc_read_subchannel *args = (struct ioc_read_subchannel *) addr; struct cd_sub_channel_info *data; u_int32_t len = args->data_len; data = malloc(sizeof(struct cd_sub_channel_info), M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCREADSUBCHANNEL\n")); if ((len > sizeof(struct cd_sub_channel_info)) || (len < sizeof(struct cd_sub_channel_header))) { printf( "scsi_cd: cdioctl: " "cdioreadsubchannel: error, len=%d\n", len); error = EINVAL; free(data, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) args->track = bin2bcd(args->track); error = cdreadsubchannel(periph, args->address_format, args->data_format, args->track, data, len); if (error) { free(data, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) data->what.track_info.track_number = bcd2bin(data->what.track_info.track_number); len = min(len, ((data->header.data_len[0] << 8) + data->header.data_len[1] + sizeof(struct cd_sub_channel_header))); cam_periph_unlock(periph); error = copyout(data, args->data, len); free(data, M_SCSICD); } break; case CDIOREADTOCHEADER: { struct ioc_toc_header *th; th = malloc(sizeof(struct ioc_toc_header), M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOREADTOCHEADER\n")); error = cdreadtoc(periph, 0, 0, (u_int8_t *)th, sizeof (*th), /*sense_flags*/SF_NO_PRINT); if (error) { free(th, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) { /* we are going to have to convert the BCD * 
encoding on the cd to what is expected */ th->starting_track = bcd2bin(th->starting_track); th->ending_track = bcd2bin(th->ending_track); } th->len = ntohs(th->len); bcopy(th, addr, sizeof(*th)); free(th, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOREADTOCENTRYS: #ifdef COMPAT_FREEBSD32 case CDIOREADTOCENTRYS_32: #endif { struct cd_tocdata *data; struct cd_toc_single *lead; struct ioc_read_toc_entry *te = (struct ioc_read_toc_entry *) addr; struct ioc_toc_header *th; u_int32_t len, readlen, idx, num; u_int32_t starting_track = te->starting_track; data = malloc(sizeof(*data), M_SCSICD, M_WAITOK | M_ZERO); lead = malloc(sizeof(*lead), M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOREADTOCENTRYS\n")); if (te->data_len < sizeof(struct cd_toc_entry) || (te->data_len % sizeof(struct cd_toc_entry)) != 0 || (te->address_format != CD_MSF_FORMAT && te->address_format != CD_LBA_FORMAT)) { error = EINVAL; printf("scsi_cd: error in readtocentries, " "returning EINVAL\n"); free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } th = &data->header; error = cdreadtoc(periph, 0, 0, (u_int8_t *)th, sizeof (*th), /*sense_flags*/0); if (error) { free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) { /* we are going to have to convert the BCD * encoding on the cd to what is expected */ th->starting_track = bcd2bin(th->starting_track); th->ending_track = bcd2bin(th->ending_track); } if (starting_track == 0) starting_track = th->starting_track; else if (starting_track == LEADOUT) starting_track = th->ending_track + 1; else if (starting_track < th->starting_track || starting_track > th->ending_track + 1) { printf("scsi_cd: error in readtocentries, " "returning EINVAL\n"); free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); error = EINVAL; break; } /* calculate reading length without leadout entry */ readlen = (th->ending_track - starting_track + 1) * sizeof(struct cd_toc_entry); /* and with leadout entry */ len = readlen + sizeof(struct cd_toc_entry); if (te->data_len < len) { len = te->data_len; if (readlen > len) readlen = len; } if (len > sizeof(data->entries)) { printf("scsi_cd: error in readtocentries, " "returning EINVAL\n"); error = EINVAL; free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } num = len / sizeof(struct cd_toc_entry); if (readlen > 0) { error = cdreadtoc(periph, te->address_format, starting_track, (u_int8_t *)data, readlen + sizeof (*th), /*sense_flags*/0); if (error) { free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } } /* make leadout entry if needed */ idx = starting_track + num - 1; if (softc->quirks & CD_Q_BCD_TRACKS) th->ending_track = bcd2bin(th->ending_track); if (idx == th->ending_track + 1) { error = cdreadtoc(periph, te->address_format, LEADOUT, (u_int8_t *)lead, sizeof(*lead), /*sense_flags*/0); if (error) { free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } data->entries[idx - starting_track] = lead->entry; } if (softc->quirks & CD_Q_BCD_TRACKS) { for (idx = 0; idx < num - 1; idx++) { data->entries[idx].track = bcd2bin(data->entries[idx].track); } } cam_periph_unlock(periph); error = copyout(data->entries, te_data_get_ptr(te, cmd), len); free(data, M_SCSICD); free(lead, M_SCSICD); } break; case CDIOREADTOCENTRY: { struct cd_toc_single *data; struct ioc_read_toc_single_entry *te = (struct 
ioc_read_toc_single_entry *) addr; struct ioc_toc_header *th; u_int32_t track; data = malloc(sizeof(*data), M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOREADTOCENTRY\n")); if (te->address_format != CD_MSF_FORMAT && te->address_format != CD_LBA_FORMAT) { printf("error in readtocentry, " " returning EINVAL\n"); free(data, M_SCSICD); error = EINVAL; cam_periph_unlock(periph); break; } th = &data->header; error = cdreadtoc(periph, 0, 0, (u_int8_t *)th, sizeof (*th), /*sense_flags*/0); if (error) { free(data, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) { /* we are going to have to convert the BCD * encoding on the cd to what is expected */ th->starting_track = bcd2bin(th->starting_track); th->ending_track = bcd2bin(th->ending_track); } track = te->track; if (track == 0) track = th->starting_track; else if (track == LEADOUT) /* OK */; else if (track < th->starting_track || track > th->ending_track + 1) { printf("error in readtocentry, " " returning EINVAL\n"); free(data, M_SCSICD); error = EINVAL; cam_periph_unlock(periph); break; } error = cdreadtoc(periph, te->address_format, track, (u_int8_t *)data, sizeof(*data), /*sense_flags*/0); if (error) { free(data, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) data->entry.track = bcd2bin(data->entry.track); bcopy(&data->entry, &te->entry, sizeof(struct cd_toc_entry)); free(data, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETPATCH: { struct ioc_patch *arg = (struct ioc_patch *)addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETPATCH\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.port[LEFT_PORT].channels = arg->patch[0]; page->audio.port[RIGHT_PORT].channels = arg->patch[1]; page->audio.port[2].channels = arg->patch[2]; page->audio.port[3].channels = arg->patch[3]; error = cdsetmode(periph, ¶ms); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCGETVOL: { struct ioc_vol *arg = (struct ioc_vol *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCGETVOL\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); arg->vol[LEFT_PORT] = page->audio.port[LEFT_PORT].volume; arg->vol[RIGHT_PORT] = page->audio.port[RIGHT_PORT].volume; arg->vol[2] = page->audio.port[2].volume; arg->vol[3] = page->audio.port[3].volume; free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETVOL: { struct ioc_vol *arg = (struct ioc_vol *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETVOL\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); 
cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.port[LEFT_PORT].channels = CHANNEL_0; page->audio.port[LEFT_PORT].volume = arg->vol[LEFT_PORT]; page->audio.port[RIGHT_PORT].channels = CHANNEL_1; page->audio.port[RIGHT_PORT].volume = arg->vol[RIGHT_PORT]; page->audio.port[2].volume = arg->vol[2]; page->audio.port[3].volume = arg->vol[3]; error = cdsetmode(periph, ¶ms); cam_periph_unlock(periph); free(params.mode_buf, M_SCSICD); } break; case CDIOCSETMONO: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETMONO\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.port[LEFT_PORT].channels = LEFT_CHANNEL | RIGHT_CHANNEL; page->audio.port[RIGHT_PORT].channels = LEFT_CHANNEL | RIGHT_CHANNEL; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, ¶ms); cam_periph_unlock(periph); free(params.mode_buf, M_SCSICD); } break; case CDIOCSETSTEREO: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETSTEREO\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.port[LEFT_PORT].channels = LEFT_CHANNEL; page->audio.port[RIGHT_PORT].channels = RIGHT_CHANNEL; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, ¶ms); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETMUTE: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETMUTE\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.port[LEFT_PORT].channels = 0; page->audio.port[RIGHT_PORT].channels = 0; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, ¶ms); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETLEFT: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETLEFT\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.port[LEFT_PORT].channels = LEFT_CHANNEL; page->audio.port[RIGHT_PORT].channels = LEFT_CHANNEL; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, ¶ms); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETRIGHT: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, 
M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETRIGHT\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.port[LEFT_PORT].channels = RIGHT_CHANNEL; page->audio.port[RIGHT_PORT].channels = RIGHT_CHANNEL; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, ¶ms); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCRESUME: cam_periph_lock(periph); error = cdpause(periph, 1); cam_periph_unlock(periph); break; case CDIOCPAUSE: cam_periph_lock(periph); error = cdpause(periph, 0); cam_periph_unlock(periph); break; case CDIOCSTART: cam_periph_lock(periph); error = cdstartunit(periph, 0); cam_periph_unlock(periph); break; case CDIOCCLOSE: cam_periph_lock(periph); error = cdstartunit(periph, 1); cam_periph_unlock(periph); break; case CDIOCSTOP: cam_periph_lock(periph); error = cdstopunit(periph, 0); cam_periph_unlock(periph); break; case CDIOCEJECT: cam_periph_lock(periph); error = cdstopunit(periph, 1); cam_periph_unlock(periph); break; case CDIOCALLOW: cam_periph_lock(periph); cdprevent(periph, PR_ALLOW); cam_periph_unlock(periph); break; case CDIOCPREVENT: cam_periph_lock(periph); cdprevent(periph, PR_PREVENT); cam_periph_unlock(periph); break; case CDIOCSETDEBUG: /* sc_link->flags |= (SDEV_DB1 | SDEV_DB2); */ error = ENOTTY; break; case CDIOCCLRDEBUG: /* sc_link->flags &= ~(SDEV_DB1 | SDEV_DB2); */ error = ENOTTY; break; case CDIOCRESET: /* return (cd_reset(periph)); */ error = ENOTTY; break; case CDRIOCREADSPEED: cam_periph_lock(periph); error = cdsetspeed(periph, *(u_int32_t *)addr, CDR_MAX_SPEED); cam_periph_unlock(periph); break; case CDRIOCWRITESPEED: cam_periph_lock(periph); error = cdsetspeed(periph, CDR_MAX_SPEED, *(u_int32_t *)addr); cam_periph_unlock(periph); break; case CDRIOCGETBLOCKSIZE: *(int *)addr = softc->params.blksize; break; case CDRIOCSETBLOCKSIZE: if (*(int *)addr <= 0) { error = EINVAL; break; } softc->disk->d_sectorsize = softc->params.blksize = *(int *)addr; break; case DVDIOCSENDKEY: case DVDIOCREPORTKEY: { struct dvd_authinfo *authinfo; authinfo = (struct dvd_authinfo *)addr; if (cmd == DVDIOCREPORTKEY) error = cdreportkey(periph, authinfo); else error = cdsendkey(periph, authinfo); break; } case DVDIOCREADSTRUCTURE: { struct dvd_struct *dvdstruct; dvdstruct = (struct dvd_struct *)addr; error = cdreaddvdstructure(periph, dvdstruct); break; } default: cam_periph_lock(periph); error = cam_periph_ioctl(periph, cmd, addr, cderror); cam_periph_unlock(periph); break; } cam_periph_lock(periph); cam_periph_unhold(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("leaving cdioctl\n")); if (error && bootverbose) { printf("scsi_cd.c::ioctl cmd=%08lx error=%d\n", cmd, error); } cam_periph_unlock(periph); return (error); } static void cdprevent(struct cam_periph *periph, int action) { union ccb *ccb; struct cd_softc *softc; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdprevent\n")); softc = (struct cd_softc *)periph->softc; if (((action == PR_ALLOW) && (softc->flags & CD_FLAG_DISC_LOCKED) == 0) || ((action == PR_PREVENT) && (softc->flags & CD_FLAG_DISC_LOCKED) != 0)) { return; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_prevent(&ccb->csio, /*retries*/ cd_retry_count, /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG, action, SSD_FULL_SIZE, /* timeout */60000); error = cdrunccb(ccb, cderror, 
/*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA|SF_NO_PRINT); xpt_release_ccb(ccb); if (error == 0) { if (action == PR_ALLOW) softc->flags &= ~CD_FLAG_DISC_LOCKED; else softc->flags |= CD_FLAG_DISC_LOCKED; } } static void cdmediaprobedone(struct cam_periph *periph) { struct cd_softc *softc; softc = (struct cd_softc *)periph->softc; softc->flags &= ~CD_FLAG_MEDIA_SCAN_ACT; if ((softc->flags & CD_FLAG_MEDIA_WAIT) != 0) { softc->flags &= ~CD_FLAG_MEDIA_WAIT; wakeup(&softc->toc); } } /* * XXX: the disk media and sector size is only really able to change * XXX: while the device is closed. */ static int cdcheckmedia(struct cam_periph *periph, int do_wait) { struct cd_softc *softc; int error; softc = (struct cd_softc *)periph->softc; error = 0; if ((do_wait != 0) && ((softc->flags & CD_FLAG_MEDIA_WAIT) == 0)) { softc->flags |= CD_FLAG_MEDIA_WAIT; } if ((softc->flags & CD_FLAG_MEDIA_SCAN_ACT) == 0) { softc->state = CD_STATE_MEDIA_PREVENT; softc->flags |= CD_FLAG_MEDIA_SCAN_ACT; xpt_schedule(periph, CAM_PRIORITY_NORMAL); } if (do_wait == 0) goto bailout; error = msleep(&softc->toc, cam_periph_mtx(periph), PRIBIO,"cdmedia",0); if (error != 0) goto bailout; /* * Check to see whether we have a valid size from the media. We * may or may not have a valid TOC. */ if ((softc->flags & CD_FLAG_VALID_MEDIA) == 0) error = EINVAL; bailout: return (error); } #if 0 static int cdcheckmedia(struct cam_periph *periph) { struct cd_softc *softc; struct ioc_toc_header *toch; struct cd_toc_single leadout; u_int32_t size, toclen; int error, num_entries, cdindex; softc = (struct cd_softc *)periph->softc; cdprevent(periph, PR_PREVENT); softc->disk->d_sectorsize = 2048; softc->disk->d_mediasize = 0; /* * Get the disc size and block size. If we can't get it, we don't * have media, most likely. */ if ((error = cdsize(periph, &size)) != 0) { softc->flags &= ~(CD_FLAG_VALID_MEDIA|CD_FLAG_VALID_TOC); cdprevent(periph, PR_ALLOW); return (error); } else { softc->flags |= CD_FLAG_SAW_MEDIA | CD_FLAG_VALID_MEDIA; softc->disk->d_sectorsize = softc->params.blksize; softc->disk->d_mediasize = (off_t)softc->params.blksize * softc->params.disksize; } /* * Now we check the table of contents. This (currently) is only * used for the CDIOCPLAYTRACKS ioctl. It may be used later to do * things like present a separate entry in /dev for each track, * like that acd(4) driver does. */ bzero(&softc->toc, sizeof(softc->toc)); toch = &softc->toc.header; /* * We will get errors here for media that doesn't have a table of * contents. According to the MMC-3 spec: "When a Read TOC/PMA/ATIP * command is presented for a DDCD/CD-R/RW media, where the first TOC * has not been recorded (no complete session) and the Format codes * 0000b, 0001b, or 0010b are specified, this command shall be rejected * with an INVALID FIELD IN CDB. Devices that are not capable of * reading an incomplete session on DDC/CD-R/RW media shall report * CANNOT READ MEDIUM - INCOMPATIBLE FORMAT." * * So this isn't fatal if we can't read the table of contents, it * just means that the user won't be able to issue the play tracks * ioctl, and likely lots of other stuff won't work either. They * need to burn the CD before we can do a whole lot with it. So * we don't print anything here if we get an error back. */ error = cdreadtoc(periph, 0, 0, (u_int8_t *)toch, sizeof(*toch), SF_NO_PRINT); /* * Errors in reading the table of contents aren't fatal, we just * won't have a valid table of contents cached. 
*/ if (error != 0) { error = 0; bzero(&softc->toc, sizeof(softc->toc)); goto bailout; } if (softc->quirks & CD_Q_BCD_TRACKS) { toch->starting_track = bcd2bin(toch->starting_track); toch->ending_track = bcd2bin(toch->ending_track); } /* Number of TOC entries, plus leadout */ num_entries = (toch->ending_track - toch->starting_track) + 2; if (num_entries <= 0) goto bailout; toclen = num_entries * sizeof(struct cd_toc_entry); error = cdreadtoc(periph, CD_MSF_FORMAT, toch->starting_track, (u_int8_t *)&softc->toc, toclen + sizeof(*toch), SF_NO_PRINT); if (error != 0) { error = 0; bzero(&softc->toc, sizeof(softc->toc)); goto bailout; } if (softc->quirks & CD_Q_BCD_TRACKS) { toch->starting_track = bcd2bin(toch->starting_track); toch->ending_track = bcd2bin(toch->ending_track); } /* * XXX KDM is this necessary? Probably only if the drive doesn't * return leadout information with the table of contents. */ cdindex = toch->starting_track + num_entries -1; if (cdindex == toch->ending_track + 1) { error = cdreadtoc(periph, CD_MSF_FORMAT, LEADOUT, (u_int8_t *)&leadout, sizeof(leadout), SF_NO_PRINT); if (error != 0) { error = 0; goto bailout; } softc->toc.entries[cdindex - toch->starting_track] = leadout.entry; } if (softc->quirks & CD_Q_BCD_TRACKS) { for (cdindex = 0; cdindex < num_entries - 1; cdindex++) { softc->toc.entries[cdindex].track = bcd2bin(softc->toc.entries[cdindex].track); } } softc->flags |= CD_FLAG_VALID_TOC; /* If the first track is audio, correct sector size. */ if ((softc->toc.entries[0].control & 4) == 0) { softc->disk->d_sectorsize = softc->params.blksize = 2352; softc->disk->d_mediasize = (off_t)softc->params.blksize * softc->params.disksize; } bailout: /* * We unconditionally (re)set the blocksize each time the * CD device is opened. This is because the CD can change, * and therefore the blocksize might change. * XXX problems here if some slice or partition is still * open with the old size? */ if ((softc->disk->d_devstat->flags & DEVSTAT_BS_UNAVAILABLE) != 0) softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE; softc->disk->d_devstat->block_size = softc->params.blksize; return (error); } static int cdsize(struct cam_periph *periph, u_int32_t *size) { struct cd_softc *softc; union ccb *ccb; struct scsi_read_capacity_data *rcap_buf; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdsize\n")); softc = (struct cd_softc *)periph->softc; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); /* XXX Should be M_WAITOK */ rcap_buf = malloc(sizeof(struct scsi_read_capacity_data), M_SCSICD, M_NOWAIT | M_ZERO); if (rcap_buf == NULL) return (ENOMEM); scsi_read_capacity(&ccb->csio, /*retries*/ cd_retry_count, /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG, rcap_buf, SSD_FULL_SIZE, /* timeout */20000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA|SF_NO_PRINT); xpt_release_ccb(ccb); softc->params.disksize = scsi_4btoul(rcap_buf->addr) + 1; softc->params.blksize = scsi_4btoul(rcap_buf->length); /* Make sure we got at least some block size. */ if (error == 0 && softc->params.blksize == 0) error = EIO; /* * SCSI-3 mandates that the reported blocksize shall be 2048. * Older drives sometimes report funny values, trim it down to * 2048, or other parts of the kernel will get confused. * * XXX we leave drives alone that might report 512 bytes, as * well as drives reporting more weird sizes like perhaps 4K. 
*/ if (softc->params.blksize > 2048 && softc->params.blksize <= 2352) softc->params.blksize = 2048; free(rcap_buf, M_SCSICD); *size = softc->params.disksize; return (error); } #endif static int cd6byteworkaround(union ccb *ccb) { u_int8_t *cdb; struct cam_periph *periph; struct cd_softc *softc; struct cd_mode_params *params; int frozen, found; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct cd_softc *)periph->softc; cdb = ccb->csio.cdb_io.cdb_bytes; if ((ccb->ccb_h.flags & CAM_CDB_POINTER) || ((cdb[0] != MODE_SENSE_6) && (cdb[0] != MODE_SELECT_6))) return (0); /* * Because there is no convenient place to stash the overall * cd_mode_params structure pointer, we have to grab it like this. * This means that ALL MODE_SENSE and MODE_SELECT requests in the * cd(4) driver MUST go through cdgetmode() and cdsetmode()! * * XXX It would be nice if, at some point, we could increase the * number of available peripheral private pointers. Both pointers * are currently used in most every peripheral driver. */ found = 0; STAILQ_FOREACH(params, &softc->mode_queue, links) { if (params->mode_buf == ccb->csio.data_ptr) { found = 1; break; } } /* * This shouldn't happen. All mode sense and mode select * operations in the cd(4) driver MUST go through cdgetmode() and * cdsetmode()! */ if (found == 0) { xpt_print(periph->path, "mode buffer not found in mode queue!\n"); return (0); } params->cdb_size = 10; softc->minimum_command_size = 10; xpt_print(ccb->ccb_h.path, "%s(6) failed, increasing minimum CDB size to 10 bytes\n", (cdb[0] == MODE_SENSE_6) ? "MODE_SENSE" : "MODE_SELECT"); if (cdb[0] == MODE_SENSE_6) { struct scsi_mode_sense_10 ms10; struct scsi_mode_sense_6 *ms6; int len; ms6 = (struct scsi_mode_sense_6 *)cdb; bzero(&ms10, sizeof(ms10)); ms10.opcode = MODE_SENSE_10; ms10.byte2 = ms6->byte2; ms10.page = ms6->page; /* * 10 byte mode header, block descriptor, * sizeof(union cd_pages) */ len = sizeof(struct cd_mode_data_10); ccb->csio.dxfer_len = len; scsi_ulto2b(len, ms10.length); ms10.control = ms6->control; bcopy(&ms10, cdb, 10); ccb->csio.cdb_len = 10; } else { struct scsi_mode_select_10 ms10; struct scsi_mode_select_6 *ms6; struct scsi_mode_header_6 *header6; struct scsi_mode_header_10 *header10; struct scsi_mode_page_header *page_header; int blk_desc_len, page_num, page_size, len; ms6 = (struct scsi_mode_select_6 *)cdb; bzero(&ms10, sizeof(ms10)); ms10.opcode = MODE_SELECT_10; ms10.byte2 = ms6->byte2; header6 = (struct scsi_mode_header_6 *)params->mode_buf; header10 = (struct scsi_mode_header_10 *)params->mode_buf; page_header = find_mode_page_6(header6); page_num = page_header->page_code; blk_desc_len = header6->blk_desc_len; page_size = cdgetpagesize(page_num); if (page_size != (page_header->page_length + sizeof(*page_header))) page_size = page_header->page_length + sizeof(*page_header); len = sizeof(*header10) + blk_desc_len + page_size; len = min(params->alloc_len, len); /* * Since the 6 byte parameter header is shorter than the 10 * byte parameter header, we need to copy the actual mode * page data, and the block descriptor, if any, so things wind * up in the right place. The regions will overlap, but * bcopy() does the right thing. */ bcopy(params->mode_buf + sizeof(*header6), params->mode_buf + sizeof(*header10), len - sizeof(*header10)); /* Make sure these fields are set correctly. 
*/ scsi_ulto2b(0, header10->data_length); header10->medium_type = 0; scsi_ulto2b(blk_desc_len, header10->blk_desc_len); ccb->csio.dxfer_len = len; scsi_ulto2b(len, ms10.length); ms10.control = ms6->control; bcopy(&ms10, cdb, 10); ccb->csio.cdb_len = 10; } frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_action(ccb); if (frozen) { cam_release_devq(ccb->ccb_h.path, /*relsim_flags*/0, /*openings*/0, /*timeout*/0, /*getcount_only*/0); } return (ERESTART); } static int cderror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct cd_softc *softc; struct cam_periph *periph; int error, error_code, sense_key, asc, ascq; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct cd_softc *)periph->softc; error = 0; /* * We use a status of CAM_REQ_INVALID as shorthand -- if a 6 byte * CDB comes back with this particular error, try transforming it * into the 10 byte version. */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) { error = cd6byteworkaround(ccb); } else if (scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (sense_key == SSD_KEY_ILLEGAL_REQUEST) error = cd6byteworkaround(ccb); else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x28 && ascq == 0x00) disk_media_changed(softc->disk, M_NOWAIT); else if (sense_key == SSD_KEY_NOT_READY && asc == 0x3a && (softc->flags & CD_FLAG_SAW_MEDIA)) { softc->flags &= ~CD_FLAG_SAW_MEDIA; disk_media_gone(softc->disk, M_NOWAIT); } } if (error == ERESTART) return (error); /* * XXX * Until we have a better way of doing pack validation, * don't treat UAs as errors. */ sense_flags |= SF_RETRY_UA; if (softc->quirks & CD_Q_RETRY_BUSY) sense_flags |= SF_RETRY_BUSY; return (cam_periph_error(ccb, cam_flags, sense_flags)); } static void cdmediapoll(void *arg) { struct cam_periph *periph = arg; struct cd_softc *softc = periph->softc; if (softc->state == CD_STATE_NORMAL && !softc->tur && softc->outstanding_cmds == 0) { if (cam_periph_acquire(periph) == 0) { softc->tur = 1; xpt_schedule(periph, CAM_PRIORITY_NORMAL); } } /* Queue us up again */ if (cd_poll_period != 0) callout_schedule(&softc->mediapoll_c, cd_poll_period * hz); } /* * Read table of contents */ static int cdreadtoc(struct cam_periph *periph, u_int32_t mode, u_int32_t start, u_int8_t *data, u_int32_t len, u_int32_t sense_flags) { u_int32_t ntoc; struct ccb_scsiio *csio; union ccb *ccb; int error; ntoc = len; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; scsi_read_toc(csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* byte1_flags */ (mode == CD_MSF_FORMAT) ? 
CD_MSF : 0, /* format */ SRTOC_FORMAT_TOC, /* track*/ start, /* data_ptr */ data, /* dxfer_len */ len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA | sense_flags); xpt_release_ccb(ccb); return(error); } static int cdreadsubchannel(struct cam_periph *periph, u_int32_t mode, u_int32_t format, int track, struct cd_sub_channel_info *data, u_int32_t len) { struct scsi_read_subchannel *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* flags */ CAM_DIR_IN, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ (u_int8_t *)data, /* dxfer_len */ len, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_read_subchannel), /* timeout */ 50000); scsi_cmd = (struct scsi_read_subchannel *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = READ_SUBCHANNEL; if (mode == CD_MSF_FORMAT) scsi_cmd->byte1 |= CD_MSF; scsi_cmd->byte2 = SRS_SUBQ; scsi_cmd->subchan_format = format; scsi_cmd->track = track; scsi_ulto2b(len, (u_int8_t *)scsi_cmd->data_len); scsi_cmd->control = 0; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } /* * All MODE_SENSE requests in the cd(4) driver MUST go through this * routine. See comments in cd6byteworkaround() for details. */ static int cdgetmode(struct cam_periph *periph, struct cd_mode_params *data, u_int32_t page) { struct ccb_scsiio *csio; struct cd_softc *softc; union ccb *ccb; int param_len; int error; softc = (struct cd_softc *)periph->softc; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; data->cdb_size = softc->minimum_command_size; if (data->cdb_size < 10) param_len = sizeof(struct cd_mode_data); else param_len = sizeof(struct cd_mode_data_10); /* Don't say we've got more room than we actually allocated */ param_len = min(param_len, data->alloc_len); scsi_mode_sense_len(csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* dbd */ 0, /* page_code */ SMS_PAGE_CTRL_CURRENT, /* page */ page, /* param_buf */ data->mode_buf, /* param_len */ param_len, /* minimum_cmd_size */ softc->minimum_command_size, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); /* * It would be nice not to have to do this, but there's no * available pointer in the CCB that would allow us to stuff the * mode params structure in there and retrieve it in * cd6byteworkaround(), so we can set the cdb size. The cdb size * lets the caller know what CDB size we ended up using, so they * can find the actual mode page offset. */ STAILQ_INSERT_TAIL(&softc->mode_queue, data, links); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); STAILQ_REMOVE(&softc->mode_queue, data, cd_mode_params, links); /* * This is a bit of belt-and-suspenders checking, but if we run * into a situation where the target sends back multiple block * descriptors, we might not have enough space in the buffer to * see the whole mode page. Better to return an error than * potentially access memory beyond our malloced region. 
*/ if (error == 0) { u_int32_t data_len; if (data->cdb_size == 10) { struct scsi_mode_header_10 *hdr10; hdr10 = (struct scsi_mode_header_10 *)data->mode_buf; data_len = scsi_2btoul(hdr10->data_length); data_len += sizeof(hdr10->data_length); } else { struct scsi_mode_header_6 *hdr6; hdr6 = (struct scsi_mode_header_6 *)data->mode_buf; data_len = hdr6->data_length; data_len += sizeof(hdr6->data_length); } /* * Complain if there is more mode data available than we * allocated space for. This could potentially happen if * we miscalculated the page length for some reason, if the * drive returns multiple block descriptors, or if it sets * the data length incorrectly. */ if (data_len > data->alloc_len) { xpt_print(periph->path, "allocated modepage %d length " "%d < returned length %d\n", page, data->alloc_len, data_len); error = ENOSPC; } } return (error); } /* * All MODE_SELECT requests in the cd(4) driver MUST go through this * routine. See comments in cd6byteworkaround() for details. */ static int cdsetmode(struct cam_periph *periph, struct cd_mode_params *data) { struct ccb_scsiio *csio; struct cd_softc *softc; union ccb *ccb; int cdb_size, param_len; int error; softc = (struct cd_softc *)periph->softc; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; error = 0; /* * If the data is formatted for the 10 byte version of the mode * select parameter list, we need to use the 10 byte CDB. * Otherwise, we use whatever the stored minimum command size. */ if (data->cdb_size == 10) cdb_size = data->cdb_size; else cdb_size = softc->minimum_command_size; if (cdb_size >= 10) { struct scsi_mode_header_10 *mode_header; u_int32_t data_len; mode_header = (struct scsi_mode_header_10 *)data->mode_buf; data_len = scsi_2btoul(mode_header->data_length); scsi_ulto2b(0, mode_header->data_length); /* * SONY drives do not allow a mode select with a medium_type * value that has just been returned by a mode sense; use a * medium_type of 0 (Default) instead. */ mode_header->medium_type = 0; /* * Pass back whatever the drive passed to us, plus the size * of the data length field. */ param_len = data_len + sizeof(mode_header->data_length); } else { struct scsi_mode_header_6 *mode_header; mode_header = (struct scsi_mode_header_6 *)data->mode_buf; param_len = mode_header->data_length + 1; mode_header->data_length = 0; /* * SONY drives do not allow a mode select with a medium_type * value that has just been returned by a mode sense; use a * medium_type of 0 (Default) instead. */ mode_header->medium_type = 0; } /* Don't say we've got more room than we actually allocated */ param_len = min(param_len, data->alloc_len); scsi_mode_select_len(csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* scsi_page_fmt */ 1, /* save_pages */ 0, /* param_buf */ data->mode_buf, /* param_len */ param_len, /* minimum_cmd_size */ cdb_size, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); /* See comments in cdgetmode() and cd6byteworkaround(). */ STAILQ_INSERT_TAIL(&softc->mode_queue, data, links); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); STAILQ_REMOVE(&softc->mode_queue, data, cd_mode_params, links); return (error); } static int cdplay(struct cam_periph *periph, u_int32_t blk, u_int32_t len) { struct ccb_scsiio *csio; union ccb *ccb; int error; u_int8_t cdb_len; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; /* * Use the smallest possible command to perform the operation. 
*/ if ((len & 0xffff0000) == 0) { /* * We can fit in a 10 byte cdb. */ struct scsi_play_10 *scsi_cmd; scsi_cmd = (struct scsi_play_10 *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_10; scsi_ulto4b(blk, (u_int8_t *)scsi_cmd->blk_addr); scsi_ulto2b(len, (u_int8_t *)scsi_cmd->xfer_len); cdb_len = sizeof(*scsi_cmd); } else { struct scsi_play_12 *scsi_cmd; scsi_cmd = (struct scsi_play_12 *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_12; scsi_ulto4b(blk, (u_int8_t *)scsi_cmd->blk_addr); scsi_ulto4b(len, (u_int8_t *)scsi_cmd->xfer_len); cdb_len = sizeof(*scsi_cmd); } cam_fill_csio(csio, /*retries*/ cd_retry_count, /*cbfcnp*/NULL, /*flags*/CAM_DIR_NONE, MSG_SIMPLE_Q_TAG, /*dataptr*/NULL, /*datalen*/0, /*sense_len*/SSD_FULL_SIZE, cdb_len, /*timeout*/50 * 1000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdplaymsf(struct cam_periph *periph, u_int32_t startm, u_int32_t starts, u_int32_t startf, u_int32_t endm, u_int32_t ends, u_int32_t endf) { struct scsi_play_msf *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_play_msf), /* timeout */ 50000); scsi_cmd = (struct scsi_play_msf *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_MSF; scsi_cmd->start_m = startm; scsi_cmd->start_s = starts; scsi_cmd->start_f = startf; scsi_cmd->end_m = endm; scsi_cmd->end_s = ends; scsi_cmd->end_f = endf; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdplaytracks(struct cam_periph *periph, u_int32_t strack, u_int32_t sindex, u_int32_t etrack, u_int32_t eindex) { struct scsi_play_track *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_play_track), /* timeout */ 50000); scsi_cmd = (struct scsi_play_track *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_TRACK; scsi_cmd->start_track = strack; scsi_cmd->start_index = sindex; scsi_cmd->end_track = etrack; scsi_cmd->end_index = eindex; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdpause(struct cam_periph *periph, u_int32_t go) { struct scsi_pause *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_pause), /* timeout */ 50000); scsi_cmd = (struct scsi_pause *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PAUSE; scsi_cmd->resume = go; error = cdrunccb(ccb, cderror, 
/*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdstartunit(struct cam_periph *periph, int load) { union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_start_stop(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* start */ TRUE, /* load_eject */ load, /* immediate */ FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdstopunit(struct cam_periph *periph, u_int32_t eject) { union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_start_stop(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* start */ FALSE, /* load_eject */ eject, /* immediate */ FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdsetspeed(struct cam_periph *periph, u_int32_t rdspeed, u_int32_t wrspeed) { struct scsi_set_speed *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; /* Preserve old behavior: units in multiples of CDROM speed */ if (rdspeed < 177) rdspeed *= 177; if (wrspeed < 177) wrspeed *= 177; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_set_speed), /* timeout */ 50000); scsi_cmd = (struct scsi_set_speed *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SET_CD_SPEED; scsi_ulto2b(rdspeed, scsi_cmd->readspeed); scsi_ulto2b(wrspeed, scsi_cmd->writespeed); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdreportkey(struct cam_periph *periph, struct dvd_authinfo *authinfo) { union ccb *ccb; u_int8_t *databuf; u_int32_t lba; int error; int length; error = 0; databuf = NULL; lba = 0; switch (authinfo->format) { case DVD_REPORT_AGID: length = sizeof(struct scsi_report_key_data_agid); break; case DVD_REPORT_CHALLENGE: length = sizeof(struct scsi_report_key_data_challenge); break; case DVD_REPORT_KEY1: length = sizeof(struct scsi_report_key_data_key1_key2); break; case DVD_REPORT_TITLE_KEY: length = sizeof(struct scsi_report_key_data_title); /* The lba field is only set for the title key */ lba = authinfo->lba; break; case DVD_REPORT_ASF: length = sizeof(struct scsi_report_key_data_asf); break; case DVD_REPORT_RPC: length = sizeof(struct scsi_report_key_data_rpc); break; case DVD_INVALIDATE_AGID: length = 0; break; default: return (EINVAL); } if (length != 0) { databuf = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); } else databuf = NULL; cam_periph_lock(periph); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_report_key(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* lba */ lba, /* agid */ authinfo->agid, /* key_format */ authinfo->format, /* data_ptr */ databuf, /* dxfer_len */ length, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); if (error != 0) 
goto bailout; if (ccb->csio.resid != 0) { xpt_print(periph->path, "warning, residual for report key " "command is %d\n", ccb->csio.resid); } switch(authinfo->format) { case DVD_REPORT_AGID: { struct scsi_report_key_data_agid *agid_data; agid_data = (struct scsi_report_key_data_agid *)databuf; authinfo->agid = (agid_data->agid & RKD_AGID_MASK) >> RKD_AGID_SHIFT; break; } case DVD_REPORT_CHALLENGE: { struct scsi_report_key_data_challenge *chal_data; chal_data = (struct scsi_report_key_data_challenge *)databuf; bcopy(chal_data->challenge_key, authinfo->keychal, min(sizeof(chal_data->challenge_key), sizeof(authinfo->keychal))); break; } case DVD_REPORT_KEY1: { struct scsi_report_key_data_key1_key2 *key1_data; key1_data = (struct scsi_report_key_data_key1_key2 *)databuf; bcopy(key1_data->key1, authinfo->keychal, min(sizeof(key1_data->key1), sizeof(authinfo->keychal))); break; } case DVD_REPORT_TITLE_KEY: { struct scsi_report_key_data_title *title_data; title_data = (struct scsi_report_key_data_title *)databuf; authinfo->cpm = (title_data->byte0 & RKD_TITLE_CPM) >> RKD_TITLE_CPM_SHIFT; authinfo->cp_sec = (title_data->byte0 & RKD_TITLE_CP_SEC) >> RKD_TITLE_CP_SEC_SHIFT; authinfo->cgms = (title_data->byte0 & RKD_TITLE_CMGS_MASK) >> RKD_TITLE_CMGS_SHIFT; bcopy(title_data->title_key, authinfo->keychal, min(sizeof(title_data->title_key), sizeof(authinfo->keychal))); break; } case DVD_REPORT_ASF: { struct scsi_report_key_data_asf *asf_data; asf_data = (struct scsi_report_key_data_asf *)databuf; authinfo->asf = asf_data->success & RKD_ASF_SUCCESS; break; } case DVD_REPORT_RPC: { struct scsi_report_key_data_rpc *rpc_data; rpc_data = (struct scsi_report_key_data_rpc *)databuf; authinfo->reg_type = (rpc_data->byte4 & RKD_RPC_TYPE_MASK) >> RKD_RPC_TYPE_SHIFT; authinfo->vend_rsts = (rpc_data->byte4 & RKD_RPC_VENDOR_RESET_MASK) >> RKD_RPC_VENDOR_RESET_SHIFT; authinfo->user_rsts = rpc_data->byte4 & RKD_RPC_USER_RESET_MASK; authinfo->region = rpc_data->region_mask; authinfo->rpc_scheme = rpc_data->rpc_scheme1; break; } case DVD_INVALIDATE_AGID: break; default: /* This should be impossible, since we checked above */ error = EINVAL; goto bailout; break; /* NOTREACHED */ } bailout: xpt_release_ccb(ccb); cam_periph_unlock(periph); if (databuf != NULL) free(databuf, M_DEVBUF); return(error); } static int cdsendkey(struct cam_periph *periph, struct dvd_authinfo *authinfo) { union ccb *ccb; u_int8_t *databuf; int length; int error; error = 0; databuf = NULL; switch(authinfo->format) { case DVD_SEND_CHALLENGE: { struct scsi_report_key_data_challenge *challenge_data; length = sizeof(*challenge_data); challenge_data = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); databuf = (u_int8_t *)challenge_data; scsi_ulto2b(length - sizeof(challenge_data->data_len), challenge_data->data_len); bcopy(authinfo->keychal, challenge_data->challenge_key, min(sizeof(authinfo->keychal), sizeof(challenge_data->challenge_key))); break; } case DVD_SEND_KEY2: { struct scsi_report_key_data_key1_key2 *key2_data; length = sizeof(*key2_data); key2_data = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); databuf = (u_int8_t *)key2_data; scsi_ulto2b(length - sizeof(key2_data->data_len), key2_data->data_len); bcopy(authinfo->keychal, key2_data->key1, min(sizeof(authinfo->keychal), sizeof(key2_data->key1))); break; } case DVD_SEND_RPC: { struct scsi_send_key_data_rpc *rpc_data; length = sizeof(*rpc_data); rpc_data = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); databuf = (u_int8_t *)rpc_data; scsi_ulto2b(length - sizeof(rpc_data->data_len), 
rpc_data->data_len); rpc_data->region_code = authinfo->region; break; } default: return (EINVAL); } cam_periph_lock(periph); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_send_key(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* agid */ authinfo->agid, /* key_format */ authinfo->format, /* data_ptr */ databuf, /* dxfer_len */ length, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); cam_periph_unlock(periph); if (databuf != NULL) free(databuf, M_DEVBUF); return(error); } static int cdreaddvdstructure(struct cam_periph *periph, struct dvd_struct *dvdstruct) { union ccb *ccb; u_int8_t *databuf; u_int32_t address; int error; int length; error = 0; databuf = NULL; /* The address is reserved for many of the formats */ address = 0; switch(dvdstruct->format) { case DVD_STRUCT_PHYSICAL: length = sizeof(struct scsi_read_dvd_struct_data_physical); break; case DVD_STRUCT_COPYRIGHT: length = sizeof(struct scsi_read_dvd_struct_data_copyright); break; case DVD_STRUCT_DISCKEY: length = sizeof(struct scsi_read_dvd_struct_data_disc_key); break; case DVD_STRUCT_BCA: length = sizeof(struct scsi_read_dvd_struct_data_bca); break; case DVD_STRUCT_MANUFACT: length = sizeof(struct scsi_read_dvd_struct_data_manufacturer); break; case DVD_STRUCT_CMI: return (ENODEV); case DVD_STRUCT_PROTDISCID: length = sizeof(struct scsi_read_dvd_struct_data_prot_discid); break; case DVD_STRUCT_DISCKEYBLOCK: length = sizeof(struct scsi_read_dvd_struct_data_disc_key_blk); break; case DVD_STRUCT_DDS: length = sizeof(struct scsi_read_dvd_struct_data_dds); break; case DVD_STRUCT_MEDIUM_STAT: length = sizeof(struct scsi_read_dvd_struct_data_medium_status); break; case DVD_STRUCT_SPARE_AREA: length = sizeof(struct scsi_read_dvd_struct_data_spare_area); break; case DVD_STRUCT_RMD_LAST: return (ENODEV); case DVD_STRUCT_RMD_RMA: return (ENODEV); case DVD_STRUCT_PRERECORDED: length = sizeof(struct scsi_read_dvd_struct_data_leadin); break; case DVD_STRUCT_UNIQUEID: length = sizeof(struct scsi_read_dvd_struct_data_disc_id); break; case DVD_STRUCT_DCB: return (ENODEV); case DVD_STRUCT_LIST: /* * This is the maximum allocation length for the READ DVD * STRUCTURE command. There's nothing in the MMC3 spec * that indicates a limit in the amount of data that can * be returned from this call, other than the limits * imposed by the 2-byte length variables. 
*/ length = 65535; break; default: return (EINVAL); } if (length != 0) { databuf = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); } else databuf = NULL; cam_periph_lock(periph); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_read_dvd_structure(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* lba */ address, /* layer_number */ dvdstruct->layer_num, /* key_format */ dvdstruct->format, /* agid */ dvdstruct->agid, /* data_ptr */ databuf, /* dxfer_len */ length, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); if (error != 0) goto bailout; switch(dvdstruct->format) { case DVD_STRUCT_PHYSICAL: { struct scsi_read_dvd_struct_data_layer_desc *inlayer; struct dvd_layer *outlayer; struct scsi_read_dvd_struct_data_physical *phys_data; phys_data = (struct scsi_read_dvd_struct_data_physical *)databuf; inlayer = &phys_data->layer_desc; outlayer = (struct dvd_layer *)&dvdstruct->data; dvdstruct->length = sizeof(*inlayer); outlayer->book_type = (inlayer->book_type_version & RDSD_BOOK_TYPE_MASK) >> RDSD_BOOK_TYPE_SHIFT; outlayer->book_version = (inlayer->book_type_version & RDSD_BOOK_VERSION_MASK); outlayer->disc_size = (inlayer->disc_size_max_rate & RDSD_DISC_SIZE_MASK) >> RDSD_DISC_SIZE_SHIFT; outlayer->max_rate = (inlayer->disc_size_max_rate & RDSD_MAX_RATE_MASK); outlayer->nlayers = (inlayer->layer_info & RDSD_NUM_LAYERS_MASK) >> RDSD_NUM_LAYERS_SHIFT; outlayer->track_path = (inlayer->layer_info & RDSD_TRACK_PATH_MASK) >> RDSD_TRACK_PATH_SHIFT; outlayer->layer_type = (inlayer->layer_info & RDSD_LAYER_TYPE_MASK); outlayer->linear_density = (inlayer->density & RDSD_LIN_DENSITY_MASK) >> RDSD_LIN_DENSITY_SHIFT; outlayer->track_density = (inlayer->density & RDSD_TRACK_DENSITY_MASK); outlayer->bca = (inlayer->bca & RDSD_BCA_MASK) >> RDSD_BCA_SHIFT; outlayer->start_sector = scsi_3btoul(inlayer->main_data_start); outlayer->end_sector = scsi_3btoul(inlayer->main_data_end); outlayer->end_sector_l0 = scsi_3btoul(inlayer->end_sector_layer0); break; } case DVD_STRUCT_COPYRIGHT: { struct scsi_read_dvd_struct_data_copyright *copy_data; copy_data = (struct scsi_read_dvd_struct_data_copyright *) databuf; dvdstruct->cpst = copy_data->cps_type; dvdstruct->rmi = copy_data->region_info; dvdstruct->length = 0; break; } default: /* * Tell the user what the overall length is, no matter * what we can actually fit in the data buffer. */ dvdstruct->length = length - ccb->csio.resid - sizeof(struct scsi_read_dvd_struct_data_header); /* * But only actually copy out the smaller of what we read * in or what the structure can take. 
*/ bcopy(databuf + sizeof(struct scsi_read_dvd_struct_data_header), dvdstruct->data, min(sizeof(dvdstruct->data), dvdstruct->length)); break; } bailout: xpt_release_ccb(ccb); cam_periph_unlock(periph); if (databuf != NULL) free(databuf, M_DEVBUF); return(error); } void scsi_report_key(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t lba, u_int8_t agid, u_int8_t key_format, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_report_key *scsi_cmd; scsi_cmd = (struct scsi_report_key *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = REPORT_KEY; scsi_ulto4b(lba, scsi_cmd->lba); scsi_ulto2b(dxfer_len, scsi_cmd->alloc_len); scsi_cmd->agid_keyformat = (agid << RK_KF_AGID_SHIFT) | (key_format & RK_KF_KEYFORMAT_MASK); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len == 0) ? CAM_DIR_NONE : CAM_DIR_IN, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_send_key(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t agid, u_int8_t key_format, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_send_key *scsi_cmd; scsi_cmd = (struct scsi_send_key *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SEND_KEY; scsi_ulto2b(dxfer_len, scsi_cmd->param_len); scsi_cmd->agid_keyformat = (agid << RK_KF_AGID_SHIFT) | (key_format & RK_KF_KEYFORMAT_MASK); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_OUT, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_dvd_structure(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t address, u_int8_t layer_number, u_int8_t format, u_int8_t agid, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_read_dvd_structure *scsi_cmd; scsi_cmd = (struct scsi_read_dvd_structure *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_DVD_STRUCTURE; scsi_ulto4b(address, scsi_cmd->address); scsi_cmd->layer_number = layer_number; scsi_cmd->format = format; scsi_ulto2b(dxfer_len, scsi_cmd->alloc_len); /* The AGID is the top two bits of this byte */ scsi_cmd->agid = agid << 6; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_toc(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t byte1_flags, uint8_t format, uint8_t track, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout) { struct scsi_read_toc *scsi_cmd; scsi_cmd = (struct scsi_read_toc *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = READ_TOC; /* * The structure is counting from 1, the function counting from 0. * The spec counts from 0. In MMC-6, there is only one flag, the * MSF flag. But we put the whole byte in for a bit a future-proofing. 
*/ scsi_cmd->byte2 = byte1_flags; scsi_cmd->format = format; scsi_cmd->from_track = track; scsi_ulto2b(dxfer_len, scsi_cmd->data_len); cam_fill_csio(csio, /* retries */ retries, /* cbfcnp */ cbfcnp, /* flags */ CAM_DIR_IN, /* tag_action */ tag_action, /* data_ptr */ data_ptr, /* dxfer_len */ dxfer_len, /* sense_len */ sense_len, sizeof(*scsi_cmd), /* timeout */ timeout); } Index: head/sys/cam/scsi/scsi_da.c =================================================================== --- head/sys/cam/scsi/scsi_da.c (revision 355600) +++ head/sys/cam/scsi/scsi_da.c (revision 355601) @@ -1,6569 +1,6569 @@ /*- * Implementation of SCSI Direct Access Peripheral driver for CAM. * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #ifdef _KERNEL #include "opt_da.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif /* _KERNEL */ #ifndef _KERNEL #include #include #endif /* _KERNEL */ #include #include #include #include #ifdef _KERNEL #include #endif /* _KERNEL */ #include #include #include #include #ifdef _KERNEL /* * Note that there are probe ordering dependencies here. The order isn't * controlled by this enumeration, but by explicit state transitions in * dastart() and dadone(). Here are some of the dependencies: * * 1. RC should come first, before RC16, unless there is evidence that RC16 * is supported. * 2. BDC needs to come before any of the ATA probes, or the ZONE probe. * 3. 
The ATA probes should go in this order: * ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE */ typedef enum { DA_STATE_PROBE_WP, DA_STATE_PROBE_RC, DA_STATE_PROBE_RC16, DA_STATE_PROBE_LBP, DA_STATE_PROBE_BLK_LIMITS, DA_STATE_PROBE_BDC, DA_STATE_PROBE_ATA, DA_STATE_PROBE_ATA_LOGDIR, DA_STATE_PROBE_ATA_IDDIR, DA_STATE_PROBE_ATA_SUP, DA_STATE_PROBE_ATA_ZONE, DA_STATE_PROBE_ZONE, DA_STATE_NORMAL } da_state; typedef enum { DA_FLAG_PACK_INVALID = 0x000001, DA_FLAG_NEW_PACK = 0x000002, DA_FLAG_PACK_LOCKED = 0x000004, DA_FLAG_PACK_REMOVABLE = 0x000008, DA_FLAG_NEED_OTAG = 0x000020, DA_FLAG_WAS_OTAG = 0x000040, DA_FLAG_RETRY_UA = 0x000080, DA_FLAG_OPEN = 0x000100, DA_FLAG_SCTX_INIT = 0x000200, DA_FLAG_CAN_RC16 = 0x000400, DA_FLAG_PROBED = 0x000800, DA_FLAG_DIRTY = 0x001000, DA_FLAG_ANNOUNCED = 0x002000, DA_FLAG_CAN_ATA_DMA = 0x004000, DA_FLAG_CAN_ATA_LOG = 0x008000, DA_FLAG_CAN_ATA_IDLOG = 0x010000, DA_FLAG_CAN_ATA_SUPCAP = 0x020000, DA_FLAG_CAN_ATA_ZONE = 0x040000, DA_FLAG_TUR_PENDING = 0x080000 } da_flags; typedef enum { DA_Q_NONE = 0x00, DA_Q_NO_SYNC_CACHE = 0x01, DA_Q_NO_6_BYTE = 0x02, DA_Q_NO_PREVENT = 0x04, DA_Q_4K = 0x08, DA_Q_NO_RC16 = 0x10, DA_Q_NO_UNMAP = 0x20, DA_Q_RETRY_BUSY = 0x40, DA_Q_SMR_DM = 0x80, DA_Q_STRICT_UNMAP = 0x100, DA_Q_128KB = 0x200 } da_quirks; #define DA_Q_BIT_STRING \ "\020" \ "\001NO_SYNC_CACHE" \ "\002NO_6_BYTE" \ "\003NO_PREVENT" \ "\0044K" \ "\005NO_RC16" \ "\006NO_UNMAP" \ "\007RETRY_BUSY" \ "\010SMR_DM" \ "\011STRICT_UNMAP" \ "\012128KB" typedef enum { DA_CCB_PROBE_RC = 0x01, DA_CCB_PROBE_RC16 = 0x02, DA_CCB_PROBE_LBP = 0x03, DA_CCB_PROBE_BLK_LIMITS = 0x04, DA_CCB_PROBE_BDC = 0x05, DA_CCB_PROBE_ATA = 0x06, DA_CCB_BUFFER_IO = 0x07, DA_CCB_DUMP = 0x0A, DA_CCB_DELETE = 0x0B, DA_CCB_TUR = 0x0C, DA_CCB_PROBE_ZONE = 0x0D, DA_CCB_PROBE_ATA_LOGDIR = 0x0E, DA_CCB_PROBE_ATA_IDDIR = 0x0F, DA_CCB_PROBE_ATA_SUP = 0x10, DA_CCB_PROBE_ATA_ZONE = 0x11, DA_CCB_PROBE_WP = 0x12, DA_CCB_TYPE_MASK = 0x1F, DA_CCB_RETRY_UA = 0x20 } da_ccb_state; /* * Order here is important for method choice * * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted 20% quicker deletes * using ATA_TRIM than the corresponding UNMAP results for a real world mysql * import taking 5mins. * */ typedef enum { DA_DELETE_NONE, DA_DELETE_DISABLE, DA_DELETE_ATA_TRIM, DA_DELETE_UNMAP, DA_DELETE_WS16, DA_DELETE_WS10, DA_DELETE_ZERO, DA_DELETE_MIN = DA_DELETE_ATA_TRIM, DA_DELETE_MAX = DA_DELETE_ZERO } da_delete_methods; /* * For SCSI, host managed drives show up as a separate device type. For * ATA, host managed drives also have a different device signature. * XXX KDM figure out the ATA host managed signature. */ typedef enum { DA_ZONE_NONE = 0x00, DA_ZONE_DRIVE_MANAGED = 0x01, DA_ZONE_HOST_AWARE = 0x02, DA_ZONE_HOST_MANAGED = 0x03 } da_zone_mode; /* * We distinguish between these interface cases in addition to the drive type: * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC * o ATA drive behind a SCSI translation layer that does not know about * ZBC/ZAC, and so needs to be managed via ATA passthrough. In this * case, we would need to share the ATA code with the ada(4) driver. * o SCSI drive. 
*/ typedef enum { DA_ZONE_IF_SCSI, DA_ZONE_IF_ATA_PASS, DA_ZONE_IF_ATA_SAT, } da_zone_interface; typedef enum { DA_ZONE_FLAG_RZ_SUP = 0x0001, DA_ZONE_FLAG_OPEN_SUP = 0x0002, DA_ZONE_FLAG_CLOSE_SUP = 0x0004, DA_ZONE_FLAG_FINISH_SUP = 0x0008, DA_ZONE_FLAG_RWP_SUP = 0x0010, DA_ZONE_FLAG_SUP_MASK = (DA_ZONE_FLAG_RZ_SUP | DA_ZONE_FLAG_OPEN_SUP | DA_ZONE_FLAG_CLOSE_SUP | DA_ZONE_FLAG_FINISH_SUP | DA_ZONE_FLAG_RWP_SUP), DA_ZONE_FLAG_URSWRZ = 0x0020, DA_ZONE_FLAG_OPT_SEQ_SET = 0x0040, DA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080, DA_ZONE_FLAG_MAX_SEQ_SET = 0x0100, DA_ZONE_FLAG_SET_MASK = (DA_ZONE_FLAG_OPT_SEQ_SET | DA_ZONE_FLAG_OPT_NONSEQ_SET | DA_ZONE_FLAG_MAX_SEQ_SET) } da_zone_flags; static struct da_zone_desc { da_zone_flags value; const char *desc; } da_zone_desc_table[] = { {DA_ZONE_FLAG_RZ_SUP, "Report Zones" }, {DA_ZONE_FLAG_OPEN_SUP, "Open" }, {DA_ZONE_FLAG_CLOSE_SUP, "Close" }, {DA_ZONE_FLAG_FINISH_SUP, "Finish" }, {DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" }, }; typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb, struct bio *bp); static da_delete_func_t da_delete_trim; static da_delete_func_t da_delete_unmap; static da_delete_func_t da_delete_ws; static const void * da_delete_functions[] = { NULL, NULL, da_delete_trim, da_delete_unmap, da_delete_ws, da_delete_ws, da_delete_ws }; static const char *da_delete_method_names[] = { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" }; static const char *da_delete_method_desc[] = { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP", "WRITE SAME(10) with UNMAP", "ZERO" }; /* Offsets into our private area for storing information */ #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct disk_params { u_int8_t heads; u_int32_t cylinders; u_int8_t secs_per_track; u_int32_t secsize; /* Number of bytes/sector */ u_int64_t sectors; /* total number sectors */ u_int stripesize; u_int stripeoffset; }; #define UNMAP_RANGE_MAX 0xffffffff #define UNMAP_HEAD_SIZE 8 #define UNMAP_RANGE_SIZE 16 #define UNMAP_MAX_RANGES 2048 /* Protocol Max is 4095 */ #define UNMAP_BUF_SIZE ((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \ UNMAP_HEAD_SIZE) #define WS10_MAX_BLKS 0xffff #define WS16_MAX_BLKS 0xffffffff #define ATA_TRIM_MAX_RANGES ((UNMAP_BUF_SIZE / \ (ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE) #define DA_WORK_TUR (1 << 16) typedef enum { DA_REF_OPEN = 1, DA_REF_OPEN_HOLD, DA_REF_CLOSE_HOLD, DA_REF_PROBE_HOLD, DA_REF_TUR, DA_REF_GEOM, DA_REF_SYSCTL, DA_REF_REPROBE, DA_REF_MAX /* KEEP LAST */ } da_ref_token; struct da_softc { struct cam_iosched_softc *cam_iosched; struct bio_queue_head delete_run_queue; LIST_HEAD(, ccb_hdr) pending_ccbs; int refcount; /* Active xpt_action() calls */ da_state state; da_flags flags; da_quirks quirks; int minimum_cmd_size; int error_inject; int trim_max_ranges; int delete_available; /* Delete methods possibly available */ da_zone_mode zone_mode; da_zone_interface zone_interface; da_zone_flags zone_flags; struct ata_gp_log_dir ata_logdir; int valid_logdir_len; struct ata_identify_log_pages ata_iddir; int valid_iddir_len; uint64_t optimal_seq_zones; uint64_t optimal_nonseq_zones; uint64_t max_seq_zones; u_int maxio; uint32_t unmap_max_ranges; uint32_t unmap_max_lba; /* Max LBAs in UNMAP req */ uint32_t unmap_gran; uint32_t unmap_gran_align; uint64_t ws_max_blks; uint64_t trim_count; uint64_t trim_ranges; uint64_t trim_lbas; da_delete_methods delete_method_pref; da_delete_methods delete_method; da_delete_func_t *delete_func; int unmappedio; int rotating; int p_type; struct 
disk_params params; struct disk *disk; union ccb saved_ccb; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; struct callout sendordered_c; uint64_t wwpn; uint8_t unmap_buf[UNMAP_BUF_SIZE]; struct scsi_read_capacity_data_long rcaplong; struct callout mediapoll_c; int ref_flags[DA_REF_MAX]; #ifdef CAM_IO_STATS struct sysctl_ctx_list sysctl_stats_ctx; struct sysctl_oid *sysctl_stats_tree; u_int errors; u_int timeouts; u_int invalidations; #endif #define DA_ANNOUNCETMP_SZ 160 char announce_temp[DA_ANNOUNCETMP_SZ]; #define DA_ANNOUNCE_SZ 400 char announcebuf[DA_ANNOUNCE_SZ]; }; #define dadeleteflag(softc, delete_method, enable) \ if (enable) { \ softc->delete_available |= (1 << delete_method); \ } else { \ softc->delete_available &= ~(1 << delete_method); \ } struct da_quirk_entry { struct scsi_inquiry_pattern inq_pat; da_quirks quirks; }; static const char quantum[] = "QUANTUM"; static const char microp[] = "MICROP"; static struct da_quirk_entry da_quirk_table[] = { /* SPI, FC devices */ { /* * Fujitsu M2513A MO drives. * Tested devices: M2513A2 firmware versions 1200 & 1300. * (dip switch selects whether T_DIRECT or T_OPTICAL device) * Reported by: W.Scholten */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* See above. */ {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This particular Fujitsu drive doesn't like the * synchronize cache command. * Reported by: Tom Jackson */ {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This drive doesn't like the synchronize cache command * either. Reported by: Matthew Jacob * in NetBSD PR kern/6027, August 24, 1998. */ {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This drive doesn't like the synchronize cache command * either. Reported by: Hellmuth Michaelis (hm@kts.org) * (PR 8882). */ {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: Blaz Zupan */ {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: Blaz Zupan */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: walter@pelissero.de */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't work correctly with 6 byte reads/writes. * Returns illegal request, and points to byte 9 of the * 6-byte CDB. * Reported by: Adam McDougall */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE }, { /* See above. */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE }, { /* * Doesn't like the synchronize cache command. * Reported by: walter@pelissero.de */ {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * The CISS RAID controllers do not support SYNC_CACHE */ {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * The STEC SSDs sometimes hang on UNMAP. 
*/ {T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"}, /*quirks*/ DA_Q_NO_UNMAP }, { /* * VMware returns BUSY status when storage has transient * connectivity problems, so better wait. * Also VMware returns odd errors on misaligned UNMAPs. */ {T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"}, /*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP }, /* USB mass storage devices supported by umass(4) */ { /* * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player * PR: kern/51675 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Power Quotient Int. (PQI) USB flash key * PR: kern/53067 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Creative Nomad MUVO mp3 player (USB) * PR: kern/53094 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * Jungsoft NEXDISK USB flash key * PR: kern/54737 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * FreeDik USB Mini Data Drive * PR: kern/54786 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Sigmatel USB Flash MP3 Player * PR: kern/57046 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * Neuros USB Digital Audio Computer * PR: kern/63645 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SEAGRAND NP-900 MP3 Player * PR: kern/64563 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * iRiver iFP MP3 player (with UMS Firmware) * PR: kern/54881, i386/63941, kern/66124 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01 * PR: kern/70158 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * ZICPlay USB MP3 Player with FM * PR: kern/75057 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * TEAC USB floppy mechanisms */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Kingston DataTraveler II+ USB Pen-Drive. * Reported by: Pawel Jakub Dawidek */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * USB DISK Pro PMAP * Reported by: jhs * PR: usb/96381 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Motorola E398 Mobile Phone (TransFlash memory card). * Reported by: Wojciech A. Koszek * PR: usb/89889 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Qware BeatZkey! Pro * PR: usb/79164 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Time DPA20B 1GB MP3 Player * PR: usb/81846 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Samsung USB key 128Mb * PR: usb/90081 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Kingston DataTraveler 2.0 USB Flash memory. 
* PR: usb/89196 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Creative MUVO Slim mp3 player (USB) * PR: usb/86131 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3) * PR: usb/80487 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SanDisk Micro Cruzer 128MB * PR: usb/75970 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * TOSHIBA TransMemory USB sticks * PR: kern/94660 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * PNY USB 3.0 Flash Drives */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16 }, { /* * PNY USB Flash keys * PR: usb/75578, usb/72344, usb/65436 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Genesys GL3224 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", "120?"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16 }, { /* * Genesys 6-in-1 Card Reader * PR: usb/94647 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Rekam Digital CAMERA * PR: usb/98713 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * iRiver H10 MP3 player * PR: usb/102547 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * iRiver U10 MP3 player * PR: usb/92306 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * X-Micro Flash Disk * PR: usb/96901 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * EasyMP3 EM732X USB 2.0 Flash MP3 Player * PR: usb/96546 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*", "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Denver MP3 player * PR: usb/107101 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Philips USB Key Audio KEY013 * PR: usb/68412 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT }, { /* * JNC MP3 Player * PR: usb/94439 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SAMSUNG MP0402H * PR: usb/108427 */ {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * I/O Magic USB flash - Giga Bank * PR: usb/108810 */ {T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * JoyFly 128mb USB Flash Drive * PR: 96133 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * ChipsBnk usb stick * PR: 103702 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A * PR: 129858 */ {T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Samsung YP-U3 mp3-player * PR: 125398 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { {T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*", "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Sony Cyber-Shot DSC cameras * PR: 
usb/137035 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT }, { {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3", "1.00"}, /*quirks*/ DA_Q_NO_PREVENT }, { /* At least several Transcent USB sticks lie on RC16. */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*", "*"}, /*quirks*/ DA_Q_NO_RC16 }, { /* * I-O Data USB Flash Disk * PR: usb/211716 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_RC16 }, { /* * SLC CHIPFANCIER USB drives * PR: usb/234503 (RC10 right, RC16 wrong) * 16GB, 32GB and 128GB confirmed to have same issue */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "*SLC", "CHIPFANCIER", "*"}, /*quirks*/ DA_Q_NO_RC16 }, /* ATA/SATA devices over SAS/USB/... */ { /* Sandisk X400 */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SanDisk SD8SB8U1*", "*" }, /*quirks*/DA_Q_128KB }, { /* Hitachi Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" }, /*quirks*/DA_Q_4K }, { /* Micron Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced 
Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* * Olympus digital cameras (C-3040ZOOM, C-2040ZOOM, C-1) * PR: usb/97472 */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "C*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE }, { /* * Olympus digital cameras (D-370) * PR: usb/97472 */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "D*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE }, { /* * Olympus digital cameras (E-100RS, E-10). 
* PR: usb/97472 */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "E*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE }, { /* * Olympus FE-210 camera */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Pentax Digital Camera * PR: usb/93389 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "PENTAX", "DIGITAL CAMERA", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * LG UP3S MP3 player */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Laser MP3-2GA13 MP3 player */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * LaCie external 250GB Hard drive des by Porsche * Submitted by: Ben Stuyts * PR: 121474 */ {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, /* SATA SSDs */ { /* * Corsair Force 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Force 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Neutron GTX SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Force GT & GS SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" }, /*quirks*/DA_Q_4K }, { /* * Crucial M4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" }, /*quirks*/DA_Q_4K }, { /* * Crucial RealSSD C300 SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 320 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 330 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 510 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 520 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel S3610 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel X25-M Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" }, /*quirks*/DA_Q_4K }, { /* * Kingston E100 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Kingston HyperX 3k SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Marvell SSDs (entry taken from OpenSolaris) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Agility 2 SSDs * 4k optimised & trim only works 
in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Agility 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Deneva R Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 2 SSDs (inc pro series) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 750 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 750*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 830 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 840 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 845 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 845*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 850 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 843T Series SSDs (MZ7WD*) * Samsung PM851 Series SSDs (MZ7TE*) * Samsung PM853T Series SSDs (MZ7GE*) * Samsung SM863 Series SSDs (MZ7KM*) * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" }, /*quirks*/DA_Q_4K }, { /* * Same as for SAMSUNG MZ7* but enable the quirks for SSD * starting with MZ7* too */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MZ7*", "*" }, /*quirks*/DA_Q_4K }, { /* * SuperTalent TeraDrive CT SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" }, /*quirks*/DA_Q_4K }, { /* * XceedIOPS SATA SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" }, /*quirks*/DA_Q_4K }, { /* * Hama Innostor USB-Stick */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" }, /*quirks*/DA_Q_NO_RC16 }, { /* * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR) * Drive Managed SATA hard drive. This drive doesn't report * in firmware that it is a drive managed SMR drive. 
*/ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS000[23]*", "*" }, /*quirks*/DA_Q_SMR_DM }, { /* * MX-ES USB Drive by Mach Xtreme */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"}, /*quirks*/DA_Q_NO_RC16 }, }; static disk_strategy_t dastrategy; static dumper_t dadump; static periph_init_t dainit; static void daasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void dasysctlinit(void *context, int pending); static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS); static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS); static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS); static int dazonemodesysctl(SYSCTL_HANDLER_ARGS); static int dazonesupsysctl(SYSCTL_HANDLER_ARGS); static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS); static void dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method); static off_t dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method); static void dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method); static void daprobedone(struct cam_periph *periph, union ccb *ccb); static periph_ctor_t daregister; static periph_dtor_t dacleanup; static periph_start_t dastart; static periph_oninv_t daoninvalidate; static void dazonedone(struct cam_periph *periph, union ccb *ccb); static void dadone(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probewp(struct cam_periph *periph, union ccb *done_ccb); static void dadone_proberc(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probelbp(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probeblklimits(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probebdc(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probeata(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probeatalogdir(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probeataiddir(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probeatasup(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probeatazone(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probezone(struct cam_periph *periph, union ccb *done_ccb); static void dadone_tur(struct cam_periph *periph, union ccb *done_ccb); static int daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static void daprevent(struct cam_periph *periph, int action); static void dareprobe(struct cam_periph *periph); static void dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, struct scsi_read_capacity_data_long *rcaplong, size_t rcap_size); -static timeout_t dasendorderedtag; +static callout_func_t dasendorderedtag; static void dashutdown(void *arg, int howto); -static timeout_t damediapoll; +static callout_func_t damediapoll; #ifndef DA_DEFAULT_POLL_PERIOD #define DA_DEFAULT_POLL_PERIOD 3 #endif #ifndef DA_DEFAULT_TIMEOUT #define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */ #endif #ifndef DA_DEFAULT_SOFTTIMEOUT #define DA_DEFAULT_SOFTTIMEOUT 0 #endif #ifndef DA_DEFAULT_RETRY #define DA_DEFAULT_RETRY 4 #endif #ifndef DA_DEFAULT_SEND_ORDERED #define DA_DEFAULT_SEND_ORDERED 1 #endif static int da_poll_period = DA_DEFAULT_POLL_PERIOD; static int da_retry_count = DA_DEFAULT_RETRY; static int da_default_timeout = DA_DEFAULT_TIMEOUT; static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT; static int da_send_ordered = DA_DEFAULT_SEND_ORDERED; static int da_disable_wp_detection = 0; static SYSCTL_NODE(_kern_cam, OID_AUTO, 
da, CTLFLAG_RD, 0, "CAM Direct Access Disk driver"); SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN, &da_poll_period, 0, "Media polling period in seconds"); SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN, &da_retry_count, 0, "Normal I/O retry count"); SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN, &da_default_timeout, 0, "Normal I/O timeout (in seconds)"); SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN, &da_send_ordered, 0, "Send Ordered Tags"); SYSCTL_INT(_kern_cam_da, OID_AUTO, disable_wp_detection, CTLFLAG_RWTUN, &da_disable_wp_detection, 0, "Disable detection of write-protected disks"); SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout, CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I", "Soft I/O timeout (ms)"); TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout); /* * DA_ORDEREDTAG_INTERVAL determines how often, relative * to the default timeout, we check to see whether an ordered * tagged transaction is appropriate to prevent simple tag * starvation. Since we'd like to ensure that there is at least * 1/2 of the timeout length left for a starved transaction to * complete after we've sent an ordered tag, we must poll at least * four times in every timeout period. This takes care of the worst * case where a starved transaction starts during an interval that * meets the requirement "don't send an ordered tag" test so it takes * us two intervals to determine that a tag must be sent. */ #ifndef DA_ORDEREDTAG_INTERVAL #define DA_ORDEREDTAG_INTERVAL 4 #endif static struct periph_driver dadriver = { dainit, "da", TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(da, dadriver); static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers"); /* * This driver takes out references / holds in well defined pairs, never * recursively. These macros / inline functions enforce those rules. They * are only enabled with DA_TRACK_REFS or INVARIANTS. If DA_TRACK_REFS is * defined to be 2 or larger, the tracking also includes debug printfs. */ #if defined(DA_TRACK_REFS) || defined(INVARIANTS) #ifndef DA_TRACK_REFS #define DA_TRACK_REFS 1 #endif #if DA_TRACK_REFS > 1 static const char *da_ref_text[] = { "bogus", "open", "open hold", "close hold", "reprobe hold", "Test Unit Ready", "Geom", "sysctl", "reprobe", "max -- also bogus" }; #define DA_PERIPH_PRINT(periph, msg, args...) \ CAM_PERIPH_PRINT(periph, msg, ##args) #else #define DA_PERIPH_PRINT(periph, msg, args...) 
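/*
 * A minimal sketch of the expected pairing, taken from daopen()/daclose()
 * below: each acquire/hold names the same token as its matching
 * release/unhold, e.g.
 *
 *	da_periph_acquire(periph, DA_REF_OPEN);
 *	da_periph_hold(periph, PRIBIO|PCATCH, DA_REF_OPEN_HOLD);
 *	...
 *	da_periph_unhold(periph, DA_REF_OPEN_HOLD);
 *	da_periph_release(periph, DA_REF_OPEN);
 *
 * With tracking enabled, the per-token ref_flags[] counters panic if a
 * token is ever taken twice in a row or dropped while not held.
 */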
#endif static inline void token_sanity(da_ref_token token) { if ((unsigned)token >= DA_REF_MAX) panic("Bad token value passed in %d\n", token); } static inline int da_periph_hold(struct cam_periph *periph, int priority, da_ref_token token) { int err = cam_periph_hold(periph, priority); token_sanity(token); DA_PERIPH_PRINT(periph, "Holding device %s (%d): %d\n", da_ref_text[token], token, err); if (err == 0) { int cnt; struct da_softc *softc = periph->softc; cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1); if (cnt != 0) panic("Re-holding for reason %d, cnt = %d", token, cnt); } return (err); } static inline void da_periph_unhold(struct cam_periph *periph, da_ref_token token) { int cnt; struct da_softc *softc = periph->softc; token_sanity(token); DA_PERIPH_PRINT(periph, "Unholding device %s (%d)\n", da_ref_text[token], token); cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1); if (cnt != 1) panic("Unholding %d with cnt = %d", token, cnt); cam_periph_unhold(periph); } static inline int da_periph_acquire(struct cam_periph *periph, da_ref_token token) { int err = cam_periph_acquire(periph); token_sanity(token); DA_PERIPH_PRINT(periph, "acquiring device %s (%d): %d\n", da_ref_text[token], token, err); if (err == 0) { int cnt; struct da_softc *softc = periph->softc; cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1); if (cnt != 0) panic("Re-refing for reason %d, cnt = %d", token, cnt); } return (err); } static inline void da_periph_release(struct cam_periph *periph, da_ref_token token) { int cnt; struct da_softc *softc = periph->softc; token_sanity(token); DA_PERIPH_PRINT(periph, "releasing device %s (%d)\n", da_ref_text[token], token); cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1); if (cnt != 1) panic("Releasing %d with cnt = %d", token, cnt); cam_periph_release(periph); } static inline void da_periph_release_locked(struct cam_periph *periph, da_ref_token token) { int cnt; struct da_softc *softc = periph->softc; token_sanity(token); DA_PERIPH_PRINT(periph, "releasing device (locked) %s (%d)\n", da_ref_text[token], token); cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1); if (cnt != 1) panic("releasing (locked) %d with cnt = %d", token, cnt); cam_periph_release_locked(periph); } #define cam_periph_hold POISON #define cam_periph_unhold POISON #define cam_periph_acquire POISON #define cam_periph_release POISON #define cam_periph_release_locked POISON #else #define da_periph_hold(periph, prio, token) cam_periph_hold((periph), (prio)) #define da_periph_unhold(periph, token) cam_periph_unhold((periph)) #define da_periph_acquire(periph, token) cam_periph_acquire((periph)) #define da_periph_release(periph, token) cam_periph_release((periph)) #define da_periph_release_locked(periph, token) cam_periph_release_locked((periph)) #endif static int daopen(struct disk *dp) { struct cam_periph *periph; struct da_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; if (da_periph_acquire(periph, DA_REF_OPEN) != 0) { return (ENXIO); } cam_periph_lock(periph); if ((error = da_periph_hold(periph, PRIBIO|PCATCH, DA_REF_OPEN_HOLD)) != 0) { cam_periph_unlock(periph); da_periph_release(periph, DA_REF_OPEN); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("daopen\n")); softc = (struct da_softc *)periph->softc; dareprobe(periph); /* Wait for the disk size update. 
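 * The sleep on &softc->disk->d_mediasize below is paired with the
 * wakeup(&softc->disk->d_mediasize) issued from daprobedone() once the
 * reprobe requested above has finished.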
*/ error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO, "dareprobe", 0); if (error != 0) xpt_print(periph->path, "unable to retrieve capacity data\n"); if (periph->flags & CAM_PERIPH_INVALID) error = ENXIO; if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 && (softc->quirks & DA_Q_NO_PREVENT) == 0) daprevent(periph, PR_PREVENT); if (error == 0) { softc->flags &= ~DA_FLAG_PACK_INVALID; softc->flags |= DA_FLAG_OPEN; } da_periph_unhold(periph, DA_REF_OPEN_HOLD); cam_periph_unlock(periph); if (error != 0) da_periph_release(periph, DA_REF_OPEN); return (error); } static int daclose(struct disk *dp) { struct cam_periph *periph; struct da_softc *softc; union ccb *ccb; periph = (struct cam_periph *)dp->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("daclose\n")); if (da_periph_hold(periph, PRIBIO, DA_REF_CLOSE_HOLD) == 0) { /* Flush disk cache. */ if ((softc->flags & DA_FLAG_DIRTY) != 0 && (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_synchronize_cache(&ccb->csio, /*retries*/1, /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG, /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE, 5 * 60 * 1000); cam_periph_runccb(ccb, daerror, /*cam_flags*/0, /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR, softc->disk->d_devstat); softc->flags &= ~DA_FLAG_DIRTY; xpt_release_ccb(ccb); } /* Allow medium removal. */ if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 && (softc->quirks & DA_Q_NO_PREVENT) == 0) daprevent(periph, PR_ALLOW); da_periph_unhold(periph, DA_REF_CLOSE_HOLD); } /* * If we've got removeable media, mark the blocksize as * unavailable, since it could change when new media is * inserted. */ if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE; softc->flags &= ~DA_FLAG_OPEN; while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1); cam_periph_unlock(periph); da_periph_release(periph, DA_REF_OPEN); return (0); } static void daschedule(struct cam_periph *periph) { struct da_softc *softc = (struct da_softc *)periph->softc; if (softc->state != DA_STATE_NORMAL) return; cam_iosched_schedule(softc->cam_iosched, periph); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void dastrategy(struct bio *bp) { struct cam_periph *periph; struct da_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); /* * If the device has been made invalid, error out */ if ((softc->flags & DA_FLAG_PACK_INVALID)) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp)); /* * Zone commands must be ordered, because they can depend on the * effects of previously issued commands, and they may affect * commands after them. */ if (bp->bio_cmd == BIO_ZONE) bp->bio_flags |= BIO_ORDERED; /* * Place it in the queue of disk activities for this disk */ cam_iosched_queue_work(softc->cam_iosched, bp); /* * Schedule ourselves for performing the work. 
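 * Note that daschedule() returns without doing anything until the probe
 * state machine has reached DA_STATE_NORMAL; I/O queued before that
 * point is started by the daschedule() call in daprobedone() instead.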
*/ daschedule(periph); cam_periph_unlock(periph); return; } static int dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct cam_periph *periph; struct da_softc *softc; u_int secsize; struct ccb_scsiio csio; struct disk *dp; int error = 0; dp = arg; periph = dp->d_drv1; softc = (struct da_softc *)periph->softc; secsize = softc->params.secsize; if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) return (ENXIO); memset(&csio, 0, sizeof(csio)); if (length > 0) { xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); csio.ccb_h.ccb_state = DA_CCB_DUMP; scsi_read_write(&csio, /*retries*/0, /*cbfcnp*/NULL, MSG_ORDERED_Q_TAG, /*read*/SCSI_RW_WRITE, /*byte2*/0, /*minimum_cmd_size*/ softc->minimum_cmd_size, offset / secsize, length / secsize, /*data_ptr*/(u_int8_t *) virtual, /*dxfer_len*/length, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); error = cam_periph_runccb((union ccb *)&csio, cam_periph_error, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if (error != 0) printf("Aborting dump due to I/O error.\n"); return (error); } /* * Sync the disk cache contents to the physical media. */ if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); csio.ccb_h.ccb_state = DA_CCB_DUMP; scsi_synchronize_cache(&csio, /*retries*/0, /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG, /*begin_lba*/0,/* Cover the whole disk */ /*lb_count*/0, SSD_FULL_SIZE, 5 * 1000); error = cam_periph_runccb((union ccb *)&csio, cam_periph_error, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); } return (error); } static int dagetattr(struct bio *bp) { int ret; struct cam_periph *periph; periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, periph->path); cam_periph_unlock(periph); if (ret == 0) bp->bio_completed = bp->bio_length; return ret; } static void dainit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("da: Failed to attach master async callback " "due to status 0x%x!\n", status); } else if (da_send_ordered) { /* Register our shutdown event handler */ if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown, NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) printf("dainit: shutdown event registration failed!\n"); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void dadiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; da_periph_release(periph, DA_REF_GEOM); } static void daoninvalidate(struct cam_periph *periph) { struct da_softc *softc; cam_periph_assert(periph, MA_OWNED); softc = (struct da_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, daasync, periph, periph->path); softc->flags |= DA_FLAG_PACK_INVALID; #ifdef CAM_IO_STATS softc->invalidations++; #endif /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ cam_iosched_flush(softc->cam_iosched, NULL, ENXIO); /* * Tell GEOM that we've gone away, we'll get a callback when it is * done cleaning up its resources. 
*/ disk_gone(softc->disk); } static void dacleanup(struct cam_periph *periph) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; cam_periph_unlock(periph); cam_iosched_fini(softc->cam_iosched); /* * If we can't free the sysctl tree, oh well... */ if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) { #ifdef CAM_IO_STATS if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0) xpt_print(periph->path, "can't remove sysctl stats context\n"); #endif if (sysctl_ctx_free(&softc->sysctl_ctx) != 0) xpt_print(periph->path, "can't remove sysctl context\n"); } callout_drain(&softc->mediapoll_c); disk_destroy(softc->disk); callout_drain(&softc->sendordered_c); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void daasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; struct da_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: /* callback to create periph, no locking yet */ { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data) != T_DIRECT && SID_TYPE(&cgd->inq_data) != T_RBC && SID_TYPE(&cgd->inq_data) != T_OPTICAL && SID_TYPE(&cgd->inq_data) != T_ZBC_HM) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(daregister, daoninvalidate, dacleanup, dastart, "da", CAM_PERIPH_BIO, path, daasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("daasync: Unable to attach to new device " "due to status 0x%x\n", status); return; } case AC_ADVINFO_CHANGED: /* Doesn't touch periph */ { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct da_softc *softc; softc = periph->softc; disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); } break; } case AC_UNIT_ATTENTION: { union ccb *ccb; int error_code, sense_key, asc, ascq; softc = (struct da_softc *)periph->softc; ccb = (union ccb *)arg; /* * Handle all UNIT ATTENTIONs except our own, as they will be * handled by daerror(). Since this comes from a different periph, * that periph's lock is held, not ours, so we have to take it ours * out to touch softc flags. */ if (xpt_path_periph(ccb->ccb_h.path) != periph && scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (asc == 0x2A && ascq == 0x09) { xpt_print(ccb->ccb_h.path, "Capacity data has changed\n"); cam_periph_lock(periph); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); cam_periph_unlock(periph); } else if (asc == 0x28 && ascq == 0x00) { cam_periph_lock(periph); softc->flags &= ~DA_FLAG_PROBED; cam_periph_unlock(periph); disk_media_changed(softc->disk, M_NOWAIT); } else if (asc == 0x3F && ascq == 0x03) { xpt_print(ccb->ccb_h.path, "INQUIRY data has changed\n"); cam_periph_lock(periph); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); cam_periph_unlock(periph); } } break; } case AC_SCSI_AEN: /* Called for this path: periph locked */ /* * Appears to be currently unused for SCSI devices, only ata SIMs * generate this. 
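 * When one does arrive we ask for a TEST UNIT READY by setting the
 * DA_WORK_TUR flag on the I/O scheduler and taking a DA_REF_TUR
 * reference; dastart() drops that reference again if ordinary I/O shows
 * up first and makes the explicit TUR unnecessary.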
*/ cam_periph_assert(periph, MA_OWNED); softc = (struct da_softc *)periph->softc; if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) && (softc->flags & DA_FLAG_TUR_PENDING) == 0) { if (da_periph_acquire(periph, DA_REF_TUR) == 0) { cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); daschedule(periph); } } /* FALLTHROUGH */ case AC_SENT_BDR: /* Called for this path: periph locked */ case AC_BUS_RESET: /* Called for this path: periph locked */ { struct ccb_hdr *ccbh; cam_periph_assert(periph, MA_OWNED); softc = (struct da_softc *)periph->softc; /* * Don't fail on the expected unit attention * that will occur. */ softc->flags |= DA_FLAG_RETRY_UA; LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) ccbh->ccb_state |= DA_CCB_RETRY_UA; break; } case AC_INQ_CHANGED: /* Called for this path: periph locked */ cam_periph_assert(periph, MA_OWNED); softc = (struct da_softc *)periph->softc; softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); break; default: break; } cam_periph_async(periph, code, path, arg); } static void dasysctlinit(void *context, int pending) { struct cam_periph *periph; struct da_softc *softc; char tmpstr[32], tmpstr2[16]; struct ccb_trans_settings cts; periph = (struct cam_periph *)context; /* * periph was held for us when this task was enqueued */ if (periph->flags & CAM_PERIPH_INVALID) { da_periph_release(periph, DA_REF_SYSCTL); return; } softc = (struct da_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); cam_periph_lock(periph); softc->flags |= DA_FLAG_SCTX_INIT; cam_periph_unlock(periph); softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) { printf("dasysctlinit: unable to allocate sysctl tree\n"); da_periph_release(periph, DA_REF_SYSCTL); return; } /* * Now register the sysctl handler, so the user can change the value on * the fly. 
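 * For example, the BIO_DELETE method for unit 0 can be inspected or
 * changed at runtime with something like (method names follow
 * da_delete_method_names[], e.g. UNMAP or WS16):
 *
 *	sysctl kern.cam.da.0.delete_method
 *	sysctl kern.cam.da.0.delete_method=UNMAP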
*/ SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN, softc, 0, dadeletemethodsysctl, "A", "BIO_DELETE execution method"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW, softc, 0, dadeletemaxsysctl, "Q", "Maximum BIO_DELETE size"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW, &softc->minimum_cmd_size, 0, dacmdsizesysctl, "I", "Minimum CDB size"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "trim_count", CTLFLAG_RD, &softc->trim_count, "Total number of unmap/dsm commands sent"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "trim_ranges", CTLFLAG_RD, &softc->trim_ranges, "Total number of ranges in unmap/dsm commands"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "trim_lbas", CTLFLAG_RD, &softc->trim_lbas, "Total lbas in the unmap/dsm commands sent"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD, softc, 0, dazonemodesysctl, "A", "Zone Mode"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD, softc, 0, dazonesupsysctl, "A", "Zone Support"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones, "Optimal Number of Open Sequential Write Preferred Zones"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "optimal_nonseq_zones", CTLFLAG_RD, &softc->optimal_nonseq_zones, "Optimal Number of Non-Sequentially Written Sequential Write " "Preferred Zones"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones, "Maximum Number of Open Sequential Write Required Zones"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "error_inject", CTLFLAG_RW, &softc->error_inject, 0, "error_inject leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "unmapped_io", CTLFLAG_RD, &softc->unmappedio, 0, "Unmapped I/O support"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "rotating", CTLFLAG_RD, &softc->rotating, 0, "Rotating media"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "p_type", CTLFLAG_RD, &softc->p_type, 0, "DIF protection type"); #ifdef CAM_TEST_FAILURE SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "invalidate", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, periph, 0, cam_periph_invalidate_sysctl, "I", "Write 1 to invalidate the drive immediately"); #endif /* * Add some addressing info. 
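 * (At the moment this is just the Fibre Channel WWPN, exported below as
 * the "wwpn" sysctl when the transport reports a valid one.)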
*/ memset(&cts, 0, sizeof (cts)); xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cam_periph_lock(periph); xpt_action((union ccb *)&cts); cam_periph_unlock(periph); if (cts.ccb_h.status != CAM_REQ_CMP) { da_periph_release(periph, DA_REF_SYSCTL); return; } if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWPN) { softc->wwpn = fc->wwpn; SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "wwpn", CTLFLAG_RD, &softc->wwpn, "World Wide Port Name"); } } #ifdef CAM_IO_STATS /* * Now add some useful stats. * XXX These should live in cam_periph and be common to all periphs */ softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats", CTLFLAG_RD, 0, "Statistics"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "errors", CTLFLAG_RD, &softc->errors, 0, "Transport errors reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "timeouts", CTLFLAG_RD, &softc->timeouts, 0, "Device timeouts reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "pack_invalidations", CTLFLAG_RD, &softc->invalidations, 0, "Device pack invalidations"); #endif cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx, softc->sysctl_tree); da_periph_release(periph, DA_REF_SYSCTL); } static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS) { int error; uint64_t value; struct da_softc *softc; softc = (struct da_softc *)arg1; value = softc->disk->d_delmaxsize; error = sysctl_handle_64(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* only accept values smaller than the calculated value */ if (value > dadeletemaxsize(softc, softc->delete_method)) { return (EINVAL); } softc->disk->d_delmaxsize = value; return (0); } static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS) { int error, value; value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* * Acceptable values here are 6, 10, 12 or 16. 
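 * Anything else is rounded up to the next supported CDB size, so e.g. a
 * request for 8 becomes 10 and anything above 12 becomes 16.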
*/ if (value < 6) value = 6; else if ((value > 6) && (value <= 10)) value = 10; else if ((value > 10) && (value <= 12)) value = 12; else if (value > 12) value = 16; *(int *)arg1 = value; return (0); } static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS) { sbintime_t value; int error; value = da_default_softtimeout / SBT_1MS; error = sysctl_handle_int(oidp, (int *)&value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* XXX Should clip this to a reasonable level */ if (value > da_default_timeout * 1000) return (EINVAL); da_default_softtimeout = value * SBT_1MS; return (0); } static void dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method) { softc->delete_method = delete_method; softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method); softc->delete_func = da_delete_functions[delete_method]; if (softc->delete_method > DA_DELETE_DISABLE) softc->disk->d_flags |= DISKFLAG_CANDELETE; else softc->disk->d_flags &= ~DISKFLAG_CANDELETE; } static off_t dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method) { off_t sectors; switch(delete_method) { case DA_DELETE_UNMAP: sectors = (off_t)softc->unmap_max_lba; break; case DA_DELETE_ATA_TRIM: sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges; break; case DA_DELETE_WS16: sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS); break; case DA_DELETE_ZERO: case DA_DELETE_WS10: sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS); break; default: return 0; } return (off_t)softc->params.secsize * omin(sectors, softc->params.sectors); } static void daprobedone(struct cam_periph *periph, union ccb *ccb) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); dadeletemethodchoose(softc, DA_DELETE_NONE); if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) { char buf[80]; int i, sep; snprintf(buf, sizeof(buf), "Delete methods: <"); sep = 0; for (i = 0; i <= DA_DELETE_MAX; i++) { if ((softc->delete_available & (1 << i)) == 0 && i != softc->delete_method) continue; if (sep) strlcat(buf, ",", sizeof(buf)); strlcat(buf, da_delete_method_names[i], sizeof(buf)); if (i == softc->delete_method) strlcat(buf, "(*)", sizeof(buf)); sep = 1; } strlcat(buf, ">", sizeof(buf)); printf("%s%d: %s\n", periph->periph_name, periph->unit_number, buf); } if ((softc->disk->d_flags & DISKFLAG_WRITE_PROTECT) != 0 && (softc->flags & DA_FLAG_ANNOUNCED) == 0) { printf("%s%d: Write Protected\n", periph->periph_name, periph->unit_number); } /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the probe lock on the peripheral. * The peripheral will only go away once the last lock * is removed, and we need it around for the CCB release * operation. */ xpt_release_ccb(ccb); softc->state = DA_STATE_NORMAL; softc->flags |= DA_FLAG_PROBED; daschedule(periph); wakeup(&softc->disk->d_mediasize); if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) { softc->flags |= DA_FLAG_ANNOUNCED; da_periph_unhold(periph, DA_REF_PROBE_HOLD); } else da_periph_release_locked(periph, DA_REF_REPROBE); } static void dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method) { int i, methods; /* If available, prefer the method requested by user. */ i = softc->delete_method_pref; methods = softc->delete_available | (1 << DA_DELETE_DISABLE); if (methods & (1 << i)) { dadeletemethodset(softc, i); return; } /* Use the pre-defined order to choose the best performing delete. 
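 * DA_DELETE_ZERO is deliberately skipped here; writing zeroes is never
 * picked automatically, only when explicitly requested through the
 * delete_method sysctl/tunable.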
*/ for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) { if (i == DA_DELETE_ZERO) continue; if (softc->delete_available & (1 << i)) { dadeletemethodset(softc, i); return; } } /* Fallback to default. */ dadeletemethodset(softc, default_method); } static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS) { char buf[16]; const char *p; struct da_softc *softc; int i, error, value; softc = (struct da_softc *)arg1; value = softc->delete_method; if (value < 0 || value > DA_DELETE_MAX) p = "UNKNOWN"; else p = da_delete_method_names[value]; strncpy(buf, p, sizeof(buf)); error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); for (i = 0; i <= DA_DELETE_MAX; i++) { if (strcmp(buf, da_delete_method_names[i]) == 0) break; } if (i > DA_DELETE_MAX) return (EINVAL); softc->delete_method_pref = i; dadeletemethodchoose(softc, DA_DELETE_NONE); return (0); } static int dazonemodesysctl(SYSCTL_HANDLER_ARGS) { char tmpbuf[40]; struct da_softc *softc; int error; softc = (struct da_softc *)arg1; switch (softc->zone_mode) { case DA_ZONE_DRIVE_MANAGED: snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed"); break; case DA_ZONE_HOST_AWARE: snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware"); break; case DA_ZONE_HOST_MANAGED: snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed"); break; case DA_ZONE_NONE: default: snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned"); break; } error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req); return (error); } static int dazonesupsysctl(SYSCTL_HANDLER_ARGS) { char tmpbuf[180]; struct da_softc *softc; struct sbuf sb; int error, first; unsigned int i; softc = (struct da_softc *)arg1; error = 0; first = 1; sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0); for (i = 0; i < sizeof(da_zone_desc_table) / sizeof(da_zone_desc_table[0]); i++) { if (softc->zone_flags & da_zone_desc_table[i].value) { if (first == 0) sbuf_printf(&sb, ", "); else first = 0; sbuf_cat(&sb, da_zone_desc_table[i].desc); } } if (first == 1) sbuf_printf(&sb, "None"); sbuf_finish(&sb); error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); return (error); } static cam_status daregister(struct cam_periph *periph, void *arg) { struct da_softc *softc; struct ccb_pathinq cpi; struct ccb_getdev *cgd; char tmpstr[80]; caddr_t match; int quirks; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("daregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc == NULL) { printf("daregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } if (cam_iosched_init(&softc->cam_iosched, periph) != 0) { printf("daregister: Unable to probe new device. " "Unable to allocate iosched memory\n"); free(softc, M_DEVBUF); return(CAM_REQ_CMP_ERR); } LIST_INIT(&softc->pending_ccbs); softc->state = DA_STATE_PROBE_WP; bioq_init(&softc->delete_run_queue); if (SID_IS_REMOVABLE(&cgd->inq_data)) softc->flags |= DA_FLAG_PACK_REMOVABLE; softc->unmap_max_ranges = UNMAP_MAX_RANGES; softc->unmap_max_lba = UNMAP_RANGE_MAX; softc->unmap_gran = 0; softc->unmap_gran_align = 0; softc->ws_max_blks = WS16_MAX_BLKS; softc->trim_max_ranges = ATA_TRIM_MAX_RANGES; softc->rotating = 1; periph->softc = softc; /* * See if this device has any quirks. 
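 * The INQUIRY data is matched against da_quirk_table[] with
 * scsi_inquiry_match(), and the result can be overridden per unit with
 * the kern.cam.da.%d.quirks tunable fetched just below.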
*/ match = cam_quirkmatch((caddr_t)&cgd->inq_data, (caddr_t)da_quirk_table, nitems(da_quirk_table), sizeof(*da_quirk_table), scsi_inquiry_match); if (match != NULL) softc->quirks = ((struct da_quirk_entry *)match)->quirks; else softc->quirks = DA_Q_NONE; /* Check if the SIM does not want 6 byte commands */ xpt_path_inq(&cpi, periph->path); if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE)) softc->quirks |= DA_Q_NO_6_BYTE; /* Override quirks if tunable is set */ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.quirks", periph->unit_number); quirks = softc->quirks; TUNABLE_INT_FETCH(tmpstr, &quirks); softc->quirks = quirks; if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM) softc->zone_mode = DA_ZONE_HOST_MANAGED; else if (softc->quirks & DA_Q_SMR_DM) softc->zone_mode = DA_ZONE_DRIVE_MANAGED; else softc->zone_mode = DA_ZONE_NONE; if (softc->zone_mode != DA_ZONE_NONE) { if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) softc->zone_interface = DA_ZONE_IF_ATA_SAT; else softc->zone_interface = DA_ZONE_IF_ATA_PASS; } else softc->zone_interface = DA_ZONE_IF_SCSI; } TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph); /* * Take an exclusive section lock qon the periph while dastart is called * to finish the probe. The lock will be dropped in dadone at the end * of probe. This locks out daopen and daclose from racing with the * probe. * * XXX if cam_periph_hold returns an error, we don't hold a refcount. */ (void)da_periph_hold(periph, PRIBIO, DA_REF_PROBE_HOLD); /* * Schedule a periodic event to occasionally send an * ordered tag to a device. */ callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0); callout_reset(&softc->sendordered_c, (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, dasendorderedtag, periph); cam_periph_unlock(periph); /* * RBC devices don't have to support READ(6), only READ(10). */ if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC) softc->minimum_cmd_size = 10; else softc->minimum_cmd_size = 6; /* * Load the user's default, if any. */ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size", periph->unit_number); TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size); /* * 6, 10, 12 and 16 are the currently permissible values. */ if (softc->minimum_cmd_size > 12) softc->minimum_cmd_size = 16; else if (softc->minimum_cmd_size > 10) softc->minimum_cmd_size = 12; else if (softc->minimum_cmd_size > 6) softc->minimum_cmd_size = 10; else softc->minimum_cmd_size = 6; /* Predict whether device may support READ CAPACITY(16). */ if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 && (softc->quirks & DA_Q_NO_RC16) == 0) { softc->flags |= DA_FLAG_CAN_RC16; } /* * Register this media as a disk. 
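 * The struct disk is populated with our d_open, d_close, d_strategy,
 * d_dump, d_getattr and d_gone methods and handed to GEOM via
 * disk_create(); the DA_REF_GEOM reference taken just before that is
 * only returned when GEOM calls dadiskgonecb() to tell us the provider
 * is gone.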
*/ softc->disk = disk_alloc(); softc->disk->d_devstat = devstat_new_entry(periph->periph_name, periph->unit_number, 0, DEVSTAT_BS_UNAVAILABLE, SID_TYPE(&cgd->inq_data) | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_DISK); softc->disk->d_open = daopen; softc->disk->d_close = daclose; softc->disk->d_strategy = dastrategy; softc->disk->d_dump = dadump; softc->disk->d_getattr = dagetattr; softc->disk->d_gone = dadiskgonecb; softc->disk->d_name = "da"; softc->disk->d_drv1 = periph; if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ else if (cpi.maxio > MAXPHYS) softc->maxio = MAXPHYS; /* for safety */ else softc->maxio = cpi.maxio; if (softc->quirks & DA_Q_128KB) softc->maxio = min(softc->maxio, 128 * 1024); softc->disk->d_maxsize = softc->maxio; softc->disk->d_unit = periph->unit_number; softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE; if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE; if ((cpi.hba_misc & PIM_UNMAPPED) != 0) { softc->unmappedio = 1; softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO; } cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor, sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr)); strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr)); cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)], cgd->inq_data.product, sizeof(cgd->inq_data.product), sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr)); softc->disk->d_hba_vendor = cpi.hba_vendor; softc->disk->d_hba_device = cpi.hba_device; softc->disk->d_hba_subvendor = cpi.hba_subvendor; softc->disk->d_hba_subdevice = cpi.hba_subdevice; snprintf(softc->disk->d_attachment, sizeof(softc->disk->d_attachment), "%s%d", cpi.dev_name, cpi.unit_number); /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * dadiskgonecb()) telling us that our provider has been freed. */ if (da_periph_acquire(periph, DA_REF_GEOM) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); /* * Add async callbacks for events of interest. * I don't bother checking if this fails as, * in most cases, the system will function just * fine without them and the only alternative * would be to not attach the device on failure. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION | AC_INQ_CHANGED, daasync, periph, periph->path); /* * Emit an attribute changed notification just in case * physical path information arrived before our async * event handler was registered, but after anyone attaching * to our disk device polled it. */ disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); /* * Schedule a periodic media polling events. 
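 * Polling is only armed for removable media when the device does not do
 * asynchronous event notification (SID_AEN) and kern.cam.da.poll_period
 * is non-zero.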
*/ callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); if ((softc->flags & DA_FLAG_PACK_REMOVABLE) && (cgd->inq_flags & SID_AEN) == 0 && da_poll_period != 0) callout_reset(&softc->mediapoll_c, da_poll_period * hz, damediapoll, periph); xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static int da_zone_bio_to_scsi(int disk_zone_cmd) { switch (disk_zone_cmd) { case DISK_ZONE_OPEN: return ZBC_OUT_SA_OPEN; case DISK_ZONE_CLOSE: return ZBC_OUT_SA_CLOSE; case DISK_ZONE_FINISH: return ZBC_OUT_SA_FINISH; case DISK_ZONE_RWP: return ZBC_OUT_SA_RWP; } return -1; } static int da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp, int *queue_ccb) { struct da_softc *softc; int error; error = 0; if (bp->bio_cmd != BIO_ZONE) { error = EINVAL; goto bailout; } softc = periph->softc; switch (bp->bio_zone.zone_cmd) { case DISK_ZONE_OPEN: case DISK_ZONE_CLOSE: case DISK_ZONE_FINISH: case DISK_ZONE_RWP: { int zone_flags; int zone_sa; uint64_t lba; zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd); if (zone_sa == -1) { xpt_print(periph->path, "Cannot translate zone " "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd); error = EINVAL; goto bailout; } zone_flags = 0; lba = bp->bio_zone.zone_params.rwp.id; if (bp->bio_zone.zone_params.rwp.flags & DISK_ZONE_RWP_FLAG_ALL) zone_flags |= ZBC_OUT_ALL; if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { scsi_zbc_out(&ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*service_action*/ zone_sa, /*zone_id*/ lba, /*zone_flags*/ zone_flags, /*data_ptr*/ NULL, /*dxfer_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); } else { /* * Note that in this case, even though we can * technically use NCQ, we don't bother for several * reasons: * 1. It hasn't been tested on a SAT layer that * supports it. This is new as of SAT-4. * 2. Even when there is a SAT layer that supports * it, that SAT layer will also probably support * ZBC -> ZAC translation, since they are both * in the SAT-4 spec. * 3. Translation will likely be preferable to ATA * passthrough. LSI / Avago at least single * steps ATA passthrough commands in the HBA, * regardless of protocol, so unless that * changes, there is a performance penalty for * doing ATA passthrough no matter whether * you're using NCQ/FPDMA, DMA or PIO. * 4. It requires a 32-byte CDB, which at least at * this point in CAM requires a CDB pointer, which * would require us to allocate an additional bit * of storage separate from the CCB. 
*/ error = scsi_ata_zac_mgmt_out(&ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*use_ncq*/ 0, /*zm_action*/ zone_sa, /*zone_id*/ lba, /*zone_flags*/ zone_flags, /*data_ptr*/ NULL, /*dxfer_len*/ 0, /*cdb_storage*/ NULL, /*cdb_storage_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (error != 0) { error = EINVAL; xpt_print(periph->path, "scsi_ata_zac_mgmt_out() returned an " "error!"); goto bailout; } } *queue_ccb = 1; break; } case DISK_ZONE_REPORT_ZONES: { uint8_t *rz_ptr; uint32_t num_entries, alloc_size; struct disk_zone_report *rep; rep = &bp->bio_zone.zone_params.report; num_entries = rep->entries_allocated; if (num_entries == 0) { xpt_print(periph->path, "No entries allocated for " "Report Zones request\n"); error = EINVAL; goto bailout; } alloc_size = sizeof(struct scsi_report_zones_hdr) + (sizeof(struct scsi_report_zones_desc) * num_entries); alloc_size = min(alloc_size, softc->disk->d_maxsize); rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO); if (rz_ptr == NULL) { xpt_print(periph->path, "Unable to allocate memory " "for Report Zones request\n"); error = ENOMEM; goto bailout; } if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { scsi_zbc_in(&ccb->csio, /*retries*/ da_retry_count, /*cbcfnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*service_action*/ ZBC_IN_SA_REPORT_ZONES, /*zone_start_lba*/ rep->starting_id, /*zone_options*/ rep->rep_options, /*data_ptr*/ rz_ptr, /*dxfer_len*/ alloc_size, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); } else { /* * Note that in this case, even though we can * technically use NCQ, we don't bother for several * reasons: * 1. It hasn't been tested on a SAT layer that * supports it. This is new as of SAT-4. * 2. Even when there is a SAT layer that supports * it, that SAT layer will also probably support * ZBC -> ZAC translation, since they are both * in the SAT-4 spec. * 3. Translation will likely be preferable to ATA * passthrough. LSI / Avago at least single * steps ATA passthrough commands in the HBA, * regardless of protocol, so unless that * changes, there is a performance penalty for * doing ATA passthrough no matter whether * you're using NCQ/FPDMA, DMA or PIO. * 4. It requires a 32-byte CDB, which at least at * this point in CAM requires a CDB pointer, which * would require us to allocate an additional bit * of storage separate from the CCB. */ error = scsi_ata_zac_mgmt_in(&ccb->csio, /*retries*/ da_retry_count, /*cbcfnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*use_ncq*/ 0, /*zm_action*/ ATA_ZM_REPORT_ZONES, /*zone_id*/ rep->starting_id, /*zone_flags*/ rep->rep_options, /*data_ptr*/ rz_ptr, /*dxfer_len*/ alloc_size, /*cdb_storage*/ NULL, /*cdb_storage_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (error != 0) { error = EINVAL; xpt_print(periph->path, "scsi_ata_zac_mgmt_in() returned an " "error!"); goto bailout; } } /* * For BIO_ZONE, this isn't normally needed. However, it * is used by devstat_end_transaction_bio() to determine * how much data was transferred. */ /* * XXX KDM we have a problem. But I'm not sure how to fix * it. devstat uses bio_bcount - bio_resid to calculate * the amount of data transferred. The GEOM disk code * uses bio_length - bio_resid to calculate the amount of * data in bio_completed. We have different structure * sizes above and below the ada(4) driver. So, if we * use the sizes above, the amount transferred won't be * quite accurate for devstat. 
If we use different sizes * for bio_bcount and bio_length (above and below * respectively), then the residual needs to match one or * the other. Everything is calculated after the bio * leaves the driver, so changing the values around isn't * really an option. For now, just set the count to the * passed in length. This means that the calculations * above (e.g. bio_completed) will be correct, but the * amount of data reported to devstat will be slightly * under or overstated. */ bp->bio_bcount = bp->bio_length; *queue_ccb = 1; break; } case DISK_ZONE_GET_PARAMS: { struct disk_zone_disk_params *params; params = &bp->bio_zone.zone_params.disk_params; bzero(params, sizeof(*params)); switch (softc->zone_mode) { case DA_ZONE_DRIVE_MANAGED: params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED; break; case DA_ZONE_HOST_AWARE: params->zone_mode = DISK_ZONE_MODE_HOST_AWARE; break; case DA_ZONE_HOST_MANAGED: params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED; break; default: case DA_ZONE_NONE: params->zone_mode = DISK_ZONE_MODE_NONE; break; } if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ) params->flags |= DISK_ZONE_DISK_URSWRZ; if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) { params->optimal_seq_zones = softc->optimal_seq_zones; params->flags |= DISK_ZONE_OPT_SEQ_SET; } if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) { params->optimal_nonseq_zones = softc->optimal_nonseq_zones; params->flags |= DISK_ZONE_OPT_NONSEQ_SET; } if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) { params->max_seq_zones = softc->max_seq_zones; params->flags |= DISK_ZONE_MAX_SEQ_SET; } if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP) params->flags |= DISK_ZONE_RZ_SUP; if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP) params->flags |= DISK_ZONE_OPEN_SUP; if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP) params->flags |= DISK_ZONE_CLOSE_SUP; if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP) params->flags |= DISK_ZONE_FINISH_SUP; if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP) params->flags |= DISK_ZONE_RWP_SUP; break; } default: break; } bailout: return (error); } static void dastart(struct cam_periph *periph, union ccb *start_ccb) { struct da_softc *softc; cam_periph_assert(periph, MA_OWNED); softc = (struct da_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n")); skipstate: switch (softc->state) { case DA_STATE_NORMAL: { struct bio *bp; uint8_t tag_code; more: bp = cam_iosched_next_bio(softc->cam_iosched); if (bp == NULL) { if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { softc->flags |= DA_FLAG_TUR_PENDING; cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); scsi_test_unit_ready(&start_ccb->csio, /*retries*/ da_retry_count, dadone_tur, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_TUR; xpt_action(start_ccb); } else xpt_release_ccb(start_ccb); break; } if (bp->bio_cmd == BIO_DELETE) { if (softc->delete_func != NULL) { softc->delete_func(periph, start_ccb, bp); goto out; } else { /* * Not sure this is possible, but failsafe by * lying and saying "sure, done." 
*/ biofinish(bp, NULL, 0); goto more; } } if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); da_periph_release_locked(periph, DA_REF_TUR); } if ((bp->bio_flags & BIO_ORDERED) != 0 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) { softc->flags &= ~DA_FLAG_NEED_OTAG; softc->flags |= DA_FLAG_WAS_OTAG; tag_code = MSG_ORDERED_Q_TAG; } else { tag_code = MSG_SIMPLE_Q_TAG; } switch (bp->bio_cmd) { case BIO_WRITE: case BIO_READ: { void *data_ptr; int rw_op; biotrack(bp, __func__); if (bp->bio_cmd == BIO_WRITE) { softc->flags |= DA_FLAG_DIRTY; rw_op = SCSI_RW_WRITE; } else { rw_op = SCSI_RW_READ; } data_ptr = bp->bio_data; if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) { rw_op |= SCSI_RW_BIO; data_ptr = bp; } scsi_read_write(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/tag_code, rw_op, /*byte2*/0, softc->minimum_cmd_size, /*lba*/bp->bio_pblkno, /*block_count*/bp->bio_bcount / softc->params.secsize, data_ptr, /*dxfer_len*/ bp->bio_bcount, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) start_ccb->csio.bio = bp; #endif break; } case BIO_FLUSH: /* * If we don't support sync cache, or the disk * isn't dirty, FLUSH is a no-op. Use the * allocated CCB for the next bio if one is * available. */ if ((softc->quirks & DA_Q_NO_SYNC_CACHE) != 0 || (softc->flags & DA_FLAG_DIRTY) == 0) { biodone(bp); goto skipstate; } /* * BIO_FLUSH doesn't currently communicate * range data, so we synchronize the cache * over the whole disk. */ scsi_synchronize_cache(&start_ccb->csio, /*retries*/1, /*cbfcnp*/dadone, /*tag_action*/tag_code, /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE, da_default_timeout*1000); /* * Clear the dirty flag before sending the command. * Either this sync cache will be successful, or it * will fail after a retry. If it fails, it is * unlikely to be successful if retried later, so * we'll save ourselves time by just marking the * device clean. 
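* A later BIO_WRITE sets DA_FLAG_DIRTY again in dastart(), so the next * BIO_FLUSH after new writes will still issue a SYNCHRONIZE CACHE.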
*/ softc->flags &= ~DA_FLAG_DIRTY; break; case BIO_ZONE: { int error, queue_ccb; queue_ccb = 0; error = da_zone_cmd(periph, start_ccb, bp,&queue_ccb); if ((error != 0) || (queue_ccb == 0)) { biofinish(bp, NULL, error); xpt_release_ccb(start_ccb); return; } break; } } start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO; start_ccb->ccb_h.flags |= CAM_UNLOCKED; start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout); out: LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h, periph_links.le); /* We expect a unit attention from this device */ if ((softc->flags & DA_FLAG_RETRY_UA) != 0) { start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA; softc->flags &= ~DA_FLAG_RETRY_UA; } start_ccb->ccb_h.ccb_bp = bp; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); /* May have more work to do, so ensure we stay scheduled */ daschedule(periph); break; } case DA_STATE_PROBE_WP: { void *mode_buf; int mode_buf_len; if (da_disable_wp_detection) { if ((softc->flags & DA_FLAG_CAN_RC16) != 0) softc->state = DA_STATE_PROBE_RC16; else softc->state = DA_STATE_PROBE_RC; goto skipstate; } mode_buf_len = 192; mode_buf = malloc(mode_buf_len, M_SCSIDA, M_NOWAIT); if (mode_buf == NULL) { xpt_print(periph->path, "Unable to send mode sense - " "malloc failure\n"); if ((softc->flags & DA_FLAG_CAN_RC16) != 0) softc->state = DA_STATE_PROBE_RC16; else softc->state = DA_STATE_PROBE_RC; goto skipstate; } scsi_mode_sense_len(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone_probewp, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*dbd*/ FALSE, /*pc*/ SMS_PAGE_CTRL_CURRENT, /*page*/ SMS_ALL_PAGES_PAGE, /*param_buf*/ mode_buf, /*param_len*/ mode_buf_len, /*minimum_cmd_size*/ softc->minimum_cmd_size, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_WP; xpt_action(start_ccb); break; } case DA_STATE_PROBE_RC: { struct scsi_read_capacity_data *rcap; rcap = (struct scsi_read_capacity_data *) malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO); if (rcap == NULL) { printf("dastart: Couldn't malloc read_capacity data\n"); /* da_free_periph??? */ break; } scsi_read_capacity(&start_ccb->csio, /*retries*/da_retry_count, dadone_proberc, MSG_SIMPLE_Q_TAG, rcap, SSD_FULL_SIZE, /*timeout*/5000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC; xpt_action(start_ccb); break; } case DA_STATE_PROBE_RC16: { struct scsi_read_capacity_data_long *rcaplong; rcaplong = (struct scsi_read_capacity_data_long *) malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO); if (rcaplong == NULL) { printf("dastart: Couldn't malloc read_capacity data\n"); /* da_free_periph??? 
*/ break; } scsi_read_capacity_16(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone_proberc, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*lba*/ 0, /*reladr*/ 0, /*pmi*/ 0, /*rcap_buf*/ (uint8_t *)rcaplong, /*rcap_buf_len*/ sizeof(*rcaplong), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16; xpt_action(start_ccb); break; } case DA_STATE_PROBE_LBP: { struct scsi_vpd_logical_block_prov *lbp; if (!scsi_vpd_supported_page(periph, SVPD_LBP)) { /* * If we get here we don't support any SBC-3 delete * methods with UNMAP as the Logical Block Provisioning * VPD page support is required for devices which * support it according to T10/1799-D Revision 31 * however older revisions of the spec don't mandate * this so we currently don't remove these methods * from the available set. */ softc->state = DA_STATE_PROBE_BLK_LIMITS; goto skipstate; } lbp = (struct scsi_vpd_logical_block_prov *) malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO); if (lbp == NULL) { printf("dastart: Couldn't malloc lbp data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone_probelbp, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)lbp, /*inq_len*/sizeof(*lbp), /*evpd*/TRUE, /*page_code*/SVPD_LBP, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP; xpt_action(start_ccb); break; } case DA_STATE_PROBE_BLK_LIMITS: { struct scsi_vpd_block_limits *block_limits; if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) { /* Not supported skip to next probe */ softc->state = DA_STATE_PROBE_BDC; goto skipstate; } block_limits = (struct scsi_vpd_block_limits *) malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO); if (block_limits == NULL) { printf("dastart: Couldn't malloc block_limits data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone_probeblklimits, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)block_limits, /*inq_len*/sizeof(*block_limits), /*evpd*/TRUE, /*page_code*/SVPD_BLOCK_LIMITS, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS; xpt_action(start_ccb); break; } case DA_STATE_PROBE_BDC: { struct scsi_vpd_block_characteristics *bdc; if (!scsi_vpd_supported_page(periph, SVPD_BDC)) { softc->state = DA_STATE_PROBE_ATA; goto skipstate; } bdc = (struct scsi_vpd_block_characteristics *) malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); if (bdc == NULL) { printf("dastart: Couldn't malloc bdc data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone_probebdc, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)bdc, /*inq_len*/sizeof(*bdc), /*evpd*/TRUE, /*page_code*/SVPD_BDC, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA: { struct ata_params *ata_params; if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { if ((softc->zone_mode == DA_ZONE_HOST_AWARE) || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { /* * Note that if the ATA VPD page isn't * supported, we aren't talking to an ATA * device anyway. Support for that VPD * page is mandatory for SCSI to ATA (SAT) * translation layers. 
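* Since the zone mode is host aware or host managed, fall through to the * SCSI Zoned Block Device Characteristics probe below rather than the ATA * probe path.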
*/ softc->state = DA_STATE_PROBE_ZONE; goto skipstate; } daprobedone(periph, start_ccb); break; } ata_params = &periph->path->device->ident_data; scsi_ata_identify(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone_probeata, /*tag_action*/MSG_SIMPLE_Q_TAG, /*data_ptr*/(u_int8_t *)ata_params, /*dxfer_len*/sizeof(*ata_params), /*sense_len*/SSD_FULL_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_LOGDIR: { struct ata_gp_log_dir *log_dir; int retval; retval = 0; if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) { /* * If we don't have log support, not much point in * trying to probe zone support. */ daprobedone(periph, start_ccb); break; } /* * If we have an ATA device (the SCSI ATA Information VPD * page should be present and the ATA identify should have * succeeded) and it supports logs, ask for the log directory. */ log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO); if (log_dir == NULL) { xpt_print(periph->path, "Couldn't malloc log_dir " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone_probeatalogdir, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_LOG_DIRECTORY, /*page_number*/ 0, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)log_dir, /*dxfer_len*/ sizeof(*log_dir), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(log_dir, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_IDDIR: { struct ata_identify_log_pages *id_dir; int retval; retval = 0; /* * Check here to see whether the Identify Device log is * supported in the directory of logs. If so, continue * with requesting the log of identify device pages. */ if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) { daprobedone(periph, start_ccb); break; } id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO); if (id_dir == NULL) { xpt_print(periph->path, "Couldn't malloc id_dir " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone_probeataiddir, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_PAGE_LIST, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)id_dir, /*dxfer_len*/ sizeof(*id_dir), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(id_dir, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_SUP: { struct ata_identify_log_sup_cap *sup_cap; int retval; retval = 0; /* * Check here to see whether the Supported Capabilities log * is in the list of Identify Device logs. 
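* DA_FLAG_CAN_ATA_SUPCAP was set while parsing the Identify Device log * directory in dadone_probeataiddir().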
*/ if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) { daprobedone(periph, start_ccb); break; } sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO); if (sup_cap == NULL) { xpt_print(periph->path, "Couldn't malloc sup_cap " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone_probeatasup, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_SUP_CAP, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)sup_cap, /*dxfer_len*/ sizeof(*sup_cap), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(sup_cap, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_ZONE: { struct ata_zoned_info_log *ata_zone; int retval; retval = 0; /* * Check here to see whether the zoned device information * page is supported. If so, continue on to request it. * If not, skip to DA_STATE_PROBE_LOG or done. */ if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) { daprobedone(periph, start_ccb); break; } ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA, M_NOWAIT|M_ZERO); if (ata_zone == NULL) { xpt_print(periph->path, "Couldn't malloc ata_zone " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone_probeatazone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_ZDI, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)ata_zone, /*dxfer_len*/ sizeof(*ata_zone), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(ata_zone, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ZONE: { struct scsi_vpd_zoned_bdc *bdc; /* * Note that this page will be supported for SCSI protocol * devices that support ZBC (SMR devices), as well as ATA * protocol devices that are behind a SAT (SCSI to ATA * Translation) layer that supports converting ZBC commands * to their ZAC equivalents. */ if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) { daprobedone(periph, start_ccb); break; } bdc = (struct scsi_vpd_zoned_bdc *) malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); if (bdc == NULL) { xpt_release_ccb(start_ccb); xpt_print(periph->path, "Couldn't malloc zone VPD " "data\n"); break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone_probezone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)bdc, /*inq_len*/sizeof(*bdc), /*evpd*/TRUE, /*page_code*/SVPD_ZONED_BDC, /*sense_len*/SSD_FULL_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE; xpt_action(start_ccb); break; } } } /* * In each of the methods below, while its the caller's * responsibility to ensure the request will fit into a * single device request, we might have changed the delete * method due to the device incorrectly advertising either * its supported methods or limits. 
* * To prevent this causing further issues we validate the request * against the method's limits, and warn, which would * otherwise be unnecessary. */ static void da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc = (struct da_softc *)periph->softc; struct bio *bp1; uint8_t *buf = softc->unmap_buf; struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE]; uint64_t lba, lastlba = (uint64_t)-1; uint64_t totalcount = 0; uint64_t count; uint32_t c, lastcount = 0, ranges = 0; /* * Currently this doesn't take the UNMAP * Granularity and Granularity Alignment * fields into account. * * This could result in both suboptimal unmap * requests as well as UNMAP calls unmapping * fewer LBAs than requested. */ bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); bp1 = bp; do { /* * Note: ada and da are different in how they store the * pending bp's in a trim. ada stores all of them in the * trim_req.bps. da stores all but the first one in the * delete_run_queue. ada then completes all the bps in * its adadone() loop. da completes all the bps in the * delete_run_queue in dadone, and relies on the biodone * after to complete. This should be reconciled since there's * no real reason to do it differently. XXX */ if (bp1 != bp) bioq_insert_tail(&softc->delete_run_queue, bp1); lba = bp1->bio_pblkno; count = bp1->bio_bcount / softc->params.secsize; /* Try to extend the previous range. */ if (lba == lastlba) { c = omin(count, UNMAP_RANGE_MAX - lastcount); lastlba += c; lastcount += c; scsi_ulto4b(lastcount, d[ranges - 1].length); count -= c; lba += c; totalcount += c; } else if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0) { /* Align length of the previous range. */ if ((c = lastcount % softc->unmap_gran) != 0) { if (lastcount <= c) { totalcount -= lastcount; lastlba = (uint64_t)-1; lastcount = 0; ranges--; } else { totalcount -= c; lastlba -= c; lastcount -= c; scsi_ulto4b(lastcount, d[ranges - 1].length); } } /* Align beginning of the new range. */ c = (lba - softc->unmap_gran_align) % softc->unmap_gran; if (c != 0) { c = softc->unmap_gran - c; if (count <= c) { count = 0; } else { lba += c; count -= c; } } } while (count > 0) { c = omin(count, UNMAP_RANGE_MAX); if (totalcount + c > softc->unmap_max_lba || ranges >= softc->unmap_max_ranges) { xpt_print(periph->path, "%s issuing short delete %ld > %ld" " || %d >= %d\n", da_delete_method_desc[softc->delete_method], totalcount + c, softc->unmap_max_lba, ranges, softc->unmap_max_ranges); break; } scsi_u64to8b(lba, d[ranges].lba); scsi_ulto4b(c, d[ranges].length); lba += c; totalcount += c; ranges++; count -= c; lastlba = lba; lastcount = c; } bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (ranges >= softc->unmap_max_ranges || totalcount + bp1->bio_bcount / softc->params.secsize > softc->unmap_max_lba) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); /* Align length of the last range.
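* When DA_Q_STRICT_UNMAP is set, the final range is shortened (or dropped * entirely) below so that its length is a multiple of unmap_gran, just as * was done for the earlier ranges.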
*/ if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 && (c = lastcount % softc->unmap_gran) != 0) { if (lastcount <= c) ranges--; else scsi_ulto4b(lastcount - c, d[ranges - 1].length); } scsi_ulto2b(ranges * 16 + 6, &buf[0]); scsi_ulto2b(ranges * 16, &buf[2]); scsi_unmap(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*byte2*/0, /*data_ptr*/ buf, /*dxfer_len*/ ranges * 16 + 8, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; softc->trim_count++; softc->trim_ranges += ranges; softc->trim_lbas += totalcount; cam_iosched_submit_trim(softc->cam_iosched); } static void da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc = (struct da_softc *)periph->softc; struct bio *bp1; uint8_t *buf = softc->unmap_buf; uint64_t lastlba = (uint64_t)-1; uint64_t count; uint64_t lba; uint32_t lastcount = 0, c, requestcount; int ranges = 0, off, block_count; bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); bp1 = bp; do { if (bp1 != bp)//XXX imp XXX bioq_insert_tail(&softc->delete_run_queue, bp1); lba = bp1->bio_pblkno; count = bp1->bio_bcount / softc->params.secsize; requestcount = count; /* Try to extend the previous range. */ if (lba == lastlba) { c = omin(count, ATA_DSM_RANGE_MAX - lastcount); lastcount += c; off = (ranges - 1) * 8; buf[off + 6] = lastcount & 0xff; buf[off + 7] = (lastcount >> 8) & 0xff; count -= c; lba += c; } while (count > 0) { c = omin(count, ATA_DSM_RANGE_MAX); off = ranges * 8; buf[off + 0] = lba & 0xff; buf[off + 1] = (lba >> 8) & 0xff; buf[off + 2] = (lba >> 16) & 0xff; buf[off + 3] = (lba >> 24) & 0xff; buf[off + 4] = (lba >> 32) & 0xff; buf[off + 5] = (lba >> 40) & 0xff; buf[off + 6] = c & 0xff; buf[off + 7] = (c >> 8) & 0xff; lba += c; ranges++; count -= c; lastcount = c; if (count != 0 && ranges == softc->trim_max_ranges) { xpt_print(periph->path, "%s issuing short delete %ld > %ld\n", da_delete_method_desc[softc->delete_method], requestcount, (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX); break; } } lastlba = lba; bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (bp1->bio_bcount / softc->params.secsize > (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); block_count = howmany(ranges, ATA_DSM_BLK_RANGES); scsi_ata_trim(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, block_count, /*data_ptr*/buf, /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } /* * We calculate ws_max_blks here based off d_delmaxsize instead * of using softc->ws_max_blks as it is absolute max for the * device not the protocol max which may well be lower. 
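* d_delmaxsize is in bytes, so it is converted to blocks below using the * logical sector size before being used to clamp the request.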
*/ static void da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc; struct bio *bp1; uint64_t ws_max_blks; uint64_t lba; uint64_t count; /* forward compat with WS32 */ softc = (struct da_softc *)periph->softc; ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize; lba = bp->bio_pblkno; count = 0; bp1 = bp; do { if (bp1 != bp)//XXX imp XXX bioq_insert_tail(&softc->delete_run_queue, bp1); count += bp1->bio_bcount / softc->params.secsize; if (count > ws_max_blks) { xpt_print(periph->path, "%s issuing short delete %ld > %ld\n", da_delete_method_desc[softc->delete_method], count, ws_max_blks); count = omin(count, ws_max_blks); break; } bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (lba + count != bp1->bio_pblkno || count + bp1->bio_bcount / softc->params.secsize > ws_max_blks) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); scsi_write_same(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*byte2*/softc->delete_method == DA_DELETE_ZERO ? 0 : SWS_UNMAP, softc->delete_method == DA_DELETE_WS16 ? 16 : 10, /*lba*/lba, /*block_count*/count, /*data_ptr*/ __DECONST(void *, zero_region), /*dxfer_len*/ softc->params.secsize, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } static int cmd6workaround(union ccb *ccb) { struct scsi_rw_6 cmd6; struct scsi_rw_10 *cmd10; struct da_softc *softc; u_int8_t *cdb; struct bio *bp; int frozen; cdb = ccb->csio.cdb_io.cdb_bytes; softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc; if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) { da_delete_methods old_method = softc->delete_method; /* * Typically there are two reasons for failure here * 1. Delete method was detected as supported but isn't * 2. Delete failed due to invalid params e.g. too big * * While we will attempt to choose an alternative delete method * this may result in short deletes if the existing delete * requests from geom are big for the new method chosen. * * This method assumes that the error which triggered this * will not retry the io otherwise a panic will occur */ dadeleteflag(softc, old_method, 0); dadeletemethodchoose(softc, DA_DELETE_DISABLE); if (softc->delete_method == DA_DELETE_DISABLE) xpt_print(ccb->ccb_h.path, "%s failed, disabling BIO_DELETE\n", da_delete_method_desc[old_method]); else xpt_print(ccb->ccb_h.path, "%s failed, switching to %s BIO_DELETE\n", da_delete_method_desc[old_method], da_delete_method_desc[softc->delete_method]); while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL) cam_iosched_queue_work(softc->cam_iosched, bp); cam_iosched_queue_work(softc->cam_iosched, (struct bio *)ccb->ccb_h.ccb_bp); ccb->ccb_h.ccb_bp = NULL; return (0); } /* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && (*cdb == PREVENT_ALLOW) && (softc->quirks & DA_Q_NO_PREVENT) == 0) { if (bootverbose) xpt_print(ccb->ccb_h.path, "PREVENT ALLOW MEDIUM REMOVAL not supported.\n"); softc->quirks |= DA_Q_NO_PREVENT; return (0); } /* Detect unsupported SYNCHRONIZE CACHE(10). 
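* When the command is rejected, DA_Q_NO_SYNC_CACHE is set so later BIO_FLUSH * requests complete without sending a command, and DISKFLAG_CANFLUSHCACHE is * cleared on the disk.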
*/ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && (*cdb == SYNCHRONIZE_CACHE) && (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { if (bootverbose) xpt_print(ccb->ccb_h.path, "SYNCHRONIZE CACHE(10) not supported.\n"); softc->quirks |= DA_Q_NO_SYNC_CACHE; softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE; return (0); } /* Translation only possible if CDB is an array and cmd is R/W6 */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 || (*cdb != READ_6 && *cdb != WRITE_6)) return 0; xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, " "increasing minimum_cmd_size to 10.\n"); softc->minimum_cmd_size = 10; bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6)); cmd10 = (struct scsi_rw_10 *)cdb; cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10; cmd10->byte2 = 0; scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr); cmd10->reserved = 0; scsi_ulto2b(cmd6.length, cmd10->length); cmd10->control = cmd6.control; ccb->csio.cdb_len = sizeof(*cmd10); /* Requeue request, unfreezing queue if necessary */ frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_action(ccb); if (frozen) { cam_release_devq(ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } return (ERESTART); } static void dazonedone(struct cam_periph *periph, union ccb *ccb) { struct da_softc *softc; struct bio *bp; softc = periph->softc; bp = (struct bio *)ccb->ccb_h.ccb_bp; switch (bp->bio_zone.zone_cmd) { case DISK_ZONE_OPEN: case DISK_ZONE_CLOSE: case DISK_ZONE_FINISH: case DISK_ZONE_RWP: break; case DISK_ZONE_REPORT_ZONES: { uint32_t avail_len; struct disk_zone_report *rep; struct scsi_report_zones_hdr *hdr; struct scsi_report_zones_desc *desc; struct disk_zone_rep_entry *entry; uint32_t hdr_len, num_avail; uint32_t num_to_fill, i; int ata; rep = &bp->bio_zone.zone_params.report; avail_len = ccb->csio.dxfer_len - ccb->csio.resid; /* * Note that bio_resid isn't normally used for zone * commands, but it is used by devstat_end_transaction_bio() * to determine how much data was transferred. Because * the size of the SCSI/ATA data structures is different * than the size of the BIO interface structures, the * amount of data actually transferred from the drive will * be different than the amount of data transferred to * the user. */ bp->bio_resid = ccb->csio.resid; hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr; if (avail_len < sizeof(*hdr)) { /* * Is there a better error than EIO here? We asked * for at least the header, and we got less than * that. */ bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; break; } if (softc->zone_interface == DA_ZONE_IF_ATA_PASS) ata = 1; else ata = 0; hdr_len = ata ? le32dec(hdr->length) : scsi_4btoul(hdr->length); if (hdr_len > 0) rep->entries_available = hdr_len / sizeof(*desc); else rep->entries_available = 0; /* * NOTE: using the same values for the BIO version of the * same field as the SCSI/ATA values. This means we could * get some additional values that aren't defined in bio.h * if more values of the same field are defined later. */ rep->header.same = hdr->byte4 & SRZ_SAME_MASK; rep->header.maximum_lba = ata ? le64dec(hdr->maximum_lba) : scsi_8btou64(hdr->maximum_lba); /* * If the drive reports no entries that match the query, * we're done. */ if (hdr_len == 0) { rep->entries_filled = 0; break; } num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc), hdr_len / sizeof(*desc)); /* * If the drive didn't return any data, then we're done. 
*/ if (num_avail == 0) { rep->entries_filled = 0; break; } num_to_fill = min(num_avail, rep->entries_allocated); /* * If the user didn't allocate any entries for us to fill, * we're done. */ if (num_to_fill == 0) { rep->entries_filled = 0; break; } for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0]; i < num_to_fill; i++, desc++, entry++) { /* * NOTE: we're mapping the values here directly * from the SCSI/ATA bit definitions to the bio.h * definitions. There is also a warning in * disk_zone.h, but the impact is that if * additional values are added in the SCSI/ATA * specs these will be visible to consumers of * this interface. */ entry->zone_type = desc->zone_type & SRZ_TYPE_MASK; entry->zone_condition = (desc->zone_flags & SRZ_ZONE_COND_MASK) >> SRZ_ZONE_COND_SHIFT; entry->zone_flags |= desc->zone_flags & (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET); entry->zone_length = ata ? le64dec(desc->zone_length) : scsi_8btou64(desc->zone_length); entry->zone_start_lba = ata ? le64dec(desc->zone_start_lba) : scsi_8btou64(desc->zone_start_lba); entry->write_pointer_lba = ata ? le64dec(desc->write_pointer_lba) : scsi_8btou64(desc->write_pointer_lba); } rep->entries_filled = num_to_fill; break; } case DISK_ZONE_GET_PARAMS: default: /* * In theory we should not get a GET_PARAMS bio, since it * should be handled without queueing the command to the * drive. */ panic("%s: Invalid zone command %d", __func__, bp->bio_zone.zone_cmd); break; } if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) free(ccb->csio.data_ptr, M_SCSIDA); } static void dadone(struct cam_periph *periph, union ccb *done_ccb) { struct bio *bp, *bp1; struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; da_ccb_state state; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (csio->bio != NULL) biotrack(csio->bio, __func__); #endif state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK; cam_periph_lock(periph); bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { int error; int sf; if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0) sf = SF_RETRY_UA; else sf = 0; error = daerror(done_ccb, CAM_RETRY_SELTO, sf); if (error == ERESTART) { /* A retry was scheduled, so just return. */ cam_periph_unlock(periph); return; } bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if (error != 0) { int queued_error; /* * return all queued I/O with EIO, so that * the client can retry these I/Os in the * proper order should it attempt to recover. */ queued_error = EIO; if (error == ENXIO && (softc->flags & DA_FLAG_PACK_INVALID)== 0) { /* * Catastrophic error. Mark our pack as * invalid. * * XXX See if this is really a media * XXX change first?
*/ xpt_print(periph->path, "Invalidating pack\n"); softc->flags |= DA_FLAG_PACK_INVALID; #ifdef CAM_IO_STATS softc->invalidations++; #endif queued_error = ENXIO; } cam_iosched_flush(softc->cam_iosched, NULL, queued_error); if (bp != NULL) { bp->bio_error = error; bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } } else if (bp != NULL) { if (state == DA_CCB_DELETE) bp->bio_resid = 0; else bp->bio_resid = csio->resid; bp->bio_error = 0; if (bp->bio_resid != 0) bp->bio_flags |= BIO_ERROR; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } else if (bp != NULL) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) panic("REQ_CMP with QFRZN"); if (bp->bio_cmd == BIO_ZONE) dazonedone(periph, done_ccb); else if (state == DA_CCB_DELETE) bp->bio_resid = 0; else bp->bio_resid = csio->resid; if ((csio->resid > 0) && (bp->bio_cmd != BIO_ZONE)) bp->bio_flags |= BIO_ERROR; if (softc->error_inject != 0) { bp->bio_error = softc->error_inject; bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; softc->error_inject = 0; } } if (bp != NULL) biotrack(bp, __func__); LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); if (LIST_EMPTY(&softc->pending_ccbs)) softc->flags |= DA_FLAG_WAS_OTAG; /* * We need to call cam_iosched before we call biodone so that we don't * measure any activity that happens in the completion routine, which in * the case of sendfile can be quite extensive. Release the periph * refcount taken in dastart() for each CCB. */ cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb); xpt_release_ccb(done_ccb); KASSERT(softc->refcount >= 1, ("dadone softc %p refcount %d", softc, softc->refcount)); softc->refcount--; if (state == DA_CCB_DELETE) { TAILQ_HEAD(, bio) queue; TAILQ_INIT(&queue); TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue); softc->delete_run_queue.insert_point = NULL; /* * Normally, the xpt_release_ccb() above would make sure * that when we have more work to do, that work would * get kicked off. However, we specifically keep * delete_running set to 0 before the call above to * allow other I/O to progress when many BIO_DELETE * requests are pushed down. We set delete_running to 0 * and call daschedule again so that we don't stall if * there are no other I/Os pending apart from BIO_DELETEs. 
*/ cam_iosched_trim_done(softc->cam_iosched); daschedule(periph); cam_periph_unlock(periph); while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, bp1, bio_queue); bp1->bio_error = bp->bio_error; if (bp->bio_flags & BIO_ERROR) { bp1->bio_flags |= BIO_ERROR; bp1->bio_resid = bp1->bio_bcount; } else bp1->bio_resid = 0; biodone(bp1); } } else { daschedule(periph); cam_periph_unlock(periph); } if (bp != NULL) biodone(bp); return; } static void dadone_probewp(struct cam_periph *periph, union ccb *done_ccb) { struct scsi_mode_header_6 *mode_hdr6; struct scsi_mode_header_10 *mode_hdr10; struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; uint8_t dev_spec; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probewp\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; cam_periph_assert(periph, MA_OWNED); KASSERT(softc->state == DA_STATE_PROBE_WP, ("State (%d) not PROBE_WP in dadone_probewp, periph %p ccb %p", softc->state, periph, done_ccb)); KASSERT((csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) == DA_CCB_PROBE_WP, ("CCB State (%lu) not PROBE_WP in dadone_probewp, periph %p ccb %p", (unsigned long)csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK, periph, done_ccb)); if (softc->minimum_cmd_size > 6) { mode_hdr10 = (struct scsi_mode_header_10 *)csio->data_ptr; dev_spec = mode_hdr10->dev_spec; } else { mode_hdr6 = (struct scsi_mode_header_6 *)csio->data_ptr; dev_spec = mode_hdr6->dev_spec; } if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) { if ((dev_spec & 0x80) != 0) softc->disk->d_flags |= DISKFLAG_WRITE_PROTECT; else softc->disk->d_flags &= ~DISKFLAG_WRITE_PROTECT; } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((softc->flags & DA_FLAG_CAN_RC16) != 0) softc->state = DA_STATE_PROBE_RC16; else softc->state = DA_STATE_PROBE_RC; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } static void dadone_proberc(struct cam_periph *periph, union ccb *done_ccb) { struct scsi_read_capacity_data *rdcap; struct scsi_read_capacity_data_long *rcaplong; struct da_softc *softc; struct ccb_scsiio *csio; da_ccb_state state; char *announce_buf; u_int32_t priority; int lbp, n; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_proberc\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK; KASSERT(softc->state == DA_STATE_PROBE_RC || softc->state == DA_STATE_PROBE_RC16, ("State (%d) not PROBE_RC* in dadone_proberc, periph %p ccb %p", softc->state, periph, done_ccb)); KASSERT(state == DA_CCB_PROBE_RC || state == DA_CCB_PROBE_RC16, ("CCB State (%lu) not PROBE_RC* in dadone_probewp, periph %p ccb %p", (unsigned long)state, periph, done_ccb)); lbp = 0; rdcap = NULL; rcaplong = NULL; /* XXX TODO: can this be a malloc? 
*/ announce_buf = softc->announce_temp; bzero(announce_buf, DA_ANNOUNCETMP_SZ); if (state == DA_CCB_PROBE_RC) rdcap =(struct scsi_read_capacity_data *)csio->data_ptr; else rcaplong = (struct scsi_read_capacity_data_long *) csio->data_ptr; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { struct disk_params *dp; uint32_t block_size; uint64_t maxsector; u_int lalba; /* Lowest aligned LBA. */ if (state == DA_CCB_PROBE_RC) { block_size = scsi_4btoul(rdcap->length); maxsector = scsi_4btoul(rdcap->addr); lalba = 0; /* * According to SBC-2, if the standard 10 * byte READ CAPACITY command returns 2^32, * we should issue the 16 byte version of * the command, since the device in question * has more sectors than can be represented * with the short version of the command. */ if (maxsector == 0xffffffff) { free(rdcap, M_SCSIDA); softc->state = DA_STATE_PROBE_RC16; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } } else { block_size = scsi_4btoul(rcaplong->length); maxsector = scsi_8btou64(rcaplong->addr); lalba = scsi_2btoul(rcaplong->lalba_lbp); } /* * Because GEOM code will just panic us if we * give them an 'illegal' value we'll avoid that * here. */ if (block_size == 0) { block_size = 512; if (maxsector == 0) maxsector = -1; } if (block_size >= MAXPHYS) { xpt_print(periph->path, "unsupportable block size %ju\n", (uintmax_t) block_size); announce_buf = NULL; cam_periph_invalidate(periph); } else { /* * We pass rcaplong into dasetgeom(), * because it will only use it if it is * non-NULL. */ dasetgeom(periph, block_size, maxsector, rcaplong, sizeof(*rcaplong)); lbp = (lalba & SRC16_LBPME_A); dp = &softc->params; n = snprintf(announce_buf, DA_ANNOUNCETMP_SZ, "%juMB (%ju %u byte sectors", ((uintmax_t)dp->secsize * dp->sectors) / (1024 * 1024), (uintmax_t)dp->sectors, dp->secsize); if (softc->p_type != 0) { n += snprintf(announce_buf + n, DA_ANNOUNCETMP_SZ - n, ", DIF type %d", softc->p_type); } snprintf(announce_buf + n, DA_ANNOUNCETMP_SZ - n, ")"); } } else { int error; /* * Retry any UNIT ATTENTION type errors. They * are expected at boot. */ error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } else if (error != 0) { int asc, ascq; int sense_key, error_code; int have_sense; cam_status status; struct ccb_getdev cgd; /* Don't wedge this device's queue */ status = done_ccb->ccb_h.status; if ((status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if (scsi_extract_sense_ccb(done_ccb, &error_code, &sense_key, &asc, &ascq)) have_sense = TRUE; else have_sense = FALSE; /* * If we tried READ CAPACITY(16) and failed, * fall back to READ CAPACITY(10).
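* The fallback is taken when the command was rejected as invalid or failed * with an ILLEGAL REQUEST sense key; DA_FLAG_CAN_RC16 is cleared so any later * reprobe goes straight to the 10 byte command.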
*/ if ((state == DA_CCB_PROBE_RC16) && (softc->flags & DA_FLAG_CAN_RC16) && (((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) || ((have_sense) && (error_code == SSD_CURRENT_ERROR || error_code == SSD_DESC_CURRENT_ERROR) && (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) { cam_periph_assert(periph, MA_OWNED); softc->flags &= ~DA_FLAG_CAN_RC16; free(rdcap, M_SCSIDA); softc->state = DA_STATE_PROBE_RC; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } /* * Attach to anything that claims to be a * direct access or optical disk device, * as long as it doesn't return a "Logical * unit not supported" (0x25) error. * "Internal Target Failure" (0x44) is also * special and typically means that the * device is a SATA drive behind a SATL * translation that's fallen into a * terminally fatal state. */ if ((have_sense) && (asc != 0x25) && (asc != 0x44) && (error_code == SSD_CURRENT_ERROR || error_code == SSD_DESC_CURRENT_ERROR)) { const char *sense_key_desc; const char *asc_desc; dasetgeom(periph, 512, -1, NULL, 0); scsi_sense_desc(sense_key, asc, ascq, &cgd.inq_data, &sense_key_desc, &asc_desc); snprintf(announce_buf, DA_ANNOUNCETMP_SZ, "Attempt to query device " "size failed: %s, %s", sense_key_desc, asc_desc); } else { if (have_sense) scsi_sense_print(&done_ccb->csio); else { xpt_print(periph->path, "got CAM status %#x\n", done_ccb->ccb_h.status); } xpt_print(periph->path, "fatal error, " "failed to attach to device\n"); announce_buf = NULL; /* * Free up resources. */ cam_periph_invalidate(periph); } } } free(csio->data_ptr, M_SCSIDA); if (announce_buf != NULL && ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) { struct sbuf sb; sbuf_new(&sb, softc->announcebuf, DA_ANNOUNCE_SZ, SBUF_FIXEDLEN); xpt_announce_periph_sbuf(periph, &sb, announce_buf); xpt_announce_quirks_sbuf(periph, &sb, softc->quirks, DA_Q_BIT_STRING); sbuf_finish(&sb); sbuf_putbuf(&sb); /* * Create our sysctl variables, now that we know * we have successfully attached. */ /* increase the refcount */ if (da_periph_acquire(periph, DA_REF_SYSCTL) == 0) { taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); } else { /* XXX This message is useless! */ xpt_print(periph->path, "fatal error, " "could not acquire reference count\n"); } } /* We already probed the device. */ if (softc->flags & DA_FLAG_PROBED) { daprobedone(periph, done_ccb); return; } /* Ensure re-probe doesn't see old delete. */ softc->delete_available = 0; dadeleteflag(softc, DA_DELETE_ZERO, 1); if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) { /* * Based on older SBC-3 spec revisions * any of the UNMAP methods "may" be * available via LBP given this flag so * we flag all of them as available and * then remove those which further * probes confirm aren't available * later. 
* * We could also check readcap(16) p_type * flag to exclude one or more invalid * write same (X) types here */ dadeleteflag(softc, DA_DELETE_WS16, 1); dadeleteflag(softc, DA_DELETE_WS10, 1); dadeleteflag(softc, DA_DELETE_UNMAP, 1); softc->state = DA_STATE_PROBE_LBP; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } softc->state = DA_STATE_PROBE_BDC; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } static void dadone_probelbp(struct cam_periph *periph, union ccb *done_ccb) { struct scsi_vpd_logical_block_prov *lbp; struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probelbp\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { /* * T10/1799-D Revision 31 states at least one of these * must be supported but we don't currently enforce this. */ dadeleteflag(softc, DA_DELETE_WS16, (lbp->flags & SVPD_LBP_WS16)); dadeleteflag(softc, DA_DELETE_WS10, (lbp->flags & SVPD_LBP_WS10)); dadeleteflag(softc, DA_DELETE_UNMAP, (lbp->flags & SVPD_LBP_UNMAP)); } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } /* * Failure indicates we don't support any SBC-3 * delete methods with UNMAP */ } } free(lbp, M_SCSIDA); softc->state = DA_STATE_PROBE_BLK_LIMITS; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } static void dadone_probeblklimits(struct cam_periph *periph, union ccb *done_ccb) { struct scsi_vpd_block_limits *block_limits; struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeblklimits\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t max_txfer_len = scsi_4btoul( block_limits->max_txfer_len); uint32_t max_unmap_lba_cnt = scsi_4btoul( block_limits->max_unmap_lba_cnt); uint32_t max_unmap_blk_cnt = scsi_4btoul( block_limits->max_unmap_blk_cnt); uint32_t unmap_gran = scsi_4btoul( block_limits->opt_unmap_grain); uint32_t unmap_gran_align = scsi_4btoul( block_limits->unmap_grain_align); uint64_t ws_max_blks = scsi_8btou64( block_limits->max_write_same_length); if (max_txfer_len != 0) { softc->disk->d_maxsize = MIN(softc->maxio, (off_t)max_txfer_len * softc->params.secsize); } /* * We should already support UNMAP but we check lba * and block count to be sure */ if (max_unmap_lba_cnt != 0x00L && max_unmap_blk_cnt != 0x00L) { softc->unmap_max_lba = max_unmap_lba_cnt; softc->unmap_max_ranges = min(max_unmap_blk_cnt, UNMAP_MAX_RANGES); if (unmap_gran > 1) { softc->unmap_gran = unmap_gran; if (unmap_gran_align & 0x80000000) { softc->unmap_gran_align = unmap_gran_align & 0x7fffffff; } } } else { /* * Unexpected UNMAP limits which means the * device doesn't actually support UNMAP */ dadeleteflag(softc, DA_DELETE_UNMAP, 0); } if (ws_max_blks != 0x00L) softc->ws_max_blks = ws_max_blks; } else 
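/* The Block Limits VPD inquiry failed; the error path below falls back to * conservative single-LBA, single-range UNMAP limits. */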
{ int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } /* * Failure here doesn't mean UNMAP is not * supported as this is an optional page. */ softc->unmap_max_lba = 1; softc->unmap_max_ranges = 1; } } free(block_limits, M_SCSIDA); softc->state = DA_STATE_PROBE_BDC; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } static void dadone_probebdc(struct cam_periph *periph, union ccb *done_ccb) { struct scsi_vpd_block_device_characteristics *bdc; struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probebdc\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; bdc = (struct scsi_vpd_block_device_characteristics *)csio->data_ptr; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; /* * Disable queue sorting for non-rotational media * by default. */ u_int16_t old_rate = softc->disk->d_rotation_rate; valid_len = csio->dxfer_len - csio->resid; if (SBDC_IS_PRESENT(bdc, valid_len, medium_rotation_rate)) { softc->disk->d_rotation_rate = scsi_2btoul(bdc->medium_rotation_rate); if (softc->disk->d_rotation_rate == SVPD_BDC_RATE_NON_ROTATING) { cam_iosched_set_sort_queue( softc->cam_iosched, 0); softc->rotating = 0; } if (softc->disk->d_rotation_rate != old_rate) { disk_attr_changed(softc->disk, "GEOM::rotation_rate", M_NOWAIT); } } if ((SBDC_IS_PRESENT(bdc, valid_len, flags)) && (softc->zone_mode == DA_ZONE_NONE)) { int ata_proto; if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) ata_proto = 1; else ata_proto = 0; /* * The Zoned field will only be set for * Drive Managed and Host Aware drives. If * they are Host Managed, the device type * in the standard INQUIRY data should be * set to T_ZBC_HM (0x14). */ if ((bdc->flags & SVPD_ZBC_MASK) == SVPD_HAW_ZBC) { softc->zone_mode = DA_ZONE_HOST_AWARE; softc->zone_interface = (ata_proto) ? DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI; } else if ((bdc->flags & SVPD_ZBC_MASK) == SVPD_DM_ZBC) { softc->zone_mode =DA_ZONE_DRIVE_MANAGED; softc->zone_interface = (ata_proto) ? 
DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI; } else if ((bdc->flags & SVPD_ZBC_MASK) != SVPD_ZBC_NR) { xpt_print(periph->path, "Unknown zoned " "type %#x", bdc->flags & SVPD_ZBC_MASK); } } } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(bdc, M_SCSIDA); softc->state = DA_STATE_PROBE_ATA; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } static void dadone_probeata(struct cam_periph *periph, union ccb *done_ccb) { struct ata_params *ata_params; struct ccb_scsiio *csio; struct da_softc *softc; u_int32_t priority; int continue_probe; int error; int16_t *ptr; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeata\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; ata_params = (struct ata_params *)csio->data_ptr; ptr = (uint16_t *)ata_params; continue_probe = 0; error = 0; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint16_t old_rate; ata_param_fixup(ata_params); if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM && (softc->quirks & DA_Q_NO_UNMAP) == 0) { dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1); if (ata_params->max_dsm_blocks != 0) softc->trim_max_ranges = min( softc->trim_max_ranges, ata_params->max_dsm_blocks * ATA_DSM_BLK_RANGES); } /* * Disable queue sorting for non-rotational media * by default. */ old_rate = softc->disk->d_rotation_rate; softc->disk->d_rotation_rate = ata_params->media_rotation_rate; if (softc->disk->d_rotation_rate == ATA_RATE_NON_ROTATING) { cam_iosched_set_sort_queue(softc->cam_iosched, 0); softc->rotating = 0; } if (softc->disk->d_rotation_rate != old_rate) { disk_attr_changed(softc->disk, "GEOM::rotation_rate", M_NOWAIT); } cam_periph_assert(periph, MA_OWNED); if (ata_params->capabilities1 & ATA_SUPPORT_DMA) softc->flags |= DA_FLAG_CAN_ATA_DMA; if (ata_params->support.extension & ATA_SUPPORT_GENLOG) softc->flags |= DA_FLAG_CAN_ATA_LOG; /* * At this point, if we have a SATA host aware drive, * we communicate via ATA passthrough unless the * SAT layer supports ZBC -> ZAC translation. In * that case, * * XXX KDM figure out how to detect a host managed * SATA drive. */ if (softc->zone_mode == DA_ZONE_NONE) { /* * Note that we don't override the zone * mode or interface if it has already been * set. This is because it has either been * set as a quirk, or when we probed the * SCSI Block Device Characteristics page, * the zoned field was set. The latter * means that the SAT layer supports ZBC to * ZAC translation, and we would prefer to * use that if it is available. 
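* Otherwise, fall back to the zoned support bits in the ATA identify data * and use ATA passthrough for the zone commands.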
*/ if ((ata_params->support3 & ATA_SUPPORT_ZONE_MASK) == ATA_SUPPORT_ZONE_HOST_AWARE) { softc->zone_mode = DA_ZONE_HOST_AWARE; softc->zone_interface = DA_ZONE_IF_ATA_PASS; } else if ((ata_params->support3 & ATA_SUPPORT_ZONE_MASK) == ATA_SUPPORT_ZONE_DEV_MANAGED) { softc->zone_mode =DA_ZONE_DRIVE_MANAGED; softc->zone_interface = DA_ZONE_IF_ATA_PASS; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } if ((softc->zone_mode == DA_ZONE_HOST_AWARE) || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { /* * If the ATA IDENTIFY failed, we could be talking * to a SCSI drive, although that seems unlikely, * since the drive did report that it supported the * ATA Information VPD page. If the ATA IDENTIFY * succeeded, and the SAT layer doesn't support * ZBC -> ZAC translation, continue on to get the * directory of ATA logs, and complete the rest of * the ZAC probe. If the SAT layer does support * ZBC -> ZAC translation, we want to use that, * and we'll probe the SCSI Zoned Block Device * Characteristics VPD page next. */ if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_LOG) && (softc->zone_interface == DA_ZONE_IF_ATA_PASS)) softc->state = DA_STATE_PROBE_ATA_LOGDIR; else softc->state = DA_STATE_PROBE_ZONE; continue_probe = 1; } if (continue_probe != 0) { xpt_schedule(periph, priority); xpt_release_ccb(done_ccb); return; } else daprobedone(periph, done_ccb); return; } static void dadone_probeatalogdir(struct cam_periph *periph, union ccb *done_ccb) { struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatalogdir\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { error = 0; softc->valid_logdir_len = 0; bzero(&softc->ata_logdir, sizeof(softc->ata_logdir)); softc->valid_logdir_len = csio->dxfer_len - csio->resid; if (softc->valid_logdir_len > 0) bcopy(csio->data_ptr, &softc->ata_logdir, min(softc->valid_logdir_len, sizeof(softc->ata_logdir))); /* * Figure out whether the Identify Device log is * supported. The General Purpose log directory * has a header, and lists the number of pages * available for each GP log identified by the * offset into the list. */ if ((softc->valid_logdir_len >= ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t))) && (le16dec(softc->ata_logdir.header) == ATA_GP_LOG_DIR_VERSION) && (le16dec(&softc->ata_logdir.num_pages[ (ATA_IDENTIFY_DATA_LOG * sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){ softc->flags |= DA_FLAG_CAN_ATA_IDLOG; } else { softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG; } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA log directory, * then ATA logs are effectively not * supported even if the bit is set in the * identify data. 
*/ softc->flags &= ~(DA_FLAG_CAN_ATA_LOG | DA_FLAG_CAN_ATA_IDLOG); if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) { softc->state = DA_STATE_PROBE_ATA_IDDIR; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } daprobedone(periph, done_ccb); return; } static void dadone_probeataiddir(struct cam_periph *periph, union ccb *done_ccb) { struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeataiddir\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { off_t entries_offset, max_entries; error = 0; softc->valid_iddir_len = 0; bzero(&softc->ata_iddir, sizeof(softc->ata_iddir)); softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP | DA_FLAG_CAN_ATA_ZONE); softc->valid_iddir_len = csio->dxfer_len - csio->resid; if (softc->valid_iddir_len > 0) bcopy(csio->data_ptr, &softc->ata_iddir, min(softc->valid_iddir_len, sizeof(softc->ata_iddir))); entries_offset = __offsetof(struct ata_identify_log_pages,entries); max_entries = softc->valid_iddir_len - entries_offset; if ((softc->valid_iddir_len > (entries_offset + 1)) && (le64dec(softc->ata_iddir.header) == ATA_IDLOG_REVISION) && (softc->ata_iddir.entry_count > 0)) { int num_entries, i; num_entries = softc->ata_iddir.entry_count; num_entries = min(num_entries, softc->valid_iddir_len - entries_offset); for (i = 0; i < num_entries && i < max_entries; i++) { if (softc->ata_iddir.entries[i] == ATA_IDL_SUP_CAP) softc->flags |= DA_FLAG_CAN_ATA_SUPCAP; else if (softc->ata_iddir.entries[i] == ATA_IDL_ZDI) softc->flags |= DA_FLAG_CAN_ATA_ZONE; if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) break; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA Identify Data log * directory, then it effectively isn't * supported even if the ATA Log directory has * a non-zero number of pages present for * this log.
*/ softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) { softc->state = DA_STATE_PROBE_ATA_SUP; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } daprobedone(periph, done_ccb); return; } static void dadone_probeatasup(struct cam_periph *periph, union ccb *done_ccb) { struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatasup\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; size_t needed_size; struct ata_identify_log_sup_cap *sup_cap; error = 0; sup_cap = (struct ata_identify_log_sup_cap *)csio->data_ptr; valid_len = csio->dxfer_len - csio->resid; needed_size = __offsetof(struct ata_identify_log_sup_cap, sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap); if (valid_len >= needed_size) { uint64_t zoned, zac_cap; zoned = le64dec(sup_cap->zoned_cap); if (zoned & ATA_ZONED_VALID) { /* * This should have already been * set, because this is also in the * ATA identify data. */ if ((zoned & ATA_ZONED_MASK) == ATA_SUPPORT_ZONE_HOST_AWARE) softc->zone_mode = DA_ZONE_HOST_AWARE; else if ((zoned & ATA_ZONED_MASK) == ATA_SUPPORT_ZONE_DEV_MANAGED) softc->zone_mode = DA_ZONE_DRIVE_MANAGED; } zac_cap = le64dec(sup_cap->sup_zac_cap); if (zac_cap & ATA_SUP_ZAC_CAP_VALID) { if (zac_cap & ATA_REPORT_ZONES_SUP) softc->zone_flags |= DA_ZONE_FLAG_RZ_SUP; if (zac_cap & ATA_ND_OPEN_ZONE_SUP) softc->zone_flags |= DA_ZONE_FLAG_OPEN_SUP; if (zac_cap & ATA_ND_CLOSE_ZONE_SUP) softc->zone_flags |= DA_ZONE_FLAG_CLOSE_SUP; if (zac_cap & ATA_ND_FINISH_ZONE_SUP) softc->zone_flags |= DA_ZONE_FLAG_FINISH_SUP; if (zac_cap & ATA_ND_RWP_SUP) softc->zone_flags |= DA_ZONE_FLAG_RWP_SUP; } else { /* * This field was introduced in * ACS-4, r08 on April 28th, 2015. * If the drive firmware was written * to an earlier spec, it won't have * the field. So, assume all * commands are supported. */ softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA Identify Data * Supported Capabilities page, clear the * flag... */ softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP; /* * And clear zone capabilities. 
*/ softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) { softc->state = DA_STATE_PROBE_ATA_ZONE; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } daprobedone(periph, done_ccb); return; } static void dadone_probeatazone(struct cam_periph *periph, union ccb *done_ccb) { struct da_softc *softc; struct ccb_scsiio *csio; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatazone\n")); softc = (struct da_softc *)periph->softc; csio = &done_ccb->csio; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { struct ata_zoned_info_log *zi_log; uint32_t valid_len; size_t needed_size; zi_log = (struct ata_zoned_info_log *)csio->data_ptr; valid_len = csio->dxfer_len - csio->resid; needed_size = __offsetof(struct ata_zoned_info_log, version_info) + 1 + sizeof(zi_log->version_info); if (valid_len >= needed_size) { uint64_t tmpvar; tmpvar = le64dec(zi_log->zoned_cap); if (tmpvar & ATA_ZDI_CAP_VALID) { if (tmpvar & ATA_ZDI_CAP_URSWRZ) softc->zone_flags |= DA_ZONE_FLAG_URSWRZ; else softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ; } tmpvar = le64dec(zi_log->optimal_seq_zones); if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) { softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_seq_zones = (tmpvar & ATA_ZDI_OPT_SEQ_MASK); } else { softc->zone_flags &= ~DA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_seq_zones = 0; } tmpvar =le64dec(zi_log->optimal_nonseq_zones); if (tmpvar & ATA_ZDI_OPT_NS_VALID) { softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET; softc->optimal_nonseq_zones = (tmpvar & ATA_ZDI_OPT_NS_MASK); } else { softc->zone_flags &= ~DA_ZONE_FLAG_OPT_NONSEQ_SET; softc->optimal_nonseq_zones = 0; } tmpvar = le64dec(zi_log->max_seq_req_zones); if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) { softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; softc->max_seq_zones = (tmpvar & ATA_ZDI_MAX_SEQ_MASK); } else { softc->zone_flags &= ~DA_ZONE_FLAG_MAX_SEQ_SET; softc->max_seq_zones = 0; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { softc->flags &= ~DA_FLAG_CAN_ATA_ZONE; softc->flags &= ~DA_ZONE_FLAG_SET_MASK; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); daprobedone(periph, done_ccb); return; } static void dadone_probezone(struct cam_periph *periph, union ccb *done_ccb) { struct da_softc *softc; struct ccb_scsiio *csio; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probezone\n")); softc = (struct da_softc *)periph->softc; csio = &done_ccb->csio; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; size_t needed_len; struct scsi_vpd_zoned_bdc *zoned_bdc; error = 0; zoned_bdc = (struct scsi_vpd_zoned_bdc *)csio->data_ptr; valid_len = csio->dxfer_len - csio->resid; needed_len = __offsetof(struct scsi_vpd_zoned_bdc, max_seq_req_zones) + 1 + sizeof(zoned_bdc->max_seq_req_zones); if ((valid_len >= needed_len) && (scsi_2btoul(zoned_bdc->page_length) >= SVPD_ZBDC_PL)) { if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ) softc->zone_flags |= 
DA_ZONE_FLAG_URSWRZ; else softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ; softc->optimal_seq_zones = scsi_4btoul(zoned_bdc->optimal_seq_zones); softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_nonseq_zones = scsi_4btoul( zoned_bdc->optimal_nonseq_zones); softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET; softc->max_seq_zones = scsi_4btoul(zoned_bdc->max_seq_req_zones); softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; } /* * All of the zone commands are mandatory for SCSI * devices. * * XXX KDM this is valid as of September 2015. * Re-check this assumption once the SAT spec is * updated to support SCSI ZBC to ATA ZAC mapping. * Since ATA allows zone commands to be reported * as supported or not, this may not necessarily * be true for an ATA device behind a SAT (SCSI to * ATA Translation) layer. */ softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK; } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); daprobedone(periph, done_ccb); return; } static void dadone_tur(struct cam_periph *periph, union ccb *done_ccb) { struct da_softc *softc; struct ccb_scsiio *csio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_tur\n")); softc = (struct da_softc *)periph->softc; csio = &done_ccb->csio; cam_periph_assert(periph, MA_OWNED); if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART) return; /* Will complete again, keep reference */ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } softc->flags &= ~DA_FLAG_TUR_PENDING; xpt_release_ccb(done_ccb); da_periph_release_locked(periph, DA_REF_TUR); return; } static void dareprobe(struct cam_periph *periph) { struct da_softc *softc; int status; softc = (struct da_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); /* Probe in progress; don't interfere. */ if (softc->state != DA_STATE_NORMAL) return; status = da_periph_acquire(periph, DA_REF_REPROBE); KASSERT(status == 0, ("dareprobe: cam_periph_acquire failed")); softc->state = DA_STATE_PROBE_WP; xpt_schedule(periph, CAM_PRIORITY_DEV); } static int daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct da_softc *softc; struct cam_periph *periph; int error, error_code, sense_key, asc, ascq; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (ccb->csio.bio != NULL) biotrack(ccb->csio.bio, __func__); #endif periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct da_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); /* * Automatically detect devices that do not support * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs. */ error = 0; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) { error = cmd6workaround(ccb); } else if (scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (sense_key == SSD_KEY_ILLEGAL_REQUEST) error = cmd6workaround(ccb); /* * If the target replied with CAPACITY DATA HAS CHANGED UA, * query the capacity and notify upper layers. 
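 * The branches below handle related unit attentions: 28h/00h (not
 * ready to ready change) is reported as a media change, 3Fh/03h
 * (INQUIRY data changed) triggers another reprobe, and NOT READY/3Ah
 * (medium not present) marks the pack invalid and reports the media
 * gone.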
*/ else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x2A && ascq == 0x09) { xpt_print(periph->path, "Capacity data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); sense_flags |= SF_NO_PRINT; } else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x28 && ascq == 0x00) { softc->flags &= ~DA_FLAG_PROBED; disk_media_changed(softc->disk, M_NOWAIT); } else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x3F && ascq == 0x03) { xpt_print(periph->path, "INQUIRY data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); sense_flags |= SF_NO_PRINT; } else if (sense_key == SSD_KEY_NOT_READY && asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { softc->flags |= DA_FLAG_PACK_INVALID; disk_media_gone(softc->disk, M_NOWAIT); } } if (error == ERESTART) return (ERESTART); #ifdef CAM_IO_STATS switch (ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_CMD_TIMEOUT: softc->timeouts++; break; case CAM_REQ_ABORTED: case CAM_REQ_CMP_ERR: case CAM_REQ_TERMIO: case CAM_UNREC_HBA_ERROR: case CAM_DATA_RUN_ERR: softc->errors++; break; default: break; } #endif /* * XXX * Until we have a better way of doing pack validation, * don't treat UAs as errors. */ sense_flags |= SF_RETRY_UA; if (softc->quirks & DA_Q_RETRY_BUSY) sense_flags |= SF_RETRY_BUSY; return(cam_periph_error(ccb, cam_flags, sense_flags)); } static void damediapoll(void *arg) { struct cam_periph *periph = arg; struct da_softc *softc = periph->softc; if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) && (softc->flags & DA_FLAG_TUR_PENDING) == 0 && softc->state == DA_STATE_NORMAL && LIST_EMPTY(&softc->pending_ccbs)) { if (da_periph_acquire(periph, DA_REF_TUR) == 0) { cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); daschedule(periph); } } /* Queue us up again */ if (da_poll_period != 0) callout_schedule(&softc->mediapoll_c, da_poll_period * hz); } static void daprevent(struct cam_periph *periph, int action) { struct da_softc *softc; union ccb *ccb; int error; cam_periph_assert(periph, MA_OWNED); softc = (struct da_softc *)periph->softc; if (((action == PR_ALLOW) && (softc->flags & DA_FLAG_PACK_LOCKED) == 0) || ((action == PR_PREVENT) && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) { return; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_prevent(&ccb->csio, /*retries*/1, /*cbcfp*/NULL, MSG_SIMPLE_Q_TAG, action, SSD_FULL_SIZE, 5000); error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat); if (error == 0) { if (action == PR_ALLOW) softc->flags &= ~DA_FLAG_PACK_LOCKED; else softc->flags |= DA_FLAG_PACK_LOCKED; } xpt_release_ccb(ccb); } static void dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len) { struct ccb_calc_geometry ccg; struct da_softc *softc; struct disk_params *dp; u_int lbppbe, lalba; int error; softc = (struct da_softc *)periph->softc; dp = &softc->params; dp->secsize = block_len; dp->sectors = maxsector + 1; if (rcaplong != NULL) { lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE; lalba = scsi_2btoul(rcaplong->lalba_lbp); lalba &= SRC16_LALBA_A; if (rcaplong->prot & SRC16_PROT_EN) softc->p_type = ((rcaplong->prot & SRC16_P_TYPE) >> SRC16_P_TYPE_SHIFT) + 1; else softc->p_type = 0; } else { lbppbe = 0; lalba = 0; softc->p_type = 0; } if (lbppbe > 0) { dp->stripesize = block_len << lbppbe; dp->stripeoffset = (dp->stripesize - block_len * lalba) % dp->stripesize; } else if (softc->quirks & DA_Q_4K) { dp->stripesize = 4096; 
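	/*
	 * Worked example for the LBPPBE path above (illustrative values
	 * only): a 512 byte logical block with lbppbe = 3 gives a 4096
	 * byte stripe, and lalba = 1 yields
	 * stripeoffset = (4096 - 512 * 1) % 4096 = 3584.
	 */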
dp->stripeoffset = 0; } else if (softc->unmap_gran != 0) { dp->stripesize = block_len * softc->unmap_gran; dp->stripeoffset = (dp->stripesize - block_len * softc->unmap_gran_align) % dp->stripesize; } else { dp->stripesize = 0; dp->stripeoffset = 0; } /* * Have the controller provide us with a geometry * for this disk. The only time the geometry * matters is when we boot and the controller * is the only one knowledgeable enough to come * up with something that will make this a bootable * device. */ xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL); ccg.ccb_h.func_code = XPT_CALC_GEOMETRY; ccg.block_size = dp->secsize; ccg.volume_size = dp->sectors; ccg.heads = 0; ccg.secs_per_track = 0; ccg.cylinders = 0; xpt_action((union ccb*)&ccg); if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { /* * We don't know what went wrong here- but just pick * a geometry so we don't have nasty things like divide * by zero. */ dp->heads = 255; dp->secs_per_track = 255; dp->cylinders = dp->sectors / (255 * 255); if (dp->cylinders == 0) { dp->cylinders = 1; } } else { dp->heads = ccg.heads; dp->secs_per_track = ccg.secs_per_track; dp->cylinders = ccg.cylinders; } /* * If the user supplied a read capacity buffer, and if it is * different than the previous buffer, update the data in the EDT. * If it's the same, we don't bother. This avoids sending an * update every time someone opens this device. */ if ((rcaplong != NULL) && (bcmp(rcaplong, &softc->rcaplong, min(sizeof(softc->rcaplong), rcap_len)) != 0)) { struct ccb_dev_advinfo cdai; xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.buftype = CDAI_TYPE_RCAPLONG; cdai.flags = CDAI_FLAG_STORE; cdai.bufsiz = rcap_len; cdai.buf = (uint8_t *)rcaplong; xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if (cdai.ccb_h.status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: failed to set read " "capacity advinfo\n", __func__); /* Use cam_error_print() to decode the status */ cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS, CAM_EPF_ALL); } else { bcopy(rcaplong, &softc->rcaplong, min(sizeof(softc->rcaplong), rcap_len)); } } softc->disk->d_sectorsize = softc->params.secsize; softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors; softc->disk->d_stripesize = softc->params.stripesize; softc->disk->d_stripeoffset = softc->params.stripeoffset; /* XXX: these are not actually "firmware" values, so they may be wrong */ softc->disk->d_fwsectors = softc->params.secs_per_track; softc->disk->d_fwheads = softc->params.heads; softc->disk->d_devstat->block_size = softc->params.secsize; softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE; error = disk_resize(softc->disk, M_NOWAIT); if (error != 0) xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error); } static void dasendorderedtag(void *arg) { struct cam_periph *periph = arg; struct da_softc *softc = periph->softc; cam_periph_assert(periph, MA_OWNED); if (da_send_ordered) { if (!LIST_EMPTY(&softc->pending_ccbs)) { if ((softc->flags & DA_FLAG_WAS_OTAG) == 0) softc->flags |= DA_FLAG_NEED_OTAG; softc->flags &= ~DA_FLAG_WAS_OTAG; } } /* Queue us up again */ callout_reset(&softc->sendordered_c, (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, dasendorderedtag, periph); } /* * Step through all DA peripheral drivers, and if the device is still open, * sync the disk cache to physical media. 
*/ static void dashutdown(void * arg, int howto) { struct cam_periph *periph; struct da_softc *softc; union ccb *ccb; int error; CAM_PERIPH_FOREACH(periph, &dadriver) { softc = (struct da_softc *)periph->softc; if (SCHEDULER_STOPPED()) { /* If we paniced with the lock held, do not recurse. */ if (!cam_periph_owned(periph) && (softc->flags & DA_FLAG_OPEN)) { dadump(softc->disk, NULL, 0, 0, 0); } continue; } cam_periph_lock(periph); /* * We only sync the cache if the drive is still open, and * if the drive is capable of it.. */ if (((softc->flags & DA_FLAG_OPEN) == 0) || (softc->quirks & DA_Q_NO_SYNC_CACHE)) { cam_periph_unlock(periph); continue; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_synchronize_cache(&ccb->csio, /*retries*/0, /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG, /*begin_lba*/0, /* whole disk */ /*lb_count*/0, SSD_FULL_SIZE, 60 * 60 * 1000); error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); xpt_release_ccb(ccb); cam_periph_unlock(periph); } } #else /* !_KERNEL */ /* * XXX These are only left out of the kernel build to silence warnings. If, * for some reason these functions are used in the kernel, the ifdefs should * be moved so they are included both in the kernel and userland. */ void scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_format_unit *scsi_cmd; scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = FORMAT_UNIT; scsi_cmd->byte2 = byte2; scsi_ulto2b(ileave, scsi_cmd->interleave); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t list_format, uint32_t addr_desc_index, uint8_t *data_ptr, uint32_t dxfer_len, int minimum_cmd_size, uint8_t sense_len, uint32_t timeout) { uint8_t cdb_len; /* * These conditions allow using the 10 byte command. Otherwise we * need to use the 12 byte command. 
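 * For example (illustrative values): a 4096 byte transfer with
 * addr_desc_index == 0 and minimum_cmd_size <= 10 fits in the two
 * byte allocation length of READ DEFECT DATA(10), so the 10 byte CDB
 * is built below; a nonzero starting descriptor index or a transfer
 * larger than SRDD10_MAX_LENGTH forces the 12 byte CDB instead.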
*/ if ((minimum_cmd_size <= 10) && (addr_desc_index == 0) && (dxfer_len <= SRDD10_MAX_LENGTH)) { struct scsi_read_defect_data_10 *cdb10; cdb10 = (struct scsi_read_defect_data_10 *) &csio->cdb_io.cdb_bytes; cdb_len = sizeof(*cdb10); bzero(cdb10, cdb_len); cdb10->opcode = READ_DEFECT_DATA_10; cdb10->format = list_format; scsi_ulto2b(dxfer_len, cdb10->alloc_length); } else { struct scsi_read_defect_data_12 *cdb12; cdb12 = (struct scsi_read_defect_data_12 *) &csio->cdb_io.cdb_bytes; cdb_len = sizeof(*cdb12); bzero(cdb12, cdb_len); cdb12->opcode = READ_DEFECT_DATA_12; cdb12->format = list_format; scsi_ulto4b(dxfer_len, cdb12->alloc_length); scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index); } cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, data_ptr, dxfer_len, sense_len, cdb_len, timeout); } void scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int16_t control, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_sanitize *scsi_cmd; scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = SANITIZE; scsi_cmd->byte2 = byte2; scsi_cmd->control = control; scsi_ulto2b(dxfer_len, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } #endif /* _KERNEL */ void scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t service_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len, uint32_t timeout) { struct scsi_zbc_out *scsi_cmd; scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = ZBC_OUT; scsi_cmd->service_action = service_action; scsi_u64to8b(zone_id, scsi_cmd->zone_id); scsi_cmd->zone_flags = zone_flags; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba, uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len, uint32_t timeout) { struct scsi_zbc_in *scsi_cmd; scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = ZBC_IN; scsi_cmd->service_action = service_action; scsi_ulto4b(dxfer_len, scsi_cmd->length); scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba); scsi_cmd->zone_options = zone_options; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? 
CAM_DIR_IN : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } int scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int use_ncq, uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t *cdb_storage, size_t cdb_storage_len, uint8_t sense_len, uint32_t timeout) { uint8_t command_out, protocol, ata_flags; uint16_t features_out; uint32_t sectors_out, auxiliary; int retval; retval = 0; if (use_ncq == 0) { command_out = ATA_ZAC_MANAGEMENT_OUT; features_out = (zm_action & 0xf) | (zone_flags << 8); ata_flags = AP_FLAG_BYT_BLOK_BLOCKS; if (dxfer_len == 0) { protocol = AP_PROTO_NON_DATA; ata_flags |= AP_FLAG_TLEN_NO_DATA; sectors_out = 0; } else { protocol = AP_PROTO_DMA; ata_flags |= AP_FLAG_TLEN_SECT_CNT | AP_FLAG_TDIR_TO_DEV; sectors_out = ((dxfer_len >> 9) & 0xffff); } auxiliary = 0; } else { ata_flags = AP_FLAG_BYT_BLOK_BLOCKS; if (dxfer_len == 0) { command_out = ATA_NCQ_NON_DATA; features_out = ATA_NCQ_ZAC_MGMT_OUT; /* * We're assuming the SCSI to ATA translation layer * will set the NCQ tag number in the tag field. * That isn't clear from the SAT-4 spec (as of rev 05). */ sectors_out = 0; ata_flags |= AP_FLAG_TLEN_NO_DATA; } else { command_out = ATA_SEND_FPDMA_QUEUED; /* * Note that we're defaulting to normal priority, * and assuming that the SCSI to ATA translation * layer will insert the NCQ tag number in the tag * field. That isn't clear in the SAT-4 spec (as * of rev 05). */ sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8; ata_flags |= AP_FLAG_TLEN_FEAT | AP_FLAG_TDIR_TO_DEV; /* * For SEND FPDMA QUEUED, the transfer length is * encoded in the FEATURE register, and 0 means * that 65536 512 byte blocks are to be tranferred. * In practice, it seems unlikely that we'll see * a transfer that large, and it may confuse the * the SAT layer, because generally that means that * 0 bytes should be transferred. */ if (dxfer_len == (65536 * 512)) { features_out = 0; } else if (dxfer_len <= (65535 * 512)) { features_out = ((dxfer_len >> 9) & 0xffff); } else { /* The transfer is too big. */ retval = 1; goto bailout; } } auxiliary = (zm_action & 0xf) | (zone_flags << 8); protocol = AP_PROTO_FPDMA; } protocol |= AP_EXTEND; retval = scsi_ata_pass(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? 
CAM_DIR_OUT : CAM_DIR_NONE, tag_action, /*protocol*/ protocol, /*ata_flags*/ ata_flags, /*features*/ features_out, /*sector_count*/ sectors_out, /*lba*/ zone_id, /*command*/ command_out, /*device*/ 0, /*icc*/ 0, /*auxiliary*/ auxiliary, /*control*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*cdb_storage*/ cdb_storage, /*cdb_storage_len*/ cdb_storage_len, /*minimum_cmd_size*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout); bailout: return (retval); } int scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int use_ncq, uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t *cdb_storage, size_t cdb_storage_len, uint8_t sense_len, uint32_t timeout) { uint8_t command_out, protocol; uint16_t features_out, sectors_out; uint32_t auxiliary; int ata_flags; int retval; retval = 0; ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS; if (use_ncq == 0) { command_out = ATA_ZAC_MANAGEMENT_IN; /* XXX KDM put a macro here */ features_out = (zm_action & 0xf) | (zone_flags << 8); sectors_out = dxfer_len >> 9; /* XXX KDM macro */ protocol = AP_PROTO_DMA; ata_flags |= AP_FLAG_TLEN_SECT_CNT; auxiliary = 0; } else { ata_flags |= AP_FLAG_TLEN_FEAT; command_out = ATA_RECV_FPDMA_QUEUED; sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8; /* * For RECEIVE FPDMA QUEUED, the transfer length is * encoded in the FEATURE register, and 0 means * that 65536 512 byte blocks are to be tranferred. * In practice, it seems unlikely that we'll see * a transfer that large, and it may confuse the * the SAT layer, because generally that means that * 0 bytes should be transferred. */ if (dxfer_len == (65536 * 512)) { features_out = 0; } else if (dxfer_len <= (65535 * 512)) { features_out = ((dxfer_len >> 9) & 0xffff); } else { /* The transfer is too big. */ retval = 1; goto bailout; } auxiliary = (zm_action & 0xf) | (zone_flags << 8), protocol = AP_PROTO_FPDMA; } protocol |= AP_EXTEND; retval = scsi_ata_pass(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, /*protocol*/ protocol, /*ata_flags*/ ata_flags, /*features*/ features_out, /*sector_count*/ sectors_out, /*lba*/ zone_id, /*command*/ command_out, /*device*/ 0, /*icc*/ 0, /*auxiliary*/ auxiliary, /*control*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */ /*cdb_storage*/ cdb_storage, /*cdb_storage_len*/ cdb_storage_len, /*minimum_cmd_size*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout); bailout: return (retval); } Index: head/sys/dev/aic7xxx/aic_osm_lib.h =================================================================== --- head/sys/dev/aic7xxx/aic_osm_lib.h (revision 355600) +++ head/sys/dev/aic7xxx/aic_osm_lib.h (revision 355601) @@ -1,448 +1,448 @@ /*- * FreeBSD platform specific, shared driver option settings, data structures, * function declarations and includes. * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2001-2003 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * Alternatively, this software may be distributed under the terms of the * GNU Public License ("GPL"). * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic_osm_lib.h#5 $ * * $FreeBSD$ */ /******************************** OS Includes *********************************/ #if __FreeBSD_version >= 500000 #include #endif /*************************** Library Symbol Mapping ***************************/ #define AIC_LIB_ENTRY_CONCAT(x, prefix) prefix ## x #define AIC_LIB_ENTRY_EXPAND(x, prefix) AIC_LIB_ENTRY_CONCAT(x, prefix) #define AIC_LIB_ENTRY(x) AIC_LIB_ENTRY_EXPAND(x, AIC_LIB_PREFIX) #define AIC_CONST_ENTRY(x) AIC_LIB_ENTRY_EXPAND(x,AIC_CONST_PREFIX) #define aic_softc AIC_LIB_ENTRY(_softc) #define aic_tailq AIC_LIB_ENTRY(_tailq) #define aic_transinfo AIC_LIB_ENTRY(_transinfo) #define aic_platform_data AIC_LIB_ENTRY(_platform_data) #define aic_devinfo AIC_LIB_ENTRY(_devinfo) #define aic_lock AIC_LIB_ENTRY(_lock) #define aic_unlock AIC_LIB_ENTRY(_unlock) #define aic_callback_t AIC_LIB_ENTRY(_callback_t) #define aic_platform_freeze_devq AIC_LIB_ENTRY(_platform_freeze_devq) #define aic_platform_abort_scbs AIC_LIB_ENTRY(_platform_abort_scbs) #define aic_platform_timeout AIC_LIB_ENTRY(_platform_timeout) #define aic_timeout AIC_LIB_ENTRY(_timeout) #define aic_set_recoveryscb AIC_LIB_ENTRY(_set_recoveryscb) #define aic_spawn_recovery_thread AIC_LIB_ENTRY(_spawn_recovery_thread) #define aic_wakeup_recovery_thread AIC_LIB_ENTRY(_wakeup_recovery_thread) #define aic_terminate_recovery_thread \ AIC_LIB_ENTRY(_terminate_recovery_thread) #define aic_recovery_thread AIC_LIB_ENTRY(_recovery_thread) #define aic_recover_commands AIC_LIB_ENTRY(_recover_commands) #define aic_calc_geometry AIC_LIB_ENTRY(_calc_geometry) #define AIC_RESOURCE_SHORTAGE AIC_CONST_ENTRY(_RESOURCE_SHORTAGE) #define AIC_SHUTDOWN_RECOVERY AIC_CONST_ENTRY(_SHUTDOWN_RECOVERY) /********************************* Byte Order *********************************/ #if __FreeBSD_version >= 500000 #define aic_htobe16(x) htobe16(x) #define aic_htobe32(x) htobe32(x) #define aic_htobe64(x) htobe64(x) #define aic_htole16(x) htole16(x) #define aic_htole32(x) htole32(x) #define aic_htole64(x) htole64(x) #define aic_be16toh(x) be16toh(x) #define aic_be32toh(x) be32toh(x) #define aic_be64toh(x) be64toh(x) #define aic_le16toh(x) le16toh(x) #define aic_le32toh(x) le32toh(x) #define aic_le64toh(x) le64toh(x) #else #define aic_htobe16(x) (x) #define aic_htobe32(x) (x) #define aic_htobe64(x) (x) #define aic_htole16(x) (x) #define aic_htole32(x) (x) #define aic_htole64(x) (x) #define aic_be16toh(x) (x) #define aic_be32toh(x) (x) #define aic_be64toh(x) (x) #define aic_le16toh(x) (x) #define aic_le32toh(x) (x) #define aic_le64toh(x) (x) #endif /************************* Forward Declarations *******************************/ 
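/*
 * Note on naming: many of the aic_* symbols used below are really
 * per-driver names, remapped by the AIC_LIB_ENTRY() paste macros
 * above.  For example, a core that defines AIC_LIB_PREFIX as ahd sees
 * aic_softc expand to ahd_softc (prefix shown for illustration only).
 */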
typedef device_t aic_dev_softc_t; typedef union ccb *aic_io_ctx_t; struct aic_softc; struct scb; /*************************** Timer DataStructures *****************************/ typedef struct callout aic_timer_t; /****************************** Error Recovery ********************************/ void aic_set_recoveryscb(struct aic_softc *aic, struct scb *scb); -timeout_t aic_platform_timeout; +callout_func_t aic_platform_timeout; int aic_spawn_recovery_thread(struct aic_softc *aic); void aic_terminate_recovery_thread(struct aic_softc *aic); static __inline void aic_wakeup_recovery_thread(struct aic_softc *aic); static __inline void aic_wakeup_recovery_thread(struct aic_softc *aic) { wakeup(aic); } /****************************** Kernel Threads ********************************/ #if __FreeBSD_version > 500005 #if __FreeBSD_version > 800001 #define aic_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) #else #define aic_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) #endif #else #define aic_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ kthread_create(func, farg, proc_ptr, fmtstr, arg) #endif /******************************* Bus Space/DMA ********************************/ #if __FreeBSD_version >= 501102 #define aic_dma_tag_create(aic, parent_tag, alignment, boundary, \ lowaddr, highaddr, filter, filterarg, \ maxsize, nsegments, maxsegsz, flags, \ dma_tagp) \ bus_dma_tag_create(parent_tag, alignment, boundary, \ lowaddr, highaddr, filter, filterarg, \ maxsize, nsegments, maxsegsz, flags, \ busdma_lock_mutex, &aic->platform_data->mtx, \ dma_tagp) #else #define aic_dma_tag_create(aic, parent_tag, alignment, boundary, \ lowaddr, highaddr, filter, filterarg, \ maxsize, nsegments, maxsegsz, flags, \ dma_tagp) \ bus_dma_tag_create(parent_tag, alignment, boundary, \ lowaddr, highaddr, filter, filterarg, \ maxsize, nsegments, maxsegsz, flags, \ dma_tagp) #endif #define aic_dma_tag_destroy(aic, tag) \ bus_dma_tag_destroy(tag) #define aic_dmamem_alloc(aic, dmat, vaddr, flags, mapp) \ bus_dmamem_alloc(dmat, vaddr, flags, mapp) #define aic_dmamem_free(aic, dmat, vaddr, map) \ bus_dmamem_free(dmat, vaddr, map) #define aic_dmamap_create(aic, tag, flags, mapp) \ bus_dmamap_create(tag, flags, mapp) #define aic_dmamap_destroy(aic, tag, map) \ bus_dmamap_destroy(tag, map) #define aic_dmamap_load(aic, dmat, map, addr, buflen, callback, \ callback_arg, flags) \ bus_dmamap_load(dmat, map, addr, buflen, callback, callback_arg, flags) #define aic_dmamap_unload(aic, tag, map) \ bus_dmamap_unload(tag, map) /* XXX Need to update Bus DMA for partial map syncs */ #define aic_dmamap_sync(aic, dma_tag, dmamap, offset, len, op) \ bus_dmamap_sync(dma_tag, dmamap, op) /***************************** Core Includes **********************************/ #include AIC_CORE_INCLUDE /***************************** Timer Facilities *******************************/ #if __FreeBSD_version >= 500000 #define aic_timer_init(timer) callout_init(timer, /*mpsafe*/1) #else #define aic_timer_init callout_init #endif #define aic_timer_stop callout_stop static __inline void aic_timer_reset(aic_timer_t *, u_int, aic_callback_t *, void *); static __inline u_int aic_get_timeout(struct scb *); static __inline void aic_scb_timer_reset(struct scb *, u_int); static __inline void aic_timer_reset(aic_timer_t *timer, u_int msec, aic_callback_t *func, void *arg) { uint64_t 
time; time = msec; time *= hz; time /= 1000; callout_reset(timer, time, func, arg); } static __inline u_int aic_get_timeout(struct scb *scb) { return (scb->io_ctx->ccb_h.timeout); } static __inline void aic_scb_timer_reset(struct scb *scb, u_int msec) { uint64_t time; time = msec; time *= hz; time /= 1000; callout_reset(&scb->io_timer, time, aic_platform_timeout, scb); } static __inline void aic_scb_timer_start(struct scb *scb) { if (AIC_SCB_DATA(scb->aic_softc)->recovery_scbs == 0 && scb->io_ctx->ccb_h.timeout != CAM_TIME_INFINITY) { aic_scb_timer_reset(scb, scb->io_ctx->ccb_h.timeout); } } /************************** Transaction Operations ****************************/ static __inline void aic_set_transaction_status(struct scb *, uint32_t); static __inline void aic_set_scsi_status(struct scb *, uint32_t); static __inline uint32_t aic_get_transaction_status(struct scb *); static __inline uint32_t aic_get_scsi_status(struct scb *); static __inline void aic_set_transaction_tag(struct scb *, int, u_int); static __inline u_long aic_get_transfer_length(struct scb *); static __inline int aic_get_transfer_dir(struct scb *); static __inline void aic_set_residual(struct scb *, u_long); static __inline void aic_set_sense_residual(struct scb *, u_long); static __inline u_long aic_get_residual(struct scb *); static __inline int aic_perform_autosense(struct scb *); static __inline uint32_t aic_get_sense_bufsize(struct aic_softc*, struct scb*); static __inline void aic_freeze_ccb(union ccb *ccb); static __inline void aic_freeze_scb(struct scb *scb); static __inline void aic_platform_freeze_devq(struct aic_softc *, struct scb *); static __inline int aic_platform_abort_scbs(struct aic_softc *aic, int target, char channel, int lun, u_int tag, role_t role, uint32_t status); static __inline void aic_set_transaction_status(struct scb *scb, uint32_t status) { scb->io_ctx->ccb_h.status &= ~CAM_STATUS_MASK; scb->io_ctx->ccb_h.status |= status; } static __inline void aic_set_scsi_status(struct scb *scb, uint32_t status) { scb->io_ctx->csio.scsi_status = status; } static __inline uint32_t aic_get_transaction_status(struct scb *scb) { return (scb->io_ctx->ccb_h.status & CAM_STATUS_MASK); } static __inline uint32_t aic_get_scsi_status(struct scb *scb) { return (scb->io_ctx->csio.scsi_status); } static __inline void aic_set_transaction_tag(struct scb *scb, int enabled, u_int type) { scb->io_ctx->csio.tag_action = type; if (enabled) scb->io_ctx->ccb_h.flags |= CAM_TAG_ACTION_VALID; else scb->io_ctx->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; } static __inline u_long aic_get_transfer_length(struct scb *scb) { return (scb->io_ctx->csio.dxfer_len); } static __inline int aic_get_transfer_dir(struct scb *scb) { return (scb->io_ctx->ccb_h.flags & CAM_DIR_MASK); } static __inline void aic_set_residual(struct scb *scb, u_long resid) { scb->io_ctx->csio.resid = resid; } static __inline void aic_set_sense_residual(struct scb *scb, u_long resid) { scb->io_ctx->csio.sense_resid = resid; } static __inline u_long aic_get_residual(struct scb *scb) { return (scb->io_ctx->csio.resid); } static __inline int aic_perform_autosense(struct scb *scb) { return (!(scb->io_ctx->ccb_h.flags & CAM_DIS_AUTOSENSE)); } static __inline uint32_t aic_get_sense_bufsize(struct aic_softc *aic, struct scb *scb) { return (sizeof(struct scsi_sense_data)); } static __inline void aic_freeze_ccb(union ccb *ccb) { if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); } } static __inline 
void aic_freeze_scb(struct scb *scb) { aic_freeze_ccb(scb->io_ctx); } static __inline void aic_platform_freeze_devq(struct aic_softc *aic, struct scb *scb) { /* Nothing to do here for FreeBSD */ } static __inline int aic_platform_abort_scbs(struct aic_softc *aic, int target, char channel, int lun, u_int tag, role_t role, uint32_t status) { /* Nothing to do here for FreeBSD */ return (0); } static __inline void aic_platform_scb_free(struct aic_softc *aic, struct scb *scb) { /* What do we do to generically handle driver resource shortages??? */ if ((aic->flags & AIC_RESOURCE_SHORTAGE) != 0 && scb->io_ctx != NULL && (scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ; aic->flags &= ~AIC_RESOURCE_SHORTAGE; } scb->io_ctx = NULL; } /*************************** CAM CCB Operations *******************************/ void aic_calc_geometry(struct ccb_calc_geometry *ccg, int extended); /****************************** OS Primitives *********************************/ #define aic_delay DELAY /********************************** PCI ***************************************/ #ifdef AIC_PCI_CONFIG static __inline uint32_t aic_pci_read_config(aic_dev_softc_t pci, int reg, int width); static __inline void aic_pci_write_config(aic_dev_softc_t pci, int reg, uint32_t value, int width); static __inline int aic_get_pci_function(aic_dev_softc_t); static __inline int aic_get_pci_slot(aic_dev_softc_t); static __inline int aic_get_pci_bus(aic_dev_softc_t); static __inline uint32_t aic_pci_read_config(aic_dev_softc_t pci, int reg, int width) { return (pci_read_config(pci, reg, width)); } static __inline void aic_pci_write_config(aic_dev_softc_t pci, int reg, uint32_t value, int width) { pci_write_config(pci, reg, value, width); } static __inline int aic_get_pci_function(aic_dev_softc_t pci) { return (pci_get_function(pci)); } static __inline int aic_get_pci_slot(aic_dev_softc_t pci) { return (pci_get_slot(pci)); } static __inline int aic_get_pci_bus(aic_dev_softc_t pci) { return (pci_get_bus(pci)); } typedef enum { AIC_POWER_STATE_D0 = PCI_POWERSTATE_D0, AIC_POWER_STATE_D1 = PCI_POWERSTATE_D1, AIC_POWER_STATE_D2 = PCI_POWERSTATE_D2, AIC_POWER_STATE_D3 = PCI_POWERSTATE_D3 } aic_power_state; static __inline int aic_power_state_change(struct aic_softc *aic, aic_power_state new_state); static __inline int aic_power_state_change(struct aic_softc *aic, aic_power_state new_state) { return (pci_set_powerstate(aic->dev_softc, new_state)); } #endif Index: head/sys/dev/isp/isp_freebsd.c =================================================================== --- head/sys/dev/isp/isp_freebsd.c (revision 355600) +++ head/sys/dev/isp/isp_freebsd.c (revision 355601) @@ -1,4328 +1,4328 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2017 Alexander Motin * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include MODULE_VERSION(isp, 1); MODULE_DEPEND(isp, cam, 1, 1, 1); int isp_announced = 0; int isp_loop_down_limit = 60; /* default loop down limit */ int isp_quickboot_time = 7; /* don't wait more than N secs for loop up */ int isp_gone_device_time = 30; /* grace time before reporting device lost */ static const char prom3[] = "Chan %d [%u] PortID 0x%06x Departed because of %s"; static void isp_freeze_loopdown(ispsoftc_t *, int); static void isp_loop_changed(ispsoftc_t *isp, int chan); static d_ioctl_t ispioctl; static void isp_cam_async(void *, uint32_t, struct cam_path *, void *); static void isp_poll(struct cam_sim *); -static timeout_t isp_watchdog; -static timeout_t isp_gdt; +static callout_func_t isp_watchdog; +static callout_func_t isp_gdt; static task_fn_t isp_gdt_task; static void isp_kthread(void *); static void isp_action(struct cam_sim *, union ccb *); static int isp_timer_count; static void isp_timer(void *); static struct cdevsw isp_cdevsw = { .d_version = D_VERSION, .d_ioctl = ispioctl, .d_name = "isp", }; static int isp_role_sysctl(SYSCTL_HANDLER_ARGS) { ispsoftc_t *isp = (ispsoftc_t *)arg1; int chan = arg2; int error, old, value; value = FCPARAM(isp, chan)->role; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); if (value < ISP_ROLE_NONE || value > ISP_ROLE_BOTH) return (EINVAL); ISP_LOCK(isp); old = FCPARAM(isp, chan)->role; /* We don't allow target mode switch from here. */ value = (old & ISP_ROLE_TARGET) | (value & ISP_ROLE_INITIATOR); /* If nothing has changed -- we are done. */ if (value == old) { ISP_UNLOCK(isp); return (0); } /* Actually change the role. 
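 * Only the initiator bit can differ from the old role at this point;
 * the target bit was carried over from the previous role above.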
*/ error = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, value); ISP_UNLOCK(isp); return (error); } static int isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan) { struct ccb_setasync csa; struct cam_sim *sim; struct cam_path *path; #ifdef ISP_TARGET_MODE int i; #endif sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, device_get_unit(isp->isp_dev), &isp->isp_lock, isp->isp_maxcmds, isp->isp_maxcmds, devq); if (sim == NULL) return (ENOMEM); ISP_LOCK(isp); if (xpt_bus_register(sim, isp->isp_dev, chan) != CAM_SUCCESS) { ISP_UNLOCK(isp); cam_sim_free(sim, FALSE); return (EIO); } ISP_UNLOCK(isp); if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { ISP_LOCK(isp); xpt_bus_deregister(cam_sim_path(sim)); ISP_UNLOCK(isp); cam_sim_free(sim, FALSE); return (ENXIO); } xpt_setup_ccb(&csa.ccb_h, path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE; csa.callback = isp_cam_async; csa.callback_arg = sim; ISP_LOCK(isp); xpt_action((union ccb *)&csa); ISP_UNLOCK(isp); if (IS_SCSI(isp)) { struct isp_spi *spi = ISP_SPI_PC(isp, chan); spi->sim = sim; spi->path = path; #ifdef ISP_TARGET_MODE TAILQ_INIT(&spi->waitq); STAILQ_INIT(&spi->ntfree); for (i = 0; i < ATPDPSIZE; i++) STAILQ_INSERT_TAIL(&spi->ntfree, &spi->ntpool[i], next); LIST_INIT(&spi->atfree); for (i = ATPDPSIZE-1; i >= 0; i--) LIST_INSERT_HEAD(&spi->atfree, &spi->atpool[i], next); for (i = 0; i < ATPDPHASHSIZE; i++) LIST_INIT(&spi->atused[i]); #endif } else { fcparam *fcp = FCPARAM(isp, chan); struct isp_fc *fc = ISP_FC_PC(isp, chan); struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(isp->isp_osinfo.dev); struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev); char name[16]; ISP_LOCK(isp); fc->sim = sim; fc->path = path; fc->isp = isp; fc->ready = 1; fcp->isp_use_gft_id = 1; fcp->isp_use_gff_id = 1; callout_init_mtx(&fc->gdt, &isp->isp_lock, 0); TASK_INIT(&fc->gtask, 1, isp_gdt_task, fc); #ifdef ISP_TARGET_MODE TAILQ_INIT(&fc->waitq); STAILQ_INIT(&fc->ntfree); for (i = 0; i < ATPDPSIZE; i++) STAILQ_INSERT_TAIL(&fc->ntfree, &fc->ntpool[i], next); LIST_INIT(&fc->atfree); for (i = ATPDPSIZE-1; i >= 0; i--) LIST_INSERT_HEAD(&fc->atfree, &fc->atpool[i], next); for (i = 0; i < ATPDPHASHSIZE; i++) LIST_INIT(&fc->atused[i]); #endif isp_loop_changed(isp, chan); ISP_UNLOCK(isp); if (kproc_create(isp_kthread, fc, &fc->kproc, 0, 0, "%s_%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) { xpt_free_path(fc->path); ISP_LOCK(isp); xpt_bus_deregister(cam_sim_path(fc->sim)); ISP_UNLOCK(isp); cam_sim_free(fc->sim, FALSE); return (ENOMEM); } fc->num_threads += 1; if (chan > 0) { snprintf(name, sizeof(name), "chan%d", chan); tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, name, CTLFLAG_RW, 0, "Virtual channel"); } SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwnn", CTLFLAG_RD, &fcp->isp_wwnn, "World Wide Node Name"); SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwpn", CTLFLAG_RD, &fcp->isp_wwpn, "World Wide Port Name"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "loop_down_limit", CTLFLAG_RW, &fc->loop_down_limit, 0, "Loop Down Limit"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "gone_device_time", CTLFLAG_RW, &fc->gone_device_time, 0, "Gone Device Time"); #if defined(ISP_TARGET_MODE) && defined(DEBUG) SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "inject_lost_data_frame", CTLFLAG_RW, &fc->inject_lost_data_frame, 0, "Cause a Lost Frame on a Read"); #endif SYSCTL_ADD_PROC(ctx, 
SYSCTL_CHILDREN(tree), OID_AUTO, "role", CTLTYPE_INT | CTLFLAG_RW, isp, chan, isp_role_sysctl, "I", "Current role"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "speed", CTLFLAG_RD, &fcp->isp_gbspeed, 0, "Connection speed in gigabits"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "linkstate", CTLFLAG_RD, &fcp->isp_linkstate, 0, "Link state"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "fwstate", CTLFLAG_RD, &fcp->isp_fwstate, 0, "Firmware state"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "loopstate", CTLFLAG_RD, &fcp->isp_loopstate, 0, "Loop state"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "topo", CTLFLAG_RD, &fcp->isp_topo, 0, "Connection topology"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "use_gft_id", CTLFLAG_RWTUN, &fcp->isp_use_gft_id, 0, "Use GFT_ID during fabric scan"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "use_gff_id", CTLFLAG_RWTUN, &fcp->isp_use_gff_id, 0, "Use GFF_ID during fabric scan"); } return (0); } static void isp_detach_chan(ispsoftc_t *isp, int chan) { struct cam_sim *sim; struct cam_path *path; struct ccb_setasync csa; int *num_threads; ISP_GET_PC(isp, chan, sim, sim); ISP_GET_PC(isp, chan, path, path); ISP_GET_PC_ADDR(isp, chan, num_threads, num_threads); xpt_setup_ccb(&csa.ccb_h, path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = isp_cam_async; csa.callback_arg = sim; xpt_action((union ccb *)&csa); xpt_free_path(path); xpt_bus_deregister(cam_sim_path(sim)); cam_sim_free(sim, FALSE); /* Wait for the channel's spawned threads to exit. */ wakeup(isp->isp_osinfo.pc.ptr); while (*num_threads != 0) mtx_sleep(isp, &isp->isp_lock, PRIBIO, "isp_reap", 100); } int isp_attach(ispsoftc_t *isp) { const char *nu = device_get_nameunit(isp->isp_osinfo.dev); int du = device_get_unit(isp->isp_dev); int chan; /* * Create the device queue for our SIM(s). */ isp->isp_osinfo.devq = cam_simq_alloc(isp->isp_maxcmds); if (isp->isp_osinfo.devq == NULL) { return (EIO); } for (chan = 0; chan < isp->isp_nchan; chan++) { if (isp_attach_chan(isp, isp->isp_osinfo.devq, chan)) { goto unwind; } } callout_init_mtx(&isp->isp_osinfo.tmo, &isp->isp_lock, 0); isp_timer_count = hz >> 2; callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp); isp->isp_osinfo.cdev = make_dev(&isp_cdevsw, du, UID_ROOT, GID_OPERATOR, 0600, "%s", nu); if (isp->isp_osinfo.cdev) { isp->isp_osinfo.cdev->si_drv1 = isp; } return (0); unwind: while (--chan >= 0) { struct cam_sim *sim; struct cam_path *path; ISP_GET_PC(isp, chan, sim, sim); ISP_GET_PC(isp, chan, path, path); xpt_free_path(path); ISP_LOCK(isp); xpt_bus_deregister(cam_sim_path(sim)); ISP_UNLOCK(isp); cam_sim_free(sim, FALSE); } cam_simq_free(isp->isp_osinfo.devq); isp->isp_osinfo.devq = NULL; return (-1); } int isp_detach(ispsoftc_t *isp) { int chan; if (isp->isp_osinfo.cdev) { destroy_dev(isp->isp_osinfo.cdev); isp->isp_osinfo.cdev = NULL; } ISP_LOCK(isp); /* Tell spawned threads that we're exiting. 
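 * Each isp_detach_chan() call below then wakes that channel's worker
 * threads and sleeps until its num_threads count drops to zero.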
*/ isp->isp_osinfo.is_exiting = 1; for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1) isp_detach_chan(isp, chan); ISP_UNLOCK(isp); callout_drain(&isp->isp_osinfo.tmo); cam_simq_free(isp->isp_osinfo.devq); return (0); } static void isp_freeze_loopdown(ispsoftc_t *isp, int chan) { struct isp_fc *fc = ISP_FC_PC(isp, chan); if (fc->sim == NULL) return; if (fc->simqfrozen == 0) { isp_prt(isp, ISP_LOGDEBUG0, "Chan %d Freeze simq (loopdown)", chan); fc->simqfrozen = SIMQFRZ_LOOPDOWN; xpt_hold_boot(); xpt_freeze_simq(fc->sim, 1); } else { isp_prt(isp, ISP_LOGDEBUG0, "Chan %d Mark simq frozen (loopdown)", chan); fc->simqfrozen |= SIMQFRZ_LOOPDOWN; } } static void isp_unfreeze_loopdown(ispsoftc_t *isp, int chan) { struct isp_fc *fc = ISP_FC_PC(isp, chan); if (fc->sim == NULL) return; int wasfrozen = fc->simqfrozen & SIMQFRZ_LOOPDOWN; fc->simqfrozen &= ~SIMQFRZ_LOOPDOWN; if (wasfrozen && fc->simqfrozen == 0) { isp_prt(isp, ISP_LOGDEBUG0, "Chan %d Release simq", chan); xpt_release_simq(fc->sim, 1); xpt_release_boot(); } } static int ispioctl(struct cdev *dev, u_long c, caddr_t addr, int flags, struct thread *td) { ispsoftc_t *isp; int nr, chan, retval = ENOTTY; isp = dev->si_drv1; switch (c) { case ISP_SDBLEV: { int olddblev = isp->isp_dblev; isp->isp_dblev = *(int *)addr; *(int *)addr = olddblev; retval = 0; break; } case ISP_GETROLE: chan = *(int *)addr; if (chan < 0 || chan >= isp->isp_nchan) { retval = -ENXIO; break; } if (IS_FC(isp)) { *(int *)addr = FCPARAM(isp, chan)->role; } else { *(int *)addr = ISP_ROLE_INITIATOR; } retval = 0; break; case ISP_SETROLE: if (IS_SCSI(isp)) break; nr = *(int *)addr; chan = nr >> 8; if (chan < 0 || chan >= isp->isp_nchan) { retval = -ENXIO; break; } nr &= 0xff; if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) { retval = EINVAL; break; } ISP_LOCK(isp); *(int *)addr = FCPARAM(isp, chan)->role; retval = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, nr); ISP_UNLOCK(isp); retval = 0; break; case ISP_RESETHBA: ISP_LOCK(isp); isp_reinit(isp, 0); ISP_UNLOCK(isp); retval = 0; break; case ISP_RESCAN: if (IS_FC(isp)) { chan = *(intptr_t *)addr; if (chan < 0 || chan >= isp->isp_nchan) { retval = -ENXIO; break; } ISP_LOCK(isp); if (isp_fc_runstate(isp, chan, 5 * 1000000) != LOOP_READY) { retval = EIO; } else { retval = 0; } ISP_UNLOCK(isp); } break; case ISP_FC_LIP: if (IS_FC(isp)) { chan = *(intptr_t *)addr; if (chan < 0 || chan >= isp->isp_nchan) { retval = -ENXIO; break; } ISP_LOCK(isp); if (isp_control(isp, ISPCTL_SEND_LIP, chan)) { retval = EIO; } else { retval = 0; } ISP_UNLOCK(isp); } break; case ISP_FC_GETDINFO: { struct isp_fc_device *ifc = (struct isp_fc_device *) addr; fcportdb_t *lp; if (IS_SCSI(isp)) { break; } if (ifc->loopid >= MAX_FC_TARG) { retval = EINVAL; break; } lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid]; if (lp->state != FC_PORTDB_STATE_NIL) { ifc->role = (lp->prli_word3 & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT; ifc->loopid = lp->handle; ifc->portid = lp->portid; ifc->node_wwn = lp->node_wwn; ifc->port_wwn = lp->port_wwn; retval = 0; } else { retval = ENODEV; } break; } case ISP_FC_GETHINFO: { struct isp_hba_device *hba = (struct isp_hba_device *) addr; int chan = hba->fc_channel; if (chan < 0 || chan >= isp->isp_nchan) { retval = ENXIO; break; } hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev); hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev); hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev); hba->fc_nchannels = isp->isp_nchan; if (IS_FC(isp)) { hba->fc_nports = MAX_FC_TARG; hba->fc_speed = FCPARAM(isp, hba->fc_channel)->isp_gbspeed; hba->fc_topology 
= FCPARAM(isp, chan)->isp_topo + 1; hba->fc_loopid = FCPARAM(isp, chan)->isp_loopid; hba->nvram_node_wwn = FCPARAM(isp, chan)->isp_wwnn_nvram; hba->nvram_port_wwn = FCPARAM(isp, chan)->isp_wwpn_nvram; hba->active_node_wwn = FCPARAM(isp, chan)->isp_wwnn; hba->active_port_wwn = FCPARAM(isp, chan)->isp_wwpn; } else { hba->fc_nports = MAX_TARGETS; hba->fc_speed = 0; hba->fc_topology = 0; hba->nvram_node_wwn = 0ull; hba->nvram_port_wwn = 0ull; hba->active_node_wwn = 0ull; hba->active_port_wwn = 0ull; } retval = 0; break; } case ISP_TSK_MGMT: { int needmarker; struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr; uint16_t nphdl; mbreg_t mbs; if (IS_SCSI(isp)) { break; } chan = fct->chan; if (chan < 0 || chan >= isp->isp_nchan) { retval = -ENXIO; break; } needmarker = retval = 0; nphdl = fct->loopid; ISP_LOCK(isp); if (IS_24XX(isp)) { void *reqp; uint8_t resp[QENTRY_LEN]; isp24xx_tmf_t tmf; isp24xx_statusreq_t sp; fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; int i; for (i = 0; i < MAX_FC_TARG; i++) { lp = &fcp->portdb[i]; if (lp->handle == nphdl) { break; } } if (i == MAX_FC_TARG) { retval = ENXIO; ISP_UNLOCK(isp); break; } ISP_MEMZERO(&tmf, sizeof(tmf)); tmf.tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT; tmf.tmf_header.rqs_entry_count = 1; tmf.tmf_nphdl = lp->handle; tmf.tmf_delay = 2; tmf.tmf_timeout = 4; tmf.tmf_tidlo = lp->portid; tmf.tmf_tidhi = lp->portid >> 16; tmf.tmf_vpidx = ISP_GET_VPIDX(isp, chan); tmf.tmf_lun[1] = fct->lun & 0xff; if (fct->lun >= 256) { tmf.tmf_lun[0] = 0x40 | (fct->lun >> 8); } switch (fct->action) { case IPT_CLEAR_ACA: tmf.tmf_flags = ISP24XX_TMF_CLEAR_ACA; break; case IPT_TARGET_RESET: tmf.tmf_flags = ISP24XX_TMF_TARGET_RESET; needmarker = 1; break; case IPT_LUN_RESET: tmf.tmf_flags = ISP24XX_TMF_LUN_RESET; needmarker = 1; break; case IPT_CLEAR_TASK_SET: tmf.tmf_flags = ISP24XX_TMF_CLEAR_TASK_SET; needmarker = 1; break; case IPT_ABORT_TASK_SET: tmf.tmf_flags = ISP24XX_TMF_ABORT_TASK_SET; needmarker = 1; break; default: retval = EINVAL; break; } if (retval) { ISP_UNLOCK(isp); break; } /* Prepare space for response in memory */ memset(resp, 0xff, sizeof(resp)); tmf.tmf_handle = isp_allocate_handle(isp, resp, ISP_HANDLE_CTRL); if (tmf.tmf_handle == 0) { isp_prt(isp, ISP_LOGERR, "%s: TMF of Chan %d out of handles", __func__, chan); ISP_UNLOCK(isp); retval = ENOMEM; break; } /* Send request and wait for response. 
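 * The resp[] buffer was registered with the handle above, so the
 * response path can copy the status IOCB into it and wake this
 * thread; msleep() below bounds the wait to five seconds, after
 * which the handle is torn down and EIO is returned.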
*/ reqp = isp_getrqentry(isp); if (reqp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: TMF of Chan %d out of rqent", __func__, chan); isp_destroy_handle(isp, tmf.tmf_handle); ISP_UNLOCK(isp); retval = EIO; break; } isp_put_24xx_tmf(isp, &tmf, (isp24xx_tmf_t *)reqp); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "IOCB TMF", QENTRY_LEN, reqp); ISP_SYNC_REQUEST(isp); if (msleep(resp, &isp->isp_lock, 0, "TMF", 5*hz) == EWOULDBLOCK) { isp_prt(isp, ISP_LOGERR, "%s: TMF of Chan %d timed out", __func__, chan); isp_destroy_handle(isp, tmf.tmf_handle); ISP_UNLOCK(isp); retval = EIO; break; } if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "IOCB TMF response", QENTRY_LEN, resp); isp_get_24xx_response(isp, (isp24xx_statusreq_t *)resp, &sp); if (sp.req_completion_status != 0) retval = EIO; else if (needmarker) fcp->sendmarker = 1; } else { MBSINIT(&mbs, 0, MBLOGALL, 0); if (ISP_CAP_2KLOGIN(isp) == 0) { nphdl <<= 8; } switch (fct->action) { case IPT_CLEAR_ACA: mbs.param[0] = MBOX_CLEAR_ACA; mbs.param[1] = nphdl; mbs.param[2] = fct->lun; break; case IPT_TARGET_RESET: mbs.param[0] = MBOX_TARGET_RESET; mbs.param[1] = nphdl; needmarker = 1; break; case IPT_LUN_RESET: mbs.param[0] = MBOX_LUN_RESET; mbs.param[1] = nphdl; mbs.param[2] = fct->lun; needmarker = 1; break; case IPT_CLEAR_TASK_SET: mbs.param[0] = MBOX_CLEAR_TASK_SET; mbs.param[1] = nphdl; mbs.param[2] = fct->lun; needmarker = 1; break; case IPT_ABORT_TASK_SET: mbs.param[0] = MBOX_ABORT_TASK_SET; mbs.param[1] = nphdl; mbs.param[2] = fct->lun; needmarker = 1; break; default: retval = EINVAL; break; } if (retval == 0) { if (needmarker) { FCPARAM(isp, chan)->sendmarker = 1; } retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); if (retval) { retval = EIO; } } } ISP_UNLOCK(isp); break; } default: break; } return (retval); } /* * Local Inlines */ static ISP_INLINE int isp_get_pcmd(ispsoftc_t *, union ccb *); static ISP_INLINE void isp_free_pcmd(ispsoftc_t *, union ccb *); static ISP_INLINE int isp_get_pcmd(ispsoftc_t *isp, union ccb *ccb) { ISP_PCMD(ccb) = isp->isp_osinfo.pcmd_free; if (ISP_PCMD(ccb) == NULL) { return (-1); } isp->isp_osinfo.pcmd_free = ((struct isp_pcmd *)ISP_PCMD(ccb))->next; return (0); } static ISP_INLINE void isp_free_pcmd(ispsoftc_t *isp, union ccb *ccb) { if (ISP_PCMD(ccb)) { #ifdef ISP_TARGET_MODE PISP_PCMD(ccb)->datalen = 0; #endif PISP_PCMD(ccb)->next = isp->isp_osinfo.pcmd_free; isp->isp_osinfo.pcmd_free = ISP_PCMD(ccb); ISP_PCMD(ccb) = NULL; } } /* * Put the target mode functions here, because some are inlines */ #ifdef ISP_TARGET_MODE static ISP_INLINE tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t); static atio_private_data_t *isp_get_atpd(ispsoftc_t *, int, uint32_t); static atio_private_data_t *isp_find_atpd(ispsoftc_t *, int, uint32_t); static void isp_put_atpd(ispsoftc_t *, int, atio_private_data_t *); static inot_private_data_t *isp_get_ntpd(ispsoftc_t *, int); static inot_private_data_t *isp_find_ntpd(ispsoftc_t *, int, uint32_t, uint32_t); static void isp_put_ntpd(ispsoftc_t *, int, inot_private_data_t *); static cam_status create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **); static void destroy_lun_state(ispsoftc_t *, int, tstate_t *); static void isp_enable_lun(ispsoftc_t *, union ccb *); static void isp_disable_lun(ispsoftc_t *, union ccb *); -static timeout_t isp_refire_putback_atio; -static timeout_t isp_refire_notify_ack; +static callout_func_t isp_refire_putback_atio; +static callout_func_t isp_refire_notify_ack; static void isp_complete_ctio(union ccb *); static void 
isp_target_putback_atio(union ccb *); enum Start_Ctio_How { FROM_CAM, FROM_TIMER, FROM_SRR, FROM_CTIO_DONE }; static void isp_target_start_ctio(ispsoftc_t *, union ccb *, enum Start_Ctio_How); static void isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *); static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *); static void isp_handle_platform_ctio(ispsoftc_t *, void *); static int isp_handle_platform_target_notify_ack(ispsoftc_t *, isp_notify_t *, uint32_t rsp); static void isp_handle_platform_target_tmf(ispsoftc_t *, isp_notify_t *); static void isp_target_mark_aborted_early(ispsoftc_t *, int chan, tstate_t *, uint32_t); static ISP_INLINE tstate_t * get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun) { tstate_t *tptr = NULL; struct tslist *lhp; if (bus < isp->isp_nchan) { ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(lun)], lhp); SLIST_FOREACH(tptr, lhp, next) { if (tptr->ts_lun == lun) return (tptr); } } return (NULL); } static int isp_atio_restart(ispsoftc_t *isp, int bus, tstate_t *tptr) { inot_private_data_t *ntp; struct ntpdlist rq; if (STAILQ_EMPTY(&tptr->restart_queue)) return (0); STAILQ_INIT(&rq); STAILQ_CONCAT(&rq, &tptr->restart_queue); while ((ntp = STAILQ_FIRST(&rq)) != NULL) { STAILQ_REMOVE_HEAD(&rq, next); if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at7_entry_t *)ntp->data)->at_rxid); isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->data); } else { isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at2_entry_t *)ntp->data)->at_rxid); isp_handle_platform_atio2(isp, (at2_entry_t *) ntp->data); } isp_put_ntpd(isp, bus, ntp); if (!STAILQ_EMPTY(&tptr->restart_queue)) break; } if (!STAILQ_EMPTY(&rq)) { STAILQ_CONCAT(&rq, &tptr->restart_queue); STAILQ_CONCAT(&tptr->restart_queue, &rq); } return (!STAILQ_EMPTY(&tptr->restart_queue)); } static void isp_tmcmd_restart(ispsoftc_t *isp) { tstate_t *tptr; union ccb *ccb; struct tslist *lhp; struct isp_ccbq *waitq; int bus, i; for (bus = 0; bus < isp->isp_nchan; bus++) { for (i = 0; i < LUN_HASH_SIZE; i++) { ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); SLIST_FOREACH(tptr, lhp, next) isp_atio_restart(isp, bus, tptr); } /* * We only need to do this once per channel. 
*/ ISP_GET_PC_ADDR(isp, bus, waitq, waitq); ccb = (union ccb *)TAILQ_FIRST(waitq); if (ccb != NULL) { TAILQ_REMOVE(waitq, &ccb->ccb_h, sim_links.tqe); isp_target_start_ctio(isp, ccb, FROM_TIMER); } } } static atio_private_data_t * isp_get_atpd(ispsoftc_t *isp, int chan, uint32_t tag) { struct atpdlist *atfree; struct atpdlist *atused; atio_private_data_t *atp; ISP_GET_PC_ADDR(isp, chan, atfree, atfree); atp = LIST_FIRST(atfree); if (atp) { LIST_REMOVE(atp, next); atp->tag = tag; ISP_GET_PC(isp, chan, atused, atused); LIST_INSERT_HEAD(&atused[ATPDPHASH(tag)], atp, next); } return (atp); } static atio_private_data_t * isp_find_atpd(ispsoftc_t *isp, int chan, uint32_t tag) { struct atpdlist *atused; atio_private_data_t *atp; ISP_GET_PC(isp, chan, atused, atused); LIST_FOREACH(atp, &atused[ATPDPHASH(tag)], next) { if (atp->tag == tag) return (atp); } return (NULL); } static void isp_put_atpd(ispsoftc_t *isp, int chan, atio_private_data_t *atp) { struct atpdlist *atfree; if (atp->ests) { isp_put_ecmd(isp, atp->ests); } LIST_REMOVE(atp, next); memset(atp, 0, sizeof (*atp)); ISP_GET_PC_ADDR(isp, chan, atfree, atfree); LIST_INSERT_HEAD(atfree, atp, next); } static void isp_dump_atpd(ispsoftc_t *isp, int chan) { atio_private_data_t *atp, *atpool; const char *states[8] = { "Free", "ATIO", "CAM", "CTIO", "LAST_CTIO", "PDON", "?6", "7" }; ISP_GET_PC(isp, chan, atpool, atpool); for (atp = atpool; atp < &atpool[ATPDPSIZE]; atp++) { if (atp->state == ATPD_STATE_FREE) continue; isp_prt(isp, ISP_LOGALL, "Chan %d ATP [0x%x] origdlen %u bytes_xfrd %u lun %jx nphdl 0x%04x s_id 0x%06x d_id 0x%06x oxid 0x%04x state %s", chan, atp->tag, atp->orig_datalen, atp->bytes_xfered, (uintmax_t)atp->lun, atp->nphdl, atp->sid, atp->did, atp->oxid, states[atp->state & 0x7]); } } static inot_private_data_t * isp_get_ntpd(ispsoftc_t *isp, int chan) { struct ntpdlist *ntfree; inot_private_data_t *ntp; ISP_GET_PC_ADDR(isp, chan, ntfree, ntfree); ntp = STAILQ_FIRST(ntfree); if (ntp) STAILQ_REMOVE_HEAD(ntfree, next); return (ntp); } static inot_private_data_t * isp_find_ntpd(ispsoftc_t *isp, int chan, uint32_t tag_id, uint32_t seq_id) { inot_private_data_t *ntp, *ntp2; ISP_GET_PC(isp, chan, ntpool, ntp); ISP_GET_PC_ADDR(isp, chan, ntpool[ATPDPSIZE], ntp2); for (; ntp < ntp2; ntp++) { if (ntp->tag_id == tag_id && ntp->seq_id == seq_id) return (ntp); } return (NULL); } static void isp_put_ntpd(ispsoftc_t *isp, int chan, inot_private_data_t *ntp) { struct ntpdlist *ntfree; ntp->tag_id = ntp->seq_id = 0; ISP_GET_PC_ADDR(isp, chan, ntfree, ntfree); STAILQ_INSERT_HEAD(ntfree, ntp, next); } static cam_status create_lun_state(ispsoftc_t *isp, int bus, struct cam_path *path, tstate_t **rslt) { lun_id_t lun; struct tslist *lhp; tstate_t *tptr; lun = xpt_path_lun_id(path); if (lun != CAM_LUN_WILDCARD) { if (ISP_MAX_LUNS(isp) > 0 && lun >= ISP_MAX_LUNS(isp)) { return (CAM_LUN_INVALID); } } tptr = malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); if (tptr == NULL) { return (CAM_RESRC_UNAVAIL); } tptr->ts_lun = lun; SLIST_INIT(&tptr->atios); SLIST_INIT(&tptr->inots); STAILQ_INIT(&tptr->restart_queue); ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(lun)], lhp); SLIST_INSERT_HEAD(lhp, tptr, next); *rslt = tptr; ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, path, "created tstate\n"); return (CAM_REQ_CMP); } static void destroy_lun_state(ispsoftc_t *isp, int bus, tstate_t *tptr) { union ccb *ccb; struct tslist *lhp; inot_private_data_t *ntp; while ((ccb = (union ccb *)SLIST_FIRST(&tptr->atios)) != NULL) { SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 
ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); }; while ((ccb = (union ccb *)SLIST_FIRST(&tptr->inots)) != NULL) { SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle); ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); } while ((ntp = STAILQ_FIRST(&tptr->restart_queue)) != NULL) { isp_endcmd(isp, ntp->data, NIL_HANDLE, bus, SCSI_STATUS_BUSY, 0); STAILQ_REMOVE_HEAD(&tptr->restart_queue, next); isp_put_ntpd(isp, bus, ntp); } ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(tptr->ts_lun)], lhp); SLIST_REMOVE(lhp, tptr, tstate, next); free(tptr, M_DEVBUF); } static void isp_enable_lun(ispsoftc_t *isp, union ccb *ccb) { tstate_t *tptr; int bus; target_id_t target; lun_id_t lun; if (!IS_FC(isp) || !ISP_CAP_TMODE(isp) || !ISP_CAP_SCCFW(isp)) { xpt_print(ccb->ccb_h.path, "Target mode is not supported\n"); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); return; } /* * We only support either target and lun both wildcard * or target and lun both non-wildcard. */ bus = XS_CHANNEL(ccb); target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path, "enabling lun %jx\n", (uintmax_t)lun); if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return; } /* Create the state pointer. It should not already exist. */ tptr = get_lun_statep(isp, bus, lun); if (tptr) { ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; xpt_done(ccb); return; } ccb->ccb_h.status = create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); if (ccb->ccb_h.status != CAM_REQ_CMP) { xpt_done(ccb); return; } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); } static void isp_disable_lun(ispsoftc_t *isp, union ccb *ccb) { tstate_t *tptr = NULL; int bus; target_id_t target; lun_id_t lun; bus = XS_CHANNEL(ccb); target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path, "disabling lun %jx\n", (uintmax_t)lun); if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return; } /* Find the state pointer. */ if ((tptr = get_lun_statep(isp, bus, lun)) == NULL) { ccb->ccb_h.status = CAM_PATH_INVALID; xpt_done(ccb); return; } destroy_lun_state(isp, bus, tptr); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); } static void isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb, enum Start_Ctio_How how) { int fctape, sendstatus, resid; fcparam *fcp; atio_private_data_t *atp; struct ccb_scsiio *cso; struct isp_ccbq *waitq; uint32_t dmaresult, handle, xfrlen, sense_length, tmp; uint8_t local[QENTRY_LEN]; isp_prt(isp, ISP_LOGTDEBUG0, "%s: ENTRY[0x%x] how %u xfrlen %u sendstatus %d sense_len %u", __func__, ccb->csio.tag_id, how, ccb->csio.dxfer_len, (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0, ((ccb->ccb_h.flags & CAM_SEND_SENSE)? 
ccb->csio.sense_len : 0)); ISP_GET_PC_ADDR(isp, XS_CHANNEL(ccb), waitq, waitq); switch (how) { case FROM_CAM: /* * Insert at the tail of the list, if any, waiting CTIO CCBs */ TAILQ_INSERT_TAIL(waitq, &ccb->ccb_h, sim_links.tqe); break; case FROM_TIMER: case FROM_SRR: case FROM_CTIO_DONE: TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); break; } while ((ccb = (union ccb *) TAILQ_FIRST(waitq)) != NULL) { TAILQ_REMOVE(waitq, &ccb->ccb_h, sim_links.tqe); cso = &ccb->csio; xfrlen = cso->dxfer_len; if (xfrlen == 0) { if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { ISP_PATH_PRT(isp, ISP_LOGERR, ccb->ccb_h.path, "a data transfer length of zero but no status to send is wrong\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); continue; } } atp = isp_find_atpd(isp, XS_CHANNEL(ccb), cso->tag_id); if (atp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: [0x%x] cannot find private data adjunct in %s", __func__, cso->tag_id, __func__); isp_dump_atpd(isp, XS_CHANNEL(ccb)); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); continue; } /* * Is this command a dead duck? */ if (atp->dead) { isp_prt(isp, ISP_LOGERR, "%s: [0x%x] not sending a CTIO for a dead command", __func__, cso->tag_id); ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); continue; } /* * Check to make sure we're still in target mode. */ fcp = FCPARAM(isp, XS_CHANNEL(ccb)); if ((fcp->role & ISP_ROLE_TARGET) == 0) { isp_prt(isp, ISP_LOGERR, "%s: [0x%x] stopping sending a CTIO because we're no longer in target mode", __func__, cso->tag_id); ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); continue; } /* * We're only handling ATPD_CCB_OUTSTANDING outstanding CCB at a time (one of which * could be split into two CTIOs to split data and status). */ if (atp->ctcnt >= ATPD_CCB_OUTSTANDING) { isp_prt(isp, ISP_LOGTINFO, "[0x%x] handling only %d CCBs at a time (flags for this ccb: 0x%x)", cso->tag_id, ATPD_CCB_OUTSTANDING, ccb->ccb_h.flags); TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); break; } /* * Does the initiator expect FC-Tape style responses? */ if ((atp->word3 & PRLI_WD3_RETRY) && fcp->fctape_enabled) { fctape = 1; } else { fctape = 0; } /* * If we already did the data xfer portion of a CTIO that sends data * and status, don't do it again and do the status portion now. */ if (atp->sendst) { isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] now sending synthesized status orig_dl=%u xfered=%u bit=%u", cso->tag_id, atp->orig_datalen, atp->bytes_xfered, atp->bytes_in_transit); xfrlen = 0; /* we already did the data transfer */ atp->sendst = 0; } if (ccb->ccb_h.flags & CAM_SEND_STATUS) { sendstatus = 1; } else { sendstatus = 0; } if (ccb->ccb_h.flags & CAM_SEND_SENSE) { KASSERT((sendstatus != 0), ("how can you have CAM_SEND_SENSE w/o CAM_SEND_STATUS?")); /* * Sense length is not the entire sense data structure size. Periph * drivers don't seem to be setting sense_len to reflect the actual * size. We'll peek inside to get the right amount. 
*/ sense_length = cso->sense_len; /* * This 'cannot' happen */ if (sense_length > (XCMD_SIZE - MIN_FCP_RESPONSE_SIZE)) { sense_length = XCMD_SIZE - MIN_FCP_RESPONSE_SIZE; } } else { sense_length = 0; } memset(local, 0, QENTRY_LEN); /* * Check for overflow */ tmp = atp->bytes_xfered + atp->bytes_in_transit; if (xfrlen > 0 && tmp > atp->orig_datalen) { isp_prt(isp, ISP_LOGERR, "%s: [0x%x] data overflow by %u bytes", __func__, cso->tag_id, tmp + xfrlen - atp->orig_datalen); ccb->ccb_h.status = CAM_DATA_RUN_ERR; xpt_done(ccb); continue; } if (xfrlen > atp->orig_datalen - tmp) { xfrlen = atp->orig_datalen - tmp; if (xfrlen == 0 && !sendstatus) { cso->resid = cso->dxfer_len; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); continue; } } if (IS_24XX(isp)) { ct7_entry_t *cto = (ct7_entry_t *) local; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; cto->ct_header.rqs_entry_count = 1; cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM; ATPD_SET_SEQNO(cto, atp); cto->ct_nphdl = atp->nphdl; cto->ct_rxid = atp->tag; cto->ct_iid_lo = atp->sid; cto->ct_iid_hi = atp->sid >> 16; cto->ct_oxid = atp->oxid; cto->ct_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(ccb)); cto->ct_timeout = XS_TIME(ccb); cto->ct_flags = atp->tattr << CT7_TASK_ATTR_SHIFT; /* * Mode 1, status, no data. Only possible when we are sending status, have * no data to transfer, and any sense data can fit into a ct7_entry_t. * * Mode 2, status, no data. We have to use this in the case that * the sense data won't fit into a ct7_entry_t. * */ if (sendstatus && xfrlen == 0) { cto->ct_flags |= CT7_SENDSTATUS | CT7_NO_DATA; resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit; if (sense_length <= MAXRESPLEN_24XX) { cto->ct_flags |= CT7_FLAG_MODE1; cto->ct_scsi_status = cso->scsi_status; if (resid < 0) { cto->ct_resid = -resid; cto->ct_scsi_status |= (FCP_RESID_OVERFLOW << 8); } else if (resid > 0) { cto->ct_resid = resid; cto->ct_scsi_status |= (FCP_RESID_UNDERFLOW << 8); } if (fctape) { cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; } if (sense_length) { cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8); cto->rsp.m1.ct_resplen = cto->ct_senselen = sense_length; memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, sense_length); } } else { bus_addr_t addr; char buf[XCMD_SIZE]; fcp_rsp_iu_t *rp; if (atp->ests == NULL) { atp->ests = isp_get_ecmd(isp); if (atp->ests == NULL) { TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); break; } } memset(buf, 0, sizeof (buf)); rp = (fcp_rsp_iu_t *)buf; if (fctape) { cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; rp->fcp_rsp_bits |= FCP_CONF_REQ; } cto->ct_flags |= CT7_FLAG_MODE2; rp->fcp_rsp_scsi_status = cso->scsi_status; if (resid < 0) { rp->fcp_rsp_resid = -resid; rp->fcp_rsp_bits |= FCP_RESID_OVERFLOW; } else if (resid > 0) { rp->fcp_rsp_resid = resid; rp->fcp_rsp_bits |= FCP_RESID_UNDERFLOW; } if (sense_length) { rp->fcp_rsp_snslen = sense_length; cto->ct_senselen = sense_length; rp->fcp_rsp_bits |= FCP_SNSLEN_VALID; isp_put_fcp_rsp_iu(isp, rp, atp->ests); memcpy(((fcp_rsp_iu_t *)atp->ests)->fcp_rsp_extra, &cso->sense_data, sense_length); } else { isp_put_fcp_rsp_iu(isp, rp, atp->ests); } if (isp->isp_dblev & ISP_LOGTDEBUG1) { isp_print_bytes(isp, "FCP Response Frame After Swizzling", MIN_FCP_RESPONSE_SIZE + sense_length, atp->ests); } addr = isp->isp_osinfo.ecmd_dma; addr += ((((isp_ecmd_t *)atp->ests) - isp->isp_osinfo.ecmd_base) * XCMD_SIZE); isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests, (uintmax_t) 
isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length); cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length; cto->rsp.m2.ct_fcp_rsp_iudata.ds_base = DMA_LO32(addr); cto->rsp.m2.ct_fcp_rsp_iudata.ds_basehi = DMA_HI32(addr); cto->rsp.m2.ct_fcp_rsp_iudata.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length; } if (sense_length) { isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d slen %u sense: %x %x/%x/%x", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid, sense_length, cso->sense_data.error_code, cso->sense_data.sense_buf[1], cso->sense_data.sense_buf[11], cso->sense_data.sense_buf[12]); } else { isp_prt(isp, ISP_LOGDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid); } atp->state = ATPD_STATE_LAST_CTIO; } /* * Mode 0 data transfers, *possibly* with status. */ if (xfrlen != 0) { cto->ct_flags |= CT7_FLAG_MODE0; if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cto->ct_flags |= CT7_DATA_IN; } else { cto->ct_flags |= CT7_DATA_OUT; } cto->rsp.m0.reloff = atp->bytes_xfered + atp->bytes_in_transit; cto->rsp.m0.ct_xfrlen = xfrlen; #ifdef DEBUG if (ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame && xfrlen > ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame) { isp_prt(isp, ISP_LOGWARN, "%s: truncating data frame with xfrlen %d to %d", __func__, xfrlen, xfrlen - (xfrlen >> 2)); ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame = 0; cto->rsp.m0.ct_xfrlen -= xfrlen >> 2; } #endif if (sendstatus) { resid = atp->orig_datalen - atp->bytes_xfered - xfrlen; if (cso->scsi_status == SCSI_STATUS_OK && resid == 0 /* && fctape == 0 */) { cto->ct_flags |= CT7_SENDSTATUS; atp->state = ATPD_STATE_LAST_CTIO; if (fctape) { cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; } } else { atp->sendst = 1; /* send status later */ cto->ct_header.rqs_seqno &= ~ATPD_SEQ_NOTIFY_CAM; atp->state = ATPD_STATE_CTIO; } } else { atp->state = ATPD_STATE_CTIO; } isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x xfrlen=%u off=%u", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, xfrlen, atp->bytes_xfered); } } else { ct2_entry_t *cto = (ct2_entry_t *) local; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; cto->ct_header.rqs_entry_count = 1; cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM; ATPD_SET_SEQNO(cto, atp); if (ISP_CAP_2KLOGIN(isp)) { ((ct2e_entry_t *)cto)->ct_iid = atp->nphdl; } else { cto->ct_iid = atp->nphdl; if (ISP_CAP_SCCFW(isp) == 0) { cto->ct_lun = ccb->ccb_h.target_lun; } } cto->ct_timeout = XS_TIME(ccb); cto->ct_rxid = cso->tag_id; /* * Mode 1, status, no data. Only possible when we are sending status, have * no data to transfer, and the sense length can fit in the ct7_entry. * * Mode 2, status, no data. We have to use this in the case the response * length won't fit into a ct2_entry_t. * * We'll fill out this structure with information as if this were a * Mode 1. The hardware layer will create the Mode 2 FCP RSP IU as * needed based upon this. 
*/ if (sendstatus && xfrlen == 0) { cto->ct_flags |= CT2_SENDSTATUS | CT2_NO_DATA; resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit; if (sense_length <= MAXRESPLEN) { if (resid < 0) { cto->ct_resid = -resid; } else if (resid > 0) { cto->ct_resid = resid; } cto->ct_flags |= CT2_FLAG_MODE1; cto->rsp.m1.ct_scsi_status = cso->scsi_status; if (resid < 0) { cto->rsp.m1.ct_scsi_status |= CT2_DATA_OVER; } else if (resid > 0) { cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER; } if (fctape) { cto->ct_flags |= CT2_CONFIRM; } if (sense_length) { cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; cto->rsp.m1.ct_resplen = cto->rsp.m1.ct_senselen = sense_length; memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, sense_length); } } else { bus_addr_t addr; char buf[XCMD_SIZE]; fcp_rsp_iu_t *rp; if (atp->ests == NULL) { atp->ests = isp_get_ecmd(isp); if (atp->ests == NULL) { TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); break; } } memset(buf, 0, sizeof (buf)); rp = (fcp_rsp_iu_t *)buf; if (fctape) { cto->ct_flags |= CT2_CONFIRM; rp->fcp_rsp_bits |= FCP_CONF_REQ; } cto->ct_flags |= CT2_FLAG_MODE2; rp->fcp_rsp_scsi_status = cso->scsi_status; if (resid < 0) { rp->fcp_rsp_resid = -resid; rp->fcp_rsp_bits |= FCP_RESID_OVERFLOW; } else if (resid > 0) { rp->fcp_rsp_resid = resid; rp->fcp_rsp_bits |= FCP_RESID_UNDERFLOW; } if (sense_length) { rp->fcp_rsp_snslen = sense_length; rp->fcp_rsp_bits |= FCP_SNSLEN_VALID; isp_put_fcp_rsp_iu(isp, rp, atp->ests); memcpy(((fcp_rsp_iu_t *)atp->ests)->fcp_rsp_extra, &cso->sense_data, sense_length); } else { isp_put_fcp_rsp_iu(isp, rp, atp->ests); } if (isp->isp_dblev & ISP_LOGTDEBUG1) { isp_print_bytes(isp, "FCP Response Frame After Swizzling", MIN_FCP_RESPONSE_SIZE + sense_length, atp->ests); } addr = isp->isp_osinfo.ecmd_dma; addr += ((((isp_ecmd_t *)atp->ests) - isp->isp_osinfo.ecmd_base) * XCMD_SIZE); isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests, (uintmax_t) isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length); cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length; cto->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_base = DMA_LO32(addr); cto->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length; } if (sense_length) { isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO2[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d sense: %x %x/%x/%x", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cso->scsi_status, cto->ct_flags, cto->ct_resid, cso->sense_data.error_code, cso->sense_data.sense_buf[1], cso->sense_data.sense_buf[11], cso->sense_data.sense_buf[12]); } else { isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO2[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cso->scsi_status, cto->ct_flags, cto->ct_resid); } atp->state = ATPD_STATE_LAST_CTIO; } if (xfrlen != 0) { cto->ct_flags |= CT2_FLAG_MODE0; if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cto->ct_flags |= CT2_DATA_IN; } else { cto->ct_flags |= CT2_DATA_OUT; } cto->ct_reloff = atp->bytes_xfered + atp->bytes_in_transit; cto->rsp.m0.ct_xfrlen = xfrlen; if (sendstatus) { resid = atp->orig_datalen - atp->bytes_xfered - xfrlen; if (cso->scsi_status == SCSI_STATUS_OK && resid == 0 /*&& fctape == 0*/) { cto->ct_flags |= CT2_SENDSTATUS; atp->state = ATPD_STATE_LAST_CTIO; if (fctape) { cto->ct_flags |= CT2_CONFIRM; } } else { atp->sendst = 1; /* send status later */ 
cto->ct_header.rqs_seqno &= ~ATPD_SEQ_NOTIFY_CAM; atp->state = ATPD_STATE_CTIO; } } else { atp->state = ATPD_STATE_CTIO; } } isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO2[%x] seq %u nc %d CDB0=%x scsi status %x flags %x resid %d xfrlen %u offset %u", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cso->scsi_status, cto->ct_flags, cto->ct_resid, cso->dxfer_len, atp->bytes_xfered); } if (isp_get_pcmd(isp, ccb)) { ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "out of PCMDs\n"); TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); break; } handle = isp_allocate_handle(isp, ccb, ISP_HANDLE_TARGET); if (handle == 0) { ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "No XFLIST pointers for %s\n", __func__); TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); isp_free_pcmd(isp, ccb); break; } atp->bytes_in_transit += xfrlen; PISP_PCMD(ccb)->datalen = xfrlen; /* * Call the dma setup routines for this entry (and any subsequent * CTIOs) if there's data to move, and then tell the f/w it's got * new things to play with. As with isp_start's usage of DMA setup, * any swizzling is done in the machine dependent layer. Because * of this, we put the request onto the queue area first in native * format. */ if (IS_24XX(isp)) { ct7_entry_t *cto = (ct7_entry_t *) local; cto->ct_syshandle = handle; } else { ct2_entry_t *cto = (ct2_entry_t *) local; cto->ct_syshandle = handle; } dmaresult = ISP_DMASETUP(isp, cso, (ispreq_t *) local); if (dmaresult != CMD_QUEUED) { isp_destroy_handle(isp, handle); isp_free_pcmd(isp, ccb); if (dmaresult == CMD_EAGAIN) { TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); break; } ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); continue; } ccb->ccb_h.status = CAM_REQ_INPROG | CAM_SIM_QUEUED; if (xfrlen) { ccb->ccb_h.spriv_field0 = atp->bytes_xfered; } else { ccb->ccb_h.spriv_field0 = ~0; } atp->ctcnt++; atp->seqno++; } } static void isp_refire_putback_atio(void *arg) { union ccb *ccb = arg; ISP_ASSERT_LOCKED((ispsoftc_t *)XS_ISP(ccb)); isp_target_putback_atio(ccb); } static void isp_refire_notify_ack(void *arg) { isp_tna_t *tp = arg; ispsoftc_t *isp = tp->isp; ISP_ASSERT_LOCKED(isp); if (isp_notify_ack(isp, tp->not)) { callout_schedule(&tp->timer, 5); } else { free(tp, M_DEVBUF); } } static void isp_target_putback_atio(union ccb *ccb) { ispsoftc_t *isp = XS_ISP(ccb); struct ccb_scsiio *cso = &ccb->csio; at2_entry_t local, *at = &local; ISP_MEMZERO(at, sizeof (at2_entry_t)); at->at_header.rqs_entry_type = RQSTYPE_ATIO2; at->at_header.rqs_entry_count = 1; if (ISP_CAP_SCCFW(isp)) { at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; } else { at->at_lun = (uint8_t) ccb->ccb_h.target_lun; } at->at_status = CT_OK; at->at_rxid = cso->tag_id; at->at_iid = cso->init_id; if (isp_target_put_entry(isp, at)) { callout_reset(&PISP_PCMD(ccb)->wdog, 10, isp_refire_putback_atio, ccb); } else isp_complete_ctio(ccb); } static void isp_complete_ctio(union ccb *ccb) { if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; xpt_done(ccb); } } static void isp_handle_platform_atio2(ispsoftc_t *isp, at2_entry_t *aep) { fcparam *fcp; lun_id_t lun; fcportdb_t *lp; tstate_t *tptr; struct ccb_accept_tio *atiop; uint16_t nphdl; atio_private_data_t *atp; inot_private_data_t *ntp; /* * The firmware status (except for the QLTM_SVALID bit) * indicates why this ATIO was sent to us. * * If QLTM_SVALID is set, the firmware has recommended Sense Data. 
*/ if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { isp_prt(isp, ISP_LOGWARN, "bogus atio (0x%x) leaked to platform", aep->at_status); isp_endcmd(isp, aep, NIL_HANDLE, 0, SCSI_STATUS_BUSY, 0); return; } fcp = FCPARAM(isp, 0); if (ISP_CAP_SCCFW(isp)) { lun = aep->at_scclun; } else { lun = aep->at_lun; } if (ISP_CAP_2KLOGIN(isp)) { nphdl = ((at2e_entry_t *)aep)->at_iid; } else { nphdl = aep->at_iid; } tptr = get_lun_statep(isp, 0, lun); if (tptr == NULL) { tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); if (tptr == NULL) { isp_prt(isp, ISP_LOGWARN, "%s: [0x%x] no state pointer for lun %jx or wildcard", __func__, aep->at_rxid, (uintmax_t)lun); if (lun == 0) { isp_endcmd(isp, aep, nphdl, 0, SCSI_STATUS_BUSY, 0); } else { isp_endcmd(isp, aep, nphdl, 0, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0); } return; } } /* * Start any commands pending resources first. */ if (isp_atio_restart(isp, 0, tptr)) goto noresrc; atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); if (atiop == NULL) { goto noresrc; } atp = isp_get_atpd(isp, 0, aep->at_rxid); if (atp == NULL) { goto noresrc; } atp->state = ATPD_STATE_ATIO; SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, atiop->ccb_h.path, "Take FREE ATIO\n"); atiop->ccb_h.target_id = ISP_MAX_TARGETS(isp); atiop->ccb_h.target_lun = lun; /* * We don't get 'suggested' sense data as we do with SCSI cards. */ atiop->sense_len = 0; /* * If we're not in the port database, add ourselves. */ if (IS_2100(isp)) atiop->init_id = nphdl; else { if (isp_find_pdb_by_handle(isp, 0, nphdl, &lp)) { atiop->init_id = FC_PORTDB_TGT(isp, 0, lp); } else { isp_prt(isp, ISP_LOGTINFO, "%s: port %x isn't in PDB", __func__, nphdl); isp_dump_portdb(isp, 0); isp_endcmd(isp, aep, NIL_HANDLE, 0, ECMD_TERMINATE, 0); return; } } atiop->cdb_len = ATIO2_CDBLEN; ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); atiop->ccb_h.status = CAM_CDB_RECVD; atiop->tag_id = atp->tag; switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { case ATIO2_TC_ATTR_SIMPLEQ: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_SIMPLE_Q_TAG; break; case ATIO2_TC_ATTR_HEADOFQ: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_HEAD_OF_Q_TAG; break; case ATIO2_TC_ATTR_ORDERED: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_ORDERED_Q_TAG; break; case ATIO2_TC_ATTR_ACAQ: /* ?? 
*/ case ATIO2_TC_ATTR_UNTAGGED: default: atiop->tag_action = 0; break; } atp->orig_datalen = aep->at_datalen; atp->bytes_xfered = 0; atp->lun = lun; atp->nphdl = nphdl; atp->sid = PORT_ANY; atp->oxid = aep->at_oxid; atp->cdb0 = aep->at_cdb[0]; atp->tattr = aep->at_taskflags & ATIO2_TC_ATTR_MASK; atp->state = ATPD_STATE_CAM; xpt_done((union ccb *)atiop); isp_prt(isp, ISP_LOGTDEBUG0, "ATIO2[0x%x] CDB=0x%x lun %jx datalen %u", aep->at_rxid, atp->cdb0, (uintmax_t)lun, atp->orig_datalen); return; noresrc: ntp = isp_get_ntpd(isp, 0); if (ntp == NULL) { isp_endcmd(isp, aep, nphdl, 0, SCSI_STATUS_BUSY, 0); return; } memcpy(ntp->data, aep, QENTRY_LEN); STAILQ_INSERT_TAIL(&tptr->restart_queue, ntp, next); } static void isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep) { int cdbxlen; lun_id_t lun; uint16_t chan, nphdl = NIL_HANDLE; uint32_t did, sid; fcportdb_t *lp; tstate_t *tptr; struct ccb_accept_tio *atiop; atio_private_data_t *atp = NULL; atio_private_data_t *oatp; inot_private_data_t *ntp; did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2]; sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2]; lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(aep->at_cmnd.fcp_cmnd_lun)); if (ISP_CAP_MULTI_ID(isp) && isp->isp_nchan > 1) { /* Channel has to be derived from D_ID */ isp_find_chan_by_did(isp, did, &chan); if (chan == ISP_NOCHAN) { isp_prt(isp, ISP_LOGWARN, "%s: [RX_ID 0x%x] D_ID %x not found on any channel", __func__, aep->at_rxid, did); isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN, ECMD_TERMINATE, 0); return; } } else { chan = 0; } /* * Find the PDB entry for this initiator */ if (isp_find_pdb_by_portid(isp, chan, sid, &lp) == 0) { /* * If we're not in the port database terminate the exchange. */ isp_prt(isp, ISP_LOGTINFO, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x wasn't in PDB already", __func__, aep->at_rxid, did, chan, sid); isp_dump_portdb(isp, chan); isp_endcmd(isp, aep, NIL_HANDLE, chan, ECMD_TERMINATE, 0); return; } nphdl = lp->handle; /* * Get the tstate pointer */ tptr = get_lun_statep(isp, chan, lun); if (tptr == NULL) { tptr = get_lun_statep(isp, chan, CAM_LUN_WILDCARD); if (tptr == NULL) { isp_prt(isp, ISP_LOGWARN, "%s: [0x%x] no state pointer for lun %jx or wildcard", __func__, aep->at_rxid, (uintmax_t)lun); if (lun == 0) { isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0); } else { isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0); } return; } } /* * Start any commands pending resources first. */ if (isp_atio_restart(isp, chan, tptr)) goto noresrc; /* * If the f/w is out of resources, just send a BUSY status back. */ if (aep->at_rxid == AT7_NORESRC_RXID) { isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0); return; } /* * If we're out of resources, just send a BUSY status back. 
*/ atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); if (atiop == NULL) { isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atios", aep->at_rxid); goto noresrc; } oatp = isp_find_atpd(isp, chan, aep->at_rxid); if (oatp) { isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] tag wraparound in isp_handle_platforms_atio7 (N-Port Handle 0x%04x S_ID 0x%04x OX_ID 0x%04x) oatp state %d", aep->at_rxid, nphdl, sid, aep->at_hdr.ox_id, oatp->state); /* * It's not a "no resource" condition- but we can treat it like one */ goto noresrc; } atp = isp_get_atpd(isp, chan, aep->at_rxid); if (atp == NULL) { isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atps", aep->at_rxid); goto noresrc; } atp->word3 = lp->prli_word3; atp->state = ATPD_STATE_ATIO; SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, atiop->ccb_h.path, "Take FREE ATIO\n"); atiop->init_id = FC_PORTDB_TGT(isp, chan, lp); atiop->ccb_h.target_id = ISP_MAX_TARGETS(isp); atiop->ccb_h.target_lun = lun; atiop->sense_len = 0; cdbxlen = aep->at_cmnd.fcp_cmnd_alen_datadir >> FCP_CMND_ADDTL_CDBLEN_SHIFT; if (cdbxlen) { isp_prt(isp, ISP_LOGWARN, "additional CDBLEN ignored"); } cdbxlen = sizeof (aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb); ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb, cdbxlen); atiop->cdb_len = cdbxlen; atiop->ccb_h.status = CAM_CDB_RECVD; atiop->tag_id = atp->tag; switch (aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK) { case FCP_CMND_TASK_ATTR_SIMPLE: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_SIMPLE_Q_TAG; break; case FCP_CMND_TASK_ATTR_HEAD: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_HEAD_OF_Q_TAG; break; case FCP_CMND_TASK_ATTR_ORDERED: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_ORDERED_Q_TAG; break; default: /* FALLTHROUGH */ case FCP_CMND_TASK_ATTR_ACA: case FCP_CMND_TASK_ATTR_UNTAGGED: atiop->tag_action = 0; break; } atp->orig_datalen = aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl; atp->bytes_xfered = 0; atp->lun = lun; atp->nphdl = nphdl; atp->sid = sid; atp->did = did; atp->oxid = aep->at_hdr.ox_id; atp->rxid = aep->at_hdr.rx_id; atp->cdb0 = atiop->cdb_io.cdb_bytes[0]; atp->tattr = aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK; atp->state = ATPD_STATE_CAM; isp_prt(isp, ISP_LOGTDEBUG0, "ATIO7[0x%x] CDB=0x%x lun %jx datalen %u", aep->at_rxid, atp->cdb0, (uintmax_t)lun, atp->orig_datalen); xpt_done((union ccb *)atiop); return; noresrc: if (atp) isp_put_atpd(isp, chan, atp); ntp = isp_get_ntpd(isp, chan); if (ntp == NULL) { isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0); return; } memcpy(ntp->data, aep, QENTRY_LEN); STAILQ_INSERT_TAIL(&tptr->restart_queue, ntp, next); } /* * Handle starting an SRR (sequence retransmit request) * We get here when we've gotten the immediate notify * and the return of all outstanding CTIOs for this * transaction. */ static void isp_handle_srr_start(ispsoftc_t *isp, atio_private_data_t *atp) { in_fcentry_24xx_t *inot; uint32_t srr_off, ccb_off, ccb_len, ccb_end; union ccb *ccb; inot = (in_fcentry_24xx_t *)atp->srr; srr_off = inot->in_srr_reloff_lo | (inot->in_srr_reloff_hi << 16); ccb = atp->srr_ccb; atp->srr_ccb = NULL; atp->nsrr++; if (ccb == NULL) { isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] null ccb", atp->tag); goto fail; } ccb_off = ccb->ccb_h.spriv_field0; ccb_len = ccb->csio.dxfer_len; ccb_end = (ccb_off == ~0)? 
~0 : ccb_off + ccb_len; switch (inot->in_srr_iu) { case R_CTL_INFO_SOLICITED_DATA: /* * We have to restart a FCP_DATA data out transaction */ atp->sendst = 0; atp->bytes_xfered = srr_off; if (ccb_len == 0) { isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x but current CCB doesn't transfer data", atp->tag, srr_off); goto mdp; } if (srr_off < ccb_off || ccb_off > srr_off + ccb_len) { isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x not covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end); goto mdp; } isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end); break; case R_CTL_INFO_COMMAND_STATUS: isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] Got an FCP RSP SRR- resending status", atp->tag); atp->sendst = 1; /* * We have to restart a FCP_RSP IU transaction */ break; case R_CTL_INFO_DATA_DESCRIPTOR: /* * We have to restart an FCP DATA in transaction */ isp_prt(isp, ISP_LOGWARN, "Got an FCP DATA IN SRR- dropping"); goto fail; default: isp_prt(isp, ISP_LOGWARN, "Got an unknown information (%x) SRR- dropping", inot->in_srr_iu); goto fail; } /* * We can't do anything until this is acked, so we might as well start it now. * We aren't going to do the usual asynchronous ack issue because we need * to make sure this gets on the wire first. */ if (isp_notify_ack(isp, inot)) { isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose"); goto fail; } isp_target_start_ctio(isp, ccb, FROM_SRR); return; fail: inot->in_reserved = 1; isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQ_CMP_ERR; isp_complete_ctio(ccb); return; mdp: if (isp_notify_ack(isp, inot)) { isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose"); goto fail; } ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status = CAM_MESSAGE_RECV; /* * This is not a strict interpretation of MDP, but it's close */ ccb->csio.msg_ptr = &ccb->csio.sense_data.sense_buf[SSD_FULL_SIZE - 16]; ccb->csio.msg_len = 7; ccb->csio.msg_ptr[0] = MSG_EXTENDED; ccb->csio.msg_ptr[1] = 5; ccb->csio.msg_ptr[2] = 0; /* modify data pointer */ ccb->csio.msg_ptr[3] = srr_off >> 24; ccb->csio.msg_ptr[4] = srr_off >> 16; ccb->csio.msg_ptr[5] = srr_off >> 8; ccb->csio.msg_ptr[6] = srr_off; isp_complete_ctio(ccb); } static void isp_handle_platform_srr(ispsoftc_t *isp, isp_notify_t *notify) { in_fcentry_24xx_t *inot = notify->nt_lreserved; atio_private_data_t *atp; uint32_t tag = notify->nt_tagval & 0xffffffff; atp = isp_find_atpd(isp, notify->nt_channel, tag); if (atp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x in SRR Notify", __func__, tag); isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); return; } atp->srr_notify_rcvd = 1; memcpy(atp->srr, inot, sizeof (atp->srr)); isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] flags 0x%x srr_iu %x reloff 0x%x", inot->in_rxid, inot->in_flags, inot->in_srr_iu, ((uint32_t)inot->in_srr_reloff_hi << 16) | inot->in_srr_reloff_lo); if (atp->srr_ccb) isp_handle_srr_start(isp, atp); } static void isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) { union ccb *ccb; int sentstatus = 0, ok = 0, notify_cam = 0, failure = 0; atio_private_data_t *atp = NULL; int bus; uint32_t handle, data_requested, resid; handle = ((ct2_entry_t *)arg)->ct_syshandle; ccb = isp_find_xs(isp, handle); if (ccb == NULL) { isp_print_bytes(isp, "null ccb in isp_handle_platform_ctio", QENTRY_LEN, arg); return; } isp_destroy_handle(isp, handle); resid = 
data_requested = PISP_PCMD(ccb)->datalen; isp_free_pcmd(isp, ccb); bus = XS_CHANNEL(ccb); if (IS_24XX(isp)) { atp = isp_find_atpd(isp, bus, ((ct7_entry_t *)arg)->ct_rxid); } else { atp = isp_find_atpd(isp, bus, ((ct2_entry_t *)arg)->ct_rxid); } if (atp == NULL) { /* * XXX: isp_clear_commands() generates fake CTIO with zero * ct_rxid value, filling only ct_syshandle. Workaround * that using tag_id from the CCB, pointed by ct_syshandle. */ atp = isp_find_atpd(isp, bus, ccb->csio.tag_id); } if (atp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x after I/O", __func__, ccb->csio.tag_id); return; } KASSERT((atp->ctcnt > 0), ("ctio count not greater than zero")); atp->bytes_in_transit -= data_requested; atp->ctcnt -= 1; ccb->ccb_h.status &= ~CAM_STATUS_MASK; if (IS_24XX(isp)) { ct7_entry_t *ct = arg; if (ct->ct_nphdl == CT7_SRR) { atp->srr_ccb = ccb; if (atp->srr_notify_rcvd) isp_handle_srr_start(isp, atp); return; } if (ct->ct_nphdl == CT_HBA_RESET) { sentstatus = (ccb->ccb_h.flags & CAM_SEND_STATUS) && (atp->sendst == 0); failure = CAM_UNREC_HBA_ERROR; } else { sentstatus = ct->ct_flags & CT7_SENDSTATUS; ok = (ct->ct_nphdl == CT7_OK); notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0; if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA) resid = ct->ct_resid; } isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO7[%x] seq %u nc %d sts 0x%x flg 0x%x sns %d resid %d %s", __func__, ct->ct_rxid, ATPD_GET_SEQNO(ct), notify_cam, ct->ct_nphdl, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID"); } else { ct2_entry_t *ct = arg; if (ct->ct_status == CT_SRR) { atp->srr_ccb = ccb; if (atp->srr_notify_rcvd) isp_handle_srr_start(isp, atp); isp_target_putback_atio(ccb); return; } if (ct->ct_status == CT_HBA_RESET) { sentstatus = (ccb->ccb_h.flags & CAM_SEND_STATUS) && (atp->sendst == 0); failure = CAM_UNREC_HBA_ERROR; } else { sentstatus = ct->ct_flags & CT2_SENDSTATUS; ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0; if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) resid = ct->ct_resid; } isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO2[%x] seq %u nc %d sts 0x%x flg 0x%x sns %d resid %d %s", __func__, ct->ct_rxid, ATPD_GET_SEQNO(ct), notify_cam, ct->ct_status, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID"); } if (ok) { if (data_requested > 0) { atp->bytes_xfered += data_requested - resid; ccb->csio.resid = ccb->csio.dxfer_len - (data_requested - resid); } if (sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) ccb->ccb_h.status |= CAM_SENT_SENSE; ccb->ccb_h.status |= CAM_REQ_CMP; } else { notify_cam = 1; if (failure == CAM_UNREC_HBA_ERROR) ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR; else ccb->ccb_h.status |= CAM_REQ_CMP_ERR; } atp->state = ATPD_STATE_PDON; /* * We never *not* notify CAM when there has been any error (ok == 0), * so we never need to do an ATIO putback if we're not notifying CAM. */ isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done (ok=%d nc=%d nowsendstatus=%d ccb ss=%d)", (sentstatus)? " FINAL " : "MIDTERM ", atp->tag, ok, notify_cam, atp->sendst, (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0); if (notify_cam == 0) { if (atp->sendst) { isp_target_start_ctio(isp, ccb, FROM_CTIO_DONE); } return; } /* * We are done with this ATIO if we successfully sent status. * In all other cases expect either another CTIO or XPT_ABORT. 
*/ if (ok && sentstatus) isp_put_atpd(isp, bus, atp); /* * We're telling CAM we're done with this CTIO transaction. * * 24XX cards never need an ATIO put back. * * Other cards need one put back only on error. * In the latter case, a timeout will re-fire * and try again in case we didn't have * queue resources to do so at first. In any case, * once the putback is done we do the completion * call. */ if (ok || IS_24XX(isp)) { isp_complete_ctio(ccb); } else { isp_target_putback_atio(ccb); } } static int isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp, uint32_t rsp) { if (isp->isp_state != ISP_RUNSTATE) { isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) acked- h/w not ready (dropping)", mp->nt_ncode, mp->nt_lreserved != NULL); return (0); } /* * This case is for a Task Management Function, which shows up as an ATIO7 entry. */ if (IS_24XX(isp) && mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ATIO) { ct7_entry_t local, *cto = &local; at7_entry_t *aep = (at7_entry_t *)mp->nt_lreserved; fcportdb_t *lp; uint32_t sid; uint16_t nphdl; sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2]; if (isp_find_pdb_by_portid(isp, mp->nt_channel, sid, &lp)) { nphdl = lp->handle; } else { nphdl = NIL_HANDLE; } ISP_MEMZERO(&local, sizeof (local)); cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; cto->ct_header.rqs_entry_count = 1; cto->ct_nphdl = nphdl; cto->ct_rxid = aep->at_rxid; cto->ct_vpidx = mp->nt_channel; cto->ct_iid_lo = sid; cto->ct_iid_hi = sid >> 16; cto->ct_oxid = aep->at_hdr.ox_id; cto->ct_flags = CT7_SENDSTATUS|CT7_NOACK|CT7_NO_DATA|CT7_FLAG_MODE1; cto->ct_flags |= (aep->at_ta_len >> 12) << CT7_TASK_ATTR_SHIFT; if (rsp != 0) { cto->ct_scsi_status |= (FCP_RSPLEN_VALID << 8); cto->rsp.m1.ct_resplen = 4; ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp)); cto->rsp.m1.ct_resp[0] = rsp & 0xff; cto->rsp.m1.ct_resp[1] = (rsp >> 8) & 0xff; cto->rsp.m1.ct_resp[2] = (rsp >> 16) & 0xff; cto->rsp.m1.ct_resp[3] = (rsp >> 24) & 0xff; } return (isp_target_put_entry(isp, &local)); } /* * This case is for a responding to an ABTS frame */ if (IS_24XX(isp) && mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) { /* * Overload nt_need_ack here to mark whether we've terminated the associated command. 
*/ if (mp->nt_need_ack) { uint8_t storage[QENTRY_LEN]; ct7_entry_t *cto = (ct7_entry_t *) storage; abts_t *abts = (abts_t *)mp->nt_lreserved; ISP_MEMZERO(cto, sizeof (ct7_entry_t)); isp_prt(isp, ISP_LOGTDEBUG0, "%s: [%x] terminating after ABTS received", __func__, abts->abts_rxid_task); cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; cto->ct_header.rqs_entry_count = 1; cto->ct_nphdl = mp->nt_nphdl; cto->ct_rxid = abts->abts_rxid_task; cto->ct_iid_lo = mp->nt_sid; cto->ct_iid_hi = mp->nt_sid >> 16; cto->ct_oxid = abts->abts_ox_id; cto->ct_vpidx = mp->nt_channel; cto->ct_flags = CT7_NOACK|CT7_TERMINATE; if (isp_target_put_entry(isp, cto)) { return (ENOMEM); } mp->nt_need_ack = 0; } if (isp_acknak_abts(isp, mp->nt_lreserved, 0) == ENOMEM) { return (ENOMEM); } else { return (0); } } /* * Handle logout cases here */ if (mp->nt_ncode == NT_GLOBAL_LOGOUT) { isp_del_all_wwn_entries(isp, mp->nt_channel); } if (mp->nt_ncode == NT_LOGOUT) { if (!IS_2100(isp) && IS_FC(isp)) { isp_del_wwn_entries(isp, mp); } } /* * General purpose acknowledgement */ if (mp->nt_need_ack) { isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) being acked", mp->nt_ncode, mp->nt_lreserved != NULL); /* * Don't need to use the guaranteed send because the caller can retry */ return (isp_notify_ack(isp, mp->nt_lreserved)); } return (0); } /* * Handle task management functions. * * We show up here with a notify structure filled out. * * The nt_lreserved tag points to the original queue entry */ static void isp_handle_platform_target_tmf(ispsoftc_t *isp, isp_notify_t *notify) { tstate_t *tptr; fcportdb_t *lp; struct ccb_immediate_notify *inot; inot_private_data_t *ntp = NULL; atio_private_data_t *atp; lun_id_t lun; isp_prt(isp, ISP_LOGTDEBUG0, "%s: code 0x%x sid 0x%x tagval 0x%016llx chan %d lun %jx", __func__, notify->nt_ncode, notify->nt_sid, (unsigned long long) notify->nt_tagval, notify->nt_channel, notify->nt_lun); if (notify->nt_lun == LUN_ANY) { if (notify->nt_tagval == TAG_ANY) { lun = CAM_LUN_WILDCARD; } else { atp = isp_find_atpd(isp, notify->nt_channel, notify->nt_tagval & 0xffffffff); lun = atp ? 
atp->lun : CAM_LUN_WILDCARD; } } else { lun = notify->nt_lun; } tptr = get_lun_statep(isp, notify->nt_channel, lun); if (tptr == NULL) { tptr = get_lun_statep(isp, notify->nt_channel, CAM_LUN_WILDCARD); if (tptr == NULL) { isp_prt(isp, ISP_LOGWARN, "%s: no state pointer found for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun); goto bad; } } inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots); if (inot == NULL) { isp_prt(isp, ISP_LOGWARN, "%s: out of immediate notify structures for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun); goto bad; } inot->ccb_h.target_id = ISP_MAX_TARGETS(isp); inot->ccb_h.target_lun = lun; if (isp_find_pdb_by_portid(isp, notify->nt_channel, notify->nt_sid, &lp) == 0 && isp_find_pdb_by_handle(isp, notify->nt_channel, notify->nt_nphdl, &lp) == 0) { inot->initiator_id = CAM_TARGET_WILDCARD; } else { inot->initiator_id = FC_PORTDB_TGT(isp, notify->nt_channel, lp); } inot->seq_id = notify->nt_tagval; inot->tag_id = notify->nt_tagval >> 32; switch (notify->nt_ncode) { case NT_ABORT_TASK: isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, inot->tag_id); inot->arg = MSG_ABORT_TASK; break; case NT_ABORT_TASK_SET: isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, TAG_ANY); inot->arg = MSG_ABORT_TASK_SET; break; case NT_CLEAR_ACA: inot->arg = MSG_CLEAR_ACA; break; case NT_CLEAR_TASK_SET: inot->arg = MSG_CLEAR_TASK_SET; break; case NT_LUN_RESET: inot->arg = MSG_LOGICAL_UNIT_RESET; break; case NT_TARGET_RESET: inot->arg = MSG_TARGET_RESET; break; case NT_QUERY_TASK_SET: inot->arg = MSG_QUERY_TASK_SET; break; case NT_QUERY_ASYNC_EVENT: inot->arg = MSG_QUERY_ASYNC_EVENT; break; default: isp_prt(isp, ISP_LOGWARN, "%s: unknown TMF code 0x%x for chan %d lun %#jx", __func__, notify->nt_ncode, notify->nt_channel, (uintmax_t)lun); goto bad; } ntp = isp_get_ntpd(isp, notify->nt_channel); if (ntp == NULL) { isp_prt(isp, ISP_LOGWARN, "%s: out of inotify private structures", __func__); goto bad; } ISP_MEMCPY(&ntp->nt, notify, sizeof (isp_notify_t)); if (notify->nt_lreserved) { ISP_MEMCPY(&ntp->data, notify->nt_lreserved, QENTRY_LEN); ntp->nt.nt_lreserved = &ntp->data; } ntp->seq_id = notify->nt_tagval; ntp->tag_id = notify->nt_tagval >> 32; SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, inot->ccb_h.path, "Take FREE INOT\n"); inot->ccb_h.status = CAM_MESSAGE_RECV; xpt_done((union ccb *)inot); return; bad: if (notify->nt_need_ack) { if (((isphdr_t *)notify->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) { if (isp_acknak_abts(isp, notify->nt_lreserved, ENOMEM)) { isp_prt(isp, ISP_LOGWARN, "you lose- unable to send an ACKNAK"); } } else { isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, notify->nt_lreserved); } } } static void isp_target_mark_aborted_early(ispsoftc_t *isp, int chan, tstate_t *tptr, uint32_t tag_id) { atio_private_data_t *atp, *atpool; inot_private_data_t *ntp, *tmp; uint32_t this_tag_id; /* * First, clean any commands pending restart */ STAILQ_FOREACH_SAFE(ntp, &tptr->restart_queue, next, tmp) { if (IS_24XX(isp)) this_tag_id = ((at7_entry_t *)ntp->data)->at_rxid; else this_tag_id = ((at2_entry_t *)ntp->data)->at_rxid; if ((uint64_t)tag_id == TAG_ANY || tag_id == this_tag_id) { isp_endcmd(isp, ntp->data, NIL_HANDLE, chan, ECMD_TERMINATE, 0); isp_put_ntpd(isp, chan, ntp); STAILQ_REMOVE(&tptr->restart_queue, ntp, inot_private_data, next); } } /* * Now mark other ones dead as well. 
*/ ISP_GET_PC(isp, chan, atpool, atpool); for (atp = atpool; atp < &atpool[ATPDPSIZE]; atp++) { if (atp->lun != tptr->ts_lun) continue; if ((uint64_t)tag_id == TAG_ANY || atp->tag == tag_id) atp->dead = 1; } } #endif static void isp_cam_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg) { struct cam_sim *sim; int bus, tgt; ispsoftc_t *isp; sim = (struct cam_sim *)cbarg; isp = (ispsoftc_t *) cam_sim_softc(sim); bus = cam_sim_bus(sim); tgt = xpt_path_target_id(path); switch (code) { case AC_LOST_DEVICE: if (IS_SCSI(isp)) { uint16_t oflags, nflags; sdparam *sdp = SDPARAM(isp, bus); if (tgt >= 0) { nflags = sdp->isp_devparam[tgt].nvrm_flags; nflags &= DPARM_SAFE_DFLT; if (isp->isp_loaded_fw) { nflags |= DPARM_NARROW | DPARM_ASYNC; } oflags = sdp->isp_devparam[tgt].goal_flags; sdp->isp_devparam[tgt].goal_flags = nflags; sdp->isp_devparam[tgt].dev_update = 1; sdp->update = 1; (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, bus); sdp->isp_devparam[tgt].goal_flags = oflags; } } break; default: isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code); break; } } static void isp_poll(struct cam_sim *sim) { ispsoftc_t *isp = cam_sim_softc(sim); ISP_RUN_ISR(isp); } static void isp_watchdog(void *arg) { struct ccb_scsiio *xs = arg; ispsoftc_t *isp; uint32_t ohandle = ISP_HANDLE_FREE, handle; isp = XS_ISP(xs); handle = isp_find_handle(isp, xs); /* * Hand crank the interrupt code just to be sure the command isn't stuck somewhere. */ if (handle != ISP_HANDLE_FREE) { ISP_RUN_ISR(isp); ohandle = handle; handle = isp_find_handle(isp, xs); } if (handle != ISP_HANDLE_FREE) { /* * Try and make sure the command is really dead before * we release the handle (and DMA resources) for reuse. * * If we are successful in aborting the command then * we're done here because we'll get the command returned * back separately. */ if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) { return; } /* * Note that after calling the above, the command may in * fact have been completed. */ xs = isp_find_xs(isp, handle); /* * If the command no longer exists, then we won't * be able to find the xs again with this handle. */ if (xs == NULL) { return; } /* * After this point, the command is really dead. */ if (XS_XFRLEN(xs)) { ISP_DMAFREE(isp, xs, handle); } isp_destroy_handle(isp, handle); isp_prt(isp, ISP_LOGERR, "%s: timeout for handle 0x%x", __func__, handle); XS_SETERR(xs, CAM_CMD_TIMEOUT); isp_done(xs); } else { if (ohandle != ISP_HANDLE_FREE) { isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle 0x%x, recovered during interrupt", __func__, ohandle); } else { isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle already free", __func__); } } } static void isp_make_here(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt) { union ccb *ccb; struct isp_fc *fc = ISP_FC_PC(isp, chan); /* * Allocate a CCB, create a wildcard path for this target and schedule a rescan. 
*/ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { isp_prt(isp, ISP_LOGWARN, "Chan %d unable to alloc CCB for rescan", chan); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan"); xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } static void isp_make_gone(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt) { struct cam_path *tp; struct isp_fc *fc = ISP_FC_PC(isp, chan); if (xpt_create_path(&tp, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) == CAM_REQ_CMP) { xpt_async(AC_LOST_DEVICE, tp, NULL); xpt_free_path(tp); } } /* * Gone Device Timer Function- when we have decided that a device has gone * away, we wait a specific period of time prior to telling the OS it has * gone away. * * This timer function fires once a second and then scans the port database * for devices that are marked dead but still have a virtual target assigned. * We decrement a counter for that port database entry, and when it hits zero, * we tell the OS the device has gone away. */ static void isp_gdt(void *arg) { struct isp_fc *fc = arg; taskqueue_enqueue(taskqueue_thread, &fc->gtask); } static void isp_gdt_task(void *arg, int pending) { struct isp_fc *fc = arg; ispsoftc_t *isp = fc->isp; int chan = fc - isp->isp_osinfo.pc.fc; fcportdb_t *lp; struct ac_contract ac; struct ac_device_changed *adc; int dbidx, more_to_do = 0; ISP_LOCK(isp); isp_prt(isp, ISP_LOGDEBUG0, "Chan %d GDT timer expired", chan); for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { lp = &FCPARAM(isp, chan)->portdb[dbidx]; if (lp->state != FC_PORTDB_STATE_ZOMBIE) { continue; } if (lp->gone_timer != 0) { lp->gone_timer -= 1; more_to_do++; continue; } isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Gone Device Timeout"); if (lp->is_target) { lp->is_target = 0; isp_make_gone(isp, lp, chan, dbidx); } if (lp->is_initiator) { lp->is_initiator = 0; ac.contract_number = AC_CONTRACT_DEV_CHG; adc = (struct ac_device_changed *) ac.contract_data; adc->wwpn = lp->port_wwn; adc->port = lp->portid; adc->target = dbidx; adc->arrived = 0; xpt_async(AC_CONTRACT, fc->path, &ac); } lp->state = FC_PORTDB_STATE_NIL; } if (fc->ready) { if (more_to_do) { callout_reset(&fc->gdt, hz, isp_gdt, fc); } else { callout_deactivate(&fc->gdt); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Stopping Gone Device Timer @ %lu", chan, (unsigned long) time_uptime); } } ISP_UNLOCK(isp); } /* * When loop goes down we remember the time and freeze CAM command queue. * During some time period we are trying to reprobe the loop. But if we * fail, we tell the OS that devices have gone away and drop the freeze. * * We don't clear the devices out of our port database because, when loop * come back up, we have to do some actual cleanup with the chip at that * point (implicit PLOGO, e.g., to get the chip's port database state right). 
*/ static void isp_loop_changed(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); struct isp_fc *fc = ISP_FC_PC(isp, chan); if (fc->loop_down_time) return; isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop changed", chan); if (fcp->role & ISP_ROLE_INITIATOR) isp_freeze_loopdown(isp, chan); fc->loop_down_time = time_uptime; wakeup(fc); } static void isp_loop_up(ispsoftc_t *isp, int chan) { struct isp_fc *fc = ISP_FC_PC(isp, chan); isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is up", chan); fc->loop_seen_once = 1; fc->loop_down_time = 0; isp_unfreeze_loopdown(isp, chan); } static void isp_loop_dead(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); struct isp_fc *fc = ISP_FC_PC(isp, chan); fcportdb_t *lp; struct ac_contract ac; struct ac_device_changed *adc; int dbidx, i; isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is dead", chan); /* * Notify to the OS all targets who we now consider have departed. */ for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { lp = &fcp->portdb[dbidx]; if (lp->state == FC_PORTDB_STATE_NIL) continue; for (i = 0; i < isp->isp_maxcmds; i++) { struct ccb_scsiio *xs; if (ISP_H2HT(isp->isp_xflist[i].handle) != ISP_HANDLE_INITIATOR) { continue; } if ((xs = isp->isp_xflist[i].cmd) == NULL) { continue; } if (dbidx != XS_TGT(xs)) { continue; } isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx orphaned by loop down timeout", isp->isp_xflist[i].handle, chan, XS_TGT(xs), (uintmax_t)XS_LUN(xs)); /* * Just like in isp_watchdog, abort the outstanding * command or immediately free its resources if it is * not active */ if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) { continue; } if (XS_XFRLEN(xs)) { ISP_DMAFREE(isp, xs, isp->isp_xflist[i].handle); } isp_destroy_handle(isp, isp->isp_xflist[i].handle); isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx could not be aborted and was destroyed", isp->isp_xflist[i].handle, chan, XS_TGT(xs), (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_BUSRESET); isp_done(xs); } isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Loop Down Timeout"); if (lp->is_target) { lp->is_target = 0; isp_make_gone(isp, lp, chan, dbidx); } if (lp->is_initiator) { lp->is_initiator = 0; ac.contract_number = AC_CONTRACT_DEV_CHG; adc = (struct ac_device_changed *) ac.contract_data; adc->wwpn = lp->port_wwn; adc->port = lp->portid; adc->target = dbidx; adc->arrived = 0; xpt_async(AC_CONTRACT, fc->path, &ac); } } isp_unfreeze_loopdown(isp, chan); fc->loop_down_time = 0; } static void isp_kthread(void *arg) { struct isp_fc *fc = arg; ispsoftc_t *isp = fc->isp; int chan = fc - isp->isp_osinfo.pc.fc; int slp = 0, d; int lb, lim; ISP_LOCK(isp); while (isp->isp_osinfo.is_exiting == 0) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Checking FC state", chan); lb = isp_fc_runstate(isp, chan, 250000); isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d FC got to %s state", chan, isp_fc_loop_statename(lb)); /* * Our action is different based upon whether we're supporting * Initiator mode or not. If we are, we might freeze the simq * when loop is down and set all sorts of different delays to * check again. * * If not, we simply just wait for loop to come up. */ if (lb == LOOP_READY || lb < 0) { slp = 0; } else { /* * If we've never seen loop up and we've waited longer * than quickboot time, or we've seen loop up but we've * waited longer than loop_down_limit, give up and go * to sleep until loop comes up. 
*/ if (fc->loop_seen_once == 0) lim = isp_quickboot_time; else lim = fc->loop_down_limit; d = time_uptime - fc->loop_down_time; if (d >= lim) slp = 0; else if (d < 10) slp = 1; else if (d < 30) slp = 5; else if (d < 60) slp = 10; else if (d < 120) slp = 20; else slp = 30; } if (slp == 0) { if (lb == LOOP_READY) isp_loop_up(isp, chan); else isp_loop_dead(isp, chan); } isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d sleep for %d seconds", chan, slp); msleep(fc, &isp->isp_lock, PRIBIO, "ispf", slp * hz); } fc->num_threads -= 1; ISP_UNLOCK(isp); kthread_exit(); } #ifdef ISP_TARGET_MODE static void isp_abort_atio(ispsoftc_t *isp, union ccb *ccb) { atio_private_data_t *atp; union ccb *accb = ccb->cab.abort_ccb; struct ccb_hdr *sccb; tstate_t *tptr; tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb)); if (tptr != NULL) { /* Search for the ATIO among queueued. */ SLIST_FOREACH(sccb, &tptr->atios, sim_links.sle) { if (sccb != &accb->ccb_h) continue; SLIST_REMOVE(&tptr->atios, sccb, ccb_hdr, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path, "Abort FREE ATIO\n"); accb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(accb); ccb->ccb_h.status = CAM_REQ_CMP; return; } } /* Search for the ATIO among running. */ atp = isp_find_atpd(isp, XS_CHANNEL(accb), accb->atio.tag_id); if (atp != NULL) { /* Send TERMINATE to firmware. */ if (!atp->dead && IS_24XX(isp)) { uint8_t storage[QENTRY_LEN]; ct7_entry_t *cto = (ct7_entry_t *) storage; ISP_MEMZERO(cto, sizeof (ct7_entry_t)); cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; cto->ct_header.rqs_entry_count = 1; cto->ct_nphdl = atp->nphdl; cto->ct_rxid = atp->tag; cto->ct_iid_lo = atp->sid; cto->ct_iid_hi = atp->sid >> 16; cto->ct_oxid = atp->oxid; cto->ct_vpidx = XS_CHANNEL(accb); cto->ct_flags = CT7_NOACK|CT7_TERMINATE; isp_target_put_entry(isp, cto); } isp_put_atpd(isp, XS_CHANNEL(accb), atp); ccb->ccb_h.status = CAM_REQ_CMP; } else { ccb->ccb_h.status = CAM_UA_ABORT; } } static void isp_abort_inot(ispsoftc_t *isp, union ccb *ccb) { inot_private_data_t *ntp; union ccb *accb = ccb->cab.abort_ccb; struct ccb_hdr *sccb; tstate_t *tptr; tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb)); if (tptr != NULL) { /* Search for the INOT among queueued. */ SLIST_FOREACH(sccb, &tptr->inots, sim_links.sle) { if (sccb != &accb->ccb_h) continue; SLIST_REMOVE(&tptr->inots, sccb, ccb_hdr, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path, "Abort FREE INOT\n"); accb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(accb); ccb->ccb_h.status = CAM_REQ_CMP; return; } } /* Search for the INOT among running. */ ntp = isp_find_ntpd(isp, XS_CHANNEL(accb), accb->cin1.tag_id, accb->cin1.seq_id); if (ntp != NULL) { if (ntp->nt.nt_need_ack) { isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, ntp->nt.nt_lreserved); } isp_put_ntpd(isp, XS_CHANNEL(accb), ntp); ccb->ccb_h.status = CAM_REQ_CMP; } else { ccb->ccb_h.status = CAM_UA_ABORT; return; } } #endif static void isp_action(struct cam_sim *sim, union ccb *ccb) { int bus, tgt, error; ispsoftc_t *isp; struct ccb_trans_settings *cts; sbintime_t ts; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); isp = (ispsoftc_t *)cam_sim_softc(sim); ISP_ASSERT_LOCKED(isp); bus = cam_sim_bus(sim); isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); ISP_PCMD(ccb) = NULL; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ /* * Do a couple of preliminary checks... 
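 * (As the code below shows, this amounts to rejecting CDB pointers
 * that are physical addresses and, in DIAGNOSTIC kernels, failing
 * requests whose target or LUN is out of range for this adapter.)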
*/ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { ccb->ccb_h.status = CAM_REQ_INVALID; isp_done((struct ccb_scsiio *) ccb); break; } } ccb->csio.req_map = NULL; #ifdef DIAGNOSTIC if (ccb->ccb_h.target_id >= ISP_MAX_TARGETS(isp)) { xpt_print(ccb->ccb_h.path, "invalid target\n"); ccb->ccb_h.status = CAM_PATH_INVALID; } else if (ISP_MAX_LUNS(isp) > 0 && ccb->ccb_h.target_lun >= ISP_MAX_LUNS(isp)) { xpt_print(ccb->ccb_h.path, "invalid lun\n"); ccb->ccb_h.status = CAM_PATH_INVALID; } if (ccb->ccb_h.status == CAM_PATH_INVALID) { xpt_done(ccb); break; } #endif ccb->csio.scsi_status = SCSI_STATUS_OK; if (isp_get_pcmd(isp, ccb)) { isp_prt(isp, ISP_LOGWARN, "out of PCMDs"); cam_freeze_devq(ccb->ccb_h.path); cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); break; } error = isp_start((XS_T *) ccb); switch (error) { case CMD_QUEUED: ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) break; /* Give firmware extra 10s to handle timeout. */ ts = SBT_1MS * ccb->ccb_h.timeout + 10 * SBT_1S; callout_reset_sbt(&PISP_PCMD(ccb)->wdog, ts, 0, isp_watchdog, ccb, 0); break; case CMD_RQLATER: isp_prt(isp, ISP_LOGDEBUG0, "%d.%jx retry later", XS_TGT(ccb), (uintmax_t)XS_LUN(ccb)); cam_freeze_devq(ccb->ccb_h.path); cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); ccb->ccb_h.status = CAM_REQUEUE_REQ; isp_free_pcmd(isp, ccb); xpt_done(ccb); break; case CMD_EAGAIN: isp_free_pcmd(isp, ccb); cam_freeze_devq(ccb->ccb_h.path); cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 100, 0); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); break; case CMD_COMPLETE: isp_done((struct ccb_scsiio *) ccb); break; default: isp_prt(isp, ISP_LOGERR, "What's this? 
0x%x at %d in file %s", error, __LINE__, __FILE__); ccb->ccb_h.status = CAM_REQUEUE_REQ; isp_free_pcmd(isp, ccb); xpt_done(ccb); } break; #ifdef ISP_TARGET_MODE case XPT_EN_LUN: /* Enable/Disable LUN as a target */ if (ccb->cel.enable) { isp_enable_lun(isp, ccb); } else { isp_disable_lun(isp, ccb); } break; case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */ case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ { tstate_t *tptr = get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); if (tptr == NULL) { const char *str; if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) str = "XPT_IMMEDIATE_NOTIFY"; else str = "XPT_ACCEPT_TARGET_IO"; ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: no state pointer found for %s\n", __func__, str); ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); break; } ccb->ccb_h.spriv_field0 = 0; ccb->ccb_h.spriv_ptr1 = isp; if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { ccb->atio.tag_id = 0; SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path, "Put FREE ATIO\n"); } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { ccb->cin1.seq_id = ccb->cin1.tag_id = 0; SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path, "Put FREE INOT\n"); } ccb->ccb_h.status = CAM_REQ_INPROG; break; } case XPT_NOTIFY_ACKNOWLEDGE: /* notify ack */ { inot_private_data_t *ntp; /* * XXX: Because we cannot guarantee that the path information in the notify acknowledge ccb * XXX: matches that for the immediate notify, we have to *search* for the notify structure */ /* * All the relevant path information is in the associated immediate notify */ ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] NOTIFY ACKNOWLEDGE for 0x%x seen\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id); ntp = isp_find_ntpd(isp, XS_CHANNEL(ccb), ccb->cna2.tag_id, ccb->cna2.seq_id); if (ntp == NULL) { ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] XPT_NOTIFY_ACKNOWLEDGE of 0x%x cannot find ntp private data\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id); ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); break; } if (isp_handle_platform_target_notify_ack(isp, &ntp->nt, (ccb->ccb_h.flags & CAM_SEND_STATUS) ? ccb->cna2.arg : 0)) { cam_freeze_devq(ccb->ccb_h.path); cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQUEUE_REQ; break; } isp_put_ntpd(isp, XS_CHANNEL(ccb), ntp); ccb->ccb_h.status = CAM_REQ_CMP; ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] calling xpt_done for tag 0x%x\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id); xpt_done(ccb); break; } case XPT_CONT_TARGET_IO: isp_target_start_ctio(isp, ccb, FROM_CAM); break; #endif case XPT_RESET_DEV: /* BDR the specified SCSI device */ tgt = ccb->ccb_h.target_id; tgt |= (bus << 16); error = isp_control(isp, ISPCTL_RESET_DEV, bus, tgt); if (error) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; } else { /* * If we have a FC device, reset the Command * Reference Number, because the target will expect * that we re-start the CRN at 1 after a reset. 
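 * (Concretely, isp_fcp_reset_crn() just zeroes the per-nexus crnseed,
 * so the next isp_fcp_next_crn() issued to that target hands out 1
 * again.)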
*/ if (IS_FC(isp)) isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); ccb->ccb_h.status = CAM_REQ_CMP; } xpt_done(ccb); break; case XPT_ABORT: /* Abort the specified CCB */ { union ccb *accb = ccb->cab.abort_ccb; switch (accb->ccb_h.func_code) { #ifdef ISP_TARGET_MODE case XPT_ACCEPT_TARGET_IO: isp_abort_atio(isp, ccb); break; case XPT_IMMEDIATE_NOTIFY: isp_abort_inot(isp, ccb); break; #endif case XPT_SCSI_IO: error = isp_control(isp, ISPCTL_ABORT_CMD, accb); if (error) { ccb->ccb_h.status = CAM_UA_ABORT; } else { ccb->ccb_h.status = CAM_REQ_CMP; } break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } /* * This is not a queued CCB, so the caller expects it to be * complete when control is returned. */ break; } #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ cts = &ccb->cts; if (!IS_CURRENT_SETTINGS(cts)) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } tgt = cts->ccb_h.target_id; if (IS_SCSI(isp)) { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; sdparam *sdp = SDPARAM(isp, bus); uint16_t *dptr; if (spi->valid == 0 && scsi->valid == 0) { ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } /* * We always update (internally) from goal_flags * so any request to change settings just gets * vectored to that location. */ dptr = &sdp->isp_devparam[tgt].goal_flags; if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) *dptr |= DPARM_DISC; else *dptr &= ~DPARM_DISC; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) *dptr |= DPARM_TQING; else *dptr &= ~DPARM_TQING; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) *dptr |= DPARM_WIDE; else *dptr &= ~DPARM_WIDE; } /* * XXX: FIX ME */ if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && (spi->valid & CTS_SPI_VALID_SYNC_RATE) && (spi->sync_period && spi->sync_offset)) { *dptr |= DPARM_SYNC; /* * XXX: CHECK FOR LEGALITY */ sdp->isp_devparam[tgt].goal_period = spi->sync_period; sdp->isp_devparam[tgt].goal_offset = spi->sync_offset; } else { *dptr &= ~DPARM_SYNC; } isp_prt(isp, ISP_LOGDEBUG0, "SET (%d.%d.%jx) to flags %x off %x per %x", bus, tgt, (uintmax_t)cts->ccb_h.target_lun, sdp->isp_devparam[tgt].goal_flags, sdp->isp_devparam[tgt].goal_offset, sdp->isp_devparam[tgt].goal_period); sdp->isp_devparam[tgt].dev_update = 1; sdp->update = 1; } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: cts = &ccb->cts; tgt = cts->ccb_h.target_id; if (IS_FC(isp)) { fcparam *fcp = FCPARAM(isp, bus); struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_FC; cts->transport_version = 0; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; fc->valid = CTS_FC_VALID_SPEED; fc->bitrate = 100000; fc->bitrate *= fcp->isp_gbspeed; if (tgt < MAX_FC_TARG) { fcportdb_t *lp = &fcp->portdb[tgt]; fc->wwnn = lp->node_wwn; fc->wwpn = lp->port_wwn; fc->port = lp->portid; fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; } } else { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; sdparam *sdp = SDPARAM(isp, bus); uint16_t dval, pval, oval; if (IS_CURRENT_SETTINGS(cts)) { 
sdp->isp_devparam[tgt].dev_refresh = 1; sdp->update = 1; (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, bus); dval = sdp->isp_devparam[tgt].actv_flags; oval = sdp->isp_devparam[tgt].actv_offset; pval = sdp->isp_devparam[tgt].actv_period; } else { dval = sdp->isp_devparam[tgt].nvrm_flags; oval = sdp->isp_devparam[tgt].nvrm_offset; pval = sdp->isp_devparam[tgt].nvrm_period; } cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->valid = 0; scsi->valid = 0; spi->flags = 0; scsi->flags = 0; if (dval & DPARM_DISC) { spi->flags |= CTS_SPI_FLAGS_DISC_ENB; } if ((dval & DPARM_SYNC) && oval && pval) { spi->sync_offset = oval; spi->sync_period = pval; } else { spi->sync_offset = 0; spi->sync_period = 0; } spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->valid |= CTS_SPI_VALID_BUS_WIDTH; if (dval & DPARM_WIDE) { spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else { spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; if (dval & DPARM_TQING) { scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } spi->valid |= CTS_SPI_VALID_DISC; } isp_prt(isp, ISP_LOGDEBUG0, "GET %s (%d.%d.%jx) to flags %x off %x per %x", IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM", bus, tgt, (uintmax_t)cts->ccb_h.target_lun, dval, oval, pval); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); break; case XPT_RESET_BUS: /* Reset the specified bus */ error = isp_control(isp, ISPCTL_RESET_BUS, bus); if (error) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); break; } if (bootverbose) { xpt_print(ccb->ccb_h.path, "reset bus on channel %d\n", bus); } if (IS_FC(isp)) { xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, 0); } else { xpt_async(AC_BUS_RESET, ISP_SPI_PC(isp, bus)->path, 0); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_TERM_IO: /* Terminate the I/O process */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_SIM_KNOB: /* Set SIM knobs */ { struct ccb_sim_knob *kp = &ccb->knob; fcparam *fcp; if (!IS_FC(isp)) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } fcp = FCPARAM(isp, bus); if (kp->xport_specific.fc.valid & KNOB_VALID_ADDRESS) { fcp->isp_wwnn = ISP_FC_PC(isp, bus)->def_wwnn = kp->xport_specific.fc.wwnn; fcp->isp_wwpn = ISP_FC_PC(isp, bus)->def_wwpn = kp->xport_specific.fc.wwpn; isp_prt(isp, ISP_LOGALL, "Setting Channel %d wwns to 0x%jx 0x%jx", bus, fcp->isp_wwnn, fcp->isp_wwpn); } ccb->ccb_h.status = CAM_REQ_CMP; if (kp->xport_specific.fc.valid & KNOB_VALID_ROLE) { int rchange = 0; int newrole = 0; switch (kp->xport_specific.fc.role) { case KNOB_ROLE_NONE: if (fcp->role != ISP_ROLE_NONE) { rchange = 1; newrole = ISP_ROLE_NONE; } break; case KNOB_ROLE_TARGET: if (fcp->role != ISP_ROLE_TARGET) { rchange = 1; newrole = ISP_ROLE_TARGET; } break; case KNOB_ROLE_INITIATOR: if (fcp->role != ISP_ROLE_INITIATOR) { rchange = 1; newrole = ISP_ROLE_INITIATOR; } break; case KNOB_ROLE_BOTH: if (fcp->role != ISP_ROLE_BOTH) { rchange = 1; newrole = ISP_ROLE_BOTH; } break; } if (rchange) { ISP_PATH_PRT(isp, ISP_LOGCONFIG, ccb->ccb_h.path, "changing role on from %d to %d\n", fcp->role, newrole); if (isp_control(isp, ISPCTL_CHANGE_ROLE, bus, newrole) != 0) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); break; } } } xpt_done(ccb); break; } case XPT_GET_SIM_KNOB_OLD: /* Get SIM knobs -- compat value */ case XPT_GET_SIM_KNOB: /* Get SIM 
knobs */ { struct ccb_sim_knob *kp = &ccb->knob; if (IS_FC(isp)) { fcparam *fcp; fcp = FCPARAM(isp, bus); kp->xport_specific.fc.wwnn = fcp->isp_wwnn; kp->xport_specific.fc.wwpn = fcp->isp_wwpn; switch (fcp->role) { case ISP_ROLE_NONE: kp->xport_specific.fc.role = KNOB_ROLE_NONE; break; case ISP_ROLE_TARGET: kp->xport_specific.fc.role = KNOB_ROLE_TARGET; break; case ISP_ROLE_INITIATOR: kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR; break; case ISP_ROLE_BOTH: kp->xport_specific.fc.role = KNOB_ROLE_BOTH; break; } kp->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE; ccb->ccb_h.status = CAM_REQ_CMP; } else { ccb->ccb_h.status = CAM_REQ_INVALID; } xpt_done(ccb); break; } case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; #ifdef ISP_TARGET_MODE if (IS_FC(isp) && ISP_CAP_TMODE(isp) && ISP_CAP_SCCFW(isp)) cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; else #endif cpi->target_sprt = 0; cpi->hba_eng_cnt = 0; cpi->max_target = ISP_MAX_TARGETS(isp) - 1; cpi->max_lun = ISP_MAX_LUNS(isp) == 0 ? 255 : ISP_MAX_LUNS(isp) - 1; cpi->bus_id = cam_sim_bus(sim); if (sizeof (bus_size_t) > 4) cpi->maxio = (ISP_NSEG64_MAX - 1) * PAGE_SIZE; else cpi->maxio = (ISP_NSEG_MAX - 1) * PAGE_SIZE; if (IS_FC(isp)) { fcparam *fcp = FCPARAM(isp, bus); cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; cpi->hba_misc |= PIM_EXTLUNS | PIM_NOSCAN; /* * Because our loop ID can shift from time to time, * make our initiator ID out of range of our bus. */ cpi->initiator_id = cpi->max_target + 1; /* * Set base transfer capabilities for Fibre Channel, for this HBA. */ if (IS_25XX(isp)) { cpi->base_transfer_speed = 8000000; } else if (IS_24XX(isp)) { cpi->base_transfer_speed = 4000000; } else if (IS_23XX(isp)) { cpi->base_transfer_speed = 2000000; } else { cpi->base_transfer_speed = 1000000; } cpi->hba_inquiry = PI_TAG_ABLE; cpi->transport = XPORT_FC; cpi->transport_version = 0; cpi->xport_specific.fc.wwnn = fcp->isp_wwnn; cpi->xport_specific.fc.wwpn = fcp->isp_wwpn; cpi->xport_specific.fc.port = fcp->isp_portid; cpi->xport_specific.fc.bitrate = fcp->isp_gbspeed * 1000; } else { sdparam *sdp = SDPARAM(isp, bus); cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->hba_misc = PIM_UNMAPPED; cpi->initiator_id = sdp->isp_initiator_id; cpi->base_transfer_speed = 3300; cpi->transport = XPORT_SPI; cpi->transport_version = 2; } cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } void isp_done(XS_T *sccb) { ispsoftc_t *isp = XS_ISP(sccb); uint32_t status; if (XS_NOERR(sccb)) XS_SETERR(sccb, CAM_REQ_CMP); if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (sccb->scsi_status != SCSI_STATUS_OK)) { sccb->ccb_h.status &= ~CAM_STATUS_MASK; if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; } else { sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; } } sccb->ccb_h.status &= ~CAM_SIM_QUEUED; status = sccb->ccb_h.status & CAM_STATUS_MASK; if (status != CAM_REQ_CMP && (sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { sccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(sccb->ccb_h.path, 1); } if (ISP_PCMD(sccb)) { if 
(callout_active(&PISP_PCMD(sccb)->wdog)) callout_stop(&PISP_PCMD(sccb)->wdog); isp_free_pcmd(isp, (union ccb *) sccb); } xpt_done((union ccb *) sccb); } void isp_async(ispsoftc_t *isp, ispasync_t cmd, ...) { int bus; static const char prom[] = "Chan %d [%d] WWPN 0x%16jx PortID 0x%06x handle 0x%x %s %s"; char buf[64]; char *msg = NULL; target_id_t tgt = 0; fcportdb_t *lp; struct isp_fc *fc; struct cam_path *tmppath; struct ac_contract ac; struct ac_device_changed *adc; va_list ap; switch (cmd) { case ISPASYNC_NEW_TGT_PARAMS: { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; int flags, tgt; sdparam *sdp; struct ccb_trans_settings cts; memset(&cts, 0, sizeof (struct ccb_trans_settings)); va_start(ap, cmd); bus = va_arg(ap, int); tgt = va_arg(ap, int); va_end(ap); sdp = SDPARAM(isp, bus); if (xpt_create_path(&tmppath, NULL, cam_sim_path(ISP_SPI_PC(isp, bus)->sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { isp_prt(isp, ISP_LOGWARN, "isp_async cannot make temp path for %d.%d", tgt, bus); break; } flags = sdp->isp_devparam[tgt].actv_flags; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.protocol = PROTO_SCSI; cts.transport = XPORT_SPI; scsi = &cts.proto_specific.scsi; spi = &cts.xport_specific.spi; if (flags & DPARM_TQING) { scsi->valid |= CTS_SCSI_VALID_TQ; scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } if (flags & DPARM_DISC) { spi->valid |= CTS_SPI_VALID_DISC; spi->flags |= CTS_SPI_FLAGS_DISC_ENB; } spi->flags |= CTS_SPI_VALID_BUS_WIDTH; if (flags & DPARM_WIDE) { spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else { spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } if (flags & DPARM_SYNC) { spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->sync_period = sdp->isp_devparam[tgt].actv_period; spi->sync_offset = sdp->isp_devparam[tgt].actv_offset; } isp_prt(isp, ISP_LOGDEBUG2, "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", bus, tgt, sdp->isp_devparam[tgt].actv_period, sdp->isp_devparam[tgt].actv_offset, flags); xpt_setup_ccb(&cts.ccb_h, tmppath, 1); xpt_async(AC_TRANSFER_NEG, tmppath, &cts); xpt_free_path(tmppath); break; } case ISPASYNC_BUS_RESET: { va_start(ap, cmd); bus = va_arg(ap, int); va_end(ap); isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", bus); if (IS_FC(isp)) { xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, NULL); } else { xpt_async(AC_BUS_RESET, ISP_SPI_PC(isp, bus)->path, NULL); } break; } case ISPASYNC_LOOP_RESET: { uint16_t lipp; fcparam *fcp; va_start(ap, cmd); bus = va_arg(ap, int); va_end(ap); lipp = ISP_READ(isp, OUTMAILBOX1); fcp = FCPARAM(isp, bus); isp_prt(isp, ISP_LOGINFO, "Chan %d LOOP Reset, LIP primitive %x", bus, lipp); /* * Per FCP-4, a Reset LIP should result in a CRN reset. Other * LIPs and loop up/down events should never reset the CRN. For * an as of yet unknown reason, 24xx series cards (and * potentially others) can interrupt with a LIP Reset status * when no LIP reset came down the wire. Additionally, the LIP * primitive accompanying this status would not be a valid LIP * Reset primitive, but some variation of an invalid AL_PA * LIP. As a result, we have to verify the AL_PD in the LIP * addresses our port before blindly resetting. 
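 * (In the check below the destination AL_PD is the low byte of the
 * LIP primitive read from OUTMAILBOX1; only when it addresses our
 * port do we reset the CRN, and then for every target on the channel.)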
*/ if (FCP_IS_DEST_ALPD(fcp, (lipp & 0x00FF))) isp_fcp_reset_crn(isp, bus, /*tgt*/0, /*tgt_set*/ 0); isp_loop_changed(isp, bus); break; } case ISPASYNC_LIP: if (msg == NULL) msg = "LIP Received"; /* FALLTHROUGH */ case ISPASYNC_LOOP_DOWN: if (msg == NULL) msg = "LOOP Down"; /* FALLTHROUGH */ case ISPASYNC_LOOP_UP: if (msg == NULL) msg = "LOOP Up"; va_start(ap, cmd); bus = va_arg(ap, int); va_end(ap); isp_loop_changed(isp, bus); isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg); break; case ISPASYNC_DEV_ARRIVED: va_start(ap, cmd); bus = va_arg(ap, int); lp = va_arg(ap, fcportdb_t *); va_end(ap); fc = ISP_FC_PC(isp, bus); tgt = FC_PORTDB_TGT(isp, bus, lp); isp_gen_role_str(buf, sizeof (buf), lp->prli_word3); isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "arrived"); if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) && (lp->prli_word3 & PRLI_WD3_TARGET_FUNCTION)) { lp->is_target = 1; isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); isp_make_here(isp, lp, bus, tgt); } if ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) && (lp->prli_word3 & PRLI_WD3_INITIATOR_FUNCTION)) { lp->is_initiator = 1; ac.contract_number = AC_CONTRACT_DEV_CHG; adc = (struct ac_device_changed *) ac.contract_data; adc->wwpn = lp->port_wwn; adc->port = lp->portid; adc->target = tgt; adc->arrived = 1; xpt_async(AC_CONTRACT, fc->path, &ac); } break; case ISPASYNC_DEV_CHANGED: case ISPASYNC_DEV_STAYED: { int crn_reset_done; crn_reset_done = 0; va_start(ap, cmd); bus = va_arg(ap, int); lp = va_arg(ap, fcportdb_t *); va_end(ap); fc = ISP_FC_PC(isp, bus); tgt = FC_PORTDB_TGT(isp, bus, lp); isp_gen_role_str(buf, sizeof (buf), lp->new_prli_word3); if (cmd == ISPASYNC_DEV_CHANGED) isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->new_portid, lp->handle, buf, "changed"); else isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "stayed"); if (lp->is_target != ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) && (lp->new_prli_word3 & PRLI_WD3_TARGET_FUNCTION))) { lp->is_target = !lp->is_target; if (lp->is_target) { if (cmd == ISPASYNC_DEV_CHANGED) { isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); crn_reset_done = 1; } isp_make_here(isp, lp, bus, tgt); } else { isp_make_gone(isp, lp, bus, tgt); if (cmd == ISPASYNC_DEV_CHANGED) { isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); crn_reset_done = 1; } } } if (lp->is_initiator != ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) && (lp->new_prli_word3 & PRLI_WD3_INITIATOR_FUNCTION))) { lp->is_initiator = !lp->is_initiator; ac.contract_number = AC_CONTRACT_DEV_CHG; adc = (struct ac_device_changed *) ac.contract_data; adc->wwpn = lp->port_wwn; adc->port = lp->portid; adc->target = tgt; adc->arrived = lp->is_initiator; xpt_async(AC_CONTRACT, fc->path, &ac); } if ((cmd == ISPASYNC_DEV_CHANGED) && (crn_reset_done == 0)) isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); break; } case ISPASYNC_DEV_GONE: va_start(ap, cmd); bus = va_arg(ap, int); lp = va_arg(ap, fcportdb_t *); va_end(ap); fc = ISP_FC_PC(isp, bus); tgt = FC_PORTDB_TGT(isp, bus, lp); /* * If this has a virtual target or initiator set the isp_gdt * timer running on it to delay its departure. 
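 * (Illustrative timeline: the entry below is marked ZOMBIE with
 * gone_timer loaded from fc->gone_device_time; isp_gdt_task() then
 * counts it down once a second, and only when it reaches zero is the
 * port set to NIL and CAM told the device is really gone.)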
*/ isp_gen_role_str(buf, sizeof (buf), lp->prli_word3); if (lp->is_target || lp->is_initiator) { lp->state = FC_PORTDB_STATE_ZOMBIE; lp->gone_timer = fc->gone_device_time; isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone zombie"); if (fc->ready && !callout_active(&fc->gdt)) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Starting Gone Device Timer with %u seconds time now %lu", bus, lp->gone_timer, (unsigned long)time_uptime); callout_reset(&fc->gdt, hz, isp_gdt, fc); } break; } isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone"); break; case ISPASYNC_CHANGE_NOTIFY: { char *msg; int evt, nphdl, nlstate, portid, reason; va_start(ap, cmd); bus = va_arg(ap, int); evt = va_arg(ap, int); if (evt == ISPASYNC_CHANGE_PDB) { nphdl = va_arg(ap, int); nlstate = va_arg(ap, int); reason = va_arg(ap, int); } else if (evt == ISPASYNC_CHANGE_SNS) { portid = va_arg(ap, int); } else { nphdl = NIL_HANDLE; nlstate = reason = 0; } va_end(ap); if (evt == ISPASYNC_CHANGE_PDB) { int tgt_set = 0; msg = "Port Database Changed"; isp_prt(isp, ISP_LOGINFO, "Chan %d %s (nphdl 0x%x state 0x%x reason 0x%x)", bus, msg, nphdl, nlstate, reason); /* * Port database syncs are not sufficient for * determining that logins or logouts are done on the * loop, but this information is directly available from * the reason code from the incoming mbox. We must reset * the fcp crn on these events according to FCP-4 */ switch (reason) { case PDB24XX_AE_IMPL_LOGO_1: case PDB24XX_AE_IMPL_LOGO_2: case PDB24XX_AE_IMPL_LOGO_3: case PDB24XX_AE_PLOGI_RCVD: case PDB24XX_AE_PRLI_RCVD: case PDB24XX_AE_PRLO_RCVD: case PDB24XX_AE_LOGO_RCVD: case PDB24XX_AE_PLOGI_DONE: case PDB24XX_AE_PRLI_DONE: /* * If the event is not global, twiddle tgt and * tgt_set to nominate only the target * associated with the nphdl. */ if (nphdl != PDB24XX_AE_GLOBAL) { /* Break if we don't yet have the pdb */ if (!isp_find_pdb_by_handle(isp, bus, nphdl, &lp)) break; tgt = FC_PORTDB_TGT(isp, bus, lp); tgt_set = 1; } isp_fcp_reset_crn(isp, bus, tgt, tgt_set); break; default: break; /* NOP */ } } else if (evt == ISPASYNC_CHANGE_SNS) { msg = "Name Server Database Changed"; isp_prt(isp, ISP_LOGINFO, "Chan %d %s (PortID 0x%06x)", bus, msg, portid); } else { msg = "Other Change Notify"; isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg); } isp_loop_changed(isp, bus); break; } #ifdef ISP_TARGET_MODE case ISPASYNC_TARGET_NOTIFY: { isp_notify_t *notify; va_start(ap, cmd); notify = va_arg(ap, isp_notify_t *); va_end(ap); switch (notify->nt_ncode) { case NT_ABORT_TASK: case NT_ABORT_TASK_SET: case NT_CLEAR_ACA: case NT_CLEAR_TASK_SET: case NT_LUN_RESET: case NT_TARGET_RESET: case NT_QUERY_TASK_SET: case NT_QUERY_ASYNC_EVENT: /* * These are task management functions. */ isp_handle_platform_target_tmf(isp, notify); break; case NT_BUS_RESET: case NT_LIP_RESET: case NT_LINK_UP: case NT_LINK_DOWN: case NT_HBA_RESET: /* * No action need be taken here. 
*/ break; case NT_GLOBAL_LOGOUT: case NT_LOGOUT: /* * This is device arrival/departure notification */ isp_handle_platform_target_notify_ack(isp, notify, 0); break; case NT_SRR: isp_handle_platform_srr(isp, notify); break; default: isp_prt(isp, ISP_LOGALL, "target notify code 0x%x", notify->nt_ncode); isp_handle_platform_target_notify_ack(isp, notify, 0); break; } break; } case ISPASYNC_TARGET_NOTIFY_ACK: { void *inot; va_start(ap, cmd); inot = va_arg(ap, void *); va_end(ap); if (isp_notify_ack(isp, inot)) { isp_tna_t *tp = malloc(sizeof (*tp), M_DEVBUF, M_NOWAIT); if (tp) { tp->isp = isp; memcpy(tp->data, inot, sizeof (tp->data)); tp->not = tp->data; callout_init_mtx(&tp->timer, &isp->isp_lock, 0); callout_reset(&tp->timer, 5, isp_refire_notify_ack, tp); } else { isp_prt(isp, ISP_LOGERR, "you lose- cannot allocate a notify refire"); } } break; } case ISPASYNC_TARGET_ACTION: { isphdr_t *hp; va_start(ap, cmd); hp = va_arg(ap, isphdr_t *); va_end(ap); switch (hp->rqs_entry_type) { case RQSTYPE_ATIO: isp_handle_platform_atio7(isp, (at7_entry_t *) hp); break; case RQSTYPE_ATIO2: isp_handle_platform_atio2(isp, (at2_entry_t *) hp); break; case RQSTYPE_CTIO7: case RQSTYPE_CTIO3: case RQSTYPE_CTIO2: case RQSTYPE_CTIO: isp_handle_platform_ctio(isp, hp); break; default: isp_prt(isp, ISP_LOGWARN, "%s: unhandled target action 0x%x", __func__, hp->rqs_entry_type); break; } break; } #endif case ISPASYNC_FW_CRASH: { uint16_t mbox1, mbox6; mbox1 = ISP_READ(isp, OUTMAILBOX1); if (IS_DUALBUS(isp)) { mbox6 = ISP_READ(isp, OUTMAILBOX6); } else { mbox6 = 0; } isp_prt(isp, ISP_LOGERR, "Internal Firmware Error on bus %d @ RISC Address 0x%x", mbox6, mbox1); #if 0 mbox1 = isp->isp_osinfo.mbox_sleep_ok; isp->isp_osinfo.mbox_sleep_ok = 0; isp_reinit(isp, 1); isp->isp_osinfo.mbox_sleep_ok = mbox1; isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); #endif break; } default: isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); break; } } uint64_t isp_default_wwn(ispsoftc_t * isp, int chan, int isactive, int iswwnn) { uint64_t seed; struct isp_fc *fc = ISP_FC_PC(isp, chan); /* First try to use explicitly configured WWNs. */ seed = iswwnn ? fc->def_wwnn : fc->def_wwpn; if (seed) return (seed); /* Otherwise try to use WWNs from NVRAM. */ if (isactive) { seed = iswwnn ? FCPARAM(isp, chan)->isp_wwnn_nvram : FCPARAM(isp, chan)->isp_wwpn_nvram; if (seed) return (seed); } /* If still no WWNs, try to steal them from the first channel. */ if (chan > 0) { seed = iswwnn ? ISP_FC_PC(isp, 0)->def_wwnn : ISP_FC_PC(isp, 0)->def_wwpn; if (seed == 0) { seed = iswwnn ? FCPARAM(isp, 0)->isp_wwnn_nvram : FCPARAM(isp, 0)->isp_wwpn_nvram; } } /* If still nothing -- improvise. */ if (seed == 0) { seed = 0x400000007F000000ull + device_get_unit(isp->isp_dev); if (!iswwnn) seed ^= 0x0100000000000000ULL; } /* For additional channels we have to improvise even more. */ if (!iswwnn && chan > 0) { /* * We'll stick our channel number plus one first into bits * 57..59 and thence into bits 52..55 which allows for 8 bits * of channel which is enough for our maximum of 255 channels. */ seed ^= 0x0100000000000000ULL; seed ^= ((uint64_t) (chan + 1) & 0xf) << 56; seed ^= ((uint64_t) ((chan + 1) >> 4) & 0xf) << 52; } return (seed); } void isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...) 
{ int loc; char lbuf[200]; va_list ap; if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { return; } snprintf(lbuf, sizeof (lbuf), "%s: ", device_get_nameunit(isp->isp_dev)); loc = strlen(lbuf); va_start(ap, fmt); vsnprintf(&lbuf[loc], sizeof (lbuf) - loc - 1, fmt, ap); va_end(ap); printf("%s\n", lbuf); } void isp_xs_prt(ispsoftc_t *isp, XS_T *xs, int level, const char *fmt, ...) { va_list ap; if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { return; } xpt_print_path(xs->ccb_h.path); va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); printf("\n"); } uint64_t isp_nanotime_sub(struct timespec *b, struct timespec *a) { uint64_t elapsed; struct timespec x; timespecsub(b, a, &x); elapsed = GET_NANOSEC(&x); if (elapsed == 0) elapsed++; return (elapsed); } int isp_mbox_acquire(ispsoftc_t *isp) { if (isp->isp_osinfo.mboxbsy) { return (1); } else { isp->isp_osinfo.mboxcmd_done = 0; isp->isp_osinfo.mboxbsy = 1; return (0); } } void isp_mbox_wait_complete(ispsoftc_t *isp, mbreg_t *mbp) { u_int t, to; to = (mbp->timeout == 0) ? MBCMD_DEFAULT_TIMEOUT : mbp->timeout; if (isp->isp_osinfo.mbox_sleep_ok) { isp->isp_osinfo.mbox_sleep_ok = 0; isp->isp_osinfo.mbox_sleeping = 1; msleep_sbt(&isp->isp_osinfo.mboxcmd_done, &isp->isp_lock, PRIBIO, "ispmbx_sleep", to * SBT_1US, 0, 0); isp->isp_osinfo.mbox_sleep_ok = 1; isp->isp_osinfo.mbox_sleeping = 0; } else { for (t = 0; t < to; t += 100) { if (isp->isp_osinfo.mboxcmd_done) break; ISP_RUN_ISR(isp); if (isp->isp_osinfo.mboxcmd_done) break; ISP_DELAY(100); } } if (isp->isp_osinfo.mboxcmd_done == 0) { isp_prt(isp, ISP_LOGWARN, "%s Mailbox Command (0x%x) Timeout (%uus) (%s:%d)", isp->isp_osinfo.mbox_sleep_ok? "Interrupting" : "Polled", isp->isp_lastmbxcmd, to, mbp->func, mbp->lineno); mbp->param[0] = MBOX_TIMEOUT; isp->isp_osinfo.mboxcmd_done = 1; } } void isp_mbox_notify_done(ispsoftc_t *isp) { isp->isp_osinfo.mboxcmd_done = 1; if (isp->isp_osinfo.mbox_sleeping) wakeup(&isp->isp_osinfo.mboxcmd_done); } void isp_mbox_release(ispsoftc_t *isp) { isp->isp_osinfo.mboxbsy = 0; } int isp_fc_scratch_acquire(ispsoftc_t *isp, int chan) { int ret = 0; if (isp->isp_osinfo.pc.fc[chan].fcbsy) { ret = -1; } else { isp->isp_osinfo.pc.fc[chan].fcbsy = 1; } return (ret); } void isp_platform_intr(void *arg) { ispsoftc_t *isp = arg; ISP_LOCK(isp); ISP_RUN_ISR(isp); ISP_UNLOCK(isp); } void isp_platform_intr_resp(void *arg) { ispsoftc_t *isp = arg; ISP_LOCK(isp); isp_intr_respq(isp); ISP_UNLOCK(isp); /* We have handshake enabled, so explicitly complete interrupt */ ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); } void isp_platform_intr_atio(void *arg) { ispsoftc_t *isp = arg; ISP_LOCK(isp); #ifdef ISP_TARGET_MODE isp_intr_atioq(isp); #endif ISP_UNLOCK(isp); /* We have handshake enabled, so explicitly complete interrupt */ ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); } void isp_common_dmateardown(ispsoftc_t *isp, struct ccb_scsiio *csio, uint32_t hdl) { if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTREAD); } else { bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); } /* * Reset the command reference number for all LUNs on a specific target * (needed when a target arrives again) or for all targets on a port * (needed for events like a LIP). 
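 *
 * Callers in this file use it in just those two forms, e.g.:
 *
 *	isp_fcp_reset_crn(isp, chan, tgt, 1);	single target (re)arrived
 *	isp_fcp_reset_crn(isp, chan, 0, 0);	whole channel, e.g. a Reset LIP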
*/ void isp_fcp_reset_crn(ispsoftc_t *isp, int chan, uint32_t tgt, int tgt_set) { struct isp_fc *fc = ISP_FC_PC(isp, chan); struct isp_nexus *nxp; int i; if (tgt_set == 0) isp_prt(isp, ISP_LOGDEBUG0, "Chan %d resetting CRN on all targets", chan); else isp_prt(isp, ISP_LOGDEBUG0, "Chan %d resetting CRN on target %u", chan, tgt); for (i = 0; i < NEXUS_HASH_WIDTH; i++) { for (nxp = fc->nexus_hash[i]; nxp != NULL; nxp = nxp->next) { if (tgt_set == 0 || tgt == nxp->tgt) nxp->crnseed = 0; } } } int isp_fcp_next_crn(ispsoftc_t *isp, uint8_t *crnp, XS_T *cmd) { lun_id_t lun; uint32_t chan, tgt; struct isp_fc *fc; struct isp_nexus *nxp; int idx; if (IS_2100(isp)) return (0); chan = XS_CHANNEL(cmd); tgt = XS_TGT(cmd); lun = XS_LUN(cmd); fc = &isp->isp_osinfo.pc.fc[chan]; idx = NEXUS_HASH(tgt, lun); nxp = fc->nexus_hash[idx]; while (nxp) { if (nxp->tgt == tgt && nxp->lun == lun) break; nxp = nxp->next; } if (nxp == NULL) { nxp = fc->nexus_free_list; if (nxp == NULL) { nxp = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_ZERO|M_NOWAIT); if (nxp == NULL) { return (-1); } } else { fc->nexus_free_list = nxp->next; } nxp->tgt = tgt; nxp->lun = lun; nxp->next = fc->nexus_hash[idx]; fc->nexus_hash[idx] = nxp; } if (nxp->crnseed == 0) nxp->crnseed = 1; *crnp = nxp->crnseed++; return (0); } /* * We enter with the lock held */ void isp_timer(void *arg) { ispsoftc_t *isp = arg; #ifdef ISP_TARGET_MODE isp_tmcmd_restart(isp); #endif callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp); } isp_ecmd_t * isp_get_ecmd(ispsoftc_t *isp) { isp_ecmd_t *ecmd = isp->isp_osinfo.ecmd_free; if (ecmd) { isp->isp_osinfo.ecmd_free = ecmd->next; } return (ecmd); } void isp_put_ecmd(ispsoftc_t *isp, isp_ecmd_t *ecmd) { ecmd->next = isp->isp_osinfo.ecmd_free; isp->isp_osinfo.ecmd_free = ecmd; } Index: head/sys/dev/kbdmux/kbdmux.c =================================================================== --- head/sys/dev/kbdmux/kbdmux.c (revision 355600) +++ head/sys/dev/kbdmux/kbdmux.c (revision 355601) @@ -1,1491 +1,1491 @@ /* * kbdmux.c */ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005 Maksim Yevmenkin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $Id: kbdmux.c,v 1.4 2005/07/14 17:38:35 max Exp $ * $FreeBSD$ */ #include "opt_evdev.h" #include "opt_kbd.h" #include "opt_kbdmux.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* the initial key map, accent map and fkey strings */ #ifdef KBDMUX_DFLT_KEYMAP #define KBD_DFLT_KEYMAP #include "kbdmuxmap.h" #endif #include #ifdef EVDEV_SUPPORT #include #include #endif #define KEYBOARD_NAME "kbdmux" MALLOC_DECLARE(M_KBDMUX); MALLOC_DEFINE(M_KBDMUX, KEYBOARD_NAME, "Keyboard multiplexor"); /***************************************************************************** ***************************************************************************** ** Keyboard state ***************************************************************************** *****************************************************************************/ #define KBDMUX_Q_SIZE 512 /* input queue size */ /* * XXX * For now rely on Giant mutex to protect our data structures. * Just like the rest of keyboard drivers and syscons(4) do. * Note that callout is initialized as not MP-safe to make sure * Giant is held. */ #if 0 /* not yet */ #define KBDMUX_LOCK_DECL_GLOBAL \ struct mtx ks_lock #define KBDMUX_LOCK_INIT(s) \ mtx_init(&(s)->ks_lock, "kbdmux", NULL, MTX_DEF|MTX_RECURSE) #define KBDMUX_LOCK_DESTROY(s) \ mtx_destroy(&(s)->ks_lock) #define KBDMUX_LOCK(s) \ mtx_lock(&(s)->ks_lock) #define KBDMUX_UNLOCK(s) \ mtx_unlock(&(s)->ks_lock) #define KBDMUX_LOCK_ASSERT(s, w) \ mtx_assert(&(s)->ks_lock, (w)) #define KBDMUX_SLEEP(s, f, d, t) \ msleep(&(s)->f, &(s)->ks_lock, PCATCH | (PZERO + 1), (d), (t)) #define KBDMUX_CALLOUT_INIT(s) \ callout_init_mtx(&(s)->ks_timo, &(s)->ks_lock, 0) #define KBDMUX_QUEUE_INTR(s) \ taskqueue_enqueue(taskqueue_swi_giant, &(s)->ks_task) #else #define KBDMUX_LOCK_DECL_GLOBAL #define KBDMUX_LOCK_INIT(s) #define KBDMUX_LOCK_DESTROY(s) #define KBDMUX_LOCK(s) #define KBDMUX_UNLOCK(s) #define KBDMUX_LOCK_ASSERT(s, w) #define KBDMUX_SLEEP(s, f, d, t) \ tsleep(&(s)->f, PCATCH | (PZERO + 1), (d), (t)) #define KBDMUX_CALLOUT_INIT(s) \ callout_init(&(s)->ks_timo, 0) #define KBDMUX_QUEUE_INTR(s) \ taskqueue_enqueue(taskqueue_swi_giant, &(s)->ks_task) #endif /* not yet */ /* * kbdmux keyboard */ struct kbdmux_kbd { keyboard_t *kbd; /* keyboard */ SLIST_ENTRY(kbdmux_kbd) next; /* link to next */ }; typedef struct kbdmux_kbd kbdmux_kbd_t; /* * kbdmux state */ struct kbdmux_state { char ks_inq[KBDMUX_Q_SIZE]; /* input chars queue */ unsigned int ks_inq_start; unsigned int ks_inq_length; struct task ks_task; /* interrupt task */ struct callout ks_timo; /* timeout handler */ #define TICKS (hz) /* rate */ int ks_flags; /* flags */ #define COMPOSE (1 << 0) /* compose char flag */ #define TASK (1 << 2) /* interrupt task queued */ int ks_polling; /* poll nesting count */ int ks_mode; /* K_XLATE, K_RAW, K_CODE */ int ks_state; /* state */ int ks_accents; /* accent key index (> 0) */ u_int ks_composed_char; /* composed char code */ u_char ks_prefix; /* AT scan code prefix */ #ifdef EVDEV_SUPPORT struct evdev_dev * ks_evdev; int ks_evdev_state; #endif SLIST_HEAD(, kbdmux_kbd) ks_kbds; /* keyboards */ KBDMUX_LOCK_DECL_GLOBAL; }; typedef struct kbdmux_state kbdmux_state_t; /***************************************************************************** ***************************************************************************** ** Helper functions 
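 **
 ** kbdmux_kbd_putc()/kbdmux_kbd_getc() below keep a simple ring over
 ** ks_inq: a new character goes into slot
 ** (ks_inq_start + ks_inq_length) % KBDMUX_Q_SIZE, so e.g. with
 ** ks_inq_start 510 and ks_inq_length 3 it lands in slot 1.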
***************************************************************************** *****************************************************************************/ static task_fn_t kbdmux_kbd_intr; -static timeout_t kbdmux_kbd_intr_timo; +static callout_func_t kbdmux_kbd_intr_timo; static kbd_callback_func_t kbdmux_kbd_event; static void kbdmux_kbd_putc(kbdmux_state_t *state, char c) { unsigned int p; if (state->ks_inq_length == KBDMUX_Q_SIZE) return; p = (state->ks_inq_start + state->ks_inq_length) % KBDMUX_Q_SIZE; state->ks_inq[p] = c; state->ks_inq_length++; } static int kbdmux_kbd_getc(kbdmux_state_t *state) { unsigned char c; if (state->ks_inq_length == 0) return (-1); c = state->ks_inq[state->ks_inq_start]; state->ks_inq_start = (state->ks_inq_start + 1) % KBDMUX_Q_SIZE; state->ks_inq_length--; return (c); } /* * Interrupt handler task */ void kbdmux_kbd_intr(void *xkbd, int pending) { keyboard_t *kbd = (keyboard_t *) xkbd; kbdmux_state_t *state = (kbdmux_state_t *) kbd->kb_data; kbdd_intr(kbd, NULL); KBDMUX_LOCK(state); state->ks_flags &= ~TASK; wakeup(&state->ks_task); KBDMUX_UNLOCK(state); } /* * Schedule interrupt handler on timeout. Called with locked state. */ void kbdmux_kbd_intr_timo(void *xstate) { kbdmux_state_t *state = (kbdmux_state_t *) xstate; KBDMUX_LOCK_ASSERT(state, MA_OWNED); if (callout_pending(&state->ks_timo)) return; /* callout was reset */ if (!callout_active(&state->ks_timo)) return; /* callout was stopped */ callout_deactivate(&state->ks_timo); /* queue interrupt task if needed */ if (state->ks_inq_length > 0 && !(state->ks_flags & TASK) && KBDMUX_QUEUE_INTR(state) == 0) state->ks_flags |= TASK; /* re-schedule timeout */ callout_reset(&state->ks_timo, TICKS, kbdmux_kbd_intr_timo, state); } /* * Process event from one of our keyboards */ static int kbdmux_kbd_event(keyboard_t *kbd, int event, void *arg) { kbdmux_state_t *state = (kbdmux_state_t *) arg; switch (event) { case KBDIO_KEYINPUT: { int c; KBDMUX_LOCK(state); /* * Read all chars from the keyboard * * Turns out that atkbd(4) check_char() method may return * "true" while read_char() method returns NOKEY. If this * happens we could stuck in the loop below. Avoid this * by breaking out of the loop if read_char() method returns * NOKEY. 
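 * The same NOKEY guard is applied again in kbdmux_read_char() when it
 * polls the slave keyboards directly in polling mode.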
*/ while (kbdd_check_char(kbd)) { c = kbdd_read_char(kbd, 0); if (c == NOKEY) break; if (c == ERRKEY) continue; /* XXX ring bell */ if (!KBD_IS_BUSY(kbd)) continue; /* not open - discard the input */ kbdmux_kbd_putc(state, c); } /* queue interrupt task if needed */ if (state->ks_inq_length > 0 && !(state->ks_flags & TASK) && KBDMUX_QUEUE_INTR(state) == 0) state->ks_flags |= TASK; KBDMUX_UNLOCK(state); } break; case KBDIO_UNLOADING: { kbdmux_kbd_t *k; KBDMUX_LOCK(state); SLIST_FOREACH(k, &state->ks_kbds, next) if (k->kbd == kbd) break; if (k != NULL) { kbd_release(k->kbd, &k->kbd); SLIST_REMOVE(&state->ks_kbds, k, kbdmux_kbd, next); k->kbd = NULL; free(k, M_KBDMUX); } KBDMUX_UNLOCK(state); } break; default: return (EINVAL); /* NOT REACHED */ } return (0); } /**************************************************************************** **************************************************************************** ** Keyboard driver **************************************************************************** ****************************************************************************/ static int kbdmux_configure(int flags); static kbd_probe_t kbdmux_probe; static kbd_init_t kbdmux_init; static kbd_term_t kbdmux_term; static kbd_intr_t kbdmux_intr; static kbd_test_if_t kbdmux_test_if; static kbd_enable_t kbdmux_enable; static kbd_disable_t kbdmux_disable; static kbd_read_t kbdmux_read; static kbd_check_t kbdmux_check; static kbd_read_char_t kbdmux_read_char; static kbd_check_char_t kbdmux_check_char; static kbd_ioctl_t kbdmux_ioctl; static kbd_lock_t kbdmux_lock; static void kbdmux_clear_state_locked(kbdmux_state_t *state); static kbd_clear_state_t kbdmux_clear_state; static kbd_get_state_t kbdmux_get_state; static kbd_set_state_t kbdmux_set_state; static kbd_poll_mode_t kbdmux_poll; static keyboard_switch_t kbdmuxsw = { .probe = kbdmux_probe, .init = kbdmux_init, .term = kbdmux_term, .intr = kbdmux_intr, .test_if = kbdmux_test_if, .enable = kbdmux_enable, .disable = kbdmux_disable, .read = kbdmux_read, .check = kbdmux_check, .read_char = kbdmux_read_char, .check_char = kbdmux_check_char, .ioctl = kbdmux_ioctl, .lock = kbdmux_lock, .clear_state = kbdmux_clear_state, .get_state = kbdmux_get_state, .set_state = kbdmux_set_state, .get_fkeystr = genkbd_get_fkeystr, .poll = kbdmux_poll, .diag = genkbd_diag, }; #ifdef EVDEV_SUPPORT static evdev_event_t kbdmux_ev_event; static const struct evdev_methods kbdmux_evdev_methods = { .ev_event = kbdmux_ev_event, }; #endif /* * Return the number of found keyboards */ static int kbdmux_configure(int flags) { return (1); } /* * Detect a keyboard */ static int kbdmux_probe(int unit, void *arg, int flags) { if (resource_disabled(KEYBOARD_NAME, unit)) return (ENXIO); return (0); } /* * Reset and initialize the keyboard (stolen from atkbd.c) */ static int kbdmux_init(int unit, keyboard_t **kbdp, void *arg, int flags) { keyboard_t *kbd = NULL; kbdmux_state_t *state = NULL; keymap_t *keymap = NULL; accentmap_t *accmap = NULL; fkeytab_t *fkeymap = NULL; int error, needfree, fkeymap_size, delay[2]; #ifdef EVDEV_SUPPORT struct evdev_dev *evdev; char phys_loc[NAMELEN]; #endif if (*kbdp == NULL) { *kbdp = kbd = malloc(sizeof(*kbd), M_KBDMUX, M_NOWAIT | M_ZERO); state = malloc(sizeof(*state), M_KBDMUX, M_NOWAIT | M_ZERO); keymap = malloc(sizeof(key_map), M_KBDMUX, M_NOWAIT); accmap = malloc(sizeof(accent_map), M_KBDMUX, M_NOWAIT); fkeymap = malloc(sizeof(fkey_tab), M_KBDMUX, M_NOWAIT); fkeymap_size = sizeof(fkey_tab)/sizeof(fkey_tab[0]); needfree = 1; if ((kbd == NULL) || 
(state == NULL) || (keymap == NULL) || (accmap == NULL) || (fkeymap == NULL)) { error = ENOMEM; goto bad; } KBDMUX_LOCK_INIT(state); TASK_INIT(&state->ks_task, 0, kbdmux_kbd_intr, (void *) kbd); KBDMUX_CALLOUT_INIT(state); SLIST_INIT(&state->ks_kbds); } else if (KBD_IS_INITIALIZED(*kbdp) && KBD_IS_CONFIGURED(*kbdp)) { return (0); } else { kbd = *kbdp; state = (kbdmux_state_t *) kbd->kb_data; keymap = kbd->kb_keymap; accmap = kbd->kb_accentmap; fkeymap = kbd->kb_fkeytab; fkeymap_size = kbd->kb_fkeytab_size; needfree = 0; } if (!KBD_IS_PROBED(kbd)) { /* XXX assume 101/102 keys keyboard */ kbd_init_struct(kbd, KEYBOARD_NAME, KB_101, unit, flags, 0, 0); bcopy(&key_map, keymap, sizeof(key_map)); bcopy(&accent_map, accmap, sizeof(accent_map)); bcopy(fkey_tab, fkeymap, imin(fkeymap_size*sizeof(fkeymap[0]), sizeof(fkey_tab))); kbd_set_maps(kbd, keymap, accmap, fkeymap, fkeymap_size); kbd->kb_data = (void *)state; KBD_FOUND_DEVICE(kbd); KBD_PROBE_DONE(kbd); KBDMUX_LOCK(state); kbdmux_clear_state_locked(state); state->ks_mode = K_XLATE; KBDMUX_UNLOCK(state); } if (!KBD_IS_INITIALIZED(kbd) && !(flags & KB_CONF_PROBE_ONLY)) { kbd->kb_config = flags & ~KB_CONF_PROBE_ONLY; kbdmux_ioctl(kbd, KDSETLED, (caddr_t)&state->ks_state); delay[0] = kbd->kb_delay1; delay[1] = kbd->kb_delay2; kbdmux_ioctl(kbd, KDSETREPEAT, (caddr_t)delay); #ifdef EVDEV_SUPPORT /* register as evdev provider */ evdev = evdev_alloc(); evdev_set_name(evdev, "System keyboard multiplexer"); snprintf(phys_loc, NAMELEN, KEYBOARD_NAME"%d", unit); evdev_set_phys(evdev, phys_loc); evdev_set_id(evdev, BUS_VIRTUAL, 0, 0, 0); evdev_set_methods(evdev, kbd, &kbdmux_evdev_methods); evdev_support_event(evdev, EV_SYN); evdev_support_event(evdev, EV_KEY); evdev_support_event(evdev, EV_LED); evdev_support_event(evdev, EV_REP); evdev_support_all_known_keys(evdev); evdev_support_led(evdev, LED_NUML); evdev_support_led(evdev, LED_CAPSL); evdev_support_led(evdev, LED_SCROLLL); if (evdev_register_mtx(evdev, &Giant)) evdev_free(evdev); else state->ks_evdev = evdev; state->ks_evdev_state = 0; #endif KBD_INIT_DONE(kbd); } if (!KBD_IS_CONFIGURED(kbd)) { if (kbd_register(kbd) < 0) { error = ENXIO; goto bad; } KBD_CONFIG_DONE(kbd); KBDMUX_LOCK(state); callout_reset(&state->ks_timo, TICKS, kbdmux_kbd_intr_timo, state); KBDMUX_UNLOCK(state); } return (0); bad: if (needfree) { if (state != NULL) free(state, M_KBDMUX); if (keymap != NULL) free(keymap, M_KBDMUX); if (accmap != NULL) free(accmap, M_KBDMUX); if (fkeymap != NULL) free(fkeymap, M_KBDMUX); if (kbd != NULL) { free(kbd, M_KBDMUX); *kbdp = NULL; /* insure ref doesn't leak to caller */ } } return (error); } /* * Finish using this keyboard */ static int kbdmux_term(keyboard_t *kbd) { kbdmux_state_t *state = (kbdmux_state_t *) kbd->kb_data; kbdmux_kbd_t *k; KBDMUX_LOCK(state); /* kill callout */ callout_stop(&state->ks_timo); /* wait for interrupt task */ while (state->ks_flags & TASK) KBDMUX_SLEEP(state, ks_task, "kbdmuxc", 0); /* release all keyboards from the mux */ while ((k = SLIST_FIRST(&state->ks_kbds)) != NULL) { kbd_release(k->kbd, &k->kbd); SLIST_REMOVE_HEAD(&state->ks_kbds, next); k->kbd = NULL; free(k, M_KBDMUX); } KBDMUX_UNLOCK(state); kbd_unregister(kbd); #ifdef EVDEV_SUPPORT evdev_free(state->ks_evdev); #endif KBDMUX_LOCK_DESTROY(state); bzero(state, sizeof(*state)); free(state, M_KBDMUX); free(kbd->kb_keymap, M_KBDMUX); free(kbd->kb_accentmap, M_KBDMUX); free(kbd->kb_fkeytab, M_KBDMUX); free(kbd, M_KBDMUX); return (0); } /* * Keyboard interrupt routine */ static int kbdmux_intr(keyboard_t *kbd, 
void *arg) { int c; if (KBD_IS_ACTIVE(kbd) && KBD_IS_BUSY(kbd)) { /* let the callback function to process the input */ (*kbd->kb_callback.kc_func)(kbd, KBDIO_KEYINPUT, kbd->kb_callback.kc_arg); } else { /* read and discard the input; no one is waiting for input */ do { c = kbdmux_read_char(kbd, FALSE); } while (c != NOKEY); } return (0); } /* * Test the interface to the device */ static int kbdmux_test_if(keyboard_t *kbd) { return (0); } /* * Enable the access to the device; until this function is called, * the client cannot read from the keyboard. */ static int kbdmux_enable(keyboard_t *kbd) { KBD_ACTIVATE(kbd); return (0); } /* * Disallow the access to the device */ static int kbdmux_disable(keyboard_t *kbd) { KBD_DEACTIVATE(kbd); return (0); } /* * Read one byte from the keyboard if it's allowed */ static int kbdmux_read(keyboard_t *kbd, int wait) { kbdmux_state_t *state = (kbdmux_state_t *) kbd->kb_data; int c; KBDMUX_LOCK(state); c = kbdmux_kbd_getc(state); KBDMUX_UNLOCK(state); if (c != -1) kbd->kb_count ++; return (KBD_IS_ACTIVE(kbd)? c : -1); } /* * Check if data is waiting */ static int kbdmux_check(keyboard_t *kbd) { kbdmux_state_t *state = (kbdmux_state_t *) kbd->kb_data; int ready; if (!KBD_IS_ACTIVE(kbd)) return (FALSE); KBDMUX_LOCK(state); ready = (state->ks_inq_length > 0) ? TRUE : FALSE; KBDMUX_UNLOCK(state); return (ready); } /* * Read char from the keyboard (stolen from atkbd.c) */ static u_int kbdmux_read_char(keyboard_t *kbd, int wait) { kbdmux_state_t *state = (kbdmux_state_t *) kbd->kb_data; u_int action; int scancode, keycode; KBDMUX_LOCK(state); next_code: /* do we have a composed char to return? */ if (!(state->ks_flags & COMPOSE) && (state->ks_composed_char > 0)) { action = state->ks_composed_char; state->ks_composed_char = 0; if (action > UCHAR_MAX) { KBDMUX_UNLOCK(state); return (ERRKEY); } KBDMUX_UNLOCK(state); return (action); } /* see if there is something in the keyboard queue */ scancode = kbdmux_kbd_getc(state); if (scancode == -1) { if (state->ks_polling != 0) { kbdmux_kbd_t *k; SLIST_FOREACH(k, &state->ks_kbds, next) { while (kbdd_check_char(k->kbd)) { scancode = kbdd_read_char(k->kbd, 0); if (scancode == NOKEY) break; if (scancode == ERRKEY) continue; if (!KBD_IS_BUSY(k->kbd)) continue; kbdmux_kbd_putc(state, scancode); } } if (state->ks_inq_length > 0) goto next_code; } KBDMUX_UNLOCK(state); return (NOKEY); } /* XXX FIXME: check for -1 if wait == 1! */ kbd->kb_count ++; #ifdef EVDEV_SUPPORT /* push evdev event */ if (evdev_rcpt_mask & EVDEV_RCPT_KBDMUX && state->ks_evdev != NULL) { uint16_t key = evdev_scancode2key(&state->ks_evdev_state, scancode); if (key != KEY_RESERVED) { evdev_push_event(state->ks_evdev, EV_KEY, key, scancode & 0x80 ? 
0 : 1); evdev_sync(state->ks_evdev); } } #endif /* return the byte as is for the K_RAW mode */ if (state->ks_mode == K_RAW) { KBDMUX_UNLOCK(state); return (scancode); } /* translate the scan code into a keycode */ keycode = scancode & 0x7F; switch (state->ks_prefix) { case 0x00: /* normal scancode */ switch(scancode) { case 0xB8: /* left alt (compose key) released */ if (state->ks_flags & COMPOSE) { state->ks_flags &= ~COMPOSE; if (state->ks_composed_char > UCHAR_MAX) state->ks_composed_char = 0; } break; case 0x38: /* left alt (compose key) pressed */ if (!(state->ks_flags & COMPOSE)) { state->ks_flags |= COMPOSE; state->ks_composed_char = 0; } break; case 0xE0: case 0xE1: state->ks_prefix = scancode; goto next_code; } break; case 0xE0: /* 0xE0 prefix */ state->ks_prefix = 0; switch (keycode) { case 0x1C: /* right enter key */ keycode = 0x59; break; case 0x1D: /* right ctrl key */ keycode = 0x5A; break; case 0x35: /* keypad divide key */ keycode = 0x5B; break; case 0x37: /* print scrn key */ keycode = 0x5C; break; case 0x38: /* right alt key (alt gr) */ keycode = 0x5D; break; case 0x46: /* ctrl-pause/break on AT 101 (see below) */ keycode = 0x68; break; case 0x47: /* grey home key */ keycode = 0x5E; break; case 0x48: /* grey up arrow key */ keycode = 0x5F; break; case 0x49: /* grey page up key */ keycode = 0x60; break; case 0x4B: /* grey left arrow key */ keycode = 0x61; break; case 0x4D: /* grey right arrow key */ keycode = 0x62; break; case 0x4F: /* grey end key */ keycode = 0x63; break; case 0x50: /* grey down arrow key */ keycode = 0x64; break; case 0x51: /* grey page down key */ keycode = 0x65; break; case 0x52: /* grey insert key */ keycode = 0x66; break; case 0x53: /* grey delete key */ keycode = 0x67; break; /* the following 3 are only used on the MS "Natural" keyboard */ case 0x5b: /* left Window key */ keycode = 0x69; break; case 0x5c: /* right Window key */ keycode = 0x6a; break; case 0x5d: /* menu key */ keycode = 0x6b; break; case 0x5e: /* power key */ keycode = 0x6d; break; case 0x5f: /* sleep key */ keycode = 0x6e; break; case 0x63: /* wake key */ keycode = 0x6f; break; case 0x64: /* [JP106USB] backslash, underscore */ keycode = 0x73; break; default: /* ignore everything else */ goto next_code; } break; case 0xE1: /* 0xE1 prefix */ /* * The pause/break key on the 101 keyboard produces: * E1-1D-45 E1-9D-C5 * Ctrl-pause/break produces: * E0-46 E0-C6 (See above.) 
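 *
 * Walking that through the state machine below: the E1 prefix followed
 * by 1D latches ks_prefix to 0x1D, and the 0x45 (or 0xC5 on release)
 * that follows is then rewritten to keycode 0x68, the same keycode the
 * ctrl-pause/break E0-46 sequence is mapped to above.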
*/ state->ks_prefix = 0; if (keycode == 0x1D) state->ks_prefix = 0x1D; goto next_code; /* NOT REACHED */ case 0x1D: /* pause / break */ state->ks_prefix = 0; if (keycode != 0x45) goto next_code; keycode = 0x68; break; } /* XXX assume 101/102 keys AT keyboard */ switch (keycode) { case 0x5c: /* print screen */ if (state->ks_flags & ALTS) keycode = 0x54; /* sysrq */ break; case 0x68: /* pause/break */ if (state->ks_flags & CTLS) keycode = 0x6c; /* break */ break; } /* return the key code in the K_CODE mode */ if (state->ks_mode == K_CODE) { KBDMUX_UNLOCK(state); return (keycode | (scancode & 0x80)); } /* compose a character code */ if (state->ks_flags & COMPOSE) { switch (keycode | (scancode & 0x80)) { /* key pressed, process it */ case 0x47: case 0x48: case 0x49: /* keypad 7,8,9 */ state->ks_composed_char *= 10; state->ks_composed_char += keycode - 0x40; if (state->ks_composed_char > UCHAR_MAX) { KBDMUX_UNLOCK(state); return (ERRKEY); } goto next_code; case 0x4B: case 0x4C: case 0x4D: /* keypad 4,5,6 */ state->ks_composed_char *= 10; state->ks_composed_char += keycode - 0x47; if (state->ks_composed_char > UCHAR_MAX) { KBDMUX_UNLOCK(state); return (ERRKEY); } goto next_code; case 0x4F: case 0x50: case 0x51: /* keypad 1,2,3 */ state->ks_composed_char *= 10; state->ks_composed_char += keycode - 0x4E; if (state->ks_composed_char > UCHAR_MAX) { KBDMUX_UNLOCK(state); return (ERRKEY); } goto next_code; case 0x52: /* keypad 0 */ state->ks_composed_char *= 10; if (state->ks_composed_char > UCHAR_MAX) { KBDMUX_UNLOCK(state); return (ERRKEY); } goto next_code; /* key released, no interest here */ case 0xC7: case 0xC8: case 0xC9: /* keypad 7,8,9 */ case 0xCB: case 0xCC: case 0xCD: /* keypad 4,5,6 */ case 0xCF: case 0xD0: case 0xD1: /* keypad 1,2,3 */ case 0xD2: /* keypad 0 */ goto next_code; case 0x38: /* left alt key */ break; default: if (state->ks_composed_char > 0) { state->ks_flags &= ~COMPOSE; state->ks_composed_char = 0; KBDMUX_UNLOCK(state); return (ERRKEY); } break; } } /* keycode to key action */ action = genkbd_keyaction(kbd, keycode, scancode & 0x80, &state->ks_state, &state->ks_accents); if (action == NOKEY) goto next_code; KBDMUX_UNLOCK(state); return (action); } /* * Check if char is waiting */ static int kbdmux_check_char(keyboard_t *kbd) { kbdmux_state_t *state = (kbdmux_state_t *) kbd->kb_data; int ready; if (!KBD_IS_ACTIVE(kbd)) return (FALSE); KBDMUX_LOCK(state); if (!(state->ks_flags & COMPOSE) && (state->ks_composed_char != 0)) ready = TRUE; else ready = (state->ks_inq_length > 0) ? 
TRUE : FALSE; KBDMUX_UNLOCK(state); return (ready); } /* * Keyboard ioctl's */ static int kbdmux_ioctl(keyboard_t *kbd, u_long cmd, caddr_t arg) { static int delays[] = { 250, 500, 750, 1000 }; static int rates[] = { 34, 38, 42, 46, 50, 55, 59, 63, 68, 76, 84, 92, 100, 110, 118, 126, 136, 152, 168, 184, 200, 220, 236, 252, 272, 304, 336, 368, 400, 440, 472, 504 }; kbdmux_state_t *state = (kbdmux_state_t *) kbd->kb_data; kbdmux_kbd_t *k; keyboard_info_t *ki; int error = 0, mode; #ifdef COMPAT_FREEBSD6 int ival; #endif if (state == NULL) return (ENXIO); switch (cmd) { case KBADDKBD: /* add keyboard to the mux */ ki = (keyboard_info_t *) arg; if (ki == NULL || ki->kb_unit < 0 || ki->kb_name[0] == '\0' || strcmp(ki->kb_name, "*") == 0) return (EINVAL); /* bad input */ KBDMUX_LOCK(state); SLIST_FOREACH(k, &state->ks_kbds, next) if (k->kbd->kb_unit == ki->kb_unit && strcmp(k->kbd->kb_name, ki->kb_name) == 0) break; if (k != NULL) { KBDMUX_UNLOCK(state); return (0); /* keyboard already in the mux */ } k = malloc(sizeof(*k), M_KBDMUX, M_NOWAIT | M_ZERO); if (k == NULL) { KBDMUX_UNLOCK(state); return (ENOMEM); /* out of memory */ } k->kbd = kbd_get_keyboard( kbd_allocate( ki->kb_name, ki->kb_unit, (void *) &k->kbd, kbdmux_kbd_event, (void *) state)); if (k->kbd == NULL) { KBDMUX_UNLOCK(state); free(k, M_KBDMUX); return (EINVAL); /* bad keyboard */ } kbdd_enable(k->kbd); kbdd_clear_state(k->kbd); /* set K_RAW mode on slave keyboard */ mode = K_RAW; error = kbdd_ioctl(k->kbd, KDSKBMODE, (caddr_t)&mode); if (error == 0) { /* set lock keys state on slave keyboard */ mode = state->ks_state & LOCK_MASK; error = kbdd_ioctl(k->kbd, KDSKBSTATE, (caddr_t)&mode); } if (error != 0) { KBDMUX_UNLOCK(state); kbd_release(k->kbd, &k->kbd); k->kbd = NULL; free(k, M_KBDMUX); return (error); /* could not set mode */ } SLIST_INSERT_HEAD(&state->ks_kbds, k, next); KBDMUX_UNLOCK(state); break; case KBRELKBD: /* release keyboard from the mux */ ki = (keyboard_info_t *) arg; if (ki == NULL || ki->kb_unit < 0 || ki->kb_name[0] == '\0' || strcmp(ki->kb_name, "*") == 0) return (EINVAL); /* bad input */ KBDMUX_LOCK(state); SLIST_FOREACH(k, &state->ks_kbds, next) if (k->kbd->kb_unit == ki->kb_unit && strcmp(k->kbd->kb_name, ki->kb_name) == 0) break; if (k != NULL) { error = kbd_release(k->kbd, &k->kbd); if (error == 0) { SLIST_REMOVE(&state->ks_kbds, k, kbdmux_kbd, next); k->kbd = NULL; free(k, M_KBDMUX); } } else error = ENXIO; /* keyboard is not in the mux */ KBDMUX_UNLOCK(state); break; case KDGKBMODE: /* get keyboard mode */ KBDMUX_LOCK(state); *(int *)arg = state->ks_mode; KBDMUX_UNLOCK(state); break; #ifdef COMPAT_FREEBSD6 case _IO('K', 7): ival = IOCPARM_IVAL(arg); arg = (caddr_t)&ival; /* FALLTHROUGH */ #endif case KDSKBMODE: /* set keyboard mode */ KBDMUX_LOCK(state); switch (*(int *)arg) { case K_XLATE: if (state->ks_mode != K_XLATE) { /* make lock key state and LED state match */ state->ks_state &= ~LOCK_MASK; state->ks_state |= KBD_LED_VAL(kbd); } /* FALLTHROUGH */ case K_RAW: case K_CODE: if (state->ks_mode != *(int *)arg) { kbdmux_clear_state_locked(state); state->ks_mode = *(int *)arg; } break; default: error = EINVAL; break; } KBDMUX_UNLOCK(state); break; case KDGETLED: /* get keyboard LED */ KBDMUX_LOCK(state); *(int *)arg = KBD_LED_VAL(kbd); KBDMUX_UNLOCK(state); break; #ifdef COMPAT_FREEBSD6 case _IO('K', 66): ival = IOCPARM_IVAL(arg); arg = (caddr_t)&ival; /* FALLTHROUGH */ #endif case KDSETLED: /* set keyboard LED */ KBDMUX_LOCK(state); /* NOTE: lock key state in ks_state won't be changed */ if (*(int *)arg
& ~LOCK_MASK) { KBDMUX_UNLOCK(state); return (EINVAL); } KBD_LED_VAL(kbd) = *(int *)arg; #ifdef EVDEV_SUPPORT if (state->ks_evdev != NULL && evdev_rcpt_mask & EVDEV_RCPT_KBDMUX) evdev_push_leds(state->ks_evdev, *(int *)arg); #endif /* KDSETLED on all slave keyboards */ SLIST_FOREACH(k, &state->ks_kbds, next) (void)kbdd_ioctl(k->kbd, KDSETLED, arg); KBDMUX_UNLOCK(state); break; case KDGKBSTATE: /* get lock key state */ KBDMUX_LOCK(state); *(int *)arg = state->ks_state & LOCK_MASK; KBDMUX_UNLOCK(state); break; #ifdef COMPAT_FREEBSD6 case _IO('K', 20): ival = IOCPARM_IVAL(arg); arg = (caddr_t)&ival; /* FALLTHROUGH */ #endif case KDSKBSTATE: /* set lock key state */ KBDMUX_LOCK(state); if (*(int *)arg & ~LOCK_MASK) { KBDMUX_UNLOCK(state); return (EINVAL); } state->ks_state &= ~LOCK_MASK; state->ks_state |= *(int *)arg; /* KDSKBSTATE on all slave keyboards */ SLIST_FOREACH(k, &state->ks_kbds, next) (void)kbdd_ioctl(k->kbd, KDSKBSTATE, arg); KBDMUX_UNLOCK(state); return (kbdmux_ioctl(kbd, KDSETLED, arg)); /* NOT REACHED */ #ifdef COMPAT_FREEBSD6 case _IO('K', 67): cmd = KDSETRAD; ival = IOCPARM_IVAL(arg); arg = (caddr_t)&ival; /* FALLTHROUGH */ #endif case KDSETREPEAT: /* set keyboard repeat rate (new interface) */ case KDSETRAD: /* set keyboard repeat rate (old interface) */ KBDMUX_LOCK(state); if (cmd == KDSETREPEAT) { int i; /* lookup delay */ for (i = sizeof(delays)/sizeof(delays[0]) - 1; i > 0; i --) if (((int *)arg)[0] >= delays[i]) break; mode = i << 5; /* lookup rate */ for (i = sizeof(rates)/sizeof(rates[0]) - 1; i > 0; i --) if (((int *)arg)[1] >= rates[i]) break; mode |= i; } else mode = *(int *)arg; if (mode & ~0x7f) { KBDMUX_UNLOCK(state); return (EINVAL); } kbd->kb_delay1 = delays[(mode >> 5) & 3]; kbd->kb_delay2 = rates[mode & 0x1f]; #ifdef EVDEV_SUPPORT if (state->ks_evdev != NULL && evdev_rcpt_mask & EVDEV_RCPT_KBDMUX) evdev_push_repeats(state->ks_evdev, kbd); #endif /* perform command on all slave keyboards */ SLIST_FOREACH(k, &state->ks_kbds, next) (void)kbdd_ioctl(k->kbd, cmd, arg); KBDMUX_UNLOCK(state); break; case PIO_KEYMAP: /* set keyboard translation table */ case OPIO_KEYMAP: /* set keyboard translation table (compat) */ case PIO_KEYMAPENT: /* set keyboard translation table entry */ case PIO_DEADKEYMAP: /* set accent key translation table */ KBDMUX_LOCK(state); state->ks_accents = 0; /* perform command on all slave keyboards */ SLIST_FOREACH(k, &state->ks_kbds, next) (void)kbdd_ioctl(k->kbd, cmd, arg); KBDMUX_UNLOCK(state); /* FALLTHROUGH */ default: error = genkbd_commonioctl(kbd, cmd, arg); break; } return (error); } /* * Lock the access to the keyboard */ static int kbdmux_lock(keyboard_t *kbd, int lock) { return (1); /* XXX */ } /* * Clear the internal state of the keyboard */ static void kbdmux_clear_state_locked(kbdmux_state_t *state) { KBDMUX_LOCK_ASSERT(state, MA_OWNED); state->ks_flags &= ~COMPOSE; state->ks_polling = 0; state->ks_state &= LOCK_MASK; /* preserve locking key state */ state->ks_accents = 0; state->ks_composed_char = 0; /* state->ks_prefix = 0; XXX */ state->ks_inq_length = 0; } static void kbdmux_clear_state(keyboard_t *kbd) { kbdmux_state_t *state = (kbdmux_state_t *) kbd->kb_data; KBDMUX_LOCK(state); kbdmux_clear_state_locked(state); KBDMUX_UNLOCK(state); } /* * Save the internal state */ static int kbdmux_get_state(keyboard_t *kbd, void *buf, size_t len) { if (len == 0) return (sizeof(kbdmux_state_t)); if (len < sizeof(kbdmux_state_t)) return (-1); bcopy(kbd->kb_data, buf, sizeof(kbdmux_state_t)); /* XXX locking? 
*/ return (0); } /* * Set the internal state */ static int kbdmux_set_state(keyboard_t *kbd, void *buf, size_t len) { if (len < sizeof(kbdmux_state_t)) return (ENOMEM); bcopy(buf, kbd->kb_data, sizeof(kbdmux_state_t)); /* XXX locking? */ return (0); } /* * Set polling */ static int kbdmux_poll(keyboard_t *kbd, int on) { kbdmux_state_t *state = (kbdmux_state_t *) kbd->kb_data; kbdmux_kbd_t *k; KBDMUX_LOCK(state); if (on) state->ks_polling++; else state->ks_polling--; /* set poll on slave keyboards */ SLIST_FOREACH(k, &state->ks_kbds, next) kbdd_poll(k->kbd, on); KBDMUX_UNLOCK(state); return (0); } #ifdef EVDEV_SUPPORT static void kbdmux_ev_event(struct evdev_dev *evdev, uint16_t type, uint16_t code, int32_t value) { keyboard_t *kbd = evdev_get_softc(evdev); if (evdev_rcpt_mask & EVDEV_RCPT_KBDMUX && (type == EV_LED || type == EV_REP)) { mtx_lock(&Giant); kbd_ev_event(kbd, type, code, value); mtx_unlock(&Giant); } } #endif /***************************************************************************** ***************************************************************************** ** Module ***************************************************************************** *****************************************************************************/ KEYBOARD_DRIVER(kbdmux, kbdmuxsw, kbdmux_configure); static int kbdmux_modevent(module_t mod, int type, void *data) { keyboard_switch_t *sw; keyboard_t *kbd; int error; switch (type) { case MOD_LOAD: if ((error = kbd_add_driver(&kbdmux_kbd_driver)) != 0) break; if ((sw = kbd_get_switch(KEYBOARD_NAME)) == NULL) { kbd_delete_driver(&kbdmux_kbd_driver); error = ENXIO; break; } kbd = NULL; if ((error = (*sw->probe)(0, NULL, 0)) != 0 || (error = (*sw->init)(0, &kbd, NULL, 0)) != 0) { kbd_delete_driver(&kbdmux_kbd_driver); break; } #ifdef KBD_INSTALL_CDEV if ((error = kbd_attach(kbd)) != 0) { (*sw->term)(kbd); kbd_delete_driver(&kbdmux_kbd_driver); break; } #endif if ((error = (*sw->enable)(kbd)) != 0) { (*sw->disable)(kbd); #ifdef KBD_INSTALL_CDEV kbd_detach(kbd); #endif (*sw->term)(kbd); kbd_delete_driver(&kbdmux_kbd_driver); break; } break; case MOD_UNLOAD: if ((sw = kbd_get_switch(KEYBOARD_NAME)) == NULL) panic("kbd_get_switch(" KEYBOARD_NAME ") == NULL"); kbd = kbd_get_keyboard(kbd_find_keyboard(KEYBOARD_NAME, 0)); if (kbd != NULL) { (*sw->disable)(kbd); #ifdef KBD_INSTALL_CDEV kbd_detach(kbd); #endif (*sw->term)(kbd); kbd_delete_driver(&kbdmux_kbd_driver); } error = 0; break; default: error = EOPNOTSUPP; break; } return (error); } DEV_MODULE(kbdmux, kbdmux_modevent, NULL); #ifdef EVDEV_SUPPORT MODULE_DEPEND(kbdmux, evdev, 1, 1, 1); #endif Index: head/sys/dev/mpt/mpt_cam.c =================================================================== --- head/sys/dev/mpt/mpt_cam.c (revision 355600) +++ head/sys/dev/mpt/mpt_cam.c (revision 355601) @@ -1,5369 +1,5369 @@ /*- * FreeBSD/CAM specific routines for LSI '909 FC adapters. * FreeBSD Version. * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-3-Clause * * Copyright (c) 2000, 2001 by Greg Ansley * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2002, 2006 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Support from Chris Ellsworth in order to make SAS adapters work * is gratefully acknowledged. * * Support from LSI-Logic has also gone a great deal toward making this a * workable subsystem and is gratefully acknowledged. */ /*- * Copyright (c) 2004, Avid Technology, Inc. and its contributors. * Copyright (c) 2005, WHEEL Sp. z o.o. * Copyright (c) 2004, 2005 Justin T. Gibbs * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. 
Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */ #include "dev/mpt/mpilib/mpi_init.h" #include "dev/mpt/mpilib/mpi_targ.h" #include "dev/mpt/mpilib/mpi_fc.h" #include "dev/mpt/mpilib/mpi_sas.h" #include #include #include static void mpt_poll(struct cam_sim *); -static timeout_t mpt_timeout; +static callout_func_t mpt_timeout; static void mpt_action(struct cam_sim *, union ccb *); static int mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *); static void mpt_setwidth(struct mpt_softc *, int, int); static void mpt_setsync(struct mpt_softc *, int, int, int); static int mpt_update_spi_config(struct mpt_softc *, int); static mpt_reply_handler_t mpt_scsi_reply_handler; static mpt_reply_handler_t mpt_scsi_tmf_reply_handler; static mpt_reply_handler_t mpt_fc_els_reply_handler; static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *, MSG_DEFAULT_REPLY *); static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int); static int mpt_fc_reset_link(struct mpt_softc *, int); static int mpt_spawn_recovery_thread(struct mpt_softc *mpt); static void mpt_terminate_recovery_thread(struct mpt_softc *mpt); static void mpt_recovery_thread(void *arg); static void mpt_recover_commands(struct mpt_softc *mpt); static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int, target_id_t, lun_id_t, u_int, int); static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int); static void mpt_post_target_command(struct mpt_softc *, request_t *, int); static int mpt_add_els_buffers(struct mpt_softc *mpt); static int mpt_add_target_commands(struct mpt_softc *mpt); static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t); static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t); static void mpt_target_start_io(struct mpt_softc *, union ccb *); static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *); static int mpt_abort_target_cmd(struct mpt_softc *, request_t *); static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *, uint8_t, uint8_t const *, u_int); static void mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t, tgt_resource_t *, int); static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *); static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *); static mpt_reply_handler_t mpt_scsi_tgt_reply_handler; static mpt_reply_handler_t mpt_sata_pass_reply_handler; static uint32_t 
scsi_io_handler_id = MPT_HANDLER_ID_NONE; static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE; static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE; static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE; static mpt_probe_handler_t mpt_cam_probe; static mpt_attach_handler_t mpt_cam_attach; static mpt_enable_handler_t mpt_cam_enable; static mpt_ready_handler_t mpt_cam_ready; static mpt_event_handler_t mpt_cam_event; static mpt_reset_handler_t mpt_cam_ioc_reset; static mpt_detach_handler_t mpt_cam_detach; static struct mpt_personality mpt_cam_personality = { .name = "mpt_cam", .probe = mpt_cam_probe, .attach = mpt_cam_attach, .enable = mpt_cam_enable, .ready = mpt_cam_ready, .event = mpt_cam_event, .reset = mpt_cam_ioc_reset, .detach = mpt_cam_detach, }; DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND); MODULE_DEPEND(mpt_cam, cam, 1, 1, 1); int mpt_enable_sata_wc = -1; TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc); static int mpt_cam_probe(struct mpt_softc *mpt) { int role; /* * Only attach to nodes that support the initiator or target role * (or want to) or have RAID physical devices that need CAM pass-thru * support. */ if (mpt->do_cfg_role) { role = mpt->cfg_role; } else { role = mpt->role; } if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 || (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) { return (0); } return (ENODEV); } static int mpt_cam_attach(struct mpt_softc *mpt) { struct cam_devq *devq; mpt_handler_t handler; int maxq; int error; MPT_LOCK(mpt); TAILQ_INIT(&mpt->request_timeout_list); maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))? mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt); handler.reply_handler = mpt_scsi_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &scsi_io_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } handler.reply_handler = mpt_scsi_tmf_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &scsi_tmf_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } /* * If we're fibre channel and could support target mode, we register * an ELS reply handler and give it resources. */ if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { handler.reply_handler = mpt_fc_els_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &fc_els_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } if (mpt_add_els_buffers(mpt) == FALSE) { error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } maxq -= mpt->els_cmds_allocated; } /* * If we support target mode, we register a reply handler for it, * but don't add command resources until we actually enable target * mode. */ if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { handler.reply_handler = mpt_scsi_tgt_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &mpt->scsi_tgt_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } } if (mpt->is_sas) { handler.reply_handler = mpt_sata_pass_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &sata_pass_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } } /* * We keep one request reserved for timeout TMF requests. */ mpt->tmf_req = mpt_get_request(mpt, FALSE); if (mpt->tmf_req == NULL) { mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } /* * Mark the request as free even though not on the free list. 
* There is only one TMF request allowed to be outstanding at * a time and the TMF routines perform their own allocation * tracking using the standard state flags. */ mpt->tmf_req->state = REQ_STATE_FREE; maxq--; /* * The rest of this is CAM foo, for which we need to drop our lock */ MPT_UNLOCK(mpt); if (mpt_spawn_recovery_thread(mpt) != 0) { mpt_prt(mpt, "Unable to spawn recovery thread!\n"); error = ENOMEM; goto cleanup; } /* * Create the device queue for our SIM(s). */ devq = cam_simq_alloc(maxq); if (devq == NULL) { mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n"); error = ENOMEM; goto cleanup; } /* * Construct our SIM entry. */ mpt->sim = mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); if (mpt->sim == NULL) { mpt_prt(mpt, "Unable to allocate CAM SIM!\n"); cam_simq_free(devq); error = ENOMEM; goto cleanup; } /* * Register exactly this bus. */ MPT_LOCK(mpt); if (xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) { mpt_prt(mpt, "Bus registration Failed!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "Unable to allocate Path!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } MPT_UNLOCK(mpt); /* * Only register a second bus for RAID physical * devices if the controller supports RAID. */ if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) { return (0); } /* * Create a "bus" to export all hidden disks to CAM. */ mpt->phydisk_sim = mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); if (mpt->phydisk_sim == NULL) { mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n"); error = ENOMEM; goto cleanup; } /* * Register this bus. */ MPT_LOCK(mpt); if (xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) != CAM_SUCCESS) { mpt_prt(mpt, "Physical Disk Bus registration Failed!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } if (xpt_create_path(&mpt->phydisk_path, NULL, cam_sim_path(mpt->phydisk_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } MPT_UNLOCK(mpt); mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n"); return (0); cleanup: mpt_cam_detach(mpt); return (error); } /* * Read FC configuration information */ static int mpt_read_config_info_fc(struct mpt_softc *mpt) { struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; char *topology = NULL; int rv; rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0, 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n", mpt->mpt_fcport_page0.Header.PageVersion, mpt->mpt_fcport_page0.Header.PageLength, mpt->mpt_fcport_page0.Header.PageNumber, mpt->mpt_fcport_page0.Header.PageType); rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header, sizeof(mpt->mpt_fcport_page0), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read FC Port Page 0\n"); return (-1); } mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0); switch (mpt->mpt_fcport_page0.CurrentSpeed) { case MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT: mpt->mpt_fcport_speed = 1; break; case MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT: mpt->mpt_fcport_speed = 2; break; case MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT: mpt->mpt_fcport_speed = 10; break; case MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT: mpt->mpt_fcport_speed = 4; break; default: mpt->mpt_fcport_speed = 0; break; } switch (mpt->mpt_fcport_page0.Flags & 
MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) { case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT: mpt->mpt_fcport_speed = 0; topology = ""; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT: topology = "N-Port"; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP: topology = "NL-Port"; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT: topology = "F-Port"; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP: topology = "FL-Port"; break; default: mpt->mpt_fcport_speed = 0; topology = "?"; break; } mpt->scinfo.fc.wwnn = ((uint64_t)mpt->mpt_fcport_page0.WWNN.High << 32) | mpt->mpt_fcport_page0.WWNN.Low; mpt->scinfo.fc.wwpn = ((uint64_t)mpt->mpt_fcport_page0.WWPN.High << 32) | mpt->mpt_fcport_page0.WWPN.Low; mpt->scinfo.fc.portid = mpt->mpt_fcport_page0.PortIdentifier; mpt_lprt(mpt, MPT_PRT_INFO, "FC Port Page 0: Topology <%s> WWNN 0x%16jx WWPN 0x%16jx " "Speed %u-Gbit\n", topology, (uintmax_t)mpt->scinfo.fc.wwnn, (uintmax_t)mpt->scinfo.fc.wwpn, mpt->mpt_fcport_speed); MPT_UNLOCK(mpt); ctx = device_get_sysctl_ctx(mpt->dev); tree = device_get_sysctl_tree(mpt->dev); SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwnn", CTLFLAG_RD, &mpt->scinfo.fc.wwnn, "World Wide Node Name"); SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwpn", CTLFLAG_RD, &mpt->scinfo.fc.wwpn, "World Wide Port Name"); MPT_LOCK(mpt); return (0); } /* * Set FC configuration information. */ static int mpt_set_initial_config_fc(struct mpt_softc *mpt) { CONFIG_PAGE_FC_PORT_1 fc; U32 fl; int r, doit = 0; int role; r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0, &fc.Header, FALSE, 5000); if (r) { mpt_prt(mpt, "failed to read FC page 1 header\n"); return (mpt_fc_reset_link(mpt, 1)); } r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0, &fc.Header, sizeof (fc), FALSE, 5000); if (r) { mpt_prt(mpt, "failed to read FC page 1\n"); return (mpt_fc_reset_link(mpt, 1)); } mpt2host_config_page_fc_port_1(&fc); /* * Check our flags to make sure we support the role we want. 
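 *
 * Illustrative example (flag values assumed, not read from any real
 * card):
 *
 *        fl   = MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;     // from NVRAM
 *        role = MPT_ROLE_INITIATOR;                      // decoded from fl
 *        // mpt->cfg_role == (MPT_ROLE_INITIATOR | MPT_ROLE_TARGET), so:
 *        fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;      // "adding target role"
 *        doit++;                                         // page must be rewritten
 *
 * and the updated page is written back to NVRAM below, taking effect
 * at the next reboot or IOC reset.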
*/ doit = 0; role = 0; fl = fc.Flags; if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) { role |= MPT_ROLE_INITIATOR; } if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { role |= MPT_ROLE_TARGET; } fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK; if (mpt->do_cfg_role == 0) { role = mpt->cfg_role; } else { mpt->do_cfg_role = 0; } if (role != mpt->cfg_role) { if (mpt->cfg_role & MPT_ROLE_INITIATOR) { if ((role & MPT_ROLE_INITIATOR) == 0) { mpt_prt(mpt, "adding initiator role\n"); fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT; doit++; } else { mpt_prt(mpt, "keeping initiator role\n"); } } else if (role & MPT_ROLE_INITIATOR) { mpt_prt(mpt, "removing initiator role\n"); doit++; } if (mpt->cfg_role & MPT_ROLE_TARGET) { if ((role & MPT_ROLE_TARGET) == 0) { mpt_prt(mpt, "adding target role\n"); fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG; doit++; } else { mpt_prt(mpt, "keeping target role\n"); } } else if (role & MPT_ROLE_TARGET) { mpt_prt(mpt, "removing target role\n"); doit++; } mpt->role = mpt->cfg_role; } if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) { mpt_prt(mpt, "adding OXID option\n"); fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID; doit++; } } if (doit) { fc.Flags = fl; host2mpt_config_page_fc_port_1(&fc); r = mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header, sizeof(fc), FALSE, 5000); if (r != 0) { mpt_prt(mpt, "failed to update NVRAM with changes\n"); return (0); } mpt_prt(mpt, "NOTE: NVRAM changes will not take " "effect until next reboot or IOC reset\n"); } return (0); } static int mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo) { ConfigExtendedPageHeader_t hdr; struct mptsas_phyinfo *phyinfo; SasIOUnitPage0_t *buffer; int error, len, i; error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION, 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, &hdr, 0, 10000); if (error) goto out; if (hdr.ExtPageLength == 0) { error = ENXIO; goto out; } len = hdr.ExtPageLength * 4; buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO); if (buffer == NULL) { error = ENOMEM; goto out; } error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 0, &hdr, buffer, len, 0, 10000); if (error) { free(buffer, M_DEVBUF); goto out; } portinfo->num_phys = buffer->NumPhys; portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) * portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO); if (portinfo->phy_info == NULL) { free(buffer, M_DEVBUF); error = ENOMEM; goto out; } for (i = 0; i < portinfo->num_phys; i++) { phyinfo = &portinfo->phy_info[i]; phyinfo->phy_num = i; phyinfo->port_id = buffer->PhyData[i].Port; phyinfo->negotiated_link_rate = buffer->PhyData[i].NegotiatedLinkRate; phyinfo->handle = le16toh(buffer->PhyData[i].ControllerDevHandle); } free(buffer, M_DEVBUF); out: return (error); } static int mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info, uint32_t form, uint32_t form_specific) { ConfigExtendedPageHeader_t hdr; SasPhyPage0_t *buffer; int error; error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr, 0, 10000); if (error) goto out; if (hdr.ExtPageLength == 0) { error = ENXIO; goto out; } buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); if (buffer == NULL) { error = ENOMEM; goto out; } error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, form + form_specific, &hdr, buffer, sizeof(SasPhyPage0_t), 0, 10000); if (error) { free(buffer, M_DEVBUF); goto out; } phy_info->hw_link_rate = buffer->HwLinkRate; 
phy_info->programmed_link_rate = buffer->ProgrammedLinkRate; phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle); phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle); free(buffer, M_DEVBUF); out: return (error); } static int mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info, uint32_t form, uint32_t form_specific) { ConfigExtendedPageHeader_t hdr; SasDevicePage0_t *buffer; uint64_t sas_address; int error = 0; bzero(device_info, sizeof(*device_info)); error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, &hdr, 0, 10000); if (error) goto out; if (hdr.ExtPageLength == 0) { error = ENXIO; goto out; } buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); if (buffer == NULL) { error = ENOMEM; goto out; } error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, form + form_specific, &hdr, buffer, sizeof(SasDevicePage0_t), 0, 10000); if (error) { free(buffer, M_DEVBUF); goto out; } device_info->dev_handle = le16toh(buffer->DevHandle); device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle); device_info->enclosure_handle = le16toh(buffer->EnclosureHandle); device_info->slot = le16toh(buffer->Slot); device_info->phy_num = buffer->PhyNum; device_info->physical_port = buffer->PhysicalPort; device_info->target_id = buffer->TargetID; device_info->bus = buffer->Bus; bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t)); device_info->sas_address = le64toh(sas_address); device_info->device_info = le32toh(buffer->DeviceInfo); free(buffer, M_DEVBUF); out: return (error); } /* * Read SAS configuration information. Nothing to do yet. */ static int mpt_read_config_info_sas(struct mpt_softc *mpt) { struct mptsas_portinfo *portinfo; struct mptsas_phyinfo *phyinfo; int error, i; portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO); if (portinfo == NULL) return (ENOMEM); error = mptsas_sas_io_unit_pg0(mpt, portinfo); if (error) { free(portinfo, M_DEVBUF); return (0); } for (i = 0; i < portinfo->num_phys; i++) { phyinfo = &portinfo->phy_info[i]; error = mptsas_sas_phy_pg0(mpt, phyinfo, (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER << MPI_SAS_PHY_PGAD_FORM_SHIFT), i); if (error) break; error = mptsas_sas_device_pg0(mpt, &phyinfo->identify, (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << MPI_SAS_DEVICE_PGAD_FORM_SHIFT), phyinfo->handle); if (error) break; phyinfo->identify.phy_num = phyinfo->phy_num = i; if (phyinfo->attached.dev_handle) error = mptsas_sas_device_pg0(mpt, &phyinfo->attached, (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << MPI_SAS_DEVICE_PGAD_FORM_SHIFT), phyinfo->attached.dev_handle); if (error) break; } mpt->sas_portinfo = portinfo; return (0); } static void mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo, int enabled) { SataPassthroughRequest_t *pass; request_t *req; int error, status; req = mpt_get_request(mpt, 0); if (req == NULL) return; pass = req->req_vbuf; bzero(pass, sizeof(SataPassthroughRequest_t)); pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH; pass->TargetID = devinfo->target_id; pass->Bus = devinfo->bus; pass->PassthroughFlags = 0; pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED; pass->DataLength = 0; pass->MsgContext = htole32(req->index | sata_pass_handler_id); pass->CommandFIS[0] = 0x27; pass->CommandFIS[1] = 0x80; pass->CommandFIS[2] = 0xef; pass->CommandFIS[3] = (enabled) ? 
0x02 : 0x82; pass->CommandFIS[7] = 0x40; pass->CommandFIS[15] = 0x08; mpt_check_doorbell(mpt); mpt_send_cmd(mpt, req); error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0, 10 * 1000); if (error) { mpt_free_request(mpt, req); printf("error %d sending passthrough\n", error); return; } status = le16toh(req->IOCStatus); if (status != MPI_IOCSTATUS_SUCCESS) { mpt_free_request(mpt, req); printf("IOCSTATUS %d\n", status); return; } mpt_free_request(mpt, req); } /* * Set SAS configuration information. Nothing to do yet. */ static int mpt_set_initial_config_sas(struct mpt_softc *mpt) { struct mptsas_phyinfo *phyinfo; int i; if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) { for (i = 0; i < mpt->sas_portinfo->num_phys; i++) { phyinfo = &mpt->sas_portinfo->phy_info[i]; if (phyinfo->attached.dev_handle == 0) continue; if ((phyinfo->attached.device_info & MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0) continue; if (bootverbose) device_printf(mpt->dev, "%sabling SATA WC on phy %d\n", (mpt_enable_sata_wc) ? "En" : "Dis", i); mptsas_set_sata_wc(mpt, &phyinfo->attached, mpt_enable_sata_wc); } } return (0); } static int mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { if (req != NULL) { if (reply_frame != NULL) { req->IOCStatus = le16toh(reply_frame->IOCStatus); } req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; TAILQ_REMOVE(&mpt->request_pending_list, req, links); if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { wakeup(req); } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { /* * Whew- we can free this request (late completion) */ mpt_free_request(mpt, req); } } return (TRUE); } /* * Read SCSI configuration information */ static int mpt_read_config_info_spi(struct mpt_softc *mpt) { int rv, i; rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0, &mpt->mpt_port_page0.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n", mpt->mpt_port_page0.Header.PageVersion, mpt->mpt_port_page0.Header.PageLength, mpt->mpt_port_page0.Header.PageNumber, mpt->mpt_port_page0.Header.PageType); rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0, &mpt->mpt_port_page1.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", mpt->mpt_port_page1.Header.PageVersion, mpt->mpt_port_page1.Header.PageLength, mpt->mpt_port_page1.Header.PageNumber, mpt->mpt_port_page1.Header.PageType); rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0, &mpt->mpt_port_page2.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n", mpt->mpt_port_page2.Header.PageVersion, mpt->mpt_port_page2.Header.PageLength, mpt->mpt_port_page2.Header.PageNumber, mpt->mpt_port_page2.Header.PageType); for (i = 0; i < 16; i++) { rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i, mpt->mpt_dev_page0[i].Header.PageVersion, mpt->mpt_dev_page0[i].Header.PageLength, mpt->mpt_dev_page0[i].Header.PageNumber, mpt->mpt_dev_page0[i].Header.PageType); rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i, 
mpt->mpt_dev_page1[i].Header.PageVersion, mpt->mpt_dev_page1[i].Header.PageLength, mpt->mpt_dev_page1[i].Header.PageNumber, mpt->mpt_dev_page1[i].Header.PageType); } /* * At this point, we don't *have* to fail. As long as we have * valid config header information, we can (barely) lurch * along. */ rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header, sizeof(mpt->mpt_port_page0), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read SPI Port Page 0\n"); } else { mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0); mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n", mpt->mpt_port_page0.Capabilities, mpt->mpt_port_page0.PhysicalInterface); } rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header, sizeof(mpt->mpt_port_page1), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read SPI Port Page 1\n"); } else { mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1); mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n", mpt->mpt_port_page1.Configuration, mpt->mpt_port_page1.OnBusTimerValue); } rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header, sizeof(mpt->mpt_port_page2), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read SPI Port Page 2\n"); } else { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "Port Page 2: Flags %x Settings %x\n", mpt->mpt_port_page2.PortFlags, mpt->mpt_port_page2.PortSettings); mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2); for (i = 0; i < 16; i++) { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n", i, mpt->mpt_port_page2.DeviceSettings[i].Timeout, mpt->mpt_port_page2.DeviceSettings[i].SyncFactor, mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags); } } for (i = 0; i < 16; i++) { rv = mpt_read_cur_cfg_page(mpt, i, &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0), FALSE, 5000); if (rv) { mpt_prt(mpt, "cannot read SPI Target %d Device Page 0\n", i); continue; } mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]); mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "target %d page 0: Negotiated Params %x Information %x\n", i, mpt->mpt_dev_page0[i].NegotiatedParameters, mpt->mpt_dev_page0[i].Information); rv = mpt_read_cur_cfg_page(mpt, i, &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1), FALSE, 5000); if (rv) { mpt_prt(mpt, "cannot read SPI Target %d Device Page 1\n", i); continue; } mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]); mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "target %d page 1: Requested Params %x Configuration %x\n", i, mpt->mpt_dev_page1[i].RequestedParameters, mpt->mpt_dev_page1[i].Configuration); } return (0); } /* * Validate SPI configuration information. * * In particular, validate SPI Port Page 1. 
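 *
 * The Configuration field appears to pack the port's own SCSI ID into
 * the low byte and a one-hot "port response ID" mask into the upper
 * bits.  A worked example, assuming
 * MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID expands to 16: with
 * mpt_ini_id == 7,
 *
 *        pp1val = ((1 << 7) << 16) | 7 = 0x00800007
 *
 * and the code below rewrites the page whenever the value read from
 * the adapter does not match the host's own SCSI ID.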
*/ static int mpt_set_initial_config_spi(struct mpt_softc *mpt) { int error, i, pp1val; mpt->mpt_disc_enable = 0xff; mpt->mpt_tag_enable = 0; pp1val = ((1 << mpt->mpt_ini_id) << MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id; if (mpt->mpt_port_page1.Configuration != pp1val) { CONFIG_PAGE_SCSI_PORT_1 tmp; mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should " "be %x\n", mpt->mpt_port_page1.Configuration, pp1val); tmp = mpt->mpt_port_page1; tmp.Configuration = pp1val; host2mpt_config_page_scsi_port_1(&tmp); error = mpt_write_cur_cfg_page(mpt, 0, &tmp.Header, sizeof(tmp), FALSE, 5000); if (error) { return (-1); } error = mpt_read_cur_cfg_page(mpt, 0, &tmp.Header, sizeof(tmp), FALSE, 5000); if (error) { return (-1); } mpt2host_config_page_scsi_port_1(&tmp); if (tmp.Configuration != pp1val) { mpt_prt(mpt, "failed to reset SPI Port Page 1 Config value\n"); return (-1); } mpt->mpt_port_page1 = tmp; } /* * The purpose of this exercise is to get * all targets back to async/narrow. * * We skip this step if the BIOS has already negotiated * speeds with the targets. */ i = mpt->mpt_port_page2.PortSettings & MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "honoring BIOS transfer negotiations\n"); } else { for (i = 0; i < 16; i++) { mpt->mpt_dev_page1[i].RequestedParameters = 0; mpt->mpt_dev_page1[i].Configuration = 0; (void) mpt_update_spi_config(mpt, i); } } return (0); } static int mpt_cam_enable(struct mpt_softc *mpt) { int error; MPT_LOCK(mpt); error = EIO; if (mpt->is_fc) { if (mpt_read_config_info_fc(mpt)) { goto out; } if (mpt_set_initial_config_fc(mpt)) { goto out; } } else if (mpt->is_sas) { if (mpt_read_config_info_sas(mpt)) { goto out; } if (mpt_set_initial_config_sas(mpt)) { goto out; } } else if (mpt->is_spi) { if (mpt_read_config_info_spi(mpt)) { goto out; } if (mpt_set_initial_config_spi(mpt)) { goto out; } } error = 0; out: MPT_UNLOCK(mpt); return (error); } static void mpt_cam_ready(struct mpt_softc *mpt) { /* * If we're in target mode, hang out resources now * so we don't cause the world to hang talking to us. 
*/ if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { /* * Try to add some target command resources */ MPT_LOCK(mpt); if (mpt_add_target_commands(mpt) == FALSE) { mpt_prt(mpt, "failed to add target commands\n"); } MPT_UNLOCK(mpt); } mpt->ready = 1; } static void mpt_cam_detach(struct mpt_softc *mpt) { mpt_handler_t handler; MPT_LOCK(mpt); mpt->ready = 0; mpt_terminate_recovery_thread(mpt); handler.reply_handler = mpt_scsi_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, scsi_io_handler_id); handler.reply_handler = mpt_scsi_tmf_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, scsi_tmf_handler_id); handler.reply_handler = mpt_fc_els_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, fc_els_handler_id); handler.reply_handler = mpt_scsi_tgt_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, mpt->scsi_tgt_handler_id); handler.reply_handler = mpt_sata_pass_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, sata_pass_handler_id); if (mpt->tmf_req != NULL) { mpt->tmf_req->state = REQ_STATE_ALLOCATED; mpt_free_request(mpt, mpt->tmf_req); mpt->tmf_req = NULL; } if (mpt->sas_portinfo != NULL) { free(mpt->sas_portinfo, M_DEVBUF); mpt->sas_portinfo = NULL; } if (mpt->sim != NULL) { xpt_free_path(mpt->path); xpt_bus_deregister(cam_sim_path(mpt->sim)); cam_sim_free(mpt->sim, TRUE); mpt->sim = NULL; } if (mpt->phydisk_sim != NULL) { xpt_free_path(mpt->phydisk_path); xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim)); cam_sim_free(mpt->phydisk_sim, TRUE); mpt->phydisk_sim = NULL; } MPT_UNLOCK(mpt); } /* This routine is used after a system crash to dump core onto the swap device. */ static void mpt_poll(struct cam_sim *sim) { struct mpt_softc *mpt; mpt = (struct mpt_softc *)cam_sim_softc(sim); mpt_intr(mpt); } /* * Watchdog timeout routine for SCSI requests. */ static void mpt_timeout(void *arg) { union ccb *ccb; struct mpt_softc *mpt; request_t *req; ccb = (union ccb *)arg; mpt = ccb->ccb_h.ccb_mpt_ptr; MPT_LOCK_ASSERT(mpt); req = ccb->ccb_h.ccb_req_ptr; mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req, req->serno, ccb, req->ccb); /* XXX: WHAT ARE WE TRYING TO DO HERE? */ if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) { TAILQ_REMOVE(&mpt->request_pending_list, req, links); TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links); req->state |= REQ_STATE_TIMEDOUT; mpt_wakeup_recovery_thread(mpt); } } /* * Callback routine from bus_dmamap_load_ccb(9) or, in simple cases, called * directly. * * Takes a list of physical segments and builds the SGL for SCSI IO command * and forwards the command to the IOC after one last check that CAM has not * aborted the transaction.
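 *
 * A minimal sketch of how a callback like this is normally handed to
 * bus_dmamap_load_ccb(9) (argument values are illustrative, not copied
 * verbatim from mpt_start() below):
 *
 *        error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb,
 *            cb, req, 0);
 *        if (error == EINPROGRESS) {
 *                // deferred: the callback runs later, once DMA
 *                // resources become available; mapping errors reach
 *                // the callback through its "error" argument
 *        }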
*/ static void mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { request_t *req, *trq; char *mpt_off; union ccb *ccb; struct mpt_softc *mpt; bus_addr_t chain_list_addr; int first_lim, seg, this_seg_lim; uint32_t addr, cur_off, flags, nxt_off, tf; void *sglp = NULL; MSG_REQUEST_HEADER *hdrp; SGE_SIMPLE64 *se; SGE_CHAIN64 *ce; int istgt = 0; req = (request_t *)arg; ccb = req->ccb; mpt = ccb->ccb_h.ccb_mpt_ptr; req = ccb->ccb_h.ccb_req_ptr; hdrp = req->req_vbuf; mpt_off = req->req_vbuf; if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; } if (error == 0) { switch (hdrp->Function) { case MPI_FUNCTION_SCSI_IO_REQUEST: case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: istgt = 0; sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; break; case MPI_FUNCTION_TARGET_ASSIST: istgt = 1; sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; break; default: mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n", hdrp->Function); error = EINVAL; break; } } if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; mpt_prt(mpt, "segment count %d too large (max %u)\n", nseg, mpt->max_seg_cnt); } bad: if (error != 0) { if (error != EFBIG && error != ENOMEM) { mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { cam_status status; mpt_freeze_ccb(ccb); if (error == EFBIG) { status = CAM_REQ_TOO_BIG; } else if (error == ENOMEM) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } status = CAM_REQUEUE_REQ; } else { status = CAM_REQ_CMP_ERR; } mpt_set_ccb_status(ccb, status); } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); mpt_free_request(mpt, req); return; } /* * No data to transfer? * Just make a single simple SGL with zero length. */ if (mpt->verbose >= MPT_PRT_DEBUG) { int tidx = ((char *)sglp) - mpt_off; memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); } if (nseg == 0) { SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; MPI_pSGE_SET_FLAGS(se1, (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); se1->FlagsLength = htole32(se1->FlagsLength); goto out; } flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; if (istgt == 0) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if (istgt == 0) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREWRITE; } else { op = BUS_DMASYNC_PREREAD; } } bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); } /* * Okay, fill in what we can at the end of the command frame. * If we have up to MPT_NSGL_FIRST, we can fit them all into * the command frame. * * Otherwise, we fill up through MPT_NSGL_FIRST less one * SIMPLE64 pointers and start doing CHAIN64 entries after * that. 
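 *
 * A small worked example (the value of MPT_NSGL_FIRST() is assumed
 * here, not taken from a real controller): suppose
 * MPT_NSGL_FIRST(mpt) == 10.  With nseg == 4 all four SIMPLE64
 * elements fit in the command frame and no chain is needed; with
 * nseg == 25,
 *
 *        first_lim = MPT_NSGL_FIRST(mpt) - 1 = 9
 *
 * so nine segments go into the frame, the tenth SGE slot holds the
 * CHAIN64 element, and the remaining 16 segments are described in the
 * chained frames built further below.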
*/ if (nseg < MPT_NSGL_FIRST(mpt)) { first_lim = nseg; } else { /* * Leave room for CHAIN element */ first_lim = MPT_NSGL_FIRST(mpt) - 1; } se = (SGE_SIMPLE64 *) sglp; for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { tf = flags; memset(se, 0, sizeof (*se)); MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); if (sizeof(bus_addr_t) > 4) { addr = ((uint64_t)dm_segs->ds_addr) >> 32; /* SAS1078 36GB limitation WAR */ if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr + MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) { addr |= (1U << 31); tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; } se->Address.High = htole32(addr); } if (seg == first_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se->FlagsLength = htole32(se->FlagsLength); } if (seg == nseg) { goto out; } /* * Tell the IOC where to find the first chain element. */ hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; nxt_off = MPT_RQSL(mpt); trq = req; /* * Make up the rest of the data segments out of a chain element * (contained in the current request frame) which points to * SIMPLE64 elements in the next request frame, possibly ending * with *another* chain element (if there's more). */ while (seg < nseg) { /* * Point to the chain descriptor. Note that the chain * descriptor is at the end of the *previous* list (whether * chain or simple). */ ce = (SGE_CHAIN64 *) se; /* * Before we change our current pointer, make sure we won't * overflow the request area with this frame. Note that we * test against 'greater than' here as it's okay in this case * to have next offset be just outside the request area. */ if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { nxt_off = MPT_REQUEST_AREA; goto next_chain; } /* * Set our SGE element pointer to the beginning of the chain * list and update our next chain list offset. */ se = (SGE_SIMPLE64 *) &mpt_off[nxt_off]; cur_off = nxt_off; nxt_off += MPT_RQSL(mpt); /* * Now initialize the chain descriptor. */ memset(ce, 0, sizeof (*ce)); /* * Get the physical address of the chain list. */ chain_list_addr = trq->req_pbuf; chain_list_addr += cur_off; if (sizeof (bus_addr_t) > 4) { ce->Address.High = htole32(((uint64_t)chain_list_addr) >> 32); } ce->Address.Low = htole32(chain_list_addr & 0xffffffff); ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; /* * If we have more than a frame's worth of segments left, * set up the chain list to have the last element be another * chain descriptor. */ if ((nseg - seg) > MPT_NSGL(mpt)) { this_seg_lim = seg + MPT_NSGL(mpt) - 1; /* * The length of the chain is the length in bytes of the * number of segments plus the next chain element. * * The next chain descriptor offset is the length, * in words, of the number of segments. */ ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE64); ce->NextChainOffset = ce->Length >> 2; ce->Length += sizeof (SGE_CHAIN64); } else { this_seg_lim = nseg; ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE64); } ce->Length = htole16(ce->Length); /* * Fill in the chain list SGE elements with our segment data. * * If we're the last element in this chain list, set the last * element flag. If we're the completely last element period, * set the end of list and end of buffer flags. 
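 *
 * To make the chain sizing above concrete (a sketch assuming the usual
 * 12-byte SGE_SIMPLE64 and SGE_CHAIN64 layouts): if 40 segments remain
 * and MPT_NSGL(mpt) == 16, this chain list carries
 * this_seg_lim - seg == 15 segments, so
 *
 *        ce->Length          = 15 * 12  = 180 bytes
 *        ce->NextChainOffset = 180 >> 2 = 45 (32-bit words)
 *        ce->Length          = 180 + 12 = 192 bytes, trailing chain included
 *
 * and the descriptor that starts 45 words into the new frame is itself
 * another CHAIN64 element covering the remaining 25 segments.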
*/ while (seg < this_seg_lim) { tf = flags; memset(se, 0, sizeof (*se)); MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); if (sizeof (bus_addr_t) > 4) { addr = ((uint64_t)dm_segs->ds_addr) >> 32; /* SAS1078 36GB limitation WAR */ if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr + MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) { addr |= (1U << 31); tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; } se->Address.High = htole32(addr); } if (seg == this_seg_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se->FlagsLength = htole32(se->FlagsLength); se++; seg++; dm_segs++; } next_chain: /* * If we have more segments to do and we've used up all of * the space in a request area, go allocate another one * and chain to that. */ if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { request_t *nrq; nrq = mpt_get_request(mpt, FALSE); if (nrq == NULL) { error = ENOMEM; goto bad; } /* * Append the new request area on the tail of our list. */ if ((trq = req->chain) == NULL) { req->chain = nrq; } else { while (trq->chain != NULL) { trq = trq->chain; } trq->chain = nrq; } trq = nrq; mpt_off = trq->req_vbuf; if (mpt->verbose >= MPT_PRT_DEBUG) { memset(mpt_off, 0xff, MPT_REQUEST_AREA); } nxt_off = 0; } } out: /* * Last time we need to check if this CCB needs to be aborted. */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } mpt_prt(mpt, "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n", ccb->ccb_h.status & CAM_STATUS_MASK); if (nseg) { bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); mpt_free_request(mpt, req); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout, mpt_timeout, ccb); } if (mpt->verbose > MPT_PRT_DEBUG) { int nc = 0; mpt_print_request(req->req_vbuf); for (trq = req->chain; trq; trq = trq->chain) { printf(" Additional Chain Area %d\n", nc++); mpt_dump_sgl(trq->req_vbuf, 0); } } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); #ifdef WE_TRUST_AUTO_GOOD_STATUS if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; } else { tgt->state = TGT_STATE_MOVING_DATA; } #else tgt->state = TGT_STATE_MOVING_DATA; #endif } mpt_send_cmd(mpt, req); } static void mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { request_t *req, *trq; char *mpt_off; union ccb *ccb; struct mpt_softc *mpt; int seg, first_lim; uint32_t flags, nxt_off; void *sglp = NULL; MSG_REQUEST_HEADER *hdrp; SGE_SIMPLE32 *se; SGE_CHAIN32 *ce; int istgt = 0; req = (request_t *)arg; ccb = req->ccb; mpt = ccb->ccb_h.ccb_mpt_ptr; req = ccb->ccb_h.ccb_req_ptr; hdrp = req->req_vbuf; mpt_off = req->req_vbuf; if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; } if (error == 0) { switch (hdrp->Function) { case MPI_FUNCTION_SCSI_IO_REQUEST: case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: sglp = 
&((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; break; case MPI_FUNCTION_TARGET_ASSIST: istgt = 1; sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; break; default: mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n", hdrp->Function); error = EINVAL; break; } } if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; mpt_prt(mpt, "segment count %d too large (max %u)\n", nseg, mpt->max_seg_cnt); } bad: if (error != 0) { if (error != EFBIG && error != ENOMEM) { mpt_prt(mpt, "mpt_execute_req: err %d\n", error); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { cam_status status; mpt_freeze_ccb(ccb); if (error == EFBIG) { status = CAM_REQ_TOO_BIG; } else if (error == ENOMEM) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } status = CAM_REQUEUE_REQ; } else { status = CAM_REQ_CMP_ERR; } mpt_set_ccb_status(ccb, status); } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); mpt_free_request(mpt, req); return; } /* * No data to transfer? * Just make a single simple SGL with zero length. */ if (mpt->verbose >= MPT_PRT_DEBUG) { int tidx = ((char *)sglp) - mpt_off; memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); } if (nseg == 0) { SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; MPI_pSGE_SET_FLAGS(se1, (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); se1->FlagsLength = htole32(se1->FlagsLength); goto out; } flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; if (istgt == 0) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if (istgt) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREWRITE; } else { op = BUS_DMASYNC_PREREAD; } } bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); } /* * Okay, fill in what we can at the end of the command frame. * If we have up to MPT_NSGL_FIRST, we can fit them all into * the command frame. * * Otherwise, we fill up through MPT_NSGL_FIRST less one * SIMPLE32 pointers and start doing CHAIN32 entries after * that. */ if (nseg < MPT_NSGL_FIRST(mpt)) { first_lim = nseg; } else { /* * Leave room for CHAIN element */ first_lim = MPT_NSGL_FIRST(mpt) - 1; } se = (SGE_SIMPLE32 *) sglp; for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { uint32_t tf; memset(se, 0,sizeof (*se)); se->Address = htole32(dm_segs->ds_addr); MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); tf = flags; if (seg == first_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se->FlagsLength = htole32(se->FlagsLength); } if (seg == nseg) { goto out; } /* * Tell the IOC where to find the first chain element. 
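 *
 * ChainOffset is measured in 32-bit words from the start of the
 * request frame.  As a sketch (the offset is illustrative): if the
 * chain element begins 96 bytes into the frame, then
 *
 *        hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2 = 96 >> 2 = 24
 *
 * and the IOC fetches the CHAIN32 descriptor from word 24 of the frame.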
*/ hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; nxt_off = MPT_RQSL(mpt); trq = req; /* * Make up the rest of the data segments out of a chain element * (contained in the current request frame) which points to * SIMPLE32 elements in the next request frame, possibly ending * with *another* chain element (if there's more). */ while (seg < nseg) { int this_seg_lim; uint32_t tf, cur_off; bus_addr_t chain_list_addr; /* * Point to the chain descriptor. Note that the chain * descriptor is at the end of the *previous* list (whether * chain or simple). */ ce = (SGE_CHAIN32 *) se; /* * Before we change our current pointer, make sure we won't * overflow the request area with this frame. Note that we * test against 'greater than' here as it's okay in this case * to have next offset be just outside the request area. */ if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { nxt_off = MPT_REQUEST_AREA; goto next_chain; } /* * Set our SGE element pointer to the beginning of the chain * list and update our next chain list offset. */ se = (SGE_SIMPLE32 *) &mpt_off[nxt_off]; cur_off = nxt_off; nxt_off += MPT_RQSL(mpt); /* * Now initialize the chain descriptor. */ memset(ce, 0, sizeof (*ce)); /* * Get the physical address of the chain list. */ chain_list_addr = trq->req_pbuf; chain_list_addr += cur_off; ce->Address = htole32(chain_list_addr); ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; /* * If we have more than a frame's worth of segments left, * set up the chain list to have the last element be another * chain descriptor. */ if ((nseg - seg) > MPT_NSGL(mpt)) { this_seg_lim = seg + MPT_NSGL(mpt) - 1; /* * The length of the chain is the length in bytes of the * number of segments plus the next chain element. * * The next chain descriptor offset is the length, * in words, of the number of segments. */ ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE32); ce->NextChainOffset = ce->Length >> 2; ce->Length += sizeof (SGE_CHAIN32); } else { this_seg_lim = nseg; ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE32); } ce->Length = htole16(ce->Length); /* * Fill in the chain list SGE elements with our segment data. * * If we're the last element in this chain list, set the last * element flag. If we're the completely last element period, * set the end of list and end of buffer flags. */ while (seg < this_seg_lim) { memset(se, 0, sizeof (*se)); se->Address = htole32(dm_segs->ds_addr); MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); tf = flags; if (seg == this_seg_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se->FlagsLength = htole32(se->FlagsLength); se++; seg++; dm_segs++; } next_chain: /* * If we have more segments to do and we've used up all of * the space in a request area, go allocate another one * and chain to that. */ if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { request_t *nrq; nrq = mpt_get_request(mpt, FALSE); if (nrq == NULL) { error = ENOMEM; goto bad; } /* * Append the new request area on the tail of our list. */ if ((trq = req->chain) == NULL) { req->chain = nrq; } else { while (trq->chain != NULL) { trq = trq->chain; } trq->chain = nrq; } trq = nrq; mpt_off = trq->req_vbuf; if (mpt->verbose >= MPT_PRT_DEBUG) { memset(mpt_off, 0xff, MPT_REQUEST_AREA); } nxt_off = 0; } } out: /* * Last time we need to check if this CCB needs to be aborted. 
*/ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } mpt_prt(mpt, "mpt_execute_req: I/O cancelled (status 0x%x)\n", ccb->ccb_h.status & CAM_STATUS_MASK); if (nseg) { bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); mpt_free_request(mpt, req); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout, mpt_timeout, ccb); } if (mpt->verbose > MPT_PRT_DEBUG) { int nc = 0; mpt_print_request(req->req_vbuf); for (trq = req->chain; trq; trq = trq->chain) { printf(" Additional Chain Area %d\n", nc++); mpt_dump_sgl(trq->req_vbuf, 0); } } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); #ifdef WE_TRUST_AUTO_GOOD_STATUS if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; } else { tgt->state = TGT_STATE_MOVING_DATA; } #else tgt->state = TGT_STATE_MOVING_DATA; #endif } mpt_send_cmd(mpt, req); } static void mpt_start(struct cam_sim *sim, union ccb *ccb) { request_t *req; struct mpt_softc *mpt; MSG_SCSI_IO_REQUEST *mpt_req; struct ccb_scsiio *csio = &ccb->csio; struct ccb_hdr *ccbh = &ccb->ccb_h; bus_dmamap_callback_t *cb; target_id_t tgt; int raid_passthru; int error; /* Get the pointer for the physical adapter */ mpt = ccb->ccb_h.ccb_mpt_ptr; raid_passthru = (sim == mpt->phydisk_sim); if ((req = mpt_get_request(mpt, FALSE)) == NULL) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_done(ccb); return; } #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__); #endif if (sizeof (bus_addr_t) > 4) { cb = mpt_execute_req_a64; } else { cb = mpt_execute_req; } /* * Link the ccb and the request structure so we can find * the other knowing either the request or the ccb */ req->ccb = ccb; ccb->ccb_h.ccb_req_ptr = req; /* Now we build the command for the IOC */ mpt_req = req->req_vbuf; memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST)); mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST; if (raid_passthru) { mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } mpt_req->Bus = 0; /* we never set bus here */ } else { tgt = ccb->ccb_h.target_id; mpt_req->Bus = 0; /* XXX */ } mpt_req->SenseBufferLength = (csio->sense_len < MPT_SENSE_SIZE) ? csio->sense_len : MPT_SENSE_SIZE; /* * We use the message context to find the request structure when we * get the command completion interrupt from the IOC. 
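* (MsgContext is the request index OR'd with the handler id, so the reply handler can both dispatch on the handler id and locate the originating request_t.)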
*/ mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id); /* Which physical device to do the I/O on */ mpt_req->TargetID = tgt; be64enc(mpt_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); /* Set the direction of the transfer */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { mpt_req->Control = MPI_SCSIIO_CONTROL_READ; } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; } else { mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; } if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { switch(ccb->csio.tag_action) { case MSG_HEAD_OF_Q_TAG: mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; break; case MSG_ACA_TASK: mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; break; case MSG_ORDERED_Q_TAG: mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; break; case MSG_SIMPLE_Q_TAG: default: mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; break; } } else { if (mpt->is_fc || mpt->is_sas) { mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; } else { /* XXX No such thing for a target doing packetized. */ mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; } } if (mpt->is_spi) { if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; } } mpt_req->Control = htole32(mpt_req->Control); /* Copy the scsi command block into place */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len); } else { bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len); } mpt_req->CDBLength = csio->cdb_len; mpt_req->DataLength = htole32(csio->dxfer_len); mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf); /* * Do a *short* print here if we're set to MPT_PRT_DEBUG */ if (mpt->verbose == MPT_PRT_DEBUG) { U32 df; mpt_prt(mpt, "mpt_start: %s op 0x%x ", (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)? "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]); df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) { mpt_prtc(mpt, "(%s %u byte%s ", (df == MPI_SCSIIO_CONTROL_READ)? "read" : "write", csio->dxfer_len, (csio->dxfer_len == 1)? ")" : "s)"); } mpt_prtc(mpt, "tgt %u lun %jx req %p:%u\n", tgt, (uintmax_t)ccb->ccb_h.target_lun, req, req->serno); } error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb, req, 0); if (error == EINPROGRESS) { /* * So as to maintain ordering, freeze the controller queue * until our mapping is returned. */ xpt_freeze_simq(mpt->sim, 1); ccbh->status |= CAM_RELEASE_SIMQ; } } static int mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, int sleep_ok) { int error; uint16_t status; uint8_t response; error = mpt_scsi_send_tmf(mpt, (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, 0, /* XXX How do I get the channel ID? */ tgt != CAM_TARGET_WILDCARD ? tgt : 0, lun != CAM_LUN_WILDCARD ? lun : 0, 0, sleep_ok); if (error != 0) { /* * mpt_scsi_send_tmf hard resets on failure, so no * need to do so here. */ mpt_prt(mpt, "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); return (EIO); } /* Wait for bus reset to be processed by the IOC. */ error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, REQ_STATE_DONE, sleep_ok, 5000); status = le16toh(mpt->tmf_req->IOCStatus); response = mpt->tmf_req->ResponseCode; mpt->tmf_req->state = REQ_STATE_FREE; if (error) { mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. 
" "Resetting controller.\n"); mpt_reset(mpt, TRUE); return (ETIMEDOUT); } if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " "Resetting controller.\n", status); mpt_reset(mpt, TRUE); return (EIO); } if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. " "Resetting controller.\n", response); mpt_reset(mpt, TRUE); return (EIO); } return (0); } static int mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) { int r = 0; request_t *req; PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; req = mpt_get_request(mpt, FALSE); if (req == NULL) { return (ENOMEM); } fc = req->req_vbuf; memset(fc, 0, sizeof(*fc)); fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; fc->MsgContext = htole32(req->index | fc_els_handler_id); mpt_send_cmd(mpt, req); if (dowait) { r = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, FALSE, 60 * 1000); if (r == 0) { mpt_free_request(mpt, req); } } return (r); } static int mpt_cam_event(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) { uint32_t data0, data1; data0 = le32toh(msg->Data[0]); data1 = le32toh(msg->Data[1]); switch(msg->Event & 0xFF) { case MPI_EVENT_UNIT_ATTENTION: mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", (data0 >> 8) & 0xff, data0 & 0xff); break; case MPI_EVENT_IOC_BUS_RESET: /* We generated a bus reset */ mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", (data0 >> 8) & 0xff); xpt_async(AC_BUS_RESET, mpt->path, NULL); break; case MPI_EVENT_EXT_BUS_RESET: /* Someone else generated a bus reset */ mpt_prt(mpt, "External Bus Reset Detected\n"); /* * These replies don't return EventData like the MPI * spec says they do */ xpt_async(AC_BUS_RESET, mpt->path, NULL); break; case MPI_EVENT_RESCAN: { union ccb *ccb; uint32_t pathid; /* * In general this means a device has been added to the loop. */ mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); if (mpt->ready == 0) { break; } if (mpt->phydisk_sim) { pathid = cam_sim_path(mpt->phydisk_sim); } else { pathid = cam_sim_path(mpt->sim); } /* * Allocate a CCB, create a wildcard path for this bus, * and schedule a rescan. */ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mpt_prt(mpt, "unable to alloc CCB for rescan\n"); break; } if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create path for rescan\n"); xpt_free_ccb(ccb); break; } xpt_rescan(ccb); break; } case MPI_EVENT_LINK_STATUS_CHANGE: mpt_prt(mpt, "Port %d: LinkState: %s\n", (data1 >> 8) & 0xff, ((data0 & 0xff) == 0)? 
"Failed" : "Active"); break; case MPI_EVENT_LOOP_STATE_CHANGE: switch ((data0 >> 16) & 0xff) { case 0x01: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " "(Loop Initialization)\n", (data1 >> 8) & 0xff, (data0 >> 8) & 0xff, (data0 ) & 0xff); switch ((data0 >> 8) & 0xff) { case 0xF7: if ((data0 & 0xff) == 0xF7) { mpt_prt(mpt, "Device needs AL_PA\n"); } else { mpt_prt(mpt, "Device %02x doesn't like " "FC performance\n", data0 & 0xFF); } break; case 0xF8: if ((data0 & 0xff) == 0xF7) { mpt_prt(mpt, "Device had loop failure " "at its receiver prior to acquiring" " AL_PA\n"); } else { mpt_prt(mpt, "Device %02x detected loop" " failure at its receiver\n", data0 & 0xFF); } break; default: mpt_prt(mpt, "Device %02x requests that device " "%02x reset itself\n", data0 & 0xFF, (data0 >> 8) & 0xFF); break; } break; case 0x02: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " "LPE(%02x,%02x) (Loop Port Enable)\n", (data1 >> 8) & 0xff, /* Port */ (data0 >> 8) & 0xff, /* Character 3 */ (data0 ) & 0xff /* Character 4 */); break; case 0x03: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " "LPB(%02x,%02x) (Loop Port Bypass)\n", (data1 >> 8) & 0xff, /* Port */ (data0 >> 8) & 0xff, /* Character 3 */ (data0 ) & 0xff /* Character 4 */); break; default: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " "FC event (%02x %02x %02x)\n", (data1 >> 8) & 0xff, /* Port */ (data0 >> 16) & 0xff, /* Event */ (data0 >> 8) & 0xff, /* Character 3 */ (data0 ) & 0xff /* Character 4 */); } break; case MPI_EVENT_LOGOUT: mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", (data1 >> 8) & 0xff, data0); break; case MPI_EVENT_QUEUE_FULL: { struct cam_sim *sim; struct cam_path *tmppath; struct ccb_relsim crs; PTR_EVENT_DATA_QUEUE_FULL pqf; lun_id_t lun_id; pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data; pqf->CurrentDepth = le16toh(pqf->CurrentDepth); if (bootverbose) { mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x " "Depth %d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth); } if (mpt->phydisk_sim && mpt_is_raid_member(mpt, pqf->TargetID) != 0) { sim = mpt->phydisk_sim; } else { sim = mpt->sim; } for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), pqf->TargetID, lun_id) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create a path to send " "XPT_REL_SIMQ"); break; } xpt_setup_ccb(&crs.ccb_h, tmppath, 5); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.ccb_h.flags = CAM_DEV_QFREEZE; crs.release_flags = RELSIM_ADJUST_OPENINGS; crs.openings = pqf->CurrentDepth - 1; xpt_action((union ccb *)&crs); if (crs.ccb_h.status != CAM_REQ_CMP) { mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); } xpt_free_path(tmppath); } break; } case MPI_EVENT_IR_RESYNC_UPDATE: mpt_prt(mpt, "IR resync update %d completed\n", (data0 >> 16) & 0xff); break; case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: { union ccb *ccb; struct cam_sim *sim; struct cam_path *tmppath; PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc; psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data; if (mpt->phydisk_sim && mpt_is_raid_member(mpt, psdsc->TargetID) != 0) sim = mpt->phydisk_sim; else sim = mpt->sim; switch(psdsc->ReasonCode) { case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mpt_prt(mpt, "unable to alloc CCB for rescan\n"); break; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sim), psdsc->TargetID, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create path for rescan\n"); xpt_free_ccb(ccb); break; } xpt_rescan(ccb); break; case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: if (xpt_create_path(&tmppath, 
NULL, cam_sim_path(sim), psdsc->TargetID, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create path for async event"); break; } xpt_async(AC_LOST_DEVICE, tmppath, NULL); xpt_free_path(tmppath); break; case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET: case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL: case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: break; default: mpt_lprt(mpt, MPT_PRT_WARN, "SAS device status change: Bus: 0x%02x TargetID: " "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus, psdsc->TargetID, psdsc->ReasonCode); break; } break; } case MPI_EVENT_SAS_DISCOVERY_ERROR: { PTR_EVENT_DATA_DISCOVERY_ERROR pde; pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data; pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus); mpt_lprt(mpt, MPT_PRT_WARN, "SAS discovery error: Port: 0x%02x Status: 0x%08x\n", pde->Port, pde->DiscoveryStatus); break; } case MPI_EVENT_EVENT_CHANGE: case MPI_EVENT_INTEGRATED_RAID: case MPI_EVENT_IR2: case MPI_EVENT_LOG_ENTRY_ADDED: case MPI_EVENT_SAS_DISCOVERY: case MPI_EVENT_SAS_PHY_LINK_STATUS: case MPI_EVENT_SAS_SES: break; default: mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", msg->Event & 0xFF); return (0); } return (1); } /* * Reply path for all SCSI I/O requests, called from our * interrupt handler by extracting our handler index from * the MsgContext field of the reply from the IOC. * * This routine is optimized for the common case of a * completion without error. All exception handling is * offloaded to non-inlined helper routines to minimize * cache footprint. */ static int mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { MSG_SCSI_IO_REQUEST *scsi_req; union ccb *ccb; if (req->state == REQ_STATE_FREE) { mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); return (TRUE); } scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; ccb = req->ccb; if (ccb == NULL) { mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", req, req->serno); return (TRUE); } mpt_req_untimeout(req, mpt_timeout, ccb); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } if (reply_frame == NULL) { /* * Context only reply, completion without error status. */ ccb->csio.resid = 0; mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->csio.scsi_status = SCSI_STATUS_OK; } else { mpt_scsi_reply_frame_handler(mpt, req, reply_frame); } if (mpt->outofbeer) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { struct scsi_inquiry_data *iq = (struct scsi_inquiry_data *)ccb->csio.data_ptr; if (scsi_req->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { /* * Fake out the device type so that only the * pass-thru device will attach. 
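* (The peripheral-type bits of the returned INQUIRY data are overwritten with T_NODEVICE below, so the normal peripheral drivers skip the raw physical disk behind a RAID volume.)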
*/ iq->device &= ~0x1F; iq->device |= T_NODEVICE; } } if (mpt->verbose == MPT_PRT_DEBUG) { mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", req, req->serno); } KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); if ((req->state & REQ_STATE_TIMEDOUT) == 0) { TAILQ_REMOVE(&mpt->request_pending_list, req, links); } else { mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", req, req->serno); TAILQ_REMOVE(&mpt->request_timeout_list, req, links); } KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, ("CCB req needed wakeup")); #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); #endif mpt_free_request(mpt, req); return (TRUE); } static int mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); #endif tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; /* Record IOC Status and Response Code of TMF for any waiters. */ req->IOCStatus = le16toh(tmf_reply->IOCStatus); req->ResponseCode = tmf_reply->ResponseCode; mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n", req, req->serno, le16toh(tmf_reply->IOCStatus)); TAILQ_REMOVE(&mpt->request_pending_list, req, links); if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { req->state |= REQ_STATE_DONE; wakeup(req); } else { mpt->tmf_req->state = REQ_STATE_FREE; } return (TRUE); } /* * XXX: Move to definitions file */ #define ELS 0x22 #define FC4LS 0x32 #define ABTS 0x81 #define BA_ACC 0x84 #define LS_RJT 0x01 #define LS_ACC 0x02 #define PLOGI 0x03 #define LOGO 0x05 #define SRR 0x14 #define PRLI 0x20 #define PRLO 0x21 #define ADISC 0x52 #define RSCN 0x61 static void mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req, PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length) { uint32_t fl; MSG_LINK_SERVICE_RSP_REQUEST tmp; PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp; /* * We are going to reuse the ELS request to send this response back. */ rsp = &tmp; memset(rsp, 0, sizeof(*rsp)); #ifdef USE_IMMEDIATE_LINK_DATA /* * Apparently the IMMEDIATE stuff doesn't seem to work. */ rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE; #endif rsp->RspLength = length; rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP; rsp->MsgContext = htole32(req->index | fc_els_handler_id); /* * Copy over information from the original reply frame to * its correct place in the response. */ memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24); /* * And now copy back the temporary area to the original frame. */ memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST)); rsp = req->req_vbuf; #ifdef USE_IMMEDIATE_LINK_DATA memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length); #else { PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL; bus_addr_t paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); fl = MPI_SGE_FLAGS_HOST_TO_IOC | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; fl <<= MPI_SGE_FLAGS_SHIFT; fl |= (length); se->FlagsLength = htole32(fl); se->Address = htole32((uint32_t) paddr); } #endif /* * Send it on... 
*/ mpt_send_cmd(mpt, req); } static int mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp = (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame; U8 rctl; U8 type; U8 cmd; U16 status = le16toh(reply_frame->IOCStatus); U32 *elsbuf; int ioindex; int do_refresh = TRUE; #ifdef INVARIANTS KASSERT(mpt_req_on_free_list(mpt, req) == 0, ("fc_els_reply_handler: req %p:%u for function %x on freelist!", req, req->serno, rp->Function)); if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) { mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__); } else { mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__); } #endif mpt_lprt(mpt, MPT_PRT_DEBUG, "FC_ELS Complete: req %p:%u, reply %p function %x\n", req, req->serno, reply_frame, reply_frame->Function); if (status != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n", status, reply_frame->Function); if (status == MPI_IOCSTATUS_INVALID_STATE) { /* * XXX: to get around shutdown issue */ mpt->disabled = 1; return (TRUE); } return (TRUE); } /* * If the function is a link service response, we recycle the * response to be a refresh for a new link service request. * * The request pointer is bogus in this case and we have to fetch * it based upon the TransactionContext. */ if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) { /* Freddie Uncle Charlie Katie */ /* We don't get the IOINDEX as part of the Link Svc Rsp */ for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++) if (mpt->els_cmd_ptrs[ioindex] == req) { break; } KASSERT(ioindex < mpt->els_cmds_allocated, ("can't find my mommie!")); /* remove from active list as we're going to re-post it */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_post_els(mpt, req, ioindex); return (TRUE); } if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) { /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; if (req->state & REQ_STATE_TIMEDOUT) { mpt_lprt(mpt, MPT_PRT_DEBUG, "Sync Primitive Send Completed After Timeout\n"); mpt_free_request(mpt, req); } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) { mpt_lprt(mpt, MPT_PRT_DEBUG, "Async Primitive Send Complete\n"); mpt_free_request(mpt, req); } else { mpt_lprt(mpt, MPT_PRT_DEBUG, "Sync Primitive Send Complete- Waking Waiter\n"); wakeup(req); } return (TRUE); } if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) { mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x " "Length %d Message Flags %x\n", rp->Function, rp->Flags, rp->MsgLength, rp->MsgFlags); return (TRUE); } if (rp->MsgLength <= 5) { /* * This is just an ack of an original ELS buffer post */ mpt_lprt(mpt, MPT_PRT_DEBUG, "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno); return (TRUE); } rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT; type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT; elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)]; cmd = be32toh(elsbuf[0]) >> 24; if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) { mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n"); return (TRUE); } ioindex = le32toh(rp->TransactionContext); req = mpt->els_cmd_ptrs[ioindex]; if (rctl == ELS && type == 1) { switch (cmd) { case PRLI: /* * Send back a PRLI ACC */ mpt_prt(mpt, "PRLI from 0x%08x%08x\n", 
le32toh(rp->Wwn.PortNameHigh), le32toh(rp->Wwn.PortNameLow)); elsbuf[0] = htobe32(0x02100014); elsbuf[1] |= htobe32(0x00000100); elsbuf[4] = htobe32(0x00000002); if (mpt->role & MPT_ROLE_TARGET) elsbuf[4] |= htobe32(0x00000010); if (mpt->role & MPT_ROLE_INITIATOR) elsbuf[4] |= htobe32(0x00000020); /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_els_send_response(mpt, req, rp, 20); do_refresh = FALSE; break; case PRLO: memset(elsbuf, 0, 5 * (sizeof (U32))); elsbuf[0] = htobe32(0x02100014); elsbuf[1] = htobe32(0x08000100); mpt_prt(mpt, "PRLO from 0x%08x%08x\n", le32toh(rp->Wwn.PortNameHigh), le32toh(rp->Wwn.PortNameLow)); /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_els_send_response(mpt, req, rp, 20); do_refresh = FALSE; break; default: mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd); break; } } else if (rctl == ABTS && type == 0) { uint16_t rx_id = le16toh(rp->Rxid); uint16_t ox_id = le16toh(rp->Oxid); mpt_tgt_state_t *tgt; request_t *tgt_req = NULL; union ccb *ccb; uint32_t ct_id; mpt_prt(mpt, "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n", ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh), le32toh(rp->Wwn.PortNameLow)); if (rx_id >= mpt->mpt_max_tgtcmds) { mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id); } else if (mpt->tgt_cmd_ptrs == NULL) { mpt_prt(mpt, "No TGT CMD PTRS\n"); } else { tgt_req = mpt->tgt_cmd_ptrs[rx_id]; } if (tgt_req == NULL) { mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id); goto skip; } tgt = MPT_TGT_STATE(mpt, tgt_req); /* Check to make sure we have the correct command. */ ct_id = GET_IO_INDEX(tgt->reply_desc); if (ct_id != rx_id) { mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " "RX_ID received=0x%x, in cmd=0x%x\n", rx_id, ct_id); goto skip; } if (tgt->itag != ox_id) { mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " "OX_ID received=0x%x, in cmd=0x%x\n", ox_id, tgt->itag); goto skip; } if ((ccb = tgt->ccb) != NULL) { mpt_prt(mpt, "CCB (%p): lun %jx flags %x status %x\n", ccb, (uintmax_t)ccb->ccb_h.target_lun, ccb->ccb_h.flags, ccb->ccb_h.status); } mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd " "%x nxfers %x\n", tgt->state, tgt->resid, tgt->bytes_xfered, tgt->reply_desc, tgt->nxfers); if (mpt_abort_target_cmd(mpt, tgt_req)) mpt_prt(mpt, "unable to start TargetAbort\n"); skip: memset(elsbuf, 0, 5 * (sizeof (U32))); elsbuf[0] = htobe32(0); elsbuf[1] = htobe32((ox_id << 16) | rx_id); elsbuf[2] = htobe32(0x000ffff); /* * Dork with the reply frame so that the response to it * will be correct. */ rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT); /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_els_send_response(mpt, req, rp, 12); do_refresh = FALSE; } else { mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd); } if (do_refresh == TRUE) { /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_post_els(mpt, req, ioindex); } return (TRUE); } /* * Clean up all SCSI Initiator personality state in response * to a controller reset. */ static void mpt_cam_ioc_reset(struct mpt_softc *mpt, int type) { /* * The pending list is already run down by * the generic handler. 
Perform the same * operation on the timed out request list. */ mpt_complete_request_chain(mpt, &mpt->request_timeout_list, MPI_IOCSTATUS_INVALID_STATE); /* * XXX: We need to repost ELS and Target Command Buffers? */ /* * Inform the XPT that a bus reset has occurred. */ xpt_async(AC_BUS_RESET, mpt->path, NULL); } /* * Parse additional completion information in the reply * frame for SCSI I/O requests. */ static int mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req, MSG_DEFAULT_REPLY *reply_frame) { union ccb *ccb; MSG_SCSI_IO_REPLY *scsi_io_reply; u_int ioc_status; u_int sstate; MPT_DUMP_REPLY_FRAME(mpt, reply_frame); KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, ("MPT SCSI I/O Handler called with incorrect reply type")); KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0, ("MPT SCSI I/O Handler called with continuation reply")); scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame; ioc_status = le16toh(scsi_io_reply->IOCStatus); ioc_status &= MPI_IOCSTATUS_MASK; sstate = scsi_io_reply->SCSIState; ccb = req->ccb; ccb->csio.resid = ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount); if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) { uint32_t sense_returned; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; sense_returned = le32toh(scsi_io_reply->SenseCount); if (sense_returned < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - sense_returned; else ccb->csio.sense_resid = 0; bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data)); bcopy(req->sense_vbuf, &ccb->csio.sense_data, min(ccb->csio.sense_len, sense_returned)); } if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) { /* * Tag messages rejected, but non-tagged retry * was successful. XXXX mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE); */ } switch(ioc_status) { case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* * XXX * Linux driver indicates that a zero * transfer length with this error code * indicates a CRC error. * * No need to swap the bytes for checking * against zero. */ if (scsi_io_reply->TransferCount == 0) { mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); break; } /* FALLTHROUGH */ case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: case MPI_IOCSTATUS_SUCCESS: case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) { /* * Status was never returned for this transaction. */ mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE); } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) { ccb->csio.scsi_status = scsi_io_reply->SCSIStatus; mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR); if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0) mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL); } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) { /* XXX Handle SPI-Packet and FCP-2 response info. */ mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } else mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR); break; case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); break; case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* * Since selection timeouts and "device really not * there" are grouped into this error code, report * selection timeout. Selection timeouts are * typically retried before giving up on the device * whereas "device not there" errors are considered * unretryable. 
*/ mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); break; case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL); break; case MPI_IOCSTATUS_SCSI_INVALID_BUS: mpt_set_ccb_status(ccb, CAM_PATH_INVALID); break; case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: mpt_set_ccb_status(ccb, CAM_TID_INVALID); break; case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: ccb->ccb_h.status = CAM_UA_TERMIO; break; case MPI_IOCSTATUS_INVALID_STATE: /* * The IOC has been reset. Emulate a bus reset. */ /* FALLTHROUGH */ case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: ccb->ccb_h.status = CAM_SCSI_BUS_RESET; break; case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* * Don't clobber any timeout status that has * already been set for this transaction. We * want the SCSI layer to be able to differentiate * between the command we aborted due to timeout * and any innocent bystanders. */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) break; mpt_set_ccb_status(ccb, CAM_REQ_TERMIO); break; case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL); break; case MPI_IOCSTATUS_BUSY: mpt_set_ccb_status(ccb, CAM_BUSY); break; case MPI_IOCSTATUS_INVALID_FUNCTION: case MPI_IOCSTATUS_INVALID_SGL: case MPI_IOCSTATUS_INTERNAL_ERROR: case MPI_IOCSTATUS_INVALID_FIELD: default: /* XXX * Some of the above may need to kick * off a recovery action!!!! */ ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; break; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { mpt_freeze_ccb(ccb); } return (TRUE); } static void mpt_action(struct cam_sim *sim, union ccb *ccb) { struct mpt_softc *mpt; struct ccb_trans_settings *cts; target_id_t tgt; lun_id_t lun; int raid_passthru; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n")); mpt = (struct mpt_softc *)cam_sim_softc(sim); raid_passthru = (sim == mpt->phydisk_sim); MPT_LOCK_ASSERT(mpt); tgt = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; if (raid_passthru && ccb->ccb_h.func_code != XPT_PATH_INQ && ccb->ccb_h.func_code != XPT_RESET_BUS && ccb->ccb_h.func_code != XPT_RESET_DEV) { if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } } ccb->ccb_h.ccb_mpt_ptr = mpt; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ /* * Do a couple of preliminary checks... 
*/ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } } /* Max supported CDB length is 16 bytes */ /* XXX Unless we implement the new 32byte message type */ if (ccb->csio.cdb_len > sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } #ifdef MPT_TEST_MULTIPATH if (mpt->failure_id == ccb->ccb_h.target_id) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); break; } #endif ccb->csio.scsi_status = SCSI_STATUS_OK; mpt_start(sim, ccb); return; case XPT_RESET_BUS: if (raid_passthru) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } case XPT_RESET_DEV: if (ccb->ccb_h.func_code == XPT_RESET_BUS) { if (bootverbose) { xpt_print(ccb->ccb_h.path, "reset bus\n"); } } else { xpt_print(ccb->ccb_h.path, "reset device\n"); } (void) mpt_bus_reset(mpt, tgt, lun, FALSE); /* * mpt_bus_reset is always successful in that it * will fall back to a hard reset should a bus * reset attempt fail. */ ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; case XPT_ABORT: { union ccb *accb = ccb->cab.abort_ccb; switch (accb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMEDIATE_NOTIFY: ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb); break; case XPT_CONT_TARGET_IO: mpt_prt(mpt, "cannot abort active CTIOs yet\n"); ccb->ccb_h.status = CAM_UA_ABORT; break; case XPT_SCSI_IO: ccb->ccb_h.status = CAM_UA_ABORT; break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } break; } #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS) #define DP_DISC_ENABLE 0x1 #define DP_DISC_DISABL 0x2 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL) #define DP_TQING_ENABLE 0x4 #define DP_TQING_DISABL 0x8 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL) #define DP_WIDE 0x10 #define DP_NARROW 0x20 #define DP_WIDTH (DP_WIDE|DP_NARROW) #define DP_SYNC 0x40 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; uint8_t dval; u_int period; u_int offset; int i, j; cts = &ccb->cts; if (mpt->is_fc || mpt->is_sas) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; /* * We can be called just to validate transport and proto versions */ if (scsi->valid == 0 && spi->valid == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } /* * Skip attempting settings on RAID volume disks. * Other devices on the bus get the normal treatment. */ if (mpt->phydisk_sim && raid_passthru == 0 && mpt_is_raid_volume(mpt, tgt) != 0) { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "no transfer settings for RAID vols\n"); mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } i = mpt->mpt_port_page2.PortSettings & MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; j = mpt->mpt_port_page2.PortFlags & MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS && j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) { mpt_lprt(mpt, MPT_PRT_ALWAYS, "honoring BIOS transfer negotiations\n"); mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } dval = 0; period = 0; offset = 0; if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ? DP_DISC_ENABLE : DP_DISC_DISABL; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ? 
DP_TQING_ENABLE : DP_TQING_DISABL; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ? DP_WIDE : DP_NARROW; } if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { dval |= DP_SYNC; offset = spi->sync_offset; } else { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = &mpt->mpt_dev_page1[tgt]; offset = ptr->RequestedParameters; offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; } if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { dval |= DP_SYNC; period = spi->sync_period; } else { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = &mpt->mpt_dev_page1[tgt]; period = ptr->RequestedParameters; period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; } if (dval & DP_DISC_ENABLE) { mpt->mpt_disc_enable |= (1 << tgt); } else if (dval & DP_DISC_DISABL) { mpt->mpt_disc_enable &= ~(1 << tgt); } if (dval & DP_TQING_ENABLE) { mpt->mpt_tag_enable |= (1 << tgt); } else if (dval & DP_TQING_DISABL) { mpt->mpt_tag_enable &= ~(1 << tgt); } if (dval & DP_WIDTH) { mpt_setwidth(mpt, tgt, 1); } if (dval & DP_SYNC) { mpt_setsync(mpt, tgt, period, offset); } if (dval == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "set [%d]: 0x%x period 0x%x offset %d\n", tgt, dval, period, offset); if (mpt_update_spi_config(mpt, tgt)) { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } else { mpt_set_ccb_status(ccb, CAM_REQ_CMP); } break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings_scsi *scsi; cts = &ccb->cts; cts->protocol = PROTO_SCSI; if (mpt->is_fc) { struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; cts->protocol_version = SCSI_REV_SPC; cts->transport = XPORT_FC; cts->transport_version = 0; if (mpt->mpt_fcport_speed != 0) { fc->valid = CTS_FC_VALID_SPEED; fc->bitrate = 100000 * mpt->mpt_fcport_speed; } } else if (mpt->is_sas) { struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_SAS; cts->transport_version = 0; sas->valid = CTS_SAS_VALID_SPEED; sas->bitrate = 300000; } else { cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; if (mpt_get_spi_settings(mpt, cts) != 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); break; } } scsi = &cts->proto_specific.scsi; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; ccg = &ccb->ccg; if (ccg->block_size == 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } cam_calc_geometry(ccg, /* extended */ 1); KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); break; } case XPT_GET_SIM_KNOB: { struct ccb_sim_knob *kp = &ccb->knob; if (mpt->is_fc) { kp->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn; kp->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn; switch (mpt->role) { case MPT_ROLE_NONE: kp->xport_specific.fc.role = KNOB_ROLE_NONE; break; case MPT_ROLE_INITIATOR: kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR; break; case MPT_ROLE_TARGET: kp->xport_specific.fc.role = KNOB_ROLE_TARGET; break; case MPT_ROLE_BOTH: kp->xport_specific.fc.role = KNOB_ROLE_BOTH; break; } kp->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE; ccb->ccb_h.status = CAM_REQ_CMP; } else { ccb->ccb_h.status = CAM_REQ_INVALID; } xpt_done(ccb); break; } case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; 
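/* XPT_PATH_INQ: describe this SIM's addressing limits, transport type and base transfer speed to the transport layer above us. */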
cpi->target_sprt = 0; cpi->hba_eng_cnt = 0; cpi->max_target = mpt->port_facts[0].MaxDevices - 1; cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE; /* * FC cards report MAX_DEVICES of 512, but * the MSG_SCSI_IO_REQUEST target id field * is only 8 bits. Until we fix the driver * to support 'channels' for bus overflow, * just limit it. */ if (cpi->max_target > 255) { cpi->max_target = 255; } /* * VMware ESX reports > 16 devices and then dies when we probe. */ if (mpt->is_spi && cpi->max_target > 15) { cpi->max_target = 15; } if (mpt->is_spi) cpi->max_lun = 7; else cpi->max_lun = MPT_MAX_LUNS; cpi->initiator_id = mpt->mpt_ini_id; cpi->bus_id = cam_sim_bus(sim); /* * The base speed is the speed of the underlying connection. */ cpi->protocol = PROTO_SCSI; if (mpt->is_fc) { cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_EXTLUNS; cpi->base_transfer_speed = 100000; cpi->hba_inquiry = PI_TAG_ABLE; cpi->transport = XPORT_FC; cpi->transport_version = 0; cpi->protocol_version = SCSI_REV_SPC; cpi->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn; cpi->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn; cpi->xport_specific.fc.port = mpt->scinfo.fc.portid; cpi->xport_specific.fc.bitrate = 100000 * mpt->mpt_fcport_speed; } else if (mpt->is_sas) { cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_EXTLUNS; cpi->base_transfer_speed = 300000; cpi->hba_inquiry = PI_TAG_ABLE; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol_version = SCSI_REV_SPC2; } else { cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED | PIM_EXTLUNS; cpi->base_transfer_speed = 3300; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol_version = SCSI_REV_2; } /* * We give our fake RAID passthru bus a width that is MaxVolumes * wide and restrict it to one lun. 
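* (The initiator id is placed one past the last target id so it can never collide with a physical disk address.)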
*/ if (raid_passthru) { cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1; cpi->initiator_id = cpi->max_target + 1; cpi->max_lun = 0; } if ((mpt->role & MPT_ROLE_INITIATOR) == 0) { cpi->hba_misc |= PIM_NOINITIATOR; } if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; } else { cpi->target_sprt = 0; } strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "LSI", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_EN_LUN: /* Enable LUN as a target */ { int result; if (ccb->cel.enable) result = mpt_enable_lun(mpt, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); else result = mpt_disable_lun(mpt, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); if (result == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); } else { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } break; } case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */ case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ { tgt_resource_t *trtp; lun_id_t lun = ccb->ccb_h.target_lun; ccb->ccb_h.sim_priv.entries[0].field = 0; ccb->ccb_h.sim_priv.entries[1].ptr = mpt; if (lun == CAM_LUN_WILDCARD) { if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } trtp = &mpt->trt_wildcard; } else if (lun >= MPT_MAX_LUNS) { mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } else { trtp = &mpt->trt[lun]; } if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { mpt_lprt(mpt, MPT_PRT_DEBUG1, "Put FREE ATIO %p lun %jx\n", ccb, (uintmax_t)lun); STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h, sim_links.stqe); } else { mpt_lprt(mpt, MPT_PRT_DEBUG1, "Put FREE INOT lun %jx\n", (uintmax_t)lun); STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h, sim_links.stqe); } mpt_set_ccb_status(ccb, CAM_REQ_INPROG); return; } case XPT_NOTIFY_ACKNOWLEDGE: /* Task management request done. */ { request_t *req = MPT_TAG_2_REQ(mpt, ccb->cna2.tag_id); mpt_lprt(mpt, MPT_PRT_DEBUG, "Got Notify ACK\n"); mpt_scsi_tgt_status(mpt, NULL, req, 0, NULL, 0); mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } case XPT_CONT_TARGET_IO: mpt_target_start_io(mpt, ccb); return; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } static int mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts) { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; target_id_t tgt; uint32_t dval, pval, oval; int rv; if (IS_CURRENT_SETTINGS(cts) == 0) { tgt = cts->ccb_h.target_id; } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) { if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) { return (-1); } } else { tgt = cts->ccb_h.target_id; } /* * We aren't looking at Port Page 2 BIOS settings here- * sometimes these have been known to be bogus XXX. * * For user settings, we pick the max from port page 0 * * For current settings we read the current settings out from * device page 0 for that target. 
*/ if (IS_CURRENT_SETTINGS(cts)) { CONFIG_PAGE_SCSI_DEVICE_0 tmp; dval = 0; tmp = mpt->mpt_dev_page0[tgt]; rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp), FALSE, 5000); if (rv) { mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt); return (rv); } mpt2host_config_page_scsi_device_0(&tmp); mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt, tmp.NegotiatedParameters, tmp.Information); dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ? DP_WIDE : DP_NARROW; dval |= (mpt->mpt_disc_enable & (1 << tgt)) ? DP_DISC_ENABLE : DP_DISC_DISABL; dval |= (mpt->mpt_tag_enable & (1 << tgt)) ? DP_TQING_ENABLE : DP_TQING_DISABL; oval = tmp.NegotiatedParameters; oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK; oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET; pval = tmp.NegotiatedParameters; pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK; pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD; mpt->mpt_dev_page0[tgt] = tmp; } else { dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC; oval = mpt->mpt_port_page0.Capabilities; oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval); pval = mpt->mpt_port_page0.Capabilities; pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval); } spi->valid = 0; scsi->valid = 0; spi->flags = 0; scsi->flags = 0; spi->sync_offset = oval; spi->sync_period = pval; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->valid |= CTS_SPI_VALID_BUS_WIDTH; if (dval & DP_WIDE) { spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else { spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; if (dval & DP_TQING_ENABLE) { scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } spi->valid |= CTS_SPI_VALID_DISC; if (dval & DP_DISC_ENABLE) { spi->flags |= CTS_SPI_FLAGS_DISC_ENB; } } mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt, IS_CURRENT_SETTINGS(cts) ? 
"ACTIVE" : "NVRAM ", dval, pval, oval); return (0); } static void mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; ptr = &mpt->mpt_dev_page1[tgt]; if (onoff) { ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; } else { ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; } } static void mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; ptr = &mpt->mpt_dev_page1[tgt]; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; if (period == 0) { return; } ptr->RequestedParameters |= period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; ptr->RequestedParameters |= offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; if (period < 0xa) { ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; } if (period < 0x9) { ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; } } static int mpt_update_spi_config(struct mpt_softc *mpt, int tgt) { CONFIG_PAGE_SCSI_DEVICE_1 tmp; int rv; mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); tmp = mpt->mpt_dev_page1[tgt]; host2mpt_config_page_scsi_device_1(&tmp); rv = mpt_write_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp), FALSE, 5000); if (rv) { mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); return (-1); } return (0); } /****************************** Timeout Recovery ******************************/ static int mpt_spawn_recovery_thread(struct mpt_softc *mpt) { int error; error = kproc_create(mpt_recovery_thread, mpt, &mpt->recovery_thread, /*flags*/0, /*altstack*/0, "mpt_recovery%d", mpt->unit); return (error); } static void mpt_terminate_recovery_thread(struct mpt_softc *mpt) { if (mpt->recovery_thread == NULL) { return; } mpt->shutdwn_recovery = 1; wakeup(mpt); /* * Sleep on a slightly different location * for this interlock just for added safety. */ mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0); } static void mpt_recovery_thread(void *arg) { struct mpt_softc *mpt; mpt = (struct mpt_softc *)arg; MPT_LOCK(mpt); for (;;) { if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { if (mpt->shutdwn_recovery == 0) { mpt_sleep(mpt, mpt, PUSER, "idle", 0); } } if (mpt->shutdwn_recovery != 0) { break; } mpt_recover_commands(mpt); } mpt->recovery_thread = NULL; wakeup(&mpt->recovery_thread); MPT_UNLOCK(mpt); kproc_exit(0); } static int mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags, u_int channel, target_id_t target, lun_id_t lun, u_int abort_ctx, int sleep_ok) { MSG_SCSI_TASK_MGMT *tmf_req; int error; /* * Wait for any current TMF request to complete. * We're only allowed to issue one TMF at a time. 
*/ error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE, sleep_ok, MPT_TMF_MAX_TIMEOUT); if (error != 0) { mpt_reset(mpt, TRUE); return (ETIMEDOUT); } mpt_assign_serno(mpt, mpt->tmf_req); mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED; tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf; memset(tmf_req, 0, sizeof(*tmf_req)); tmf_req->TargetID = target; tmf_req->Bus = channel; tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; tmf_req->TaskType = type; tmf_req->MsgFlags = flags; tmf_req->MsgContext = htole32(mpt->tmf_req->index | scsi_tmf_handler_id); be64enc(tmf_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun)); tmf_req->TaskMsgContext = abort_ctx; mpt_lprt(mpt, MPT_PRT_DEBUG, "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req, mpt->tmf_req->serno, tmf_req->MsgContext); if (mpt->verbose > MPT_PRT_DEBUG) { mpt_print_request(tmf_req); } KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0, ("mpt_scsi_send_tmf: tmf_req already on pending list")); TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links); error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req); if (error != MPT_OK) { TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links); mpt->tmf_req->state = REQ_STATE_FREE; mpt_reset(mpt, TRUE); } return (error); } /* * When a command times out, it is placed on the request_timeout_list * and we wake our recovery thread. The MPT-Fusion architecture supports * only a single TMF operation at a time, so we serially abort/bdr, etc, * the timedout transactions. The next TMF is issued either by the * completion handler of the current TMF waking our recovery thread, * or the TMF timeout handler causing a hard reset sequence. */ static void mpt_recover_commands(struct mpt_softc *mpt) { request_t *req; union ccb *ccb; int error; if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { /* * No work to do- leave. */ mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); return; } /* * Flush any commands whose completion coincides with their timeout. */ mpt_intr(mpt); if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { /* * The timedout commands have already * completed. This typically means * that either the timeout value was on * the hairy edge of what the device * requires or - more likely - interrupts * are not happening. */ mpt_prt(mpt, "Timedout requests already complete. " "Interrupts may not be functioning.\n"); mpt_enable_ints(mpt); return; } /* * We have no visibility into the current state of the * controller, so attempt to abort the commands in the * order they timed-out. For initiator commands, we * depend on the reply handler pulling requests off * the timeout list. */ while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { uint16_t status; uint8_t response; MSG_REQUEST_HEADER *hdrp = req->req_vbuf; mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", req, req->serno, hdrp->Function); ccb = req->ccb; if (ccb == NULL) { mpt_prt(mpt, "null ccb in timed out request. " "Resetting Controller.\n"); mpt_reset(mpt, TRUE); continue; } mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); /* * Check to see whether this is an initiator command; if it * is not, deal with it differently. */ switch (hdrp->Function) { case MPI_FUNCTION_SCSI_IO_REQUEST: case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: break; default: /* * XXX: FIX ME: need to abort target assists... 
*/ mpt_prt(mpt, "just putting it back on the pend q\n"); TAILQ_REMOVE(&mpt->request_timeout_list, req, links); TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links); continue; } error = mpt_scsi_send_tmf(mpt, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, htole32(req->index | scsi_io_handler_id), TRUE); if (error != 0) { /* * mpt_scsi_send_tmf hard resets on failure, so no * need to do so here. Our queue should be emptied * by the hard reset. */ continue; } error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE, 500); status = le16toh(mpt->tmf_req->IOCStatus); response = mpt->tmf_req->ResponseCode; mpt->tmf_req->state = REQ_STATE_FREE; if (error != 0) { /* * If we've errored out,, reset the controller. */ mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " "Resetting controller\n"); mpt_reset(mpt, TRUE); continue; } if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " "Resetting controller.\n", status); mpt_reset(mpt, TRUE); continue; } if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. " "Resetting controller.\n", response); mpt_reset(mpt, TRUE); continue; } mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); } } /************************ Target Mode Support ****************************/ static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) { MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; PTR_SGE_TRANSACTION32 tep; PTR_SGE_SIMPLE32 se; bus_addr_t paddr; uint32_t fl; paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); fc = req->req_vbuf; memset(fc, 0, MPT_REQUEST_AREA); fc->BufferCount = 1; fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; fc->MsgContext = htole32(req->index | fc_els_handler_id); /* * Okay, set up ELS buffer pointers. ELS buffer pointers * consist of a TE SGL element (with details length of zero) * followed by a SIMPLE SGL element which holds the address * of the buffer. 
*/ tep = (PTR_SGE_TRANSACTION32) &fc->SGL; tep->ContextSize = 4; tep->Flags = 0; tep->TransactionContext[0] = htole32(ioindex); se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; fl = MPI_SGE_FLAGS_HOST_TO_IOC | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; fl <<= MPI_SGE_FLAGS_SHIFT; fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); se->FlagsLength = htole32(fl); se->Address = htole32((uint32_t) paddr); mpt_lprt(mpt, MPT_PRT_DEBUG, "add ELS index %d ioindex %d for %p:%u\n", req->index, ioindex, req, req->serno); KASSERT(((req->state & REQ_STATE_LOCKED) != 0), ("mpt_fc_post_els: request not locked")); mpt_send_cmd(mpt, req); } static void mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) { PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; PTR_CMD_BUFFER_DESCRIPTOR cb; bus_addr_t paddr; paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); memset(req->req_vbuf, 0, MPT_REQUEST_AREA); MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; fc = req->req_vbuf; fc->BufferCount = 1; fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; fc->BufferLength = MIN(MPT_REQUEST_AREA - MPT_RQSL(mpt), UINT8_MAX); fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); cb = &fc->Buffer[0]; cb->IoIndex = htole16(ioindex); cb->u.PhysicalAddress32 = htole32((U32) paddr); mpt_check_doorbell(mpt); mpt_send_cmd(mpt, req); } static int mpt_add_els_buffers(struct mpt_softc *mpt) { int i; if (mpt->is_fc == 0) { return (TRUE); } if (mpt->els_cmds_allocated) { return (TRUE); } mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->els_cmd_ptrs == NULL) { return (FALSE); } /* * Feed the chip some ELS buffer resources */ for (i = 0; i < MPT_MAX_ELS; i++) { request_t *req = mpt_get_request(mpt, FALSE); if (req == NULL) { break; } req->state |= REQ_STATE_LOCKED; mpt->els_cmd_ptrs[i] = req; mpt_fc_post_els(mpt, req, i); } if (i == 0) { mpt_prt(mpt, "unable to add ELS buffer resources\n"); free(mpt->els_cmd_ptrs, M_DEVBUF); mpt->els_cmd_ptrs = NULL; return (FALSE); } if (i != MPT_MAX_ELS) { mpt_lprt(mpt, MPT_PRT_INFO, "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); } mpt->els_cmds_allocated = i; return(TRUE); } static int mpt_add_target_commands(struct mpt_softc *mpt) { int i, max; if (mpt->tgt_cmd_ptrs) { return (TRUE); } max = MPT_MAX_REQUESTS(mpt) >> 1; if (max > mpt->mpt_max_tgtcmds) { max = mpt->mpt_max_tgtcmds; } mpt->tgt_cmd_ptrs = malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->tgt_cmd_ptrs == NULL) { mpt_prt(mpt, "mpt_add_target_commands: could not allocate cmd ptrs\n"); return (FALSE); } for (i = 0; i < max; i++) { request_t *req; req = mpt_get_request(mpt, FALSE); if (req == NULL) { break; } req->state |= REQ_STATE_LOCKED; mpt->tgt_cmd_ptrs[i] = req; mpt_post_target_command(mpt, req, i); } if (i == 0) { mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); free(mpt->tgt_cmd_ptrs, M_DEVBUF); mpt->tgt_cmd_ptrs = NULL; return (FALSE); } mpt->tgt_cmds_allocated = i; if (i < max) { mpt_lprt(mpt, MPT_PRT_INFO, "added %d of %d target bufs\n", i, max); } return (i); } static int mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) { if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { mpt->twildcard = 1; } else if (lun >= MPT_MAX_LUNS) { return (EINVAL); } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { return (EINVAL); } if (mpt->tenabled == 0) { if (mpt->is_fc) { (void) mpt_fc_reset_link(mpt, 0); } mpt->tenabled = 1; } if (lun 
== CAM_LUN_WILDCARD) { mpt->trt_wildcard.enabled = 1; } else { mpt->trt[lun].enabled = 1; } return (0); } static int mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) { int i; if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { mpt->twildcard = 0; } else if (lun >= MPT_MAX_LUNS) { return (EINVAL); } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { return (EINVAL); } if (lun == CAM_LUN_WILDCARD) { mpt->trt_wildcard.enabled = 0; } else { mpt->trt[lun].enabled = 0; } for (i = 0; i < MPT_MAX_LUNS; i++) { if (mpt->trt[i].enabled) { break; } } if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { if (mpt->is_fc) { (void) mpt_fc_reset_link(mpt, 0); } mpt->tenabled = 0; } return (0); } /* * Called with MPT lock held */ static void mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) { struct ccb_scsiio *csio = &ccb->csio; request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); switch (tgt->state) { case TGT_STATE_IN_CAM: break; case TGT_STATE_MOVING_DATA: mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_freeze_simq(mpt->sim, 1); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; xpt_done(ccb); return; default: mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); mpt_tgt_dump_req_state(mpt, cmd_req); mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); xpt_done(ccb); return; } if (csio->dxfer_len) { bus_dmamap_callback_t *cb; PTR_MSG_TARGET_ASSIST_REQUEST ta; request_t *req; int error; KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, ("dxfer_len %u but direction is NONE", csio->dxfer_len)); if ((req = mpt_get_request(mpt, FALSE)) == NULL) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_done(ccb); return; } ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; if (sizeof (bus_addr_t) > 4) { cb = mpt_execute_req_a64; } else { cb = mpt_execute_req; } req->ccb = ccb; ccb->ccb_h.ccb_req_ptr = req; /* * Record the currently active ccb and the * request for it in our target state area. */ tgt->ccb = ccb; tgt->req = req; memset(req->req_vbuf, 0, MPT_RQSL(mpt)); ta = req->req_vbuf; if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; ta->QueueTag = ssp->InitiatorTag; } else if (mpt->is_spi) { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; ta->QueueTag = sp->Tag; } ta->Function = MPI_FUNCTION_TARGET_ASSIST; ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); ta->ReplyWord = htole32(tgt->reply_desc); be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(csio->ccb_h.target_lun)); ta->RelativeOffset = tgt->bytes_xfered; ta->DataLength = ccb->csio.dxfer_len; if (ta->DataLength > tgt->resid) { ta->DataLength = tgt->resid; } /* * XXX Should be done after data transfer completes? 
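* (resid and bytes_xfered are updated here, before the TARGET_ASSIST
* has actually moved any data).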
*/ csio->resid = csio->dxfer_len - ta->DataLength; tgt->resid -= csio->dxfer_len; tgt->bytes_xfered += csio->dxfer_len; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; } #ifdef WE_TRUST_AUTO_GOOD_STATUS if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_AUTO_STATUS; } #endif tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; mpt_lprt(mpt, MPT_PRT_DEBUG, "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb, req, 0); if (error == EINPROGRESS) { xpt_freeze_simq(mpt->sim, 1); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } } else { /* * XXX: I don't know why this seems to happen, but * XXX: completing the CCB seems to make things happy. * XXX: This seems to happen if the initiator requests * XXX: enough data that we have to do multiple CTIOs. */ if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { mpt_lprt(mpt, MPT_PRT_DEBUG, "Meaningless STATUS CCB (%p): flags %x status %x " "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; xpt_done(ccb); return; } mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, (void *)&csio->sense_data, (ccb->ccb_h.flags & CAM_SEND_SENSE) ? csio->sense_len : 0); } } static void mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, lun_id_t lun, int send, uint8_t *data, size_t length) { mpt_tgt_state_t *tgt; PTR_MSG_TARGET_ASSIST_REQUEST ta; SGE_SIMPLE32 *se; uint32_t flags; uint8_t *dptr; bus_addr_t pptr; request_t *req; /* * We enter with resid set to the data load for the command. 
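* If there is nothing to move (zero length or no remaining residual),
* we skip the TARGET_ASSIST and just send status.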
*/ tgt = MPT_TGT_STATE(mpt, cmd_req); if (length == 0 || tgt->resid == 0) { tgt->resid = 0; mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL, 0); return; } if ((req = mpt_get_request(mpt, FALSE)) == NULL) { mpt_prt(mpt, "out of resources- dropping local response\n"); return; } tgt->is_local = 1; memset(req->req_vbuf, 0, MPT_RQSL(mpt)); ta = req->req_vbuf; if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; ta->QueueTag = ssp->InitiatorTag; } else if (mpt->is_spi) { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; ta->QueueTag = sp->Tag; } ta->Function = MPI_FUNCTION_TARGET_ASSIST; ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); ta->ReplyWord = htole32(tgt->reply_desc); be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun)); ta->RelativeOffset = 0; ta->DataLength = length; dptr = req->req_vbuf; dptr += MPT_RQSL(mpt); pptr = req->req_pbuf; pptr += MPT_RQSL(mpt); memcpy(dptr, data, min(length, MPT_RQSL(mpt))); se = (SGE_SIMPLE32 *) &ta->SGL[0]; memset(se, 0,sizeof (*se)); flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; if (send) { ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } se->Address = pptr; MPI_pSGE_SET_LENGTH(se, length); flags |= MPI_SGE_FLAGS_LAST_ELEMENT; flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; MPI_pSGE_SET_FLAGS(se, flags); tgt->ccb = NULL; tgt->req = req; tgt->resid -= length; tgt->bytes_xfered = length; #ifdef WE_TRUST_AUTO_GOOD_STATUS tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; #else tgt->state = TGT_STATE_MOVING_DATA; #endif mpt_send_cmd(mpt, req); } /* * Abort queued up CCBs */ static cam_status mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) { struct mpt_hdr_stailq *lp; struct ccb_hdr *srch; union ccb *accb = ccb->cab.abort_ccb; tgt_resource_t *trtp; mpt_tgt_state_t *tgt; request_t *req; uint32_t tag; mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) trtp = &mpt->trt_wildcard; else trtp = &mpt->trt[ccb->ccb_h.target_lun]; if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { lp = &trtp->atios; tag = accb->atio.tag_id; } else { lp = &trtp->inots; tag = accb->cin1.tag_id; } /* Search the CCB among queued. */ STAILQ_FOREACH(srch, lp, sim_links.stqe) { if (srch != &accb->ccb_h) continue; STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); accb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(accb); return (CAM_REQ_CMP); } /* Search the CCB among running. */ req = MPT_TAG_2_REQ(mpt, tag); tgt = MPT_TGT_STATE(mpt, req); if (tgt->tag_id == tag) { mpt_abort_target_cmd(mpt, req); return (CAM_REQ_CMP); } return (CAM_UA_ABORT); } /* * Ask the MPT to abort the current target command */ static int mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) { int error; request_t *req; PTR_MSG_TARGET_MODE_ABORT abtp; req = mpt_get_request(mpt, FALSE); if (req == NULL) { return (-1); } abtp = req->req_vbuf; memset(abtp, 0, sizeof (*abtp)); abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); error = 0; if (mpt->is_fc || mpt->is_sas) { mpt_send_cmd(mpt, req); } else { error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); } return (error); } /* * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the * FC929 to set bogus FC_RSP fields (nonzero residuals * but w/o RESID fields set). 
This causes QLogic initiators * to think maybe that a frame was lost. * * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because * we use allocated requests to do TARGET_ASSIST and we * need to know when to release them. */ static void mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, uint8_t status, uint8_t const *sense_data, u_int sense_len) { uint8_t *cmd_vbuf; mpt_tgt_state_t *tgt; PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; request_t *req; bus_addr_t paddr; int resplen = 0; uint32_t fl; cmd_vbuf = cmd_req->req_vbuf; cmd_vbuf += MPT_RQSL(mpt); tgt = MPT_TGT_STATE(mpt, cmd_req); if ((req = mpt_get_request(mpt, FALSE)) == NULL) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } if (ccb) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_done(ccb); } else { mpt_prt(mpt, "could not allocate status request- dropping\n"); } return; } req->ccb = ccb; if (ccb) { ccb->ccb_h.ccb_mpt_ptr = mpt; ccb->ccb_h.ccb_req_ptr = req; } /* * Record the currently active ccb, if any, and the * request for it in our target state area. */ tgt->ccb = ccb; tgt->req = req; tgt->state = TGT_STATE_SENDING_STATUS; tp = req->req_vbuf; paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); memset(tp, 0, sizeof (*tp)); tp->StatusCode = status; tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; if (mpt->is_fc) { PTR_MPI_TARGET_FCP_CMD_BUFFER fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; uint8_t *sts_vbuf; uint32_t *rsp; sts_vbuf = req->req_vbuf; sts_vbuf += MPT_RQSL(mpt); rsp = (uint32_t *) sts_vbuf; memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); /* * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. * It has to be big-endian in memory and is organized * in 32 bit words, which are much easier to deal with * as words which are swizzled as needed. * * All we're filling here is the FC_RSP payload. * We may just have the chip synthesize it if * we have no residual and an OK status. * */ memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); rsp[2] = htobe32(status); #define MIN_FCP_RESPONSE_SIZE 24 #ifndef WE_TRUST_AUTO_GOOD_STATUS resplen = MIN_FCP_RESPONSE_SIZE; #endif if (tgt->resid < 0) { rsp[2] |= htobe32(0x400); /* XXXX NEED MNEMONIC!!!! */ rsp[3] = htobe32(-tgt->resid); resplen = MIN_FCP_RESPONSE_SIZE; } else if (tgt->resid > 0) { rsp[2] |= htobe32(0x800); /* XXXX NEED MNEMONIC!!!! */ rsp[3] = htobe32(tgt->resid); resplen = MIN_FCP_RESPONSE_SIZE; } if (sense_len > 0) { rsp[2] |= htobe32(0x200); /* XXXX NEED MNEMONIC!!!! 
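* (0x200, like the 0x400/0x800 cases above, appears to be one of the
* FCP_RSP flag bits - this one marking the sense length field as valid)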
*/ rsp[4] = htobe32(sense_len); memcpy(&rsp[6], sense_data, sense_len); resplen = MIN_FCP_RESPONSE_SIZE + sense_len; } } else if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf; memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN)); } else { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf; tp->QueueTag = htole16(sp->Tag); memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN)); } tp->ReplyWord = htole32(tgt->reply_desc); tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); #ifdef WE_CAN_USE_AUTO_REPOST tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER; #endif if (status == SCSI_STATUS_OK && resplen == 0) { tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS; } else { tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr); fl = MPI_SGE_FLAGS_HOST_TO_IOC | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; fl <<= MPI_SGE_FLAGS_SHIFT; fl |= resplen; tp->StatusDataSGE.FlagsLength = htole32(fl); } mpt_lprt(mpt, MPT_PRT_DEBUG, "STATUS_CCB %p (with%s sense) tag %x req %p:%u resid %u\n", ccb, sense_len > 0 ? "" : "out", tgt->tag_id, req, req->serno, tgt->resid); if (mpt->verbose > MPT_PRT_DEBUG) mpt_print_request(req->req_vbuf); if (ccb) { ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; mpt_req_timeout(req, SBT_1S * 60, mpt_timeout, ccb); } mpt_send_cmd(mpt, req); } static void mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc, tgt_resource_t *trtp, int init_id) { struct ccb_immediate_notify *inot; mpt_tgt_state_t *tgt; tgt = MPT_TGT_STATE(mpt, req); inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots); if (inot == NULL) { mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n"); mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL, 0); return; } STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe); mpt_lprt(mpt, MPT_PRT_DEBUG1, "Get FREE INOT %p lun %jx\n", inot, (uintmax_t)inot->ccb_h.target_lun); inot->initiator_id = init_id; /* XXX */ inot->tag_id = tgt->tag_id; inot->seq_id = 0; /* * This is a somewhat grotesque attempt to map from task management * to old style SCSI messages. God help us all. */ switch (fc) { case MPT_QUERY_TASK_SET: inot->arg = MSG_QUERY_TASK_SET; break; case MPT_ABORT_TASK_SET: inot->arg = MSG_ABORT_TASK_SET; break; case MPT_CLEAR_TASK_SET: inot->arg = MSG_CLEAR_TASK_SET; break; case MPT_QUERY_ASYNC_EVENT: inot->arg = MSG_QUERY_ASYNC_EVENT; break; case MPT_LOGICAL_UNIT_RESET: inot->arg = MSG_LOGICAL_UNIT_RESET; break; case MPT_TARGET_RESET: inot->arg = MSG_TARGET_RESET; break; case MPT_CLEAR_ACA: inot->arg = MSG_CLEAR_ACA; break; default: inot->arg = MSG_NOOP; break; } tgt->ccb = (union ccb *) inot; inot->ccb_h.status = CAM_MESSAGE_RECV; xpt_done((union ccb *)inot); } static void mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) { static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', '0', '0', '0', '1' }; struct ccb_accept_tio *atiop; lun_id_t lun; int tag_action = 0; mpt_tgt_state_t *tgt; tgt_resource_t *trtp = NULL; U8 *lunptr; U8 *vbuf; U16 ioindex; mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; uint8_t *cdbp; /* * Stash info for the current command where we can get at it later. 
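* The incoming command buffer sits past the request header, at
* req_vbuf + MPT_RQSL(mpt).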
*/ vbuf = req->req_vbuf; vbuf += MPT_RQSL(mpt); if (mpt->verbose >= MPT_PRT_DEBUG) { mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); } /* * Get our state pointer set up. */ tgt = MPT_TGT_STATE(mpt, req); if (tgt->state != TGT_STATE_LOADED) { mpt_tgt_dump_req_state(mpt, req); panic("bad target state in mpt_scsi_tgt_atio"); } memset(tgt, 0, sizeof (mpt_tgt_state_t)); tgt->state = TGT_STATE_IN_CAM; tgt->reply_desc = reply_desc; ioindex = GET_IO_INDEX(reply_desc); /* * The tag we construct here allows us to find the * original request that the command came in with. * * This way we don't have to depend on anything but the * tag to find things when CCBs show back up from CAM. */ tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); if (mpt->is_fc) { PTR_MPI_TARGET_FCP_CMD_BUFFER fc; fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; if (fc->FcpCntl[2]) { /* * Task Management Request */ switch (fc->FcpCntl[2]) { case 0x1: fct = MPT_QUERY_TASK_SET; break; case 0x2: fct = MPT_ABORT_TASK_SET; break; case 0x4: fct = MPT_CLEAR_TASK_SET; break; case 0x8: fct = MPT_QUERY_ASYNC_EVENT; break; case 0x10: fct = MPT_LOGICAL_UNIT_RESET; break; case 0x20: fct = MPT_TARGET_RESET; break; case 0x40: fct = MPT_CLEAR_ACA; break; default: mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", fc->FcpCntl[2]); mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_OK, NULL, 0); return; } } else { switch (fc->FcpCntl[1]) { case 0: tag_action = MSG_SIMPLE_Q_TAG; break; case 1: tag_action = MSG_HEAD_OF_Q_TAG; break; case 2: tag_action = MSG_ORDERED_Q_TAG; break; default: /* * Bah. Ignore Untagged Queing and ACA */ tag_action = MSG_SIMPLE_Q_TAG; break; } } tgt->resid = be32toh(fc->FcpDl); cdbp = fc->FcpCdb; lunptr = fc->FcpLun; tgt->itag = fc->OptionalOxid; } else if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; cdbp = ssp->CDB; lunptr = ssp->LogicalUnitNumber; tgt->itag = ssp->InitiatorTag; } else { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; cdbp = sp->CDB; lunptr = sp->LogicalUnitNumber; tgt->itag = sp->Tag; } lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(lunptr)); /* * Deal with non-enabled or bad luns here. */ if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || mpt->trt[lun].enabled == 0) { if (mpt->twildcard) { trtp = &mpt->trt_wildcard; } else if (fct == MPT_NIL_TMT_VALUE) { /* * In this case, we haven't got an upstream listener * for either a specific lun or wildcard luns. We * have to make some sensible response. For regular * inquiry, just return some NOT HERE inquiry data. * For VPD inquiry, report illegal field in cdb. * For REQUEST SENSE, just return NO SENSE data. * REPORT LUNS gets illegal command. * All other commands get 'no such device'. 
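* The sense data built below is fixed-format CHECK CONDITION sense:
* response code 0xf0, sense key in byte 2, ASC/ASCQ in bytes 12 and 13.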
*/ uint8_t sense[MPT_SENSE_SIZE]; size_t len; memset(sense, 0, sizeof(sense)); sense[0] = 0xf0; sense[2] = 0x5; sense[7] = 0x8; switch (cdbp[0]) { case INQUIRY: { if (cdbp[1] != 0) { sense[12] = 0x26; sense[13] = 0x01; break; } len = min(tgt->resid, cdbp[4]); len = min(len, sizeof (null_iqd)); mpt_lprt(mpt, MPT_PRT_DEBUG, "local inquiry %ld bytes\n", (long) len); mpt_scsi_tgt_local(mpt, req, lun, 1, null_iqd, len); return; } case REQUEST_SENSE: { sense[2] = 0x0; len = min(tgt->resid, cdbp[4]); len = min(len, sizeof (sense)); mpt_lprt(mpt, MPT_PRT_DEBUG, "local reqsense %ld bytes\n", (long) len); mpt_scsi_tgt_local(mpt, req, lun, 1, sense, len); return; } case REPORT_LUNS: mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); sense[12] = 0x26; return; default: mpt_lprt(mpt, MPT_PRT_DEBUG, "CMD 0x%x to unmanaged lun %jx\n", cdbp[0], (uintmax_t)lun); sense[12] = 0x25; break; } mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_CHECK_COND, sense, sizeof(sense)); return; } /* otherwise, leave trtp NULL */ } else { trtp = &mpt->trt[lun]; } /* * Deal with any task management */ if (fct != MPT_NIL_TMT_VALUE) { if (trtp == NULL) { mpt_prt(mpt, "task mgmt function %x but no listener\n", fct); mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_OK, NULL, 0); } else { mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, GET_INITIATOR_INDEX(reply_desc)); } return; } atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); if (atiop == NULL) { mpt_lprt(mpt, MPT_PRT_WARN, "no ATIOs for lun %jx- sending back %s\n", (uintmax_t)lun, mpt->tenabled? "QUEUE FULL" : "BUSY"); mpt_scsi_tgt_status(mpt, NULL, req, mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, NULL, 0); return; } STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); mpt_lprt(mpt, MPT_PRT_DEBUG1, "Get FREE ATIO %p lun %jx\n", atiop, (uintmax_t)atiop->ccb_h.target_lun); atiop->ccb_h.ccb_mpt_ptr = mpt; atiop->ccb_h.status = CAM_CDB_RECVD; atiop->ccb_h.target_lun = lun; atiop->sense_len = 0; atiop->tag_id = tgt->tag_id; atiop->init_id = GET_INITIATOR_INDEX(reply_desc); atiop->cdb_len = 16; memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); if (tag_action) { atiop->tag_action = tag_action; atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; } if (mpt->verbose >= MPT_PRT_DEBUG) { int i; mpt_prt(mpt, "START_CCB %p for lun %jx CDB=<", atiop, (uintmax_t)atiop->ccb_h.target_lun); for (i = 0; i < atiop->cdb_len; i++) { mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, (i == (atiop->cdb_len - 1))? '>' : ' '); } mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", tgt->itag, tgt->tag_id, tgt->reply_desc, tgt->resid); } xpt_done((union ccb *)atiop); } static void mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) { mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " "nx %d tag 0x%08x itag 0x%04x state=%d\n", req, req->serno, tgt->reply_desc, tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, tgt->tag_id, tgt->itag, tgt->state); } static void mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) { mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, req->index, req->index, req->state); mpt_tgt_dump_tgt_state(mpt, req); } static int mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { int dbg; union ccb *ccb; U16 status; if (reply_frame == NULL) { /* * Figure out what the state of the command is. 
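* (reply_frame is NULL for context/turbo replies, so the target state
* recorded for this command buffer is all we have to go on.)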
*/ mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); #ifdef INVARIANTS mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); if (tgt->req) { mpt_req_not_spcl(mpt, tgt->req, "turbo scsi_tgt_reply associated req", __LINE__); } #endif switch(tgt->state) { case TGT_STATE_LOADED: /* * This is a new command starting. */ mpt_scsi_tgt_atio(mpt, req, reply_desc); break; case TGT_STATE_MOVING_DATA: { ccb = tgt->ccb; if (tgt->req == NULL) { panic("mpt: turbo target reply with null " "associated request moving data"); /* NOTREACHED */ } if (ccb == NULL) { if (tgt->is_local == 0) { panic("mpt: turbo target reply with " "null associated ccb moving data"); /* NOTREACHED */ } mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_ASSIST local done\n"); TAILQ_REMOVE(&mpt->request_pending_list, tgt->req, links); mpt_free_request(mpt, tgt->req); tgt->req = NULL; mpt_scsi_tgt_status(mpt, NULL, req, 0, NULL, 0); return (TRUE); } tgt->ccb = NULL; tgt->nxfers++; mpt_req_untimeout(tgt->req, mpt_timeout, ccb); mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); /* * Free the Target Assist Request */ KASSERT(tgt->req->ccb == ccb, ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, tgt->req->serno, tgt->req->ccb)); TAILQ_REMOVE(&mpt->request_pending_list, tgt->req, links); mpt_free_request(mpt, tgt->req); tgt->req = NULL; /* * Do we need to send status now? That is, are * we done with all our data transfers? */ if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); tgt->state = TGT_STATE_IN_CAM; if (mpt->outofbeer) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } xpt_done(ccb); break; } /* * Otherwise, send status (and sense) */ mpt_scsi_tgt_status(mpt, ccb, req, ccb->csio.scsi_status, (void *)&ccb->csio.sense_data, (ccb->ccb_h.flags & CAM_SEND_SENSE) ? ccb->csio.sense_len : 0); break; } case TGT_STATE_SENDING_STATUS: case TGT_STATE_MOVING_DATA_AND_STATUS: { int ioindex; ccb = tgt->ccb; if (tgt->req == NULL) { panic("mpt: turbo target reply with null " "associated request sending status"); /* NOTREACHED */ } if (ccb) { tgt->ccb = NULL; if (tgt->state == TGT_STATE_MOVING_DATA_AND_STATUS) { tgt->nxfers++; } mpt_req_untimeout(tgt->req, mpt_timeout, ccb); if (ccb->ccb_h.flags & CAM_SEND_SENSE) { ccb->ccb_h.status |= CAM_SENT_SENSE; } mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_STATUS tag %x sts %x flgs %x req " "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, ccb->ccb_h.flags, tgt->req); /* * Free the Target Send Status Request */ KASSERT(tgt->req->ccb == ccb, ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, tgt->req->serno, tgt->req->ccb)); /* * Notify CAM that we're done */ mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("ZERO ccb sts at %d", __LINE__)); tgt->ccb = NULL; } else { mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_STATUS non-CAM for req %p:%u\n", tgt->req, tgt->req->serno); } TAILQ_REMOVE(&mpt->request_pending_list, tgt->req, links); mpt_free_request(mpt, tgt->req); tgt->req = NULL; /* * And re-post the Command Buffer. * This will reset the state. 
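* (mpt_post_target_command() below moves the state back to
* TGT_STATE_LOADING.)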
*/ ioindex = GET_IO_INDEX(reply_desc); TAILQ_REMOVE(&mpt->request_pending_list, req, links); tgt->is_local = 0; mpt_post_target_command(mpt, req, ioindex); /* * And post a done for anyone who cares */ if (ccb) { if (mpt->outofbeer) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } xpt_done(ccb); } break; } case TGT_STATE_NIL: /* XXX This Never Happens XXX */ tgt->state = TGT_STATE_LOADED; break; default: mpt_prt(mpt, "Unknown Target State 0x%x in Context " "Reply Function\n", tgt->state); } return (TRUE); } status = le16toh(reply_frame->IOCStatus); if (status != MPI_IOCSTATUS_SUCCESS) { dbg = MPT_PRT_ERROR; } else { dbg = MPT_PRT_DEBUG1; } mpt_lprt(mpt, dbg, "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", req, req->serno, reply_frame, reply_frame->Function, status); switch (reply_frame->Function) { case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: { mpt_tgt_state_t *tgt; #ifdef INVARIANTS mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); #endif if (status != MPI_IOCSTATUS_SUCCESS) { /* * XXX What to do? */ break; } tgt = MPT_TGT_STATE(mpt, req); KASSERT(tgt->state == TGT_STATE_LOADING, ("bad state 0x%x on reply to buffer post", tgt->state)); mpt_assign_serno(mpt, req); tgt->state = TGT_STATE_LOADED; break; } case MPI_FUNCTION_TARGET_ASSIST: #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); #endif mpt_prt(mpt, "target assist completion\n"); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); break; case MPI_FUNCTION_TARGET_STATUS_SEND: #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); #endif mpt_prt(mpt, "status send completion\n"); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); break; case MPI_FUNCTION_TARGET_MODE_ABORT: { PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; PTR_MSG_TARGET_MODE_ABORT abtp = (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); #endif mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); break; } default: mpt_prt(mpt, "Unknown Target Address Reply Function code: " "0x%x\n", reply_frame->Function); break; } return (TRUE); } Index: head/sys/dev/mpt/mpt_raid.c =================================================================== --- head/sys/dev/mpt/mpt_raid.c (revision 355600) +++ head/sys/dev/mpt/mpt_raid.c (revision 355601) @@ -1,1846 +1,1846 @@ /*- * Routines for handling the integrated RAID features LSI MPT Fusion adapters. * * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2005, WHEEL Sp. z o.o. * Copyright (c) 2005 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. 
Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Some Breakage and Bug Fixing added later. * Copyright (c) 2006, by Matthew Jacob * All Rights Reserved * * Support from LSI-Logic has also gone a great deal toward making this a * workable subsystem and is gratefully acknowledged. */ #include __FBSDID("$FreeBSD$"); #include #include #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */ #include "dev/mpt/mpilib/mpi_raid.h" #include #include #include #include #include #include #include #include #include struct mpt_raid_action_result { union { MPI_RAID_VOL_INDICATOR indicator_struct; uint32_t new_settings; uint8_t phys_disk_num; } action_data; uint16_t action_status; }; #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \ (((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1)) #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK) static mpt_probe_handler_t mpt_raid_probe; static mpt_attach_handler_t mpt_raid_attach; static mpt_enable_handler_t mpt_raid_enable; static mpt_event_handler_t mpt_raid_event; static mpt_shutdown_handler_t mpt_raid_shutdown; static mpt_reset_handler_t mpt_raid_ioc_reset; static mpt_detach_handler_t mpt_raid_detach; static struct mpt_personality mpt_raid_personality = { .name = "mpt_raid", .probe = mpt_raid_probe, .attach = mpt_raid_attach, .enable = mpt_raid_enable, .event = mpt_raid_event, .reset = mpt_raid_ioc_reset, .shutdown = mpt_raid_shutdown, .detach = mpt_raid_detach, }; DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD); MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1); static mpt_reply_handler_t mpt_raid_reply_handler; static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req, MSG_DEFAULT_REPLY *reply_frame); static int mpt_spawn_raid_thread(struct mpt_softc *mpt); static void mpt_terminate_raid_thread(struct mpt_softc *mpt); static void mpt_raid_thread(void *arg); -static timeout_t mpt_raid_timer; +static callout_func_t mpt_raid_timer; #if 0 static void mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol, int enable); #endif static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *); static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *, struct cam_path *); static void mpt_raid_sysctl_attach(struct mpt_softc *); static const char *mpt_vol_type(struct mpt_raid_volume *vol); static const char *mpt_vol_state(struct mpt_raid_volume *vol); static const char *mpt_disk_state(struct mpt_raid_disk *disk); static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume 
*vol, const char *fmt, ...); static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk, const char *fmt, ...); static int mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req, u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len, int write, int wait); static int mpt_refresh_raid_data(struct mpt_softc *mpt); static void mpt_schedule_raid_refresh(struct mpt_softc *mpt); static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE; static const char * mpt_vol_type(struct mpt_raid_volume *vol) { switch (vol->config_page->VolumeType) { case MPI_RAID_VOL_TYPE_IS: return ("RAID-0"); case MPI_RAID_VOL_TYPE_IME: return ("RAID-1E"); case MPI_RAID_VOL_TYPE_IM: return ("RAID-1"); default: return ("Unknown"); } } static const char * mpt_vol_state(struct mpt_raid_volume *vol) { switch (vol->config_page->VolumeStatus.State) { case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL: return ("Optimal"); case MPI_RAIDVOL0_STATUS_STATE_DEGRADED: return ("Degraded"); case MPI_RAIDVOL0_STATUS_STATE_FAILED: return ("Failed"); default: return ("Unknown"); } } static const char * mpt_disk_state(struct mpt_raid_disk *disk) { switch (disk->config_page.PhysDiskStatus.State) { case MPI_PHYSDISK0_STATUS_ONLINE: return ("Online"); case MPI_PHYSDISK0_STATUS_MISSING: return ("Missing"); case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE: return ("Incompatible"); case MPI_PHYSDISK0_STATUS_FAILED: return ("Failed"); case MPI_PHYSDISK0_STATUS_INITIALIZING: return ("Initializing"); case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED: return ("Offline Requested"); case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED: return ("Failed per Host Request"); case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE: return ("Offline"); default: return ("Unknown"); } } static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol, const char *fmt, ...) { va_list ap; printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev), (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev), vol->config_page->VolumeBus, vol->config_page->VolumeID); va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); } static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk, const char *fmt, ...) 
{ va_list ap; if (disk->volume != NULL) { printf("(%s:vol%d:%d): ", device_get_nameunit(mpt->dev), disk->volume->config_page->VolumeID, disk->member_number); } else { printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev), disk->config_page.PhysDiskBus, disk->config_page.PhysDiskID); } va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); } static void mpt_raid_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct mpt_softc *mpt; mpt = (struct mpt_softc*)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; struct mpt_raid_volume *mpt_vol; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { break; } mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n", cgd->ccb_h.target_id); RAID_VOL_FOREACH(mpt, mpt_vol) { if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) continue; if (mpt_vol->config_page->VolumeID == cgd->ccb_h.target_id) { mpt_adjust_queue_depth(mpt, mpt_vol, path); break; } } } default: break; } } static int mpt_raid_probe(struct mpt_softc *mpt) { if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) { return (ENODEV); } return (0); } static int mpt_raid_attach(struct mpt_softc *mpt) { struct ccb_setasync csa; mpt_handler_t handler; int error; mpt_callout_init(mpt, &mpt->raid_timer); error = mpt_spawn_raid_thread(mpt); if (error != 0) { mpt_prt(mpt, "Unable to spawn RAID thread!\n"); goto cleanup; } MPT_LOCK(mpt); handler.reply_handler = mpt_raid_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &raid_handler_id); if (error != 0) { mpt_prt(mpt, "Unable to register RAID handler!\n"); goto cleanup; } xpt_setup_ccb(&csa.ccb_h, mpt->path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE; csa.callback = mpt_raid_async; csa.callback_arg = mpt; xpt_action((union ccb *)&csa); if (csa.ccb_h.status != CAM_REQ_CMP) { mpt_prt(mpt, "mpt_raid_attach: Unable to register " "CAM async handler.\n"); } MPT_UNLOCK(mpt); mpt_raid_sysctl_attach(mpt); return (0); cleanup: MPT_UNLOCK(mpt); mpt_raid_detach(mpt); return (error); } static int mpt_raid_enable(struct mpt_softc *mpt) { return (0); } static void mpt_raid_detach(struct mpt_softc *mpt) { struct ccb_setasync csa; mpt_handler_t handler; mpt_callout_drain(mpt, &mpt->raid_timer); MPT_LOCK(mpt); mpt_terminate_raid_thread(mpt); handler.reply_handler = mpt_raid_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, raid_handler_id); xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = mpt_raid_async; csa.callback_arg = mpt; xpt_action((union ccb *)&csa); MPT_UNLOCK(mpt); } static void mpt_raid_ioc_reset(struct mpt_softc *mpt, int type) { /* Nothing to do yet.
*/ } static const char *raid_event_txt[] = { "Volume Created", "Volume Deleted", "Volume Settings Changed", "Volume Status Changed", "Volume Physical Disk Membership Changed", "Physical Disk Created", "Physical Disk Deleted", "Physical Disk Settings Changed", "Physical Disk Status Changed", "Domain Validation Required", "SMART Data Received", "Replace Action Started", }; static int mpt_raid_event(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) { EVENT_DATA_RAID *raid_event; struct mpt_raid_volume *mpt_vol; struct mpt_raid_disk *mpt_disk; CONFIG_PAGE_RAID_VOL_0 *vol_pg; int i; int print_event; if (msg->Event != MPI_EVENT_INTEGRATED_RAID) { return (0); } raid_event = (EVENT_DATA_RAID *)&msg->Data; mpt_vol = NULL; vol_pg = NULL; if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) { for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { mpt_vol = &mpt->raid_volumes[i]; vol_pg = mpt_vol->config_page; if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) continue; if (vol_pg->VolumeID == raid_event->VolumeID && vol_pg->VolumeBus == raid_event->VolumeBus) break; } if (i >= mpt->ioc_page2->MaxVolumes) { mpt_vol = NULL; vol_pg = NULL; } } mpt_disk = NULL; if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) { mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum; if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) { mpt_disk = NULL; } } print_event = 1; switch(raid_event->ReasonCode) { case MPI_EVENT_RAID_RC_VOLUME_CREATED: case MPI_EVENT_RAID_RC_VOLUME_DELETED: break; case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED: if (mpt_vol != NULL) { if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) { mpt_vol->flags &= ~MPT_RVF_UP2DATE; } else { /* * Coalesce status messages into one * per background run of our RAID thread. * This removes "spurious" status messages * from our output. */ print_event = 0; } } break; case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED: case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED: mpt->raid_rescan++; if (mpt_vol != NULL) { mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED); } break; case MPI_EVENT_RAID_RC_PHYSDISK_CREATED: case MPI_EVENT_RAID_RC_PHYSDISK_DELETED: mpt->raid_rescan++; break; case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED: case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED: mpt->raid_rescan++; if (mpt_disk != NULL) { mpt_disk->flags &= ~MPT_RDF_UP2DATE; } break; case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED: mpt->raid_rescan++; break; case MPI_EVENT_RAID_RC_SMART_DATA: case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED: break; } if (print_event) { if (mpt_disk != NULL) { mpt_disk_prt(mpt, mpt_disk, ""); } else if (mpt_vol != NULL) { mpt_vol_prt(mpt, mpt_vol, ""); } else { mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus, raid_event->VolumeID); if (raid_event->PhysDiskNum != 0xFF) mpt_prtc(mpt, ":%d): ", raid_event->PhysDiskNum); else mpt_prtc(mpt, "): "); } if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt)) mpt_prtc(mpt, "Unhandled RaidEvent %#x\n", raid_event->ReasonCode); else mpt_prtc(mpt, "%s\n", raid_event_txt[raid_event->ReasonCode]); } if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) { /* XXX Use CAM's print sense for this... 
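* (for now we just print the raw ASC/ASCQ values carried in the event
* data below).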
*/ if (mpt_disk != NULL) mpt_disk_prt(mpt, mpt_disk, ""); else mpt_prt(mpt, "Volume(%d:%d:%d: ", raid_event->VolumeBus, raid_event->VolumeID, raid_event->PhysDiskNum); mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n", raid_event->ASC, raid_event->ASCQ); } mpt_raid_wakeup(mpt); return (1); } static void mpt_raid_shutdown(struct mpt_softc *mpt) { struct mpt_raid_volume *mpt_vol; if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) { return; } mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF; RAID_VOL_FOREACH(mpt, mpt_vol) { mpt_verify_mwce(mpt, mpt_vol); } } static int mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { int free_req; if (req == NULL) return (TRUE); free_req = TRUE; if (reply_frame != NULL) free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame); #ifdef NOTYET else if (req->ccb != NULL) { /* Complete Quiesce CCB with error... */ } #endif req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; TAILQ_REMOVE(&mpt->request_pending_list, req, links); if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { wakeup(req); } else if (free_req) { mpt_free_request(mpt, req); } return (TRUE); } /* * Parse additional completion information in the reply * frame for RAID I/O requests. */ static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req, MSG_DEFAULT_REPLY *reply_frame) { MSG_RAID_ACTION_REPLY *reply; struct mpt_raid_action_result *action_result; MSG_RAID_ACTION_REQUEST *rap; reply = (MSG_RAID_ACTION_REPLY *)reply_frame; req->IOCStatus = le16toh(reply->IOCStatus); rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf; switch (rap->Action) { case MPI_RAID_ACTION_QUIESCE_PHYS_IO: mpt_prt(mpt, "QUIESCE PHYSIO DONE\n"); break; case MPI_RAID_ACTION_ENABLE_PHYS_IO: mpt_prt(mpt, "ENABLE PHYSIO DONE\n"); break; default: break; } action_result = REQ_TO_RAID_ACTION_RESULT(req); memcpy(&action_result->action_data, &reply->ActionData, sizeof(action_result->action_data)); action_result->action_status = le16toh(reply->ActionStatus); return (TRUE); } /* * Utility routine to perform a RAID action command. */ static int mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req, u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len, int write, int wait) { MSG_RAID_ACTION_REQUEST *rap; SGE_SIMPLE32 *se; rap = req->req_vbuf; memset(rap, 0, sizeof *rap); rap->Action = Action; rap->ActionDataWord = htole32(ActionDataWord); rap->Function = MPI_FUNCTION_RAID_ACTION; rap->VolumeID = vol->config_page->VolumeID; rap->VolumeBus = vol->config_page->VolumeBus; if (disk != NULL) rap->PhysDiskNum = disk->config_page.PhysDiskNum; else rap->PhysDiskNum = 0xFF; se = (SGE_SIMPLE32 *)&rap->ActionDataSGE; se->Address = htole32(addr); MPI_pSGE_SET_LENGTH(se, len); MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_END_OF_LIST | (write ?
MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST))); se->FlagsLength = htole32(se->FlagsLength); rap->MsgContext = htole32(req->index | raid_handler_id); mpt_check_doorbell(mpt); mpt_send_cmd(mpt, req); if (wait) { return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, /*sleep_ok*/FALSE, /*time_ms*/2000)); } else { return (0); } } /*************************** RAID Status Monitoring ***************************/ static int mpt_spawn_raid_thread(struct mpt_softc *mpt) { int error; /* * Freeze out any CAM transactions until our thread * is able to run at least once. We need to update * our RAID pages before accepting I/O or we may * reject I/O to an ID we later determine is for a * hidden physdisk. */ MPT_LOCK(mpt); xpt_freeze_simq(mpt->phydisk_sim, 1); MPT_UNLOCK(mpt); error = kproc_create(mpt_raid_thread, mpt, &mpt->raid_thread, /*flags*/0, /*altstack*/0, "mpt_raid%d", mpt->unit); if (error != 0) { MPT_LOCK(mpt); xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE); MPT_UNLOCK(mpt); } return (error); } static void mpt_terminate_raid_thread(struct mpt_softc *mpt) { if (mpt->raid_thread == NULL) { return; } mpt->shutdwn_raid = 1; wakeup(&mpt->raid_volumes); /* * Sleep on a slightly different location * for this interlock just for added safety. */ mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0); } static void mpt_raid_thread(void *arg) { struct mpt_softc *mpt; int firstrun; mpt = (struct mpt_softc *)arg; firstrun = 1; MPT_LOCK(mpt); while (mpt->shutdwn_raid == 0) { if (mpt->raid_wakeup == 0) { mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0); continue; } mpt->raid_wakeup = 0; if (mpt_refresh_raid_data(mpt)) { mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */ continue; } /* * Now that we have our first snapshot of RAID data, * allow CAM to access our physical disk bus.
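* The passthrough simq was frozen in mpt_spawn_raid_thread(); it is
* released exactly once, on the first successful refresh.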
*/ if (firstrun) { firstrun = 0; xpt_release_simq(mpt->phydisk_sim, TRUE); } if (mpt->raid_rescan != 0) { union ccb *ccb; int error; mpt->raid_rescan = 0; MPT_UNLOCK(mpt); ccb = xpt_alloc_ccb(); MPT_LOCK(mpt); error = xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(mpt->phydisk_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (error != CAM_REQ_CMP) { xpt_free_ccb(ccb); mpt_prt(mpt, "Unable to rescan RAID Bus!\n"); } else { xpt_rescan(ccb); } } } mpt->raid_thread = NULL; wakeup(&mpt->raid_thread); MPT_UNLOCK(mpt); kproc_exit(0); } #if 0 static void mpt_raid_quiesce_timeout(void *arg) { /* Complete the CCB with error */ /* COWWWW */ } static timeout_t mpt_raid_quiesce_timeout; cam_status mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk, request_t *req) { union ccb *ccb; ccb = req->ccb; if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0) return (CAM_REQ_CMP); if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) { int rv; mpt_disk->flags |= MPT_RDF_QUIESCING; xpt_freeze_devq(ccb->ccb_h.path, 1); rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req, MPI_RAID_ACTION_QUIESCE_PHYS_IO, /*ActionData*/0, /*addr*/0, /*len*/0, /*write*/FALSE, /*wait*/FALSE); if (rv != 0) return (CAM_REQ_CMP_ERR); mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz); #if 0 if (rv == ETIMEDOUT) { mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: " "Quiece Timed-out\n"); xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0); return (CAM_REQ_CMP_ERR); } ar = REQ_TO_RAID_ACTION_RESULT(req); if (rv != 0 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) { mpt_disk_prt(mpt, mpt_disk, "Quiece Failed" "%d:%x:%x\n", rv, req->IOCStatus, ar->action_status); xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0); return (CAM_REQ_CMP_ERR); } #endif return (CAM_REQ_INPROG); } return (CAM_REQUEUE_REQ); } #endif /* XXX Ignores that there may be multiple buses/IOCs involved. */ cam_status mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt) { struct mpt_raid_disk *mpt_disk; mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id; if (ccb->ccb_h.target_id < mpt->raid_max_disks && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) { *tgt = mpt_disk->config_page.PhysDiskID; return (0); } mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n", ccb->ccb_h.target_id); return (-1); } /* XXX Ignores that there may be multiple buses/IOCs involved. */ int mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt) { struct mpt_raid_disk *mpt_disk; int i; if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) return (0); for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) { mpt_disk = &mpt->raid_disks[i]; if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 && mpt_disk->config_page.PhysDiskID == tgt) return (1); } return (0); } /* XXX Ignores that there may be multiple buses/IOCs involved. 
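* (only the volume ID is compared against the IOC page 2 volume list
* below).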
*/ int mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt) { CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol; CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol; if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) { return (0); } ioc_vol = mpt->ioc_page2->RaidVolume; ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes; for (;ioc_vol != ioc_last_vol; ioc_vol++) { if (ioc_vol->VolumeID == tgt) { return (1); } } return (0); } #if 0 static void mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol, int enable) { request_t *req; struct mpt_raid_action_result *ar; CONFIG_PAGE_RAID_VOL_0 *vol_pg; int enabled; int rv; vol_pg = mpt_vol->config_page; enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED; /* * If the setting matches the configuration, * there is nothing to do. */ if ((enabled && enable) || (!enabled && !enable)) return; req = mpt_get_request(mpt, /*sleep_ok*/TRUE); if (req == NULL) { mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: Get request failed!\n"); return; } rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req, enable ? MPI_RAID_ACTION_ENABLE_VOLUME : MPI_RAID_ACTION_DISABLE_VOLUME, /*data*/0, /*addr*/0, /*len*/0, /*write*/FALSE, /*wait*/TRUE); if (rv == ETIMEDOUT) { mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: " "%s Volume Timed-out\n", enable ? "Enable" : "Disable"); return; } ar = REQ_TO_RAID_ACTION_RESULT(req); if (rv != 0 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) { mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n", enable ? "Enable" : "Disable", rv, req->IOCStatus, ar->action_status); } mpt_free_request(mpt, req); } #endif static void mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol) { request_t *req; struct mpt_raid_action_result *ar; CONFIG_PAGE_RAID_VOL_0 *vol_pg; uint32_t data; int rv; int resyncing; int mwce; vol_pg = mpt_vol->config_page; resyncing = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS; mwce = vol_pg->VolumeSettings.Settings & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE; /* * If the setting matches the configuration, * there is nothing to do. */ switch (mpt->raid_mwce_setting) { case MPT_RAID_MWCE_REBUILD_ONLY: if ((resyncing && mwce) || (!resyncing && !mwce)) { return; } mpt_vol->flags ^= MPT_RVF_WCE_CHANGED; if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) { /* * Wait one more status update to see if * resyncing gets enabled. It gets disabled * temporarily when WCE is changed.
*/ return; } break; case MPT_RAID_MWCE_ON: if (mwce) return; break; case MPT_RAID_MWCE_OFF: if (!mwce) return; break; case MPT_RAID_MWCE_NC: return; } req = mpt_get_request(mpt, /*sleep_ok*/TRUE); if (req == NULL) { mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: Get request failed!\n"); return; } vol_pg->VolumeSettings.Settings ^= MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE; memcpy(&data, &vol_pg->VolumeSettings, sizeof(data)); vol_pg->VolumeSettings.Settings ^= MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE; rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req, MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS, data, /*addr*/0, /*len*/0, /*write*/FALSE, /*wait*/TRUE); if (rv == ETIMEDOUT) { mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: " "Write Cache Enable Timed-out\n"); return; } ar = REQ_TO_RAID_ACTION_RESULT(req); if (rv != 0 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) { mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: " "%d:%x:%x\n", rv, req->IOCStatus, ar->action_status); } else { vol_pg->VolumeSettings.Settings ^= MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE; } mpt_free_request(mpt, req); } static void mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol) { request_t *req; struct mpt_raid_action_result *ar; CONFIG_PAGE_RAID_VOL_0 *vol_pg; u_int prio; int rv; vol_pg = mpt_vol->config_page; if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC) return; /* * If the current RAID resync rate does not * match our configured rate, update it. */ prio = vol_pg->VolumeSettings.Settings & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC; if (vol_pg->ResyncRate != 0 && vol_pg->ResyncRate != mpt->raid_resync_rate) { req = mpt_get_request(mpt, /*sleep_ok*/TRUE); if (req == NULL) { mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: " "Get request failed!\n"); return; } rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req, MPI_RAID_ACTION_SET_RESYNC_RATE, mpt->raid_resync_rate, /*addr*/0, /*len*/0, /*write*/FALSE, /*wait*/TRUE); if (rv == ETIMEDOUT) { mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: " "Resync Rate Setting Timed-out\n"); return; } ar = REQ_TO_RAID_ACTION_RESULT(req); if (rv != 0 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) { mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: " "%d:%x:%x\n", rv, req->IOCStatus, ar->action_status); } else vol_pg->ResyncRate = mpt->raid_resync_rate; mpt_free_request(mpt, req); } else if ((prio && mpt->raid_resync_rate < 128) || (!prio && mpt->raid_resync_rate >= 128)) { uint32_t data; req = mpt_get_request(mpt, /*sleep_ok*/TRUE); if (req == NULL) { mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: " "Get request failed!\n"); return; } vol_pg->VolumeSettings.Settings ^= MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC; memcpy(&data, &vol_pg->VolumeSettings, sizeof(data)); vol_pg->VolumeSettings.Settings ^= MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC; rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req, MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS, data, /*addr*/0, /*len*/0, /*write*/FALSE, /*wait*/TRUE); if (rv == ETIMEDOUT) { mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: " "Resync Rate Setting Timed-out\n"); return; } ar = REQ_TO_RAID_ACTION_RESULT(req); if (rv != 0 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) { mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: " "%d:%x:%x\n", rv, req->IOCStatus, ar->action_status); } else { vol_pg->VolumeSettings.Settings ^= 
MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC; } mpt_free_request(mpt, req); } } static void mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol, struct cam_path *path) { struct ccb_relsim crs; xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.ccb_h.flags = CAM_DEV_QFREEZE; crs.release_flags = RELSIM_ADJUST_OPENINGS; crs.openings = mpt->raid_queue_depth; xpt_action((union ccb *)&crs); if (crs.ccb_h.status != CAM_REQ_CMP) mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed " "with CAM status %#x\n", crs.ccb_h.status); } static void mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol) { CONFIG_PAGE_RAID_VOL_0 *vol_pg; u_int i; vol_pg = mpt_vol->config_page; mpt_vol_prt(mpt, mpt_vol, "Settings ("); for (i = 1; i <= 0x8000; i <<= 1) { switch (vol_pg->VolumeSettings.Settings & i) { case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE: mpt_prtc(mpt, " Member-WCE"); break; case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART: mpt_prtc(mpt, " Offline-On-SMART-Err"); break; case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE: mpt_prtc(mpt, " Hot-Plug-Spares"); break; case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC: mpt_prtc(mpt, " High-Priority-ReSync"); break; default: break; } } mpt_prtc(mpt, " )\n"); if (vol_pg->VolumeSettings.HotSparePool != 0) { mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s", powerof2(vol_pg->VolumeSettings.HotSparePool) ? ":" : "s:"); for (i = 0; i < 8; i++) { u_int mask; mask = 0x1 << i; if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0) continue; mpt_prtc(mpt, " %d", i); } mpt_prtc(mpt, "\n"); } mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks); for (i = 0; i < vol_pg->NumPhysDisks; i++){ struct mpt_raid_disk *mpt_disk; CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg; int pt_bus = cam_sim_bus(mpt->phydisk_sim); U8 f, s; mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum; disk_pg = &mpt_disk->config_page; mpt_prtc(mpt, " "); mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev), pt_bus, disk_pg->PhysDiskID); if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) { mpt_prtc(mpt, "%s", mpt_disk->member_number == 0? 
"Primary" : "Secondary"); } else { mpt_prtc(mpt, "Stripe Position %d", mpt_disk->member_number); } f = disk_pg->PhysDiskStatus.Flags; s = disk_pg->PhysDiskStatus.State; if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) { mpt_prtc(mpt, " Out of Sync"); } if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) { mpt_prtc(mpt, " Quiesced"); } if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) { mpt_prtc(mpt, " Inactive"); } if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) { mpt_prtc(mpt, " Was Optimal"); } if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) { mpt_prtc(mpt, " Was Non-Optimal"); } switch (s) { case MPI_PHYSDISK0_STATUS_ONLINE: mpt_prtc(mpt, " Online"); break; case MPI_PHYSDISK0_STATUS_MISSING: mpt_prtc(mpt, " Missing"); break; case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE: mpt_prtc(mpt, " Incompatible"); break; case MPI_PHYSDISK0_STATUS_FAILED: mpt_prtc(mpt, " Failed"); break; case MPI_PHYSDISK0_STATUS_INITIALIZING: mpt_prtc(mpt, " Initializing"); break; case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED: mpt_prtc(mpt, " Requested Offline"); break; case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED: mpt_prtc(mpt, " Requested Failed"); break; case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE: default: mpt_prtc(mpt, " Offline Other (%x)", s); break; } mpt_prtc(mpt, "\n"); } } static void mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk) { CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg; int rd_bus = cam_sim_bus(mpt->sim); int pt_bus = cam_sim_bus(mpt->phydisk_sim); u_int i; disk_pg = &mpt_disk->config_page; mpt_disk_prt(mpt, mpt_disk, "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n", device_get_nameunit(mpt->dev), rd_bus, disk_pg->PhysDiskID, device_get_nameunit(mpt->dev), pt_bus, mpt_disk - mpt->raid_disks); if (disk_pg->PhysDiskSettings.HotSparePool == 0) return; mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s", powerof2(disk_pg->PhysDiskSettings.HotSparePool) ? 
":" : "s:"); for (i = 0; i < 8; i++) { u_int mask; mask = 0x1 << i; if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0) continue; mpt_prtc(mpt, " %d", i); } mpt_prtc(mpt, "\n"); } static void mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk, IOC_3_PHYS_DISK *ioc_disk) { int rv; rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK, /*PageNumber*/0, ioc_disk->PhysDiskNum, &mpt_disk->config_page.Header, /*sleep_ok*/TRUE, /*timeout_ms*/5000); if (rv != 0) { mpt_prt(mpt, "mpt_refresh_raid_disk: " "Failed to read RAID Disk Hdr(%d)\n", ioc_disk->PhysDiskNum); return; } rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum, &mpt_disk->config_page.Header, sizeof(mpt_disk->config_page), /*sleep_ok*/TRUE, /*timeout_ms*/5000); if (rv != 0) mpt_prt(mpt, "mpt_refresh_raid_disk: " "Failed to read RAID Disk Page(%d)\n", ioc_disk->PhysDiskNum); mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page); } static void mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol, CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol) { CONFIG_PAGE_RAID_VOL_0 *vol_pg; struct mpt_raid_action_result *ar; request_t *req; int rv; int i; vol_pg = mpt_vol->config_page; mpt_vol->flags &= ~MPT_RVF_UP2DATE; rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0, ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000); if (rv != 0) { mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n", ioc_vol->VolumePageNumber); return; } rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber, &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000); if (rv != 0) { mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n", ioc_vol->VolumePageNumber); return; } mpt2host_config_page_raid_vol_0(vol_pg); mpt_vol->flags |= MPT_RVF_ACTIVE; /* Update disk entry array data. */ for (i = 0; i < vol_pg->NumPhysDisks; i++) { struct mpt_raid_disk *mpt_disk; mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum; mpt_disk->volume = mpt_vol; mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap; if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) { mpt_disk->member_number--; } } if ((vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0) return; req = mpt_get_request(mpt, TRUE); if (req == NULL) { mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: Get request failed!\n"); return; } rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req, MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE); if (rv == ETIMEDOUT) { mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n"); mpt_free_request(mpt, req); return; } ar = REQ_TO_RAID_ACTION_RESULT(req); if (rv == 0 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) { memcpy(&mpt_vol->sync_progress, &ar->action_data.indicator_struct, sizeof(mpt_vol->sync_progress)); mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress); } else { mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: Progress indicator fetch failed!\n"); } mpt_free_request(mpt, req); } /* * Update in-core information about RAID support. We update any entries * that didn't previously exists or have been marked as needing to * be updated by our event handler. Interesting changes are displayed * to the console. 
*/ static int mpt_refresh_raid_data(struct mpt_softc *mpt) { CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol; CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol; IOC_3_PHYS_DISK *ioc_disk; IOC_3_PHYS_DISK *ioc_last_disk; CONFIG_PAGE_RAID_VOL_0 *vol_pg; size_t len; int rv; int i; u_int nonopt_volumes; if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) { return (0); } /* * Mark all items as unreferenced by the configuration. * This allows us to find, report, and discard stale * entries. */ for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) { mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED; } for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED; } /* * Get Physical Disk information. */ len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t); rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, &mpt->ioc_page3->Header, len, /*sleep_ok*/TRUE, /*timeout_ms*/5000); if (rv) { mpt_prt(mpt, "mpt_refresh_raid_data: Failed to read IOC Page 3\n"); return (-1); } mpt2host_config_page_ioc3(mpt->ioc_page3); ioc_disk = mpt->ioc_page3->PhysDisk; ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks; for (; ioc_disk != ioc_last_disk; ioc_disk++) { struct mpt_raid_disk *mpt_disk; mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum; mpt_disk->flags |= MPT_RDF_REFERENCED; if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) { mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk); } mpt_disk->flags |= MPT_RDF_ACTIVE; mpt->raid_rescan++; } /* * Refresh volume data. */ len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t); rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, &mpt->ioc_page2->Header, len, /*sleep_ok*/TRUE, /*timeout_ms*/5000); if (rv) { mpt_prt(mpt, "mpt_refresh_raid_data: " "Failed to read IOC Page 2\n"); return (-1); } mpt2host_config_page_ioc2(mpt->ioc_page2); ioc_vol = mpt->ioc_page2->RaidVolume; ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes; for (;ioc_vol != ioc_last_vol; ioc_vol++) { struct mpt_raid_volume *mpt_vol; mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber; mpt_vol->flags |= MPT_RVF_REFERENCED; vol_pg = mpt_vol->config_page; if (vol_pg == NULL) continue; if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE)) != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE)) || (vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) { mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol); } mpt_vol->flags |= MPT_RVF_ACTIVE; } nonopt_volumes = 0; for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { struct mpt_raid_volume *mpt_vol; uint64_t total; uint64_t left; int m; u_int prio; mpt_vol = &mpt->raid_volumes[i]; if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) { continue; } vol_pg = mpt_vol->config_page; if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED)) == MPT_RVF_ANNOUNCED) { mpt_vol_prt(mpt, mpt_vol, "No longer configured\n"); mpt_vol->flags = 0; continue; } if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) { mpt_announce_vol(mpt, mpt_vol); mpt_vol->flags |= MPT_RVF_ANNOUNCED; } if (vol_pg->VolumeStatus.State != MPI_RAIDVOL0_STATUS_STATE_OPTIMAL) nonopt_volumes++; if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) continue; mpt_vol->flags |= MPT_RVF_UP2DATE; mpt_vol_prt(mpt, mpt_vol, "%s - %s\n", mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol)); mpt_verify_mwce(mpt, mpt_vol); if (vol_pg->VolumeStatus.Flags == 0) { continue; } mpt_vol_prt(mpt, mpt_vol, "Status ("); for (m = 1; m <= 0x80; m <<= 1) { switch (vol_pg->VolumeStatus.Flags & m) { case MPI_RAIDVOL0_STATUS_FLAG_ENABLED: mpt_prtc(mpt, " Enabled"); break; case 
MPI_RAIDVOL0_STATUS_FLAG_QUIESCED: mpt_prtc(mpt, " Quiesced"); break; case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS: mpt_prtc(mpt, " Re-Syncing"); break; case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE: mpt_prtc(mpt, " Inactive"); break; default: break; } } mpt_prtc(mpt, " )\n"); if ((vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0) continue; mpt_verify_resync_rate(mpt, mpt_vol); left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining); total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks); if (vol_pg->ResyncRate != 0) { prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF; mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n", prio / 1000, prio % 1000); } else { prio = vol_pg->VolumeSettings.Settings & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC; mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n", prio ? "High" : "Low"); } mpt_vol_prt(mpt, mpt_vol, "%ju of %ju " "blocks remaining\n", (uintmax_t)left, (uintmax_t)total); /* Periodically report on sync progress. */ mpt_schedule_raid_refresh(mpt); } for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) { struct mpt_raid_disk *mpt_disk; CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg; int m; mpt_disk = &mpt->raid_disks[i]; disk_pg = &mpt_disk->config_page; if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) continue; if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED)) == MPT_RDF_ANNOUNCED) { mpt_disk_prt(mpt, mpt_disk, "No longer configured\n"); mpt_disk->flags = 0; mpt->raid_rescan++; continue; } if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) { mpt_announce_disk(mpt, mpt_disk); mpt_disk->flags |= MPT_RVF_ANNOUNCED; } if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0) continue; mpt_disk->flags |= MPT_RDF_UP2DATE; mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk)); if (disk_pg->PhysDiskStatus.Flags == 0) continue; mpt_disk_prt(mpt, mpt_disk, "Status ("); for (m = 1; m <= 0x80; m <<= 1) { switch (disk_pg->PhysDiskStatus.Flags & m) { case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC: mpt_prtc(mpt, " Out-Of-Sync"); break; case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED: mpt_prtc(mpt, " Quiesced"); break; default: break; } } mpt_prtc(mpt, " )\n"); } mpt->raid_nonopt_volumes = nonopt_volumes; return (0); } static void mpt_raid_timer(void *arg) { struct mpt_softc *mpt; mpt = (struct mpt_softc *)arg; MPT_LOCK_ASSERT(mpt); mpt_raid_wakeup(mpt); } static void mpt_schedule_raid_refresh(struct mpt_softc *mpt) { callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL, mpt_raid_timer, mpt); } void mpt_raid_free_mem(struct mpt_softc *mpt) { if (mpt->raid_volumes) { struct mpt_raid_volume *mpt_raid; int i; for (i = 0; i < mpt->raid_max_volumes; i++) { mpt_raid = &mpt->raid_volumes[i]; if (mpt_raid->config_page) { free(mpt_raid->config_page, M_DEVBUF); mpt_raid->config_page = NULL; } } free(mpt->raid_volumes, M_DEVBUF); mpt->raid_volumes = NULL; } if (mpt->raid_disks) { free(mpt->raid_disks, M_DEVBUF); mpt->raid_disks = NULL; } if (mpt->ioc_page2) { free(mpt->ioc_page2, M_DEVBUF); mpt->ioc_page2 = NULL; } if (mpt->ioc_page3) { free(mpt->ioc_page3, M_DEVBUF); mpt->ioc_page3 = NULL; } mpt->raid_max_volumes = 0; mpt->raid_max_disks = 0; } static int mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate) { struct mpt_raid_volume *mpt_vol; if ((rate > MPT_RAID_RESYNC_RATE_MAX || rate < MPT_RAID_RESYNC_RATE_MIN) && rate != MPT_RAID_RESYNC_RATE_NC) return (EINVAL); MPT_LOCK(mpt); mpt->raid_resync_rate = rate; RAID_VOL_FOREACH(mpt, mpt_vol) { if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) { continue; } mpt_verify_resync_rate(mpt, mpt_vol); } 
MPT_UNLOCK(mpt); return (0); } static int mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth) { struct mpt_raid_volume *mpt_vol; if (vol_queue_depth > 255 || vol_queue_depth < 1) return (EINVAL); MPT_LOCK(mpt); mpt->raid_queue_depth = vol_queue_depth; RAID_VOL_FOREACH(mpt, mpt_vol) { struct cam_path *path; int error; if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) continue; mpt->raid_rescan = 0; error = xpt_create_path(&path, NULL, cam_sim_path(mpt->sim), mpt_vol->config_page->VolumeID, /*lun*/0); if (error != CAM_REQ_CMP) { mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n"); continue; } mpt_adjust_queue_depth(mpt, mpt_vol, path); xpt_free_path(path); } MPT_UNLOCK(mpt); return (0); } static int mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce) { struct mpt_raid_volume *mpt_vol; int force_full_resync; MPT_LOCK(mpt); if (mwce == mpt->raid_mwce_setting) { MPT_UNLOCK(mpt); return (0); } /* * Catch MWCE being left on due to a failed shutdown. Since * sysctls cannot be set by the loader, we treat the first * setting of this varible specially and force a full volume * resync if MWCE is enabled and a resync is in progress. */ force_full_resync = 0; if (mpt->raid_mwce_set == 0 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC && mwce == MPT_RAID_MWCE_REBUILD_ONLY) force_full_resync = 1; mpt->raid_mwce_setting = mwce; RAID_VOL_FOREACH(mpt, mpt_vol) { CONFIG_PAGE_RAID_VOL_0 *vol_pg; int resyncing; int mwce; if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) continue; vol_pg = mpt_vol->config_page; resyncing = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS; mwce = vol_pg->VolumeSettings.Settings & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE; if (force_full_resync && resyncing && mwce) { /* * XXX disable/enable volume should force a resync, * but we'll need to queice, drain, and restart * I/O to do that. */ mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown " "detected. 
Suggest full resync.\n"); } mpt_verify_mwce(mpt, mpt_vol); } mpt->raid_mwce_set = 1; MPT_UNLOCK(mpt); return (0); } static const char *mpt_vol_mwce_strs[] = { "On", "Off", "On-During-Rebuild", "NC" }; static int mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS) { char inbuf[20]; struct mpt_softc *mpt; const char *str; int error; u_int size; u_int i; GIANT_REQUIRED; mpt = (struct mpt_softc *)arg1; str = mpt_vol_mwce_strs[mpt->raid_mwce_setting]; error = SYSCTL_OUT(req, str, strlen(str) + 1); if (error || !req->newptr) { return (error); } size = req->newlen - req->newidx; if (size >= sizeof(inbuf)) { return (EINVAL); } error = SYSCTL_IN(req, inbuf, size); if (error) { return (error); } inbuf[size] = '\0'; for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) { if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) { return (mpt_raid_set_vol_mwce(mpt, i)); } } return (EINVAL); } static int mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS) { struct mpt_softc *mpt; u_int raid_resync_rate; int error; GIANT_REQUIRED; mpt = (struct mpt_softc *)arg1; raid_resync_rate = mpt->raid_resync_rate; error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req); if (error || !req->newptr) { return error; } return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate)); } static int mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS) { struct mpt_softc *mpt; u_int raid_queue_depth; int error; GIANT_REQUIRED; mpt = (struct mpt_softc *)arg1; raid_queue_depth = mpt->raid_queue_depth; error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req); if (error || !req->newptr) { return error; } return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth)); } static void mpt_raid_sysctl_attach(struct mpt_softc *mpt) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0, mpt_raid_sysctl_vol_member_wce, "A", "volume member WCE(On,Off,On-During-Rebuild,NC)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0, mpt_raid_sysctl_vol_queue_depth, "I", "default volume queue depth"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0, mpt_raid_sysctl_vol_resync_rate, "I", "volume resync priority (0 == NC, 1 - 255)"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "nonoptimal_volumes", CTLFLAG_RD, &mpt->raid_nonopt_volumes, 0, "number of nonoptimal volumes"); } Index: head/sys/dev/ocs_fc/ocs_os.c =================================================================== --- head/sys/dev/ocs_fc/ocs_os.c (revision 355600) +++ head/sys/dev/ocs_fc/ocs_os.c (revision 355601) @@ -1,1045 +1,1045 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. 
Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * Implementation of common BSD OS abstraction functions */ #include "ocs.h" static MALLOC_DEFINE(M_OCS, "OCS", "OneCore Storage data"); #include #include #include -timeout_t __ocs_callout; +callout_func_t __ocs_callout; uint32_t ocs_config_read32(ocs_os_handle_t os, uint32_t reg) { return pci_read_config(os->dev, reg, 4); } uint16_t ocs_config_read16(ocs_os_handle_t os, uint32_t reg) { return pci_read_config(os->dev, reg, 2); } uint8_t ocs_config_read8(ocs_os_handle_t os, uint32_t reg) { return pci_read_config(os->dev, reg, 1); } void ocs_config_write8(ocs_os_handle_t os, uint32_t reg, uint8_t val) { return pci_write_config(os->dev, reg, val, 1); } void ocs_config_write16(ocs_os_handle_t os, uint32_t reg, uint16_t val) { return pci_write_config(os->dev, reg, val, 2); } void ocs_config_write32(ocs_os_handle_t os, uint32_t reg, uint32_t val) { return pci_write_config(os->dev, reg, val, 4); } /** * @ingroup os * @brief Read a 32bit PCI register * * The SLI documentation uses the term "register set" to describe one or more * PCI BARs which form a logical address. For example, a 64-bit address uses * two BARs, and thus constitute a register set. * * @param ocs Pointer to the driver's context * @param rset Register Set to use * @param off Offset from the base address of the Register Set * * @return register value */ uint32_t ocs_reg_read32(ocs_t *ocs, uint32_t rset, uint32_t off) { ocs_pci_reg_t *reg = NULL; reg = &ocs->reg[rset]; return bus_space_read_4(reg->btag, reg->bhandle, off); } /** * @ingroup os * @brief Read a 16bit PCI register * * The SLI documentation uses the term "register set" to describe one or more * PCI BARs which form a logical address. For example, a 64-bit address uses * two BARs, and thus constitute a register set. * * @param ocs Pointer to the driver's context * @param rset Register Set to use * @param off Offset from the base address of the Register Set * * @return register value */ uint16_t ocs_reg_read16(ocs_t *ocs, uint32_t rset, uint32_t off) { ocs_pci_reg_t *reg = NULL; reg = &ocs->reg[rset]; return bus_space_read_2(reg->btag, reg->bhandle, off); } /** * @ingroup os * @brief Read a 8bit PCI register * * The SLI documentation uses the term "register set" to describe one or more * PCI BARs which form a logical address. For example, a 64-bit address uses * two BARs, and thus constitute a register set. 
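 *
 * A minimal usage sketch (illustrative only; the register set index 0
 * and offset 0x0 are hypothetical values, not taken from the SLI
 * documentation), given an ocs_t pointer "ocs":
 *
 *   uint8_t val = ocs_reg_read8(ocs, 0, 0x0);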
* * @param ocs Pointer to the driver's context * @param rset Register Set to use * @param off Offset from the base address of the Register Set * * @return register value */ uint8_t ocs_reg_read8(ocs_t *ocs, uint32_t rset, uint32_t off) { ocs_pci_reg_t *reg = NULL; reg = &ocs->reg[rset]; return bus_space_read_1(reg->btag, reg->bhandle, off); } /** * @ingroup os * @brief Write a 32bit PCI register * * The SLI documentation uses the term "register set" to describe one or more * PCI BARs which form a logical address. For example, a 64-bit address uses * two BARs, and thus constitute a register set. * * @param ocs Pointer to the driver's context * @param rset Register Set to use * @param off Offset from the base address of the Register Set * @param val Value to write * * @return none */ void ocs_reg_write32(ocs_t *ocs, uint32_t rset, uint32_t off, uint32_t val) { ocs_pci_reg_t *reg = NULL; reg = &ocs->reg[rset]; return bus_space_write_4(reg->btag, reg->bhandle, off, val); } /** * @ingroup os * @brief Write a 16-bit PCI register * * The SLI documentation uses the term "register set" to describe one or more * PCI BARs which form a logical address. For example, a 64-bit address uses * two BARs, and thus constitute a register set. * * @param ocs Pointer to the driver's context * @param rset Register Set to use * @param off Offset from the base address of the Register Set * @param val Value to write * * @return none */ void ocs_reg_write16(ocs_t *ocs, uint32_t rset, uint32_t off, uint16_t val) { ocs_pci_reg_t *reg = NULL; reg = &ocs->reg[rset]; return bus_space_write_2(reg->btag, reg->bhandle, off, val); } /** * @ingroup os * @brief Write a 8-bit PCI register * * The SLI documentation uses the term "register set" to describe one or more * PCI BARs which form a logical address. For example, a 64-bit address uses * two BARs, and thus constitute a register set. * * @param ocs Pointer to the driver's context * @param rset Register Set to use * @param off Offset from the base address of the Register Set * @param val Value to write * * @return none */ void ocs_reg_write8(ocs_t *ocs, uint32_t rset, uint32_t off, uint8_t val) { ocs_pci_reg_t *reg = NULL; reg = &ocs->reg[rset]; return bus_space_write_1(reg->btag, reg->bhandle, off, val); } /** * @ingroup os * @brief Allocate host memory * * @param os OS handle * @param size number of bytes to allocate * @param flags additional options * * @return pointer to allocated memory, NULL otherwise */ void * ocs_malloc(ocs_os_handle_t os, size_t size, int32_t flags) { if ((flags & OCS_M_NOWAIT) == 0) { flags |= M_WAITOK; } #ifndef OCS_DEBUG_MEMORY return malloc(size, M_OCS, flags); #else char nameb[80]; long offset = 0; void *addr = malloc(size, M_OCS, flags); linker_ddb_search_symbol_name(__builtin_return_address(1), nameb, sizeof(nameb), &offset); printf("A: %p %ld @ %s+%#lx\n", addr, size, nameb, offset); return addr; #endif } /** * @ingroup os * @brief Free host memory * * @param os OS handle * @param addr pointer to memory * @param size bytes to free * * @note size ignored in BSD */ void ocs_free(ocs_os_handle_t os, void *addr, size_t size) { #ifndef OCS_DEBUG_MEMORY free(addr, M_OCS); #else printf("F: %p %ld\n", addr, size); free(addr, M_OCS); #endif } /** * @brief Callback function provided to bus_dmamap_load * * Function loads the physical / bus address into the DMA descriptor. The caller * can detect a mapping failure if a descriptor's phys element is zero. 
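 *
 * Callers can therefore test the descriptor once the load completes;
 * a sketch of the check (ocs_dma_alloc() below does exactly this):
 *
 *   if (dma->phys == 0)
 *           return ENOMEM;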
* * @param arg Argument provided to bus_dmamap_load is a ocs_dma_t * @param seg Array of DMA segment(s), each describing segment's address and length * @param nseg Number of elements in array * @param error Indicates success (0) or failure of mapping */ static void ocs_dma_load(void *arg, bus_dma_segment_t *seg, int nseg, int error) { ocs_dma_t *dma = arg; if (error) { printf("%s: error=%d\n", __func__, error); dma->phys = 0; } else { dma->phys = seg->ds_addr; } } /** * @ingroup os * @brief Free a DMA capable block of memory * * @param os Device abstraction * @param dma DMA descriptor for memory to be freed * * @return 0 if memory is de-allocated, -1 otherwise */ int32_t ocs_dma_free(ocs_os_handle_t os, ocs_dma_t *dma) { struct ocs_softc *ocs = os; if (!dma) { device_printf(ocs->dev, "%s: bad parameter(s) dma=%p\n", __func__, dma); return -1; } if (dma->size == 0) { return 0; } if (dma->map) { bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->tag, dma->map); } if (dma->virt) { bus_dmamem_free(dma->tag, dma->virt, dma->map); bus_dmamap_destroy(dma->tag, dma->map); } bus_dma_tag_destroy(dma->tag); bzero(dma, sizeof(ocs_dma_t)); return 0; } /** * @ingroup os * @brief Allocate a DMA capable block of memory * * @param os Device abstraction * @param dma DMA descriptor containing results of memory allocation * @param size Size in bytes of desired allocation * @param align Alignment in bytes * * @return 0 on success, ENOMEM otherwise */ int32_t ocs_dma_alloc(ocs_os_handle_t os, ocs_dma_t *dma, size_t size, size_t align) { struct ocs_softc *ocs = os; if (!dma || !size) { device_printf(ocs->dev, "%s bad parameter(s) dma=%p size=%zd\n", __func__, dma, size); return ENOMEM; } bzero(dma, sizeof(ocs_dma_t)); /* create a "tag" that describes the desired memory allocation */ if (bus_dma_tag_create(ocs->dmat, align, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL, &dma->tag)) { device_printf(ocs->dev, "DMA tag allocation failed\n"); return ENOMEM; } dma->size = size; /* allocate the memory */ if (bus_dmamem_alloc(dma->tag, &dma->virt, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->map)) { device_printf(ocs->dev, "DMA memory allocation failed s=%zd a=%zd\n", size, align); ocs_dma_free(ocs, dma); return ENOMEM; } dma->alloc = dma->virt; /* map virtual address to device visible address */ if (bus_dmamap_load(dma->tag, dma->map, dma->virt, dma->size, ocs_dma_load, dma, 0)) { device_printf(ocs->dev, "DMA memory load failed\n"); ocs_dma_free(ocs, dma); return ENOMEM; } /* if the DMA map load callback fails, it sets the physical address to zero */ if (0 == dma->phys) { device_printf(ocs->dev, "ocs_dma_load failed\n"); ocs_dma_free(ocs, dma); return ENOMEM; } return 0; } /** * @ingroup os * @brief Synchronize the DMA buffer memory * * Ensures memory coherency between the CPU and device * * @param dma DMA descriptor of memory to synchronize * @param flags Describes direction of synchronization * See BUS_DMA(9) for details * - BUS_DMASYNC_PREWRITE * - BUS_DMASYNC_POSTREAD */ void ocs_dma_sync(ocs_dma_t *dma, uint32_t flags) { bus_dmamap_sync(dma->tag, dma->map, flags); } int32_t ocs_dma_copy_in(ocs_dma_t *dma, void *buffer, uint32_t buffer_length) { if (!dma) return -1; if (!buffer) return -1; if (buffer_length == 0) return 0; if (buffer_length > dma->size) buffer_length = dma->size; ocs_memcpy(dma->virt, buffer, buffer_length); dma->len = buffer_length; return buffer_length; } int32_t ocs_dma_copy_out(ocs_dma_t *dma, void *buffer, 
uint32_t buffer_length) { if (!dma) return -1; if (!buffer) return -1; if (buffer_length == 0) return 0; if (buffer_length > dma->len) buffer_length = dma->len; ocs_memcpy(buffer, dma->virt, buffer_length); return buffer_length; } /** * @ingroup os * @brief Initialize a lock * * @param lock lock to initialize * @param name string identifier for the lock */ void ocs_lock_init(void *os, ocs_lock_t *lock, const char *name, ...) { va_list ap; va_start(ap, name); ocs_vsnprintf(lock->name, MAX_LOCK_DESC_LEN, name, ap); va_end(ap); mtx_init(&lock->lock, lock->name, NULL, MTX_DEF); } /** * @brief Allocate a bit map * * For BSD, this is a simple character string * * @param n_bits number of bits in bit map * * @return pointer to the bit map, NULL on error */ ocs_bitmap_t * ocs_bitmap_alloc(uint32_t n_bits) { return malloc(bitstr_size(n_bits), M_OCS, M_ZERO | M_NOWAIT); } /** * @brief Free a bit map * * @param bitmap pointer to previously allocated bit map */ void ocs_bitmap_free(ocs_bitmap_t *bitmap) { free(bitmap, M_OCS); } /** * @brief find next unset bit and set it * * @param bitmap bit map to search * @param n_bits number of bits in map * * @return bit position or -1 if map is full */ int32_t ocs_bitmap_find(ocs_bitmap_t *bitmap, uint32_t n_bits) { int32_t position = -1; bit_ffc(bitmap, n_bits, &position); if (-1 != position) { bit_set(bitmap, position); } return position; } /** * @brief search for next (un)set bit * * @param bitmap bit map to search * @param set search for a set or unset bit * @param n_bits number of bits in map * * @return bit position or -1 */ int32_t ocs_bitmap_search(ocs_bitmap_t *bitmap, uint8_t set, uint32_t n_bits) { int32_t position; if (!bitmap) { return -1; } if (set) { bit_ffs(bitmap, n_bits, &position); } else { bit_ffc(bitmap, n_bits, &position); } return position; } /** * @brief clear the specified bit * * @param bitmap pointer to bit map * @param bit bit number to clear */ void ocs_bitmap_clear(ocs_bitmap_t *bitmap, uint32_t bit) { bit_clear(bitmap, bit); } void _ocs_log(ocs_t *ocs, const char *func_name, int line, const char *fmt, ...) { va_list ap; char buf[256]; char *p = buf; va_start(ap, fmt); /* TODO: Add Current PID info here. */ p += snprintf(p, sizeof(buf) - (p - buf), "%s: ", DRV_NAME); p += snprintf(p, sizeof(buf) - (p - buf), "%s:", func_name); p += snprintf(p, sizeof(buf) - (p - buf), "%i:", line); p += snprintf(p, sizeof(buf) - (p - buf), "%s:", (ocs != NULL) ? device_get_nameunit(ocs->dev) : ""); p += vsnprintf(p, sizeof(buf) - (p - buf), fmt, ap); va_end(ap); printf("%s", buf); } /** * @brief Common thread call function * * This is the common function called whenever a thread instantiated by ocs_thread_create() is started. * It captures the return value from the actual thread function and stashes it in the thread object, to * be later retrieved by ocs_thread_get_retval(), and calls kthread_exit(), the proscribed method to terminate * a thread. * * @param arg a pointer to the thread object * * @return none */ static void ocs_thread_call_fctn(void *arg) { ocs_thread_t *thread = arg; thread->retval = (*thread->fctn)(thread->arg); ocs_free(NULL, thread->name, ocs_strlen(thread->name+1)); kthread_exit(); } /** * @brief Create a kernel thread * * Creates a kernel thread and optionally starts it. If the thread is not immediately * started, ocs_thread_start() should be called at some later point. 
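 *
 * A minimal usage sketch; "worker", "my_fctn" and "arg" are hypothetical
 * names, not part of this API:
 *
 *   ocs_thread_t worker;
 *   if (ocs_thread_create(os, &worker, my_fctn, "my_worker", arg,
 *                         OCS_THREAD_CREATE) == 0)
 *           ocs_thread_start(&worker);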
* * @param os OS handle * @param thread pointer to thread object * @param fctn function for thread to be begin executing * @param name text name to identify thread * @param arg application specific argument passed to thread function * @param start start option, OCS_THREAD_RUN will start the thread immediately, * OCS_THREAD_CREATE will create but not start the thread * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_thread_create(ocs_os_handle_t os, ocs_thread_t *thread, ocs_thread_fctn fctn, const char *name, void *arg, ocs_thread_start_e start) { int32_t rc = 0; ocs_memset(thread, 0, sizeof(*thread)); thread->fctn = fctn; thread->name = ocs_strdup(name); if (thread->name == NULL) { thread->name = "unknown"; } thread->arg = arg; ocs_atomic_set(&thread->terminate, 0); rc = kthread_add(ocs_thread_call_fctn, thread, NULL, &thread->tcb, (start == OCS_THREAD_CREATE) ? RFSTOPPED : 0, OCS_THREAD_DEFAULT_STACK_SIZE_PAGES, "%s", name); return rc; } /** * @brief Start a thread * * Starts a thread that was created with OCS_THREAD_CREATE rather than OCS_THREAD_RUN * * @param thread pointer to thread object * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_thread_start(ocs_thread_t *thread) { sched_add(thread->tcb, SRQ_BORING); return 0; } /** * @brief return thread argument * * Returns a pointer to the thread's application specific argument * * @param mythread pointer to the thread object * * @return pointer to application specific argument */ void *ocs_thread_get_arg(ocs_thread_t *mythread) { return mythread->arg; } /** * @brief Request thread stop * * A stop request is made to the thread. This is a voluntary call, the thread needs * to periodically query its terminate request using ocs_thread_terminate_requested() * * @param thread pointer to thread object * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_thread_terminate(ocs_thread_t *thread) { ocs_atomic_set(&thread->terminate, 1); return 0; } /** * @brief See if a terminate request has been made * * Check to see if a stop request has been made to the current thread. This * function would be used by a thread to see if it should terminate. * * @return returns non-zero if a stop has been requested */ int32_t ocs_thread_terminate_requested(ocs_thread_t *thread) { return ocs_atomic_read(&thread->terminate); } /** * @brief Retrieve threads return value * * After a thread has terminated, it's return value may be retrieved with this function. * * @param thread pointer to thread object * * @return return value from thread function */ int32_t ocs_thread_get_retval(ocs_thread_t *thread) { return thread->retval; } /** * @brief Request that the currently running thread yield * * The currently running thread yields to the scheduler * * @param thread pointer to thread (ignored) * * @return none */ void ocs_thread_yield(ocs_thread_t *thread) { pause("thread yield", 1); } ocs_thread_t * ocs_thread_self(void) { ocs_printf(">>> %s not implemented\n", __func__); ocs_abort(); } int32_t ocs_thread_setcpu(ocs_thread_t *thread, uint32_t cpu) { ocs_printf(">>> %s not implemented\n", __func__); return -1; } int32_t ocs_thread_getcpu(void) { return curcpu; } int ocs_sem_init(ocs_sem_t *sem, int val, const char *name, ...) 
{ va_list ap; va_start(ap, name); ocs_vsnprintf(sem->name, sizeof(sem->name), name, ap); va_end(ap); sema_init(&sem->sem, val, sem->name); return 0; } /** * @ingroup os * @brief Copy user arguments in to kernel space for an ioctl * @par Description * This function is called at the beginning of an ioctl function * to copy the ioctl argument from user space to kernel space. * * BSD handles this for us - arg is already in kernel space, * so we just return it. * * @param os OS handle * @param arg The argument passed to the ioctl function * @param size The size of the structure pointed to by arg * * @return A pointer to a kernel space copy of the argument on * success; NULL on failure */ void *ocs_ioctl_preprocess(ocs_os_handle_t os, void *arg, size_t size) { return arg; } /** * @ingroup os * @brief Copy results of an ioctl back to user space * @par Description * This function is called at the end of ioctl processing to * copy the argument back to user space. * * BSD handles this for us. * * @param os OS handle * @param arg The argument passed to the ioctl function * @param kern_ptr A pointer to the kernel space copy of the * argument * @param size The size of the structure pointed to by arg. * * @return Returns 0. */ int32_t ocs_ioctl_postprocess(ocs_os_handle_t os, void *arg, void *kern_ptr, size_t size) { return 0; } /** * @ingroup os * @brief Free memory allocated by ocs_ioctl_preprocess * @par Description * This function is called in the event of an error in ioctl * processing. For operating environments where ocs_ioctlpreprocess * allocates memory, this call frees the memory without copying * results back to user space. * * For BSD, because no memory was allocated in ocs_ioctl_preprocess, * nothing needs to be done here. * * @param os OS handle * @param kern_ptr A pointer to the kernel space copy of the * argument * @param size The size of the structure pointed to by arg. * * @return Returns nothing. */ void ocs_ioctl_free(ocs_os_handle_t os, void *kern_ptr, size_t size) { return; } void ocs_intr_disable(ocs_os_handle_t os) { } void ocs_intr_enable(ocs_os_handle_t os) { } void ocs_print_stack(void) { #if defined(STACK) struct stack st; stack_zero(&st); stack_save(&st); stack_print(&st); #endif } void ocs_abort(void) { panic(">>> abort/panic\n"); } const char * ocs_pci_model(uint16_t vendor, uint16_t device) { switch (device) { case PCI_PRODUCT_EMULEX_OCE16002: return "OCE16002"; case PCI_PRODUCT_EMULEX_OCE1600_VF: return "OCE1600_VF"; case PCI_PRODUCT_EMULEX_OCE50102: return "OCE50102"; case PCI_PRODUCT_EMULEX_OCE50102_VF: return "OCE50102_VR"; default: break; } return "unknown"; } int32_t ocs_get_bus_dev_func(ocs_t *ocs, uint8_t* bus, uint8_t* dev, uint8_t* func) { *bus = pci_get_bus(ocs->dev); *dev = pci_get_slot(ocs->dev); *func= pci_get_function(ocs->dev); return 0; } /** * @brief return CPU information * * This function populates the ocs_cpuinfo_t buffer with CPU information * * @param cpuinfo pointer to ocs_cpuinfo_t buffer * * @return returns 0 for success, a negative error code value for failure. 
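 *
 * Typical use (sketch only; this mirrors what ocs_get_num_cpus() does
 * below, and assumes num_cpus is an unsigned integer field):
 *
 *   ocs_cpuinfo_t info;
 *   uint32_t ncpus = 0;
 *   if (ocs_get_cpuinfo(&info) == 0)
 *           ncpus = info.num_cpus;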
*/ extern int mp_ncpus; int32_t ocs_get_cpuinfo(ocs_cpuinfo_t *cpuinfo) { cpuinfo->num_cpus = mp_ncpus; return 0; } uint32_t ocs_get_num_cpus(void) { static ocs_cpuinfo_t cpuinfo; if (cpuinfo.num_cpus == 0) { ocs_get_cpuinfo(&cpuinfo); } return cpuinfo.num_cpus; } void __ocs_callout(void *t) { ocs_timer_t *timer = t; if (callout_pending(&timer->callout)) { /* Callout was reset */ return; } if (!callout_active(&timer->callout)) { /* Callout was stopped */ return; } callout_deactivate(&timer->callout); if (timer->func) { timer->func(timer->data); } } int32_t ocs_setup_timer(ocs_os_handle_t os, ocs_timer_t *timer, void(*func)(void *arg), void *data, uint32_t timeout_ms) { struct timeval tv; int hz; if (timer == NULL) { ocs_log_err(NULL, "bad parameter\n"); return -1; } if (!mtx_initialized(&timer->lock)) { mtx_init(&timer->lock, "ocs_timer", NULL, MTX_DEF); } callout_init_mtx(&timer->callout, &timer->lock, 0); timer->func = func; timer->data = data; tv.tv_sec = timeout_ms / 1000; tv.tv_usec = (timeout_ms % 1000) * 1000; hz = tvtohz(&tv); if (hz < 0) hz = INT32_MAX; if (hz == 0) hz = 1; mtx_lock(&timer->lock); callout_reset(&timer->callout, hz, __ocs_callout, timer); mtx_unlock(&timer->lock); return 0; } int32_t ocs_mod_timer(ocs_timer_t *timer, uint32_t timeout_ms) { struct timeval tv; int hz; if (timer == NULL) { ocs_log_err(NULL, "bad parameter\n"); return -1; } tv.tv_sec = timeout_ms / 1000; tv.tv_usec = (timeout_ms % 1000) * 1000; hz = tvtohz(&tv); if (hz < 0) hz = INT32_MAX; if (hz == 0) hz = 1; mtx_lock(&timer->lock); callout_reset(&timer->callout, hz, __ocs_callout, timer); mtx_unlock(&timer->lock); return 0; } int32_t ocs_timer_pending(ocs_timer_t *timer) { return callout_active(&timer->callout); } int32_t ocs_del_timer(ocs_timer_t *timer) { mtx_lock(&timer->lock); callout_stop(&timer->callout); mtx_unlock(&timer->lock); return 0; } char * ocs_strdup(const char *s) { uint32_t l = strlen(s); char *d; d = ocs_malloc(NULL, l+1, OCS_M_NOWAIT); if (d != NULL) { ocs_strcpy(d, s); } return d; } void _ocs_assert(const char *cond, const char *filename, int linenum) { const char *fn = strrchr(__FILE__, '/'); ocs_log_err(NULL, "%s(%d) assertion (%s) failed\n", (fn ? fn + 1 : filename), linenum, cond); ocs_print_stack(); ocs_save_ddump_all(OCS_DDUMP_FLAGS_WQES|OCS_DDUMP_FLAGS_CQES|OCS_DDUMP_FLAGS_MQES, -1, TRUE); } Index: head/sys/dev/ppbus/lpt.c =================================================================== --- head/sys/dev/ppbus/lpt.c (revision 355600) +++ head/sys/dev/ppbus/lpt.c (revision 355601) @@ -1,1004 +1,1004 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1990 William F. Jolitz, TeleMuse * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This software is a component of "386BSD" developed by * William F. Jolitz, TeleMuse. * 4. 
Neither the name of the developer nor the name "386BSD" * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS A COMPONENT OF 386BSD DEVELOPED BY WILLIAM F. JOLITZ * AND IS INTENDED FOR RESEARCH AND EDUCATIONAL PURPOSES ONLY. THIS * SOFTWARE SHOULD NOT BE CONSIDERED TO BE A COMMERCIAL PRODUCT. * THE DEVELOPER URGES THAT USERS WHO REQUIRE A COMMERCIAL PRODUCT * NOT MAKE USE OF THIS WORK. * * FOR USERS WHO WISH TO UNDERSTAND THE 386BSD SYSTEM DEVELOPED * BY WILLIAM F. JOLITZ, WE RECOMMEND THE USER STUDY WRITTEN * REFERENCES SUCH AS THE "PORTING UNIX TO THE 386" SERIES * (BEGINNING JANUARY 1991 "DR. DOBBS JOURNAL", USA AND BEGINNING * JUNE 1991 "UNIX MAGAZIN", GERMANY) BY WILLIAM F. JOLITZ AND * LYNNE GREER JOLITZ, AS WELL AS OTHER BOOKS ON UNIX AND THE * ON-LINE 386BSD USER MANUAL BEFORE USE. A BOOK DISCUSSING THE INTERNALS * OF 386BSD ENTITLED "386BSD FROM THE INSIDE OUT" WILL BE AVAILABLE LATE 1992. * * THIS SOFTWARE IS PROVIDED BY THE DEVELOPER ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE DEVELOPER BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: unknown origin, 386BSD 0.1 * From Id: lpt.c,v 1.55.2.1 1996/11/12 09:08:38 phk Exp * From Id: nlpt.c,v 1.14 1999/02/08 13:55:43 des Exp */ #include __FBSDID("$FreeBSD$"); /* * Device Driver for AT parallel printer port * Written by William Jolitz 12/18/90 */ /* * Updated for ppbus by Nicolas Souchu * [Mon Jul 28 1997] */ #include "opt_lpt.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ppbus_if.h" #include #ifndef LPT_DEBUG #define lprintf(args) #else #define lprintf(args) \ do { \ if (lptflag) \ printf args; \ } while (0) static int volatile lptflag = 1; #endif #define LPINITRDY 4 /* wait up to 4 seconds for a ready */ #define LPTOUTINITIAL 10 /* initial timeout to wait for ready 1/10 s */ #define LPTOUTMAX 1 /* maximal timeout 1 s */ #define LPPRI (PZERO+8) #define BUFSIZE 1024 #define BUFSTATSIZE 32 struct lpt_data { device_t sc_dev; struct cdev *sc_cdev; struct cdev *sc_cdev_bypass; short sc_state; /* default case: negative prime, negative ack, handshake strobe, prime once */ u_char sc_control; char sc_flags; #define LP_POS_INIT 0x04 /* if we are a positive init signal */ #define LP_POS_ACK 0x08 /* if we are a positive going ack */ #define LP_NO_PRIME 0x10 /* don't prime the printer at all */ #define LP_PRIMEOPEN 0x20 /* prime on every open */ #define LP_AUTOLF 0x40 /* tell printer to do an automatic lf */ #define LP_BYPASS 0x80 /* bypass printer ready checks */ void *sc_inbuf; void *sc_statbuf; short sc_xfercnt ; char sc_primed; char *sc_cp ; u_short sc_irq ; /* IRQ status of port */ #define LP_HAS_IRQ 0x01 /* we have an irq available */ #define LP_USE_IRQ 0x02 /* we are using our irq */ #define LP_ENABLE_IRQ 0x04 /* enable IRQ on open */ #define LP_ENABLE_EXT 0x10 /* we 
shall use advanced mode when possible */ u_char sc_backoff ; /* time to call lptout() again */ struct callout sc_timer; struct resource *sc_intr_resource; /* interrupt resource */ void *sc_intr_cookie; /* interrupt cookie */ }; #define LPT_NAME "lpt" /* our official name */ -static timeout_t lptout; +static callout_func_t lptout; static int lpt_port_test(device_t dev, u_char data, u_char mask); static int lpt_detect(device_t dev); #define DEVTOSOFTC(dev) \ ((struct lpt_data *)device_get_softc(dev)) static void lptintr(void *arg); static devclass_t lpt_devclass; /* bits for state */ #define OPEN (1<<0) /* device is open */ #define ASLP (1<<1) /* awaiting draining of printer */ #define EERROR (1<<2) /* error was received from printer */ #define OBUSY (1<<3) /* printer is busy doing output */ #define LPTOUT (1<<4) /* timeout while not selected */ #define TOUT (1<<5) /* timeout while not selected */ #define LPTINIT (1<<6) /* waiting to initialize for open */ #define INTERRUPTED (1<<7) /* write call was interrupted */ #define HAVEBUS (1<<8) /* the driver owns the bus */ /* status masks to interrogate printer status */ #define RDY_MASK (LPS_SEL|LPS_OUT|LPS_NBSY|LPS_NERR) /* ready ? */ #define LP_READY (LPS_SEL|LPS_NBSY|LPS_NERR) /* Printer Ready condition - from lpa.c */ /* Only used in polling code */ #define LPS_INVERT (LPS_NBSY | LPS_NACK | LPS_SEL | LPS_NERR) #define LPS_MASK (LPS_NBSY | LPS_NACK | LPS_OUT | LPS_SEL | LPS_NERR) #define NOT_READY(ppbus) ((ppb_rstr(ppbus)^LPS_INVERT)&LPS_MASK) #define MAX_SLEEP (hz*5) /* Timeout while waiting for device ready */ #define MAX_SPIN 20 /* Max delay for device ready in usecs */ static d_open_t lptopen; static d_close_t lptclose; static d_write_t lptwrite; static d_read_t lptread; static d_ioctl_t lptioctl; static struct cdevsw lpt_cdevsw = { .d_version = D_VERSION, .d_open = lptopen, .d_close = lptclose, .d_read = lptread, .d_write = lptwrite, .d_ioctl = lptioctl, .d_name = LPT_NAME, }; static int lpt_request_ppbus(device_t dev, int how) { device_t ppbus = device_get_parent(dev); struct lpt_data *sc = DEVTOSOFTC(dev); int error; /* * We might already have the bus for a write(2) after an interrupted * write(2) call. */ ppb_assert_locked(ppbus); if (sc->sc_state & HAVEBUS) return (0); error = ppb_request_bus(ppbus, dev, how); if (error == 0) sc->sc_state |= HAVEBUS; return (error); } static int lpt_release_ppbus(device_t dev) { device_t ppbus = device_get_parent(dev); struct lpt_data *sc = DEVTOSOFTC(dev); int error = 0; ppb_assert_locked(ppbus); if (sc->sc_state & HAVEBUS) { error = ppb_release_bus(ppbus, dev); if (error == 0) sc->sc_state &= ~HAVEBUS; } return (error); } /* * Internal routine to lptprobe to do port tests of one byte value */ static int lpt_port_test(device_t ppbus, u_char data, u_char mask) { int temp, timeout; data = data & mask; ppb_wdtr(ppbus, data); timeout = 10000; do { DELAY(10); temp = ppb_rdtr(ppbus) & mask; } while (temp != data && --timeout); lprintf(("out=%x\tin=%x\ttout=%d\n", data, temp, timeout)); return (temp == data); } /* * Probe simplified by replacing multiple loops with a hardcoded * test pattern - 1999/02/08 des@freebsd.org * * New lpt port probe Geoff Rehmet - Rhodes University - 14/2/94 * Based partially on Rod Grimes' printer probe * * Logic: * 1) If no port address was given, use the bios detected ports * and autodetect what ports the printers are on. * 2) Otherwise, probe the data port at the address given, * using the method in Rod Grimes' port probe. * (Much code ripped off directly from Rod's probe.) 
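 *
 * (Illustrative example of what the walking-bit patterns catch: if data
 * bit 3 were stuck low, writing the walking-one value 0x08 would read
 * back as 0x00, lpt_port_test() above would return 0, and the probe
 * would fail rather than attach to a broken or absent port.)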
* * Comments from Rod's probe: * Logic: * 1) You should be able to write to and read back the same value * to the data port. Do an alternating zeros, alternating ones, * walking zero, and walking one test to check for stuck bits. * * 2) You should be able to write to and read back the same value * to the control port lower 5 bits, the upper 3 bits are reserved * per the IBM PC technical reference manauls and different boards * do different things with them. Do an alternating zeros, alternating * ones, walking zero, and walking one test to check for stuck bits. * * Some printers drag the strobe line down when the are powered off * so this bit has been masked out of the control port test. * * XXX Some printers may not like a fast pulse on init or strobe, I * don't know at this point, if that becomes a problem these bits * should be turned off in the mask byte for the control port test. * * We are finally left with a mask of 0x14, due to some printers * being adamant about holding other bits high ........ * * Before probing the control port, we write a 0 to the data port - * If not, some printers chuck out garbage when the strobe line * gets toggled. * * 3) Set the data and control ports to a value of 0 * * This probe routine has been tested on Epson Lx-800, HP LJ3P, * Epson FX-1170 and C.Itoh 8510RM * printers. * Quick exit on fail added. */ static int lpt_detect(device_t dev) { device_t ppbus = device_get_parent(dev); static u_char testbyte[18] = { 0x55, /* alternating zeros */ 0xaa, /* alternating ones */ 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f, /* walking zero */ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 /* walking one */ }; int i, error, status; status = 1; /* assume success */ ppb_lock(ppbus); if ((error = lpt_request_ppbus(dev, PPB_DONTWAIT))) { ppb_unlock(ppbus); device_printf(dev, "cannot alloc ppbus (%d)!\n", error); return (0); } for (i = 0; i < 18 && status; i++) if (!lpt_port_test(ppbus, testbyte[i], 0xff)) { status = 0; break; } /* write 0's to control and data ports */ ppb_wdtr(ppbus, 0); ppb_wctr(ppbus, 0); lpt_release_ppbus(dev); ppb_unlock(ppbus); return (status); } static void lpt_identify(driver_t *driver, device_t parent) { device_t dev; dev = device_find_child(parent, LPT_NAME, -1); if (!dev) BUS_ADD_CHILD(parent, 0, LPT_NAME, -1); } /* * lpt_probe() */ static int lpt_probe(device_t dev) { if (!lpt_detect(dev)) return (ENXIO); device_set_desc(dev, "Printer"); return (0); } static int lpt_attach(device_t dev) { device_t ppbus = device_get_parent(dev); struct lpt_data *sc = DEVTOSOFTC(dev); int rid = 0, unit = device_get_unit(dev); int error; sc->sc_primed = 0; /* not primed yet */ ppb_init_callout(ppbus, &sc->sc_timer, 0); ppb_lock(ppbus); if ((error = lpt_request_ppbus(dev, PPB_DONTWAIT))) { ppb_unlock(ppbus); device_printf(dev, "cannot alloc ppbus (%d)!\n", error); return (0); } ppb_wctr(ppbus, LPC_NINIT); lpt_release_ppbus(dev); ppb_unlock(ppbus); /* declare our interrupt handler */ sc->sc_intr_resource = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE); if (sc->sc_intr_resource) { error = bus_setup_intr(dev, sc->sc_intr_resource, INTR_TYPE_TTY | INTR_MPSAFE, NULL, lptintr, sc, &sc->sc_intr_cookie); if (error) { bus_release_resource(dev, SYS_RES_IRQ, rid, sc->sc_intr_resource); device_printf(dev, "Unable to register interrupt handler\n"); return (error); } sc->sc_irq = LP_HAS_IRQ | LP_USE_IRQ | LP_ENABLE_IRQ; device_printf(dev, "Interrupt-driven port\n"); } else { sc->sc_irq = 0; device_printf(dev, "Polled port\n"); } lprintf(("irq %x\n", 
sc->sc_irq)); sc->sc_inbuf = malloc(BUFSIZE, M_DEVBUF, M_WAITOK); sc->sc_statbuf = malloc(BUFSTATSIZE, M_DEVBUF, M_WAITOK); sc->sc_dev = dev; sc->sc_cdev = make_dev(&lpt_cdevsw, unit, UID_ROOT, GID_WHEEL, 0600, LPT_NAME "%d", unit); sc->sc_cdev->si_drv1 = sc; sc->sc_cdev->si_drv2 = 0; sc->sc_cdev_bypass = make_dev(&lpt_cdevsw, unit, UID_ROOT, GID_WHEEL, 0600, LPT_NAME "%d.ctl", unit); sc->sc_cdev_bypass->si_drv1 = sc; sc->sc_cdev_bypass->si_drv2 = (void *)LP_BYPASS; return (0); } static int lpt_detach(device_t dev) { struct lpt_data *sc = DEVTOSOFTC(dev); device_t ppbus = device_get_parent(dev); destroy_dev(sc->sc_cdev); destroy_dev(sc->sc_cdev_bypass); ppb_lock(ppbus); lpt_release_ppbus(dev); ppb_unlock(ppbus); callout_drain(&sc->sc_timer); if (sc->sc_intr_resource != NULL) { bus_teardown_intr(dev, sc->sc_intr_resource, sc->sc_intr_cookie); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_intr_resource); } free(sc->sc_inbuf, M_DEVBUF); free(sc->sc_statbuf, M_DEVBUF); return (0); } static void lptout(void *arg) { struct lpt_data *sc = arg; device_t dev = sc->sc_dev; device_t ppbus; ppbus = device_get_parent(dev); ppb_assert_locked(ppbus); lprintf(("T %x ", ppb_rstr(ppbus))); if (sc->sc_state & OPEN) { sc->sc_backoff++; if (sc->sc_backoff > hz/LPTOUTMAX) sc->sc_backoff = hz/LPTOUTMAX; callout_reset(&sc->sc_timer, sc->sc_backoff, lptout, sc); } else sc->sc_state &= ~TOUT; if (sc->sc_state & EERROR) sc->sc_state &= ~EERROR; /* * Avoid possible hangs due to missed interrupts */ if (sc->sc_xfercnt) { lptintr(sc); } else { sc->sc_state &= ~OBUSY; wakeup(dev); } } /* * lptopen -- reset the printer, then wait until it's selected and not busy. * If LP_BYPASS flag is selected, then we do not try to select the * printer -- this is just used for passing ioctls. */ static int lptopen(struct cdev *dev, int flags, int fmt, struct thread *td) { int trys, err; struct lpt_data *sc = dev->si_drv1; device_t lptdev; device_t ppbus; if (!sc) return (ENXIO); lptdev = sc->sc_dev; ppbus = device_get_parent(lptdev); ppb_lock(ppbus); if (sc->sc_state) { lprintf(("%s: still open %x\n", device_get_nameunit(lptdev), sc->sc_state)); ppb_unlock(ppbus); return(EBUSY); } else sc->sc_state |= LPTINIT; sc->sc_flags = (uintptr_t)dev->si_drv2; /* Check for open with BYPASS flag set. 
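	 * (Opens of the lptN.ctl node created in lpt_attach() take this
	 * path: si_drv2 carries LP_BYPASS, so the printer is never primed
	 * or selected and read/write return EPERM; the node exists only
	 * for ioctls such as LPT_IRQ.)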
*/ if (sc->sc_flags & LP_BYPASS) { sc->sc_state = OPEN; ppb_unlock(ppbus); return(0); } /* request the ppbus only if we don't have it already */ if ((err = lpt_request_ppbus(lptdev, PPB_WAIT|PPB_INTR)) != 0) { /* give it a chance to try later */ sc->sc_state = 0; ppb_unlock(ppbus); return (err); } lprintf(("%s flags 0x%x\n", device_get_nameunit(lptdev), sc->sc_flags)); /* set IRQ status according to ENABLE_IRQ flag */ if (sc->sc_irq & LP_ENABLE_IRQ) sc->sc_irq |= LP_USE_IRQ; else sc->sc_irq &= ~LP_USE_IRQ; /* init printer */ if ((sc->sc_flags & LP_NO_PRIME) == 0) { if ((sc->sc_flags & LP_PRIMEOPEN) || sc->sc_primed == 0) { ppb_wctr(ppbus, 0); sc->sc_primed++; DELAY(500); } } ppb_wctr(ppbus, LPC_SEL|LPC_NINIT); /* wait till ready (printer running diagnostics) */ trys = 0; do { /* ran out of waiting for the printer */ if (trys++ >= LPINITRDY*4) { lprintf(("status %x\n", ppb_rstr(ppbus))); lpt_release_ppbus(lptdev); sc->sc_state = 0; ppb_unlock(ppbus); return (EBUSY); } /* wait 1/4 second, give up if we get a signal */ if (ppb_sleep(ppbus, lptdev, LPPRI | PCATCH, "lptinit", hz / 4) != EWOULDBLOCK) { lpt_release_ppbus(lptdev); sc->sc_state = 0; ppb_unlock(ppbus); return (EBUSY); } /* is printer online and ready for output */ } while ((ppb_rstr(ppbus) & (LPS_SEL|LPS_OUT|LPS_NBSY|LPS_NERR)) != (LPS_SEL|LPS_NBSY|LPS_NERR)); sc->sc_control = LPC_SEL|LPC_NINIT; if (sc->sc_flags & LP_AUTOLF) sc->sc_control |= LPC_AUTOL; /* enable interrupt if interrupt-driven */ if (sc->sc_irq & LP_USE_IRQ) sc->sc_control |= LPC_ENA; ppb_wctr(ppbus, sc->sc_control); sc->sc_state &= ~LPTINIT; sc->sc_state |= OPEN; sc->sc_xfercnt = 0; /* only use timeout if using interrupt */ lprintf(("irq %x\n", sc->sc_irq)); if (sc->sc_irq & LP_USE_IRQ) { sc->sc_state |= TOUT; sc->sc_backoff = hz / LPTOUTINITIAL; callout_reset(&sc->sc_timer, sc->sc_backoff, lptout, sc); } /* release the ppbus */ lpt_release_ppbus(lptdev); ppb_unlock(ppbus); lprintf(("opened.\n")); return(0); } /* * lptclose -- close the device, free the local line buffer. * * Check for interrupted write call added. */ static int lptclose(struct cdev *dev, int flags, int fmt, struct thread *td) { struct lpt_data *sc = dev->si_drv1; device_t lptdev = sc->sc_dev; device_t ppbus = device_get_parent(lptdev); int err; ppb_lock(ppbus); if (sc->sc_flags & LP_BYPASS) goto end_close; if ((err = lpt_request_ppbus(lptdev, PPB_WAIT|PPB_INTR)) != 0) { ppb_unlock(ppbus); return (err); } /* if the last write was interrupted, don't complete it */ if ((!(sc->sc_state & INTERRUPTED)) && (sc->sc_irq & LP_USE_IRQ)) while ((ppb_rstr(ppbus) & (LPS_SEL|LPS_OUT|LPS_NBSY|LPS_NERR)) != (LPS_SEL|LPS_NBSY|LPS_NERR) || sc->sc_xfercnt) /* wait 1 second, give up if we get a signal */ if (ppb_sleep(ppbus, lptdev, LPPRI | PCATCH, "lpclose", hz) != EWOULDBLOCK) break; sc->sc_state &= ~OPEN; callout_stop(&sc->sc_timer); ppb_wctr(ppbus, LPC_NINIT); /* * unregistration of interrupt forced by release */ lpt_release_ppbus(lptdev); end_close: sc->sc_state = 0; sc->sc_xfercnt = 0; ppb_unlock(ppbus); lprintf(("closed.\n")); return(0); } /* * lpt_pushbytes() * Workhorse for actually spinning and writing bytes to printer * Derived from lpa.c * Originally by ? * * This code is only used when we are polling the port */ static int lpt_pushbytes(struct lpt_data *sc) { device_t dev = sc->sc_dev; device_t ppbus = device_get_parent(dev); int spin, err, tic; char ch; ppb_assert_locked(ppbus); lprintf(("p")); /* loop for every character .. 
*/ while (sc->sc_xfercnt > 0) { /* printer data */ ch = *(sc->sc_cp); sc->sc_cp++; sc->sc_xfercnt--; /* * Wait for printer ready. * Loop 20 usecs testing BUSY bit, then sleep * for exponentially increasing timeout. (vak) */ for (spin = 0; NOT_READY(ppbus) && spin < MAX_SPIN; ++spin) DELAY(1); /* XXX delay is NOT this accurate! */ if (spin >= MAX_SPIN) { tic = 0; while (NOT_READY(ppbus)) { /* * Now sleep, every cycle a * little longer .. */ tic = tic + tic + 1; /* * But no more than 10 seconds. (vak) */ if (tic > MAX_SLEEP) tic = MAX_SLEEP; err = ppb_sleep(ppbus, dev, LPPRI, LPT_NAME "poll", tic); if (err != EWOULDBLOCK) { return (err); } } } /* output data */ ppb_wdtr(ppbus, ch); /* strobe */ ppb_wctr(ppbus, sc->sc_control|LPC_STB); ppb_wctr(ppbus, sc->sc_control); } return(0); } /* * lptread --retrieve printer status in IEEE1284 NIBBLE mode */ static int lptread(struct cdev *dev, struct uio *uio, int ioflag) { struct lpt_data *sc = dev->si_drv1; device_t lptdev = sc->sc_dev; device_t ppbus = device_get_parent(lptdev); int error = 0, len; if (sc->sc_flags & LP_BYPASS) { /* we can't do reads in bypass mode */ return (EPERM); } ppb_lock(ppbus); if ((error = ppb_1284_negociate(ppbus, PPB_NIBBLE, 0))) { ppb_unlock(ppbus); return (error); } /* read data in an other buffer, read/write may be simultaneous */ len = 0; while (uio->uio_resid) { if ((error = ppb_1284_read(ppbus, PPB_NIBBLE, sc->sc_statbuf, min(BUFSTATSIZE, uio->uio_resid), &len))) { goto error; } if (!len) goto error; /* no more data */ ppb_unlock(ppbus); error = uiomove(sc->sc_statbuf, len, uio); ppb_lock(ppbus); if (error) goto error; } error: ppb_1284_terminate(ppbus); ppb_unlock(ppbus); return (error); } /* * lptwrite --copy a line from user space to a local buffer, then call * putc to get the chars moved to the output queue. * * Flagging of interrupted write added. */ static int lptwrite(struct cdev *dev, struct uio *uio, int ioflag) { register unsigned n; int err; struct lpt_data *sc = dev->si_drv1; device_t lptdev = sc->sc_dev; device_t ppbus = device_get_parent(lptdev); if (sc->sc_flags & LP_BYPASS) { /* we can't do writes in bypass mode */ return (EPERM); } /* request the ppbus only if we don't have it already */ ppb_lock(ppbus); if ((err = lpt_request_ppbus(lptdev, PPB_WAIT|PPB_INTR)) != 0) { ppb_unlock(ppbus); return (err); } sc->sc_state &= ~INTERRUPTED; while ((n = min(BUFSIZE, uio->uio_resid)) != 0) { sc->sc_cp = sc->sc_inbuf; ppb_unlock(ppbus); err = uiomove(sc->sc_cp, n, uio); ppb_lock(ppbus); if (err) break; sc->sc_xfercnt = n; if (sc->sc_irq & LP_ENABLE_EXT) { /* try any extended mode */ err = ppb_write(ppbus, sc->sc_cp, sc->sc_xfercnt, 0); switch (err) { case 0: /* if not all data was sent, we could rely * on polling for the last bytes */ sc->sc_xfercnt = 0; break; case EINTR: sc->sc_state |= INTERRUPTED; ppb_unlock(ppbus); return (err); case EINVAL: /* advanced mode not avail */ log(LOG_NOTICE, "%s: advanced mode not avail, polling\n", device_get_nameunit(sc->sc_dev)); break; default: ppb_unlock(ppbus); return (err); } } else while ((sc->sc_xfercnt > 0)&&(sc->sc_irq & LP_USE_IRQ)) { lprintf(("i")); /* if the printer is ready for a char, */ /* give it one */ if ((sc->sc_state & OBUSY) == 0){ lprintf(("\nC %d. 
", sc->sc_xfercnt)); lptintr(sc); } lprintf(("W ")); if (sc->sc_state & OBUSY) if ((err = ppb_sleep(ppbus, lptdev, LPPRI|PCATCH, LPT_NAME "write", 0))) { sc->sc_state |= INTERRUPTED; ppb_unlock(ppbus); return(err); } } /* check to see if we must do a polled write */ if (!(sc->sc_irq & LP_USE_IRQ) && (sc->sc_xfercnt)) { lprintf(("p")); err = lpt_pushbytes(sc); if (err) { ppb_unlock(ppbus); return (err); } } } /* we have not been interrupted, release the ppbus */ lpt_release_ppbus(lptdev); ppb_unlock(ppbus); return (err); } /* * lptintr -- handle printer interrupts which occur when the printer is * ready to accept another char. * * do checking for interrupted write call. */ static void lptintr(void *arg) { struct lpt_data *sc = arg; device_t lptdev = sc->sc_dev; device_t ppbus = device_get_parent(lptdev); int sts = 0; int i; /* * Is printer online and ready for output? * * Avoid falling back to lptout() too quickly. First spin-loop * to see if the printer will become ready ``really soon now''. */ for (i = 0; i < 100 && ((sts=ppb_rstr(ppbus)) & RDY_MASK) != LP_READY; i++) ; if ((sts & RDY_MASK) == LP_READY) { sc->sc_state = (sc->sc_state | OBUSY) & ~EERROR; sc->sc_backoff = hz / LPTOUTINITIAL; if (sc->sc_xfercnt) { /* send char */ /*lprintf(("%x ", *sc->sc_cp)); */ ppb_wdtr(ppbus, *sc->sc_cp++) ; ppb_wctr(ppbus, sc->sc_control|LPC_STB); /* DELAY(X) */ ppb_wctr(ppbus, sc->sc_control); /* any more data for printer */ if (--(sc->sc_xfercnt) > 0) return; } /* * No more data waiting for printer. * Wakeup is not done if write call was not interrupted. */ sc->sc_state &= ~OBUSY; if (!(sc->sc_state & INTERRUPTED)) wakeup(lptdev); lprintf(("w ")); return; } else { /* check for error */ if (((sts & (LPS_NERR | LPS_OUT) ) != LPS_NERR) && (sc->sc_state & OPEN)) sc->sc_state |= EERROR; /* lptout() will jump in and try to restart. */ } lprintf(("sts %x ", sts)); } static int lptioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags, struct thread *td) { int error = 0; struct lpt_data *sc = dev->si_drv1; device_t ppbus; u_char old_sc_irq; /* old printer IRQ status */ switch (cmd) { case LPT_IRQ : ppbus = device_get_parent(sc->sc_dev); ppb_lock(ppbus); if (sc->sc_irq & LP_HAS_IRQ) { /* * NOTE: * If the IRQ status is changed, * this will only be visible on the * next open. * * If interrupt status changes, * this gets syslog'd. */ old_sc_irq = sc->sc_irq; switch (*(int*)data) { case 0: sc->sc_irq &= (~LP_ENABLE_IRQ); break; case 1: sc->sc_irq &= (~LP_ENABLE_EXT); sc->sc_irq |= LP_ENABLE_IRQ; break; case 2: /* classic irq based transfer and advanced * modes are in conflict */ sc->sc_irq &= (~LP_ENABLE_IRQ); sc->sc_irq |= LP_ENABLE_EXT; break; case 3: sc->sc_irq &= (~LP_ENABLE_EXT); break; default: break; } if (old_sc_irq != sc->sc_irq ) log(LOG_NOTICE, "%s: switched to %s %s mode\n", device_get_nameunit(sc->sc_dev), (sc->sc_irq & LP_ENABLE_IRQ)? "interrupt-driven":"polled", (sc->sc_irq & LP_ENABLE_EXT)? 
"extended":"standard"); } else /* polled port */ error = EOPNOTSUPP; ppb_unlock(ppbus); break; default: error = ENODEV; } return(error); } static device_method_t lpt_methods[] = { /* device interface */ DEVMETHOD(device_identify, lpt_identify), DEVMETHOD(device_probe, lpt_probe), DEVMETHOD(device_attach, lpt_attach), DEVMETHOD(device_detach, lpt_detach), { 0, 0 } }; static driver_t lpt_driver = { LPT_NAME, lpt_methods, sizeof(struct lpt_data), }; DRIVER_MODULE(lpt, ppbus, lpt_driver, lpt_devclass, 0, 0); MODULE_DEPEND(lpt, ppbus, 1, 1, 1); Index: head/sys/dev/sbni/if_sbni.c =================================================================== --- head/sys/dev/sbni/if_sbni.c (revision 355600) +++ head/sys/dev/sbni/if_sbni.c (revision 355601) @@ -1,1277 +1,1277 @@ /*- * Copyright (c) 1997-2001 Granch, Ltd. All rights reserved. * Author: Denis I.Timofeev * * Redistributon and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* * Device driver for Granch SBNI12 leased line adapters * * Revision 2.0.0 1997/08/06 * Initial revision by Alexey Zverev * * Revision 2.0.1 1997/08/11 * Additional internal statistics support (tx statistics) * * Revision 2.0.2 1997/11/05 * if_bpf bug has been fixed * * Revision 2.0.3 1998/12/20 * Memory leakage has been eliminated in * the sbni_st and sbni_timeout routines. * * Revision 3.0 2000/08/10 by Yaroslav Polyakov * Support for PCI cards. 4.1 modification. * * Revision 3.1 2000/09/12 * Removed extra #defines around bpf functions * * Revision 4.0 2000/11/23 by Denis Timofeev * Completely redesigned the buffer management * * Revision 4.1 2001/01/21 * Support for PCI Dual cards and new SBNI12D-10, -11 Dual/ISA cards * * Written with reference to NE2000 driver developed by David Greenman. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void sbni_init(void *); static void sbni_init_locked(struct sbni_softc *); static void sbni_start(struct ifnet *); static void sbni_start_locked(struct ifnet *); static int sbni_ioctl(struct ifnet *, u_long, caddr_t); static void sbni_stop(struct sbni_softc *); static void handle_channel(struct sbni_softc *); static void card_start(struct sbni_softc *); static int recv_frame(struct sbni_softc *); static void send_frame(struct sbni_softc *); static int upload_data(struct sbni_softc *, u_int, u_int, u_int, u_int32_t); static int skip_tail(struct sbni_softc *, u_int, u_int32_t); static void interpret_ack(struct sbni_softc *, u_int); static void download_data(struct sbni_softc *, u_int32_t *); static void prepare_to_send(struct sbni_softc *); static void drop_xmit_queue(struct sbni_softc *); static int get_rx_buf(struct sbni_softc *); static void indicate_pkt(struct sbni_softc *); static void change_level(struct sbni_softc *); static int check_fhdr(struct sbni_softc *, u_int *, u_int *, u_int *, u_int *, u_int32_t *); static int append_frame_to_pkt(struct sbni_softc *, u_int, u_int32_t); static void timeout_change_level(struct sbni_softc *); static void send_frame_header(struct sbni_softc *, u_int32_t *); static void set_initial_values(struct sbni_softc *, struct sbni_flags); static u_int32_t calc_crc32(u_int32_t, caddr_t, u_int); -static timeout_t sbni_timeout; +static callout_func_t sbni_timeout; static __inline u_char sbni_inb(struct sbni_softc *, enum sbni_reg); static __inline void sbni_outb(struct sbni_softc *, enum sbni_reg, u_char); static __inline void sbni_insb(struct sbni_softc *, u_char *, u_int); static __inline void sbni_outsb(struct sbni_softc *, u_char *, u_int); static u_int32_t crc32tab[]; #ifdef SBNI_DUAL_COMPOUND static struct mtx headlist_lock; MTX_SYSINIT(headlist_lock, &headlist_lock, "sbni headlist", MTX_DEF); static struct sbni_softc *sbni_headlist; #endif /* -------------------------------------------------------------------------- */ static __inline u_char sbni_inb(struct sbni_softc *sc, enum sbni_reg reg) { return bus_space_read_1( rman_get_bustag(sc->io_res), rman_get_bushandle(sc->io_res), sc->io_off + reg); } static __inline void sbni_outb(struct sbni_softc *sc, enum sbni_reg reg, u_char value) { bus_space_write_1( rman_get_bustag(sc->io_res), rman_get_bushandle(sc->io_res), sc->io_off + reg, value); } static __inline void sbni_insb(struct sbni_softc *sc, u_char *to, u_int len) { bus_space_read_multi_1( rman_get_bustag(sc->io_res), rman_get_bushandle(sc->io_res), sc->io_off + DAT, to, len); } static __inline void sbni_outsb(struct sbni_softc *sc, u_char *from, u_int len) { bus_space_write_multi_1( rman_get_bustag(sc->io_res), rman_get_bushandle(sc->io_res), sc->io_off + DAT, from, len); } /* Valid combinations in CSR0 (for probing): VALID_DECODER 0000,0011,1011,1010 ; 0 ; - TR_REQ ; 1 ; + TR_RDY ; 2 ; - TR_RDY TR_REQ ; 3 ; + BU_EMP ; 4 ; + BU_EMP TR_REQ ; 5 ; + BU_EMP TR_RDY ; 6 ; - BU_EMP TR_RDY TR_REQ ; 7 ; + RC_RDY ; 8 ; + RC_RDY TR_REQ ; 9 ; + RC_RDY TR_RDY ; 10 ; - RC_RDY TR_RDY TR_REQ ; 11 ; - RC_RDY BU_EMP ; 12 ; - RC_RDY BU_EMP TR_REQ ; 13 ; - RC_RDY BU_EMP TR_RDY ; 14 ; - RC_RDY BU_EMP TR_RDY TR_REQ ; 15 ; - */ #define VALID_DECODER (2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200) int sbni_probe(struct sbni_softc *sc) { u_char csr0; csr0 = 
sbni_inb(sc, CSR0); if (csr0 != 0xff && csr0 != 0x00) { csr0 &= ~EN_INT; if (csr0 & BU_EMP) csr0 |= EN_INT; if (VALID_DECODER & (1 << (csr0 >> 4))) return (0); } return (ENXIO); } /* * Install interface into kernel networking data structures */ int sbni_attach(struct sbni_softc *sc, int unit, struct sbni_flags flags) { struct ifnet *ifp; u_char csr0; ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) return (ENOMEM); sbni_outb(sc, CSR0, 0); set_initial_values(sc, flags); /* Initialize ifnet structure */ ifp->if_softc = sc; if_initname(ifp, "sbni", unit); ifp->if_init = sbni_init; ifp->if_start = sbni_start; ifp->if_ioctl = sbni_ioctl; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); /* report real baud rate */ csr0 = sbni_inb(sc, CSR0); ifp->if_baudrate = (csr0 & 0x01 ? 500000 : 2000000) / (1 << flags.rate); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; mtx_init(&sc->lock, ifp->if_xname, MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->wch, &sc->lock, 0); ether_ifattach(ifp, sc->enaddr); /* device attach does transition from UNCONFIGURED to IDLE state */ if_printf(ifp, "speed %ju, rxl ", (uintmax_t)ifp->if_baudrate); if (sc->delta_rxl) printf("auto\n"); else printf("%d (fixed)\n", sc->cur_rxl_index); return (0); } void sbni_detach(struct sbni_softc *sc) { SBNI_LOCK(sc); sbni_stop(sc); SBNI_UNLOCK(sc); callout_drain(&sc->wch); ether_ifdetach(sc->ifp); if (sc->irq_handle) bus_teardown_intr(sc->dev, sc->irq_res, sc->irq_handle); mtx_destroy(&sc->lock); if_free(sc->ifp); } void sbni_release_resources(struct sbni_softc *sc) { if (sc->irq_res) bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); if (sc->io_res && sc->io_off == 0) bus_release_resource(sc->dev, SYS_RES_IOPORT, sc->io_rid, sc->io_res); } /* -------------------------------------------------------------------------- */ static void sbni_init(void *xsc) { struct sbni_softc *sc; sc = (struct sbni_softc *)xsc; SBNI_LOCK(sc); sbni_init_locked(sc); SBNI_UNLOCK(sc); } static void sbni_init_locked(struct sbni_softc *sc) { struct ifnet *ifp; ifp = sc->ifp; /* * kludge to avoid multiple initialization when more than once * protocols configured */ if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; card_start(sc); callout_reset(&sc->wch, hz/SBNI_HZ, sbni_timeout, sc); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* attempt to start output */ sbni_start_locked(ifp); } static void sbni_start(struct ifnet *ifp) { struct sbni_softc *sc = ifp->if_softc; SBNI_LOCK(sc); sbni_start_locked(ifp); SBNI_UNLOCK(sc); } static void sbni_start_locked(struct ifnet *ifp) { struct sbni_softc *sc = ifp->if_softc; if (sc->tx_frameno == 0) prepare_to_send(sc); } static void sbni_stop(struct sbni_softc *sc) { sbni_outb(sc, CSR0, 0); drop_xmit_queue(sc); if (sc->rx_buf_p) { m_freem(sc->rx_buf_p); sc->rx_buf_p = NULL; } callout_stop(&sc->wch); sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } /* -------------------------------------------------------------------------- */ /* interrupt handler */ /* * SBNI12D-10, -11/ISA boards within "common interrupt" mode could not * be looked as two independent single-channel devices. Every channel seems * as Ethernet interface but interrupt handler must be common. Really, first * channel ("master") driver only registers the handler. In it's struct softc * it has got pointer to "slave" channel's struct softc and handles that's * interrupts too. * softc of successfully attached ISA SBNI boards is linked to list. 
* While next board driver is initialized, it scans this list. If one * has found softc with same irq and ioaddr different by 4 then it assumes * this board to be "master". */ void sbni_intr(void *arg) { struct sbni_softc *sc; int repeat; sc = (struct sbni_softc *)arg; do { repeat = 0; SBNI_LOCK(sc); if (sbni_inb(sc, CSR0) & (RC_RDY | TR_RDY)) { handle_channel(sc); repeat = 1; } SBNI_UNLOCK(sc); if (sc->slave_sc) { /* second channel present */ SBNI_LOCK(sc->slave_sc); if (sbni_inb(sc->slave_sc, CSR0) & (RC_RDY | TR_RDY)) { handle_channel(sc->slave_sc); repeat = 1; } SBNI_UNLOCK(sc->slave_sc); } } while (repeat); } static void handle_channel(struct sbni_softc *sc) { int req_ans; u_char csr0; sbni_outb(sc, CSR0, (sbni_inb(sc, CSR0) & ~EN_INT) | TR_REQ); sc->timer_ticks = CHANGE_LEVEL_START_TICKS; for (;;) { csr0 = sbni_inb(sc, CSR0); if ((csr0 & (RC_RDY | TR_RDY)) == 0) break; req_ans = !(sc->state & FL_PREV_OK); if (csr0 & RC_RDY) req_ans = recv_frame(sc); /* * TR_RDY always equals 1 here because we have owned the marker, * and we set TR_REQ when disabled interrupts */ csr0 = sbni_inb(sc, CSR0); if ((csr0 & TR_RDY) == 0 || (csr0 & RC_RDY) != 0) if_printf(sc->ifp, "internal error!\n"); /* if state & FL_NEED_RESEND != 0 then tx_frameno != 0 */ if (req_ans || sc->tx_frameno != 0) send_frame(sc); else { /* send the marker without any data */ sbni_outb(sc, CSR0, sbni_inb(sc, CSR0) & ~TR_REQ); } } sbni_outb(sc, CSR0, sbni_inb(sc, CSR0) | EN_INT); } /* * Routine returns 1 if it need to acknoweledge received frame. * Empty frame received without errors won't be acknoweledged. */ static int recv_frame(struct sbni_softc *sc) { u_int32_t crc; u_int framelen, frameno, ack; u_int is_first, frame_ok; crc = CRC32_INITIAL; if (check_fhdr(sc, &framelen, &frameno, &ack, &is_first, &crc)) { frame_ok = framelen > 4 ? 
upload_data(sc, framelen, frameno, is_first, crc) : skip_tail(sc, framelen, crc); if (frame_ok) interpret_ack(sc, ack); } else { framelen = 0; frame_ok = 0; } sbni_outb(sc, CSR0, sbni_inb(sc, CSR0) ^ CT_ZER); if (frame_ok) { sc->state |= FL_PREV_OK; if (framelen > 4) sc->in_stats.all_rx_number++; } else { sc->state &= ~FL_PREV_OK; change_level(sc); sc->in_stats.all_rx_number++; sc->in_stats.bad_rx_number++; } return (!frame_ok || framelen > 4); } static void send_frame(struct sbni_softc *sc) { u_int32_t crc; u_char csr0; crc = CRC32_INITIAL; if (sc->state & FL_NEED_RESEND) { /* if frame was sended but not ACK'ed - resend it */ if (sc->trans_errors) { sc->trans_errors--; if (sc->framelen != 0) sc->in_stats.resend_tx_number++; } else { /* cannot xmit with many attempts */ drop_xmit_queue(sc); goto do_send; } } else sc->trans_errors = TR_ERROR_COUNT; send_frame_header(sc, &crc); sc->state |= FL_NEED_RESEND; /* * FL_NEED_RESEND will be cleared after ACK, but if empty * frame sended then in prepare_to_send next frame */ if (sc->framelen) { download_data(sc, &crc); sc->in_stats.all_tx_number++; sc->state |= FL_WAIT_ACK; } sbni_outsb(sc, (u_char *)&crc, sizeof crc); do_send: csr0 = sbni_inb(sc, CSR0); sbni_outb(sc, CSR0, csr0 & ~TR_REQ); if (sc->tx_frameno) { /* next frame exists - request to send */ sbni_outb(sc, CSR0, csr0 | TR_REQ); } } static void download_data(struct sbni_softc *sc, u_int32_t *crc_p) { struct mbuf *m; caddr_t data_p; u_int data_len, pos, slice; data_p = NULL; /* initialized to avoid warn */ pos = 0; for (m = sc->tx_buf_p; m != NULL && pos < sc->pktlen; m = m->m_next) { if (pos + m->m_len > sc->outpos) { data_len = m->m_len - (sc->outpos - pos); data_p = mtod(m, caddr_t) + (sc->outpos - pos); goto do_copy; } else pos += m->m_len; } data_len = 0; do_copy: pos = 0; do { if (data_len) { slice = min(data_len, sc->framelen - pos); sbni_outsb(sc, data_p, slice); *crc_p = calc_crc32(*crc_p, data_p, slice); pos += slice; if (data_len -= slice) data_p += slice; else { do { m = m->m_next; } while (m != NULL && m->m_len == 0); if (m) { data_len = m->m_len; data_p = mtod(m, caddr_t); } } } else { /* frame too short - zero padding */ pos = sc->framelen - pos; while (pos--) { sbni_outb(sc, DAT, 0); *crc_p = CRC32(0, *crc_p); } return; } } while (pos < sc->framelen); } static int upload_data(struct sbni_softc *sc, u_int framelen, u_int frameno, u_int is_first, u_int32_t crc) { int frame_ok; if (is_first) { sc->wait_frameno = frameno; sc->inppos = 0; } if (sc->wait_frameno == frameno) { if (sc->inppos + framelen <= ETHER_MAX_LEN) { frame_ok = append_frame_to_pkt(sc, framelen, crc); /* * if CRC is right but framelen incorrect then transmitter * error was occurred... drop entire packet */ } else if ((frame_ok = skip_tail(sc, framelen, crc)) != 0) { sc->wait_frameno = 0; sc->inppos = 0; if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); /* now skip all frames until is_first != 0 */ } } else frame_ok = skip_tail(sc, framelen, crc); if (is_first && !frame_ok) { /* * Frame has been violated, but we have stored * is_first already... Drop entire packet. 
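 *
 * Descriptive note: fragments of one packet arrive with a decreasing frame
 * number and are accumulated into rx_buf_p at inppos by
 * append_frame_to_pkt(); when wait_frameno counts down to zero the packet
 * is passed up via indicate_pkt().  The reset below makes the remaining
 * fragments fall through to skip_tail() until a new first frame shows up.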
*/ sc->wait_frameno = 0; if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); } return (frame_ok); } static __inline void send_complete(struct sbni_softc *); static __inline void send_complete(struct sbni_softc *sc) { m_freem(sc->tx_buf_p); sc->tx_buf_p = NULL; if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1); } static void interpret_ack(struct sbni_softc *sc, u_int ack) { if (ack == FRAME_SENT_OK) { sc->state &= ~FL_NEED_RESEND; if (sc->state & FL_WAIT_ACK) { sc->outpos += sc->framelen; if (--sc->tx_frameno) { sc->framelen = min( sc->maxframe, sc->pktlen - sc->outpos); } else { send_complete(sc); prepare_to_send(sc); } } } sc->state &= ~FL_WAIT_ACK; } /* * Glue received frame with previous fragments of packet. * Indicate packet when last frame would be accepted. */ static int append_frame_to_pkt(struct sbni_softc *sc, u_int framelen, u_int32_t crc) { caddr_t p; if (sc->inppos + framelen > ETHER_MAX_LEN) return (0); if (!sc->rx_buf_p && !get_rx_buf(sc)) return (0); p = sc->rx_buf_p->m_data + sc->inppos; sbni_insb(sc, p, framelen); if (calc_crc32(crc, p, framelen) != CRC32_REMAINDER) return (0); sc->inppos += framelen - 4; if (--sc->wait_frameno == 0) { /* last frame received */ indicate_pkt(sc); if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1); } return (1); } /* * Prepare to start output on adapter. Current priority must be set to splimp * before this routine is called. * Transmitter will be actually activated when marker has been accepted. */ static void prepare_to_send(struct sbni_softc *sc) { struct mbuf *m; u_int len; /* sc->tx_buf_p == NULL here! */ if (sc->tx_buf_p) printf("sbni: memory leak!\n"); sc->outpos = 0; sc->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND); for (;;) { IF_DEQUEUE(&sc->ifp->if_snd, sc->tx_buf_p); if (!sc->tx_buf_p) { /* nothing to transmit... */ sc->pktlen = 0; sc->tx_frameno = 0; sc->framelen = 0; sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; return; } for (len = 0, m = sc->tx_buf_p; m; m = m->m_next) len += m->m_len; if (len != 0) break; m_freem(sc->tx_buf_p); } if (len < SBNI_MIN_LEN) len = SBNI_MIN_LEN; sc->pktlen = len; sc->tx_frameno = howmany(len, sc->maxframe); sc->framelen = min(len, sc->maxframe); sbni_outb(sc, CSR0, sbni_inb(sc, CSR0) | TR_REQ); sc->ifp->if_drv_flags |= IFF_DRV_OACTIVE; BPF_MTAP(sc->ifp, sc->tx_buf_p); } static void drop_xmit_queue(struct sbni_softc *sc) { struct mbuf *m; if (sc->tx_buf_p) { m_freem(sc->tx_buf_p); sc->tx_buf_p = NULL; if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); } for (;;) { IF_DEQUEUE(&sc->ifp->if_snd, m); if (m == NULL) break; m_freem(m); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); } sc->tx_frameno = 0; sc->framelen = 0; sc->outpos = 0; sc->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND); sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } static void send_frame_header(struct sbni_softc *sc, u_int32_t *crc_p) { u_int32_t crc; u_int len_field; u_char value; crc = *crc_p; len_field = sc->framelen + 6; /* CRC + frameno + reserved */ if (sc->state & FL_NEED_RESEND) len_field |= FRAME_RETRY; /* non-first attempt... */ if (sc->outpos == 0) len_field |= FRAME_FIRST; len_field |= (sc->state & FL_PREV_OK) ? 
FRAME_SENT_OK : FRAME_SENT_BAD; sbni_outb(sc, DAT, SBNI_SIG); value = (u_char)len_field; sbni_outb(sc, DAT, value); crc = CRC32(value, crc); value = (u_char)(len_field >> 8); sbni_outb(sc, DAT, value); crc = CRC32(value, crc); sbni_outb(sc, DAT, sc->tx_frameno); crc = CRC32(sc->tx_frameno, crc); sbni_outb(sc, DAT, 0); crc = CRC32(0, crc); *crc_p = crc; } /* * if frame tail not needed (incorrect number or received twice), * it won't store, but CRC will be calculated */ static int skip_tail(struct sbni_softc *sc, u_int tail_len, u_int32_t crc) { while (tail_len--) crc = CRC32(sbni_inb(sc, DAT), crc); return (crc == CRC32_REMAINDER); } static int check_fhdr(struct sbni_softc *sc, u_int *framelen, u_int *frameno, u_int *ack, u_int *is_first, u_int32_t *crc_p) { u_int32_t crc; u_char value; crc = *crc_p; if (sbni_inb(sc, DAT) != SBNI_SIG) return (0); value = sbni_inb(sc, DAT); *framelen = (u_int)value; crc = CRC32(value, crc); value = sbni_inb(sc, DAT); *framelen |= ((u_int)value) << 8; crc = CRC32(value, crc); *ack = *framelen & FRAME_ACK_MASK; *is_first = (*framelen & FRAME_FIRST) != 0; if ((*framelen &= FRAME_LEN_MASK) < 6 || *framelen > SBNI_MAX_FRAME - 3) return (0); value = sbni_inb(sc, DAT); *frameno = (u_int)value; crc = CRC32(value, crc); crc = CRC32(sbni_inb(sc, DAT), crc); /* reserved byte */ *framelen -= 2; *crc_p = crc; return (1); } static int get_rx_buf(struct sbni_softc *sc) { struct mbuf *m; MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { if_printf(sc->ifp, "cannot allocate header mbuf\n"); return (0); } /* * We always put the received packet in a single buffer - * either with just an mbuf header or in a cluster attached * to the header. The +2 is to compensate for the alignment * fixup below. */ if (ETHER_MAX_LEN + 2 > MHLEN) { /* Attach an mbuf cluster */ if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); return (0); } } m->m_pkthdr.len = m->m_len = ETHER_MAX_LEN + 2; /* * The +2 is to longword align the start of the real packet. * (sizeof ether_header == 14) * This is important for NFS. */ m_adj(m, 2); sc->rx_buf_p = m; return (1); } static void indicate_pkt(struct sbni_softc *sc) { struct ifnet *ifp = sc->ifp; struct mbuf *m; m = sc->rx_buf_p; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = sc->inppos; sc->rx_buf_p = NULL; SBNI_UNLOCK(sc); (*ifp->if_input)(ifp, m); SBNI_LOCK(sc); } /* -------------------------------------------------------------------------- */ /* * Routine checks periodically wire activity and regenerates marker if * connect was inactive for a long time. 
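 *
 * Descriptive note: this handler runs from the wch callout armed in
 * sbni_init_locked() with a period of hz/SBNI_HZ and re-arms itself on
 * every pass.  It is the routine whose forward declaration changes from
 * timeout_t to callout_func_t in this revision; both name a
 * void (void *) function type, so the handler body itself is unchanged.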
*/ static void sbni_timeout(void *xsc) { struct sbni_softc *sc; u_char csr0; sc = (struct sbni_softc *)xsc; SBNI_ASSERT_LOCKED(sc); csr0 = sbni_inb(sc, CSR0); if (csr0 & RC_CHK) { if (sc->timer_ticks) { if (csr0 & (RC_RDY | BU_EMP)) /* receiving not active */ sc->timer_ticks--; } else { sc->in_stats.timeout_number++; if (sc->delta_rxl) timeout_change_level(sc); sbni_outb(sc, CSR1, *(u_char *)&sc->csr1 | PR_RES); csr0 = sbni_inb(sc, CSR0); } } sbni_outb(sc, CSR0, csr0 | RC_CHK); callout_reset(&sc->wch, hz/SBNI_HZ, sbni_timeout, sc); } /* -------------------------------------------------------------------------- */ static void card_start(struct sbni_softc *sc) { sc->timer_ticks = CHANGE_LEVEL_START_TICKS; sc->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND); sc->state |= FL_PREV_OK; sc->inppos = 0; sc->wait_frameno = 0; sbni_outb(sc, CSR1, *(u_char *)&sc->csr1 | PR_RES); sbni_outb(sc, CSR0, EN_INT); } /* -------------------------------------------------------------------------- */ static u_char rxl_tab[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08, 0x0a, 0x0c, 0x0f, 0x16, 0x18, 0x1a, 0x1c, 0x1f }; #define SIZE_OF_TIMEOUT_RXL_TAB 4 static u_char timeout_rxl_tab[] = { 0x03, 0x05, 0x08, 0x0b }; static void set_initial_values(struct sbni_softc *sc, struct sbni_flags flags) { if (flags.fixed_rxl) { sc->delta_rxl = 0; /* disable receive level autodetection */ sc->cur_rxl_index = flags.rxl; } else { sc->delta_rxl = DEF_RXL_DELTA; sc->cur_rxl_index = DEF_RXL; } sc->csr1.rate = flags.fixed_rate ? flags.rate : DEFAULT_RATE; sc->csr1.rxl = rxl_tab[sc->cur_rxl_index]; sc->maxframe = DEFAULT_FRAME_LEN; /* * generate Ethernet address (0x00ff01xxxxxx) */ *(u_int16_t *) sc->enaddr = htons(0x00ff); if (flags.mac_addr) { *(u_int32_t *) (sc->enaddr + 2) = htonl(flags.mac_addr | 0x01000000); } else { *(u_char *) (sc->enaddr + 2) = 0x01; read_random(sc->enaddr + 3, 3); } } #ifdef SBNI_DUAL_COMPOUND void sbni_add(struct sbni_softc *sc) { mtx_lock(&headlist_lock); sc->link = sbni_headlist; sbni_headlist = sc; mtx_unlock(&headlist_lock); } struct sbni_softc * connect_to_master(struct sbni_softc *sc) { struct sbni_softc *p, *p_prev; mtx_lock(&headlist_lock); for (p = sbni_headlist, p_prev = NULL; p; p_prev = p, p = p->link) { if (rman_get_start(p->io_res) == rman_get_start(sc->io_res) + 4 || rman_get_start(p->io_res) == rman_get_start(sc->io_res) - 4) { p->slave_sc = sc; if (p_prev) p_prev->link = p->link; else sbni_headlist = p->link; mtx_unlock(&headlist_lock); return p; } } mtx_unlock(&headlist_lock); return (NULL); } #endif /* SBNI_DUAL_COMPOUND */ /* Receive level auto-selection */ static void change_level(struct sbni_softc *sc) { if (sc->delta_rxl == 0) /* do not auto-negotiate RxL */ return; if (sc->cur_rxl_index == 0) sc->delta_rxl = 1; else if (sc->cur_rxl_index == 15) sc->delta_rxl = -1; else if (sc->cur_rxl_rcvd < sc->prev_rxl_rcvd) sc->delta_rxl = -sc->delta_rxl; sc->csr1.rxl = rxl_tab[sc->cur_rxl_index += sc->delta_rxl]; sbni_inb(sc, CSR0); /* it needed for PCI cards */ sbni_outb(sc, CSR1, *(u_char *)&sc->csr1); sc->prev_rxl_rcvd = sc->cur_rxl_rcvd; sc->cur_rxl_rcvd = 0; } static void timeout_change_level(struct sbni_softc *sc) { sc->cur_rxl_index = timeout_rxl_tab[sc->timeout_rxl]; if (++sc->timeout_rxl >= 4) sc->timeout_rxl = 0; sc->csr1.rxl = rxl_tab[sc->cur_rxl_index]; sbni_inb(sc, CSR0); sbni_outb(sc, CSR1, *(u_char *)&sc->csr1); sc->prev_rxl_rcvd = sc->cur_rxl_rcvd; sc->cur_rxl_rcvd = 0; } /* -------------------------------------------------------------------------- */ /* * Process an ioctl 
request. This code needs some work - it looks * pretty ugly. */ static int sbni_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct sbni_softc *sc; struct ifreq *ifr; struct thread *td; struct sbni_in_stats *in_stats; struct sbni_flags flags; int error; sc = ifp->if_softc; ifr = (struct ifreq *)data; td = curthread; error = 0; switch (command) { case SIOCSIFFLAGS: /* * If the interface is marked up and stopped, then start it. * If it is marked down and running, then stop it. */ SBNI_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) sbni_init_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { sbni_stop(sc); } } SBNI_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Multicast list has changed; set the hardware filter * accordingly. */ error = 0; /* if (ifr == NULL) error = EAFNOSUPPORT; */ break; /* * SBNI specific ioctl */ case SIOCGHWFLAGS: /* get flags */ SBNI_LOCK(sc); bcopy((caddr_t)IF_LLADDR(sc->ifp)+3, (caddr_t) &flags, 3); flags.rxl = sc->cur_rxl_index; flags.rate = sc->csr1.rate; flags.fixed_rxl = (sc->delta_rxl == 0); flags.fixed_rate = 1; SBNI_UNLOCK(sc); bcopy(&flags, &ifr->ifr_ifru, sizeof(flags)); break; case SIOCGINSTATS: in_stats = malloc(sizeof(struct sbni_in_stats), M_DEVBUF, M_WAITOK); SBNI_LOCK(sc); bcopy(&sc->in_stats, in_stats, sizeof(struct sbni_in_stats)); SBNI_UNLOCK(sc); error = copyout(in_stats, ifr_data_get_ptr(ifr), sizeof(struct sbni_in_stats)); free(in_stats, M_DEVBUF); break; case SIOCSHWFLAGS: /* set flags */ /* root only */ error = priv_check(td, PRIV_DRIVER); if (error) break; bcopy(&ifr->ifr_ifru, &flags, sizeof(flags)); SBNI_LOCK(sc); if (flags.fixed_rxl) { sc->delta_rxl = 0; sc->cur_rxl_index = flags.rxl; } else { sc->delta_rxl = DEF_RXL_DELTA; sc->cur_rxl_index = DEF_RXL; } sc->csr1.rxl = rxl_tab[sc->cur_rxl_index]; sc->csr1.rate = flags.fixed_rate ? flags.rate : DEFAULT_RATE; if (flags.mac_addr) bcopy((caddr_t) &flags, (caddr_t) IF_LLADDR(sc->ifp)+3, 3); /* Don't be afraid... 
*/ sbni_outb(sc, CSR1, *(char*)(&sc->csr1) | PR_RES); SBNI_UNLOCK(sc); break; case SIOCRINSTATS: SBNI_LOCK(sc); if (!(error = priv_check(td, PRIV_DRIVER))) /* root only */ bzero(&sc->in_stats, sizeof(struct sbni_in_stats)); SBNI_UNLOCK(sc); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } /* -------------------------------------------------------------------------- */ static u_int32_t calc_crc32(u_int32_t crc, caddr_t p, u_int len) { while (len--) crc = CRC32(*p++, crc); return (crc); } static u_int32_t crc32tab[] __aligned(8) = { 0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37, 0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E, 0xDCD967BF, 0xABDE5729, 0x32D70693, 0x45D03605, 0xDBB4A3A6, 0xACB39330, 0x35BAC28A, 0x42BDF21C, 0xCFB5FFE9, 0xB8B2CF7F, 0x21BB9EC5, 0x56BCAE53, 0xC8D83BF0, 0xBFDF0B66, 0x26D65ADC, 0x51D16A4A, 0xC16E77DB, 0xB669474D, 0x2F6016F7, 0x58672661, 0xC603B3C2, 0xB1048354, 0x280DD2EE, 0x5F0AE278, 0xE96CCF45, 0x9E6BFFD3, 0x0762AE69, 0x70659EFF, 0xEE010B5C, 0x99063BCA, 0x000F6A70, 0x77085AE6, 0xE7B74777, 0x90B077E1, 0x09B9265B, 0x7EBE16CD, 0xE0DA836E, 0x97DDB3F8, 0x0ED4E242, 0x79D3D2D4, 0xF4DBDF21, 0x83DCEFB7, 0x1AD5BE0D, 0x6DD28E9B, 0xF3B61B38, 0x84B12BAE, 0x1DB87A14, 0x6ABF4A82, 0xFA005713, 0x8D076785, 0x140E363F, 0x630906A9, 0xFD6D930A, 0x8A6AA39C, 0x1363F226, 0x6464C2B0, 0xA4DEAE1D, 0xD3D99E8B, 0x4AD0CF31, 0x3DD7FFA7, 0xA3B36A04, 0xD4B45A92, 0x4DBD0B28, 0x3ABA3BBE, 0xAA05262F, 0xDD0216B9, 0x440B4703, 0x330C7795, 0xAD68E236, 0xDA6FD2A0, 0x4366831A, 0x3461B38C, 0xB969BE79, 0xCE6E8EEF, 0x5767DF55, 0x2060EFC3, 0xBE047A60, 0xC9034AF6, 0x500A1B4C, 0x270D2BDA, 0xB7B2364B, 0xC0B506DD, 0x59BC5767, 0x2EBB67F1, 0xB0DFF252, 0xC7D8C2C4, 0x5ED1937E, 0x29D6A3E8, 0x9FB08ED5, 0xE8B7BE43, 0x71BEEFF9, 0x06B9DF6F, 0x98DD4ACC, 0xEFDA7A5A, 0x76D32BE0, 0x01D41B76, 0x916B06E7, 0xE66C3671, 0x7F6567CB, 0x0862575D, 0x9606C2FE, 0xE101F268, 0x7808A3D2, 0x0F0F9344, 0x82079EB1, 0xF500AE27, 0x6C09FF9D, 0x1B0ECF0B, 0x856A5AA8, 0xF26D6A3E, 0x6B643B84, 0x1C630B12, 0x8CDC1683, 0xFBDB2615, 0x62D277AF, 0x15D54739, 0x8BB1D29A, 0xFCB6E20C, 0x65BFB3B6, 0x12B88320, 0x3FBA6CAD, 0x48BD5C3B, 0xD1B40D81, 0xA6B33D17, 0x38D7A8B4, 0x4FD09822, 0xD6D9C998, 0xA1DEF90E, 0x3161E49F, 0x4666D409, 0xDF6F85B3, 0xA868B525, 0x360C2086, 0x410B1010, 0xD80241AA, 0xAF05713C, 0x220D7CC9, 0x550A4C5F, 0xCC031DE5, 0xBB042D73, 0x2560B8D0, 0x52678846, 0xCB6ED9FC, 0xBC69E96A, 0x2CD6F4FB, 0x5BD1C46D, 0xC2D895D7, 0xB5DFA541, 0x2BBB30E2, 0x5CBC0074, 0xC5B551CE, 0xB2B26158, 0x04D44C65, 0x73D37CF3, 0xEADA2D49, 0x9DDD1DDF, 0x03B9887C, 0x74BEB8EA, 0xEDB7E950, 0x9AB0D9C6, 0x0A0FC457, 0x7D08F4C1, 0xE401A57B, 0x930695ED, 0x0D62004E, 0x7A6530D8, 0xE36C6162, 0x946B51F4, 0x19635C01, 0x6E646C97, 0xF76D3D2D, 0x806A0DBB, 0x1E0E9818, 0x6909A88E, 0xF000F934, 0x8707C9A2, 0x17B8D433, 0x60BFE4A5, 0xF9B6B51F, 0x8EB18589, 0x10D5102A, 0x67D220BC, 0xFEDB7106, 0x89DC4190, 0x49662D3D, 0x3E611DAB, 0xA7684C11, 0xD06F7C87, 0x4E0BE924, 0x390CD9B2, 0xA0058808, 0xD702B89E, 0x47BDA50F, 0x30BA9599, 0xA9B3C423, 0xDEB4F4B5, 0x40D06116, 0x37D75180, 0xAEDE003A, 0xD9D930AC, 0x54D13D59, 0x23D60DCF, 0xBADF5C75, 0xCDD86CE3, 0x53BCF940, 0x24BBC9D6, 0xBDB2986C, 0xCAB5A8FA, 0x5A0AB56B, 0x2D0D85FD, 0xB404D447, 0xC303E4D1, 0x5D677172, 0x2A6041E4, 0xB369105E, 0xC46E20C8, 0x72080DF5, 0x050F3D63, 0x9C066CD9, 0xEB015C4F, 0x7565C9EC, 0x0262F97A, 0x9B6BA8C0, 0xEC6C9856, 0x7CD385C7, 0x0BD4B551, 0x92DDE4EB, 0xE5DAD47D, 0x7BBE41DE, 0x0CB97148, 0x95B020F2, 0xE2B71064, 0x6FBF1D91, 0x18B82D07, 0x81B17CBD, 0xF6B64C2B, 0x68D2D988, 0x1FD5E91E, 0x86DCB8A4, 0xF1DB8832, 
0x616495A3, 0x1663A535, 0x8F6AF48F, 0xF86DC419, 0x660951BA, 0x110E612C, 0x88073096, 0xFF000000 }; Index: head/sys/dev/smc/if_smc.c =================================================================== --- head/sys/dev/smc/if_smc.c (revision 355600) +++ head/sys/dev/smc/if_smc.c (revision 355601) @@ -1,1329 +1,1329 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Benno Rice. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Driver for SMSC LAN91C111, may work for older variants. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #include #include #include #include #include #include #include #define SMC_LOCK(sc) mtx_lock(&(sc)->smc_mtx) #define SMC_UNLOCK(sc) mtx_unlock(&(sc)->smc_mtx) #define SMC_ASSERT_LOCKED(sc) mtx_assert(&(sc)->smc_mtx, MA_OWNED) #define SMC_INTR_PRIORITY 0 #define SMC_RX_PRIORITY 5 #define SMC_TX_PRIORITY 10 devclass_t smc_devclass; static const char *smc_chip_ids[16] = { NULL, NULL, NULL, /* 3 */ "SMSC LAN91C90 or LAN91C92", /* 4 */ "SMSC LAN91C94", /* 5 */ "SMSC LAN91C95", /* 6 */ "SMSC LAN91C96", /* 7 */ "SMSC LAN91C100", /* 8 */ "SMSC LAN91C100FD", /* 9 */ "SMSC LAN91C110FD or LAN91C111FD", NULL, NULL, NULL, NULL, NULL, NULL }; static void smc_init(void *); static void smc_start(struct ifnet *); static void smc_stop(struct smc_softc *); static int smc_ioctl(struct ifnet *, u_long, caddr_t); static void smc_init_locked(struct smc_softc *); static void smc_start_locked(struct ifnet *); static void smc_reset(struct smc_softc *); static int smc_mii_ifmedia_upd(struct ifnet *); static void smc_mii_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void smc_mii_tick(void *); static void smc_mii_mediachg(struct smc_softc *); static int smc_mii_mediaioctl(struct smc_softc *, struct ifreq *, u_long); static void smc_task_intr(void *, int); static void smc_task_rx(void *, int); static void smc_task_tx(void *, int); static driver_filter_t smc_intr; -static timeout_t smc_watchdog; +static callout_func_t smc_watchdog; #ifdef DEVICE_POLLING static 
poll_handler_t smc_poll; #endif /* * MII bit-bang glue */ static uint32_t smc_mii_bitbang_read(device_t); static void smc_mii_bitbang_write(device_t, uint32_t); static const struct mii_bitbang_ops smc_mii_bitbang_ops = { smc_mii_bitbang_read, smc_mii_bitbang_write, { MGMT_MDO, /* MII_BIT_MDO */ MGMT_MDI, /* MII_BIT_MDI */ MGMT_MCLK, /* MII_BIT_MDC */ MGMT_MDOE, /* MII_BIT_DIR_HOST_PHY */ 0, /* MII_BIT_DIR_PHY_HOST */ } }; static __inline void smc_select_bank(struct smc_softc *sc, uint16_t bank) { bus_barrier(sc->smc_reg, BSR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); bus_write_2(sc->smc_reg, BSR, bank & BSR_BANK_MASK); bus_barrier(sc->smc_reg, BSR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } /* Never call this when not in bank 2. */ static __inline void smc_mmu_wait(struct smc_softc *sc) { KASSERT((bus_read_2(sc->smc_reg, BSR) & BSR_BANK_MASK) == 2, ("%s: smc_mmu_wait called when not in bank 2", device_get_nameunit(sc->smc_dev))); while (bus_read_2(sc->smc_reg, MMUCR) & MMUCR_BUSY) ; } static __inline uint8_t smc_read_1(struct smc_softc *sc, bus_size_t offset) { return (bus_read_1(sc->smc_reg, offset)); } static __inline void smc_write_1(struct smc_softc *sc, bus_size_t offset, uint8_t val) { bus_write_1(sc->smc_reg, offset, val); } static __inline uint16_t smc_read_2(struct smc_softc *sc, bus_size_t offset) { return (bus_read_2(sc->smc_reg, offset)); } static __inline void smc_write_2(struct smc_softc *sc, bus_size_t offset, uint16_t val) { bus_write_2(sc->smc_reg, offset, val); } static __inline void smc_read_multi_2(struct smc_softc *sc, bus_size_t offset, uint16_t *datap, bus_size_t count) { bus_read_multi_2(sc->smc_reg, offset, datap, count); } static __inline void smc_write_multi_2(struct smc_softc *sc, bus_size_t offset, uint16_t *datap, bus_size_t count) { bus_write_multi_2(sc->smc_reg, offset, datap, count); } static __inline void smc_barrier(struct smc_softc *sc, bus_size_t offset, bus_size_t length, int flags) { bus_barrier(sc->smc_reg, offset, length, flags); } int smc_probe(device_t dev) { int rid, type, error; uint16_t val; struct smc_softc *sc; struct resource *reg; sc = device_get_softc(dev); rid = 0; type = SYS_RES_IOPORT; error = 0; if (sc->smc_usemem) type = SYS_RES_MEMORY; reg = bus_alloc_resource_anywhere(dev, type, &rid, 16, RF_ACTIVE); if (reg == NULL) { if (bootverbose) device_printf(dev, "could not allocate I/O resource for probe\n"); return (ENXIO); } /* Check for the identification value in the BSR. */ val = bus_read_2(reg, BSR); if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) { if (bootverbose) device_printf(dev, "identification value not in BSR\n"); error = ENXIO; goto done; } /* * Try switching banks and make sure we still get the identification * value. */ bus_write_2(reg, BSR, 0); val = bus_read_2(reg, BSR); if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) { if (bootverbose) device_printf(dev, "identification value not in BSR after write\n"); error = ENXIO; goto done; } #if 0 /* Check the BAR. */ bus_write_2(reg, BSR, 1); val = bus_read_2(reg, BAR); val = BAR_ADDRESS(val); if (rman_get_start(reg) != val) { if (bootverbose) device_printf(dev, "BAR address %x does not match " "I/O resource address %lx\n", val, rman_get_start(reg)); error = ENXIO; goto done; } #endif /* Compare REV against known chip revisions. 
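 *
 * Descriptive note: the revision register lives in bank 3; its chip-ID
 * field indexes the 16-entry smc_chip_ids[] table above, and a NULL entry
 * means the part is not one this driver knows how to drive.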
*/ bus_write_2(reg, BSR, 3); val = bus_read_2(reg, REV); val = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT; if (smc_chip_ids[val] == NULL) { if (bootverbose) device_printf(dev, "Unknown chip revision: %d\n", val); error = ENXIO; goto done; } device_set_desc(dev, smc_chip_ids[val]); done: bus_release_resource(dev, type, rid, reg); return (error); } int smc_attach(device_t dev) { int type, error; uint16_t val; u_char eaddr[ETHER_ADDR_LEN]; struct smc_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); error = 0; sc->smc_dev = dev; ifp = sc->smc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { error = ENOSPC; goto done; } mtx_init(&sc->smc_mtx, device_get_nameunit(dev), NULL, MTX_DEF); /* Set up watchdog callout. */ callout_init_mtx(&sc->smc_watchdog, &sc->smc_mtx, 0); type = SYS_RES_IOPORT; if (sc->smc_usemem) type = SYS_RES_MEMORY; sc->smc_reg_rid = 0; sc->smc_reg = bus_alloc_resource_anywhere(dev, type, &sc->smc_reg_rid, 16, RF_ACTIVE); if (sc->smc_reg == NULL) { error = ENXIO; goto done; } sc->smc_irq = bus_alloc_resource_anywhere(dev, SYS_RES_IRQ, &sc->smc_irq_rid, 1, RF_ACTIVE | RF_SHAREABLE); if (sc->smc_irq == NULL) { error = ENXIO; goto done; } SMC_LOCK(sc); smc_reset(sc); SMC_UNLOCK(sc); smc_select_bank(sc, 3); val = smc_read_2(sc, REV); sc->smc_chip = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT; sc->smc_rev = (val * REV_REV_MASK) >> REV_REV_SHIFT; if (bootverbose) device_printf(dev, "revision %x\n", sc->smc_rev); callout_init_mtx(&sc->smc_mii_tick_ch, &sc->smc_mtx, CALLOUT_RETURNUNLOCKED); if (sc->smc_chip >= REV_CHIP_91110FD) { (void)mii_attach(dev, &sc->smc_miibus, ifp, smc_mii_ifmedia_upd, smc_mii_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); if (sc->smc_miibus != NULL) { sc->smc_mii_tick = smc_mii_tick; sc->smc_mii_mediachg = smc_mii_mediachg; sc->smc_mii_mediaioctl = smc_mii_mediaioctl; } } smc_select_bank(sc, 1); eaddr[0] = smc_read_1(sc, IAR0); eaddr[1] = smc_read_1(sc, IAR1); eaddr[2] = smc_read_1(sc, IAR2); eaddr[3] = smc_read_1(sc, IAR3); eaddr[4] = smc_read_1(sc, IAR4); eaddr[5] = smc_read_1(sc, IAR5); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = smc_init; ifp->if_ioctl = smc_ioctl; ifp->if_start = smc_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); IFQ_SET_READY(&ifp->if_snd); ifp->if_capabilities = ifp->if_capenable = 0; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif ether_ifattach(ifp, eaddr); /* Set up taskqueue */ TASK_INIT(&sc->smc_intr, SMC_INTR_PRIORITY, smc_task_intr, ifp); TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp); TASK_INIT(&sc->smc_tx, SMC_TX_PRIORITY, smc_task_tx, ifp); sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->smc_tq); taskqueue_start_threads(&sc->smc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->smc_dev)); /* Mask all interrupts. 
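 *
 * Descriptive note: the chip stays masked until the interface is brought
 * up and smc_enable() programs a real mask; smc_intr() below is only a
 * filter that masks the chip again and defers all work to the smc_task_*
 * handlers on smc_tq.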
*/ sc->smc_mask = 0; smc_write_1(sc, MSK, 0); /* Wire up interrupt */ error = bus_setup_intr(dev, sc->smc_irq, INTR_TYPE_NET|INTR_MPSAFE, smc_intr, NULL, sc, &sc->smc_ih); if (error != 0) goto done; done: if (error != 0) smc_detach(dev); return (error); } int smc_detach(device_t dev) { int type; struct smc_softc *sc; sc = device_get_softc(dev); SMC_LOCK(sc); smc_stop(sc); SMC_UNLOCK(sc); if (sc->smc_ifp != NULL) { ether_ifdetach(sc->smc_ifp); } callout_drain(&sc->smc_watchdog); callout_drain(&sc->smc_mii_tick_ch); #ifdef DEVICE_POLLING if (sc->smc_ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(sc->smc_ifp); #endif if (sc->smc_ih != NULL) bus_teardown_intr(sc->smc_dev, sc->smc_irq, sc->smc_ih); if (sc->smc_tq != NULL) { taskqueue_drain(sc->smc_tq, &sc->smc_intr); taskqueue_drain(sc->smc_tq, &sc->smc_rx); taskqueue_drain(sc->smc_tq, &sc->smc_tx); taskqueue_free(sc->smc_tq); sc->smc_tq = NULL; } if (sc->smc_ifp != NULL) { if_free(sc->smc_ifp); } if (sc->smc_miibus != NULL) { device_delete_child(sc->smc_dev, sc->smc_miibus); bus_generic_detach(sc->smc_dev); } if (sc->smc_reg != NULL) { type = SYS_RES_IOPORT; if (sc->smc_usemem) type = SYS_RES_MEMORY; bus_release_resource(sc->smc_dev, type, sc->smc_reg_rid, sc->smc_reg); } if (sc->smc_irq != NULL) bus_release_resource(sc->smc_dev, SYS_RES_IRQ, sc->smc_irq_rid, sc->smc_irq); if (mtx_initialized(&sc->smc_mtx)) mtx_destroy(&sc->smc_mtx); return (0); } static void smc_start(struct ifnet *ifp) { struct smc_softc *sc; sc = ifp->if_softc; SMC_LOCK(sc); smc_start_locked(ifp); SMC_UNLOCK(sc); } static void smc_start_locked(struct ifnet *ifp) { struct smc_softc *sc; struct mbuf *m; u_int len, npages, spin_count; sc = ifp->if_softc; SMC_ASSERT_LOCKED(sc); if (ifp->if_drv_flags & IFF_DRV_OACTIVE) return; if (IFQ_IS_EMPTY(&ifp->if_snd)) return; /* * Grab the next packet. If it's too big, drop it. */ IFQ_DRV_DEQUEUE(&ifp->if_snd, m); len = m_length(m, NULL); len += (len & 1); if (len > ETHER_MAX_LEN - ETHER_CRC_LEN) { if_printf(ifp, "large packet discarded\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(m); return; /* XXX readcheck? */ } /* * Flag that we're busy. */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->smc_pending = m; /* * Work out how many 256 byte "pages" we need. We have to include the * control data for the packet in this calculation. */ npages = (len + PKT_CTRL_DATA_LEN) >> 8; if (npages == 0) npages = 1; /* * Request memory. */ smc_select_bank(sc, 2); smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_TX_ALLOC | npages); /* * Spin briefly to see if the allocation succeeds. */ spin_count = TX_ALLOC_WAIT_TIME; do { if (smc_read_1(sc, IST) & ALLOC_INT) { smc_write_1(sc, ACK, ALLOC_INT); break; } } while (--spin_count); /* * If the allocation is taking too long, unmask the alloc interrupt * and wait. */ if (spin_count == 0) { sc->smc_mask |= ALLOC_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); return; } taskqueue_enqueue(sc->smc_tq, &sc->smc_tx); } static void smc_task_tx(void *context, int pending) { struct ifnet *ifp; struct smc_softc *sc; struct mbuf *m, *m0; u_int packet, len; int last_len; uint8_t *data; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; SMC_LOCK(sc); if (sc->smc_pending == NULL) { SMC_UNLOCK(sc); goto next_packet; } m = m0 = sc->smc_pending; sc->smc_pending = NULL; smc_select_bank(sc, 2); /* * Check the allocation result. */ packet = smc_read_1(sc, ARR); /* * If the allocation failed, requeue the packet and retry. 
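 *
 * Descriptive note: smc_start_locked() asked the MMU for enough 256-byte
 * pages to hold this packet and either saw ALLOC_INT during its short spin
 * or unmasked that interrupt so smc_task_intr() would enqueue this task
 * later.  ARR_FAILED therefore means the chip could not supply the memory,
 * and the mbuf goes back on the send queue for another attempt.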
*/ if (packet & ARR_FAILED) { IFQ_DRV_PREPEND(&ifp->if_snd, m); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; smc_start_locked(ifp); SMC_UNLOCK(sc); return; } /* * Tell the device to write to our packet number. */ smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_AUTO_INCR); /* * Tell the device how long the packet is (including control data). */ len = m_length(m, 0); len += PKT_CTRL_DATA_LEN; smc_write_2(sc, DATA0, 0); smc_write_2(sc, DATA0, len); /* * Push the data out to the device. */ data = NULL; last_len = 0; for (; m != NULL; m = m->m_next) { data = mtod(m, uint8_t *); smc_write_multi_2(sc, DATA0, (uint16_t *)data, m->m_len / 2); last_len = m->m_len; } /* * Push out the control byte and and the odd byte if needed. */ if ((len & 1) != 0 && data != NULL) smc_write_2(sc, DATA0, (CTRL_ODD << 8) | data[last_len - 1]); else smc_write_2(sc, DATA0, 0); /* * Unmask the TX empty interrupt. */ sc->smc_mask |= TX_EMPTY_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); /* * Enqueue the packet. */ smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_ENQUEUE); callout_reset(&sc->smc_watchdog, hz * 2, smc_watchdog, sc); /* * Finish up. */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; SMC_UNLOCK(sc); BPF_MTAP(ifp, m0); m_freem(m0); next_packet: /* * See if there's anything else to do. */ smc_start(ifp); } static void smc_task_rx(void *context, int pending) { u_int packet, status, len; uint8_t *data; struct ifnet *ifp; struct smc_softc *sc; struct mbuf *m, *mhead, *mtail; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; mhead = mtail = NULL; SMC_LOCK(sc); packet = smc_read_1(sc, FIFO_RX); while ((packet & FIFO_EMPTY) == 0) { /* * Grab an mbuf and attach a cluster. */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { break; } if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); break; } /* * Point to the start of the packet. */ smc_select_bank(sc, 2); smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_READ | PTR_RCV | PTR_AUTO_INCR); /* * Grab status and packet length. */ status = smc_read_2(sc, DATA0); len = smc_read_2(sc, DATA0) & RX_LEN_MASK; len -= 6; if (status & RX_ODDFRM) len += 1; /* * Check for errors. */ if (status & (RX_TOOSHORT | RX_TOOLNG | RX_BADCRC | RX_ALGNERR)) { smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); m_freem(m); break; } /* * Set the mbuf up the way we want it. */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len + 2; /* XXX: Is this right? */ m_adj(m, ETHER_ALIGN); /* * Pull the packet out of the device. Make sure we're in the * right bank first as things may have changed while we were * allocating our mbuf. */ smc_select_bank(sc, 2); smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 4 | PTR_READ | PTR_RCV | PTR_AUTO_INCR); data = mtod(m, uint8_t *); smc_read_multi_2(sc, DATA0, (uint16_t *)data, len >> 1); if (len & 1) { data += len & ~1; *data = smc_read_1(sc, DATA0); } /* * Tell the device we're done. 
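 *
 * Descriptive note: MMUCR_CMD_RELEASE hands the receive packet back to the
 * chip's free memory; completed mbufs are chained on mhead/mtail and only
 * passed to if_input() after the loop, once the softc lock is dropped.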
*/ smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE); if (m == NULL) { break; } if (mhead == NULL) { mhead = mtail = m; m->m_next = NULL; } else { mtail->m_next = m; mtail = m; } packet = smc_read_1(sc, FIFO_RX); } sc->smc_mask |= RCV_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); SMC_UNLOCK(sc); while (mhead != NULL) { m = mhead; mhead = mhead->m_next; m->m_next = NULL; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); (*ifp->if_input)(ifp, m); } } #ifdef DEVICE_POLLING static int smc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct smc_softc *sc; sc = ifp->if_softc; SMC_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { SMC_UNLOCK(sc); return (0); } SMC_UNLOCK(sc); if (cmd == POLL_AND_CHECK_STATUS) taskqueue_enqueue(sc->smc_tq, &sc->smc_intr); return (0); } #endif static int smc_intr(void *context) { struct smc_softc *sc; uint32_t curbank; sc = (struct smc_softc *)context; /* * Save current bank and restore later in this function */ curbank = (smc_read_2(sc, BSR) & BSR_BANK_MASK); /* * Block interrupts in order to let smc_task_intr to kick in */ smc_select_bank(sc, 2); smc_write_1(sc, MSK, 0); /* Restore bank */ smc_select_bank(sc, curbank); taskqueue_enqueue(sc->smc_tq, &sc->smc_intr); return (FILTER_HANDLED); } static void smc_task_intr(void *context, int pending) { struct smc_softc *sc; struct ifnet *ifp; u_int status, packet, counter, tcr; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; SMC_LOCK(sc); smc_select_bank(sc, 2); /* * Find out what interrupts are flagged. */ status = smc_read_1(sc, IST) & sc->smc_mask; /* * Transmit error */ if (status & TX_INT) { /* * Kill off the packet if there is one and re-enable transmit. */ packet = smc_read_1(sc, FIFO_TX); if ((packet & FIFO_EMPTY) == 0) { callout_stop(&sc->smc_watchdog); smc_select_bank(sc, 2); smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_READ | PTR_AUTO_INCR); smc_select_bank(sc, 0); tcr = smc_read_2(sc, EPHSR); #if 0 if ((tcr & EPHSR_TX_SUC) == 0) device_printf(sc->smc_dev, "bad packet\n"); #endif smc_select_bank(sc, 2); smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE_PKT); smc_select_bank(sc, 0); tcr = smc_read_2(sc, TCR); tcr |= TCR_TXENA | TCR_PAD_EN; smc_write_2(sc, TCR, tcr); smc_select_bank(sc, 2); taskqueue_enqueue(sc->smc_tq, &sc->smc_tx); } /* * Ack the interrupt. */ smc_write_1(sc, ACK, TX_INT); } /* * Receive */ if (status & RCV_INT) { smc_write_1(sc, ACK, RCV_INT); sc->smc_mask &= ~RCV_INT; taskqueue_enqueue(sc->smc_tq, &sc->smc_rx); } /* * Allocation */ if (status & ALLOC_INT) { smc_write_1(sc, ACK, ALLOC_INT); sc->smc_mask &= ~ALLOC_INT; taskqueue_enqueue(sc->smc_tq, &sc->smc_tx); } /* * Receive overrun */ if (status & RX_OVRN_INT) { smc_write_1(sc, ACK, RX_OVRN_INT); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); } /* * Transmit empty */ if (status & TX_EMPTY_INT) { smc_write_1(sc, ACK, TX_EMPTY_INT); sc->smc_mask &= ~TX_EMPTY_INT; callout_stop(&sc->smc_watchdog); /* * Update collision stats. */ smc_select_bank(sc, 0); counter = smc_read_2(sc, ECR); smc_select_bank(sc, 2); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, ((counter & ECR_SNGLCOL_MASK) >> ECR_SNGLCOL_SHIFT) + ((counter & ECR_MULCOL_MASK) >> ECR_MULCOL_SHIFT)); /* * See if there are any packets to transmit. */ taskqueue_enqueue(sc->smc_tq, &sc->smc_tx); } /* * Update the interrupt mask. 
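 *
 * Descriptive note: smc_intr() wrote 0 to MSK before scheduling this task,
 * so the accumulated smc_mask is written back here (unless polling).  Bits
 * such as RCV_INT are re-enabled by smc_task_rx() once it has drained the
 * receive FIFO.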
*/ smc_select_bank(sc, 2); if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); SMC_UNLOCK(sc); } static uint32_t smc_mii_bitbang_read(device_t dev) { struct smc_softc *sc; uint32_t val; sc = device_get_softc(dev); SMC_ASSERT_LOCKED(sc); KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3, ("%s: smc_mii_bitbang_read called with bank %d (!= 3)", device_get_nameunit(sc->smc_dev), smc_read_2(sc, BSR) & BSR_BANK_MASK)); val = smc_read_2(sc, MGMT); smc_barrier(sc, MGMT, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (val); } static void smc_mii_bitbang_write(device_t dev, uint32_t val) { struct smc_softc *sc; sc = device_get_softc(dev); SMC_ASSERT_LOCKED(sc); KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3, ("%s: smc_mii_bitbang_write called with bank %d (!= 3)", device_get_nameunit(sc->smc_dev), smc_read_2(sc, BSR) & BSR_BANK_MASK)); smc_write_2(sc, MGMT, val); smc_barrier(sc, MGMT, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } int smc_miibus_readreg(device_t dev, int phy, int reg) { struct smc_softc *sc; int val; sc = device_get_softc(dev); SMC_LOCK(sc); smc_select_bank(sc, 3); val = mii_bitbang_readreg(dev, &smc_mii_bitbang_ops, phy, reg); SMC_UNLOCK(sc); return (val); } int smc_miibus_writereg(device_t dev, int phy, int reg, int data) { struct smc_softc *sc; sc = device_get_softc(dev); SMC_LOCK(sc); smc_select_bank(sc, 3); mii_bitbang_writereg(dev, &smc_mii_bitbang_ops, phy, reg, data); SMC_UNLOCK(sc); return (0); } void smc_miibus_statchg(device_t dev) { struct smc_softc *sc; struct mii_data *mii; uint16_t tcr; sc = device_get_softc(dev); mii = device_get_softc(sc->smc_miibus); SMC_LOCK(sc); smc_select_bank(sc, 0); tcr = smc_read_2(sc, TCR); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) tcr |= TCR_SWFDUP; else tcr &= ~TCR_SWFDUP; smc_write_2(sc, TCR, tcr); SMC_UNLOCK(sc); } static int smc_mii_ifmedia_upd(struct ifnet *ifp) { struct smc_softc *sc; struct mii_data *mii; sc = ifp->if_softc; if (sc->smc_miibus == NULL) return (ENXIO); mii = device_get_softc(sc->smc_miibus); return (mii_mediachg(mii)); } static void smc_mii_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct smc_softc *sc; struct mii_data *mii; sc = ifp->if_softc; if (sc->smc_miibus == NULL) return; mii = device_get_softc(sc->smc_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static void smc_mii_tick(void *context) { struct smc_softc *sc; sc = (struct smc_softc *)context; if (sc->smc_miibus == NULL) return; SMC_UNLOCK(sc); mii_tick(device_get_softc(sc->smc_miibus)); callout_reset(&sc->smc_mii_tick_ch, hz, smc_mii_tick, sc); } static void smc_mii_mediachg(struct smc_softc *sc) { if (sc->smc_miibus == NULL) return; mii_mediachg(device_get_softc(sc->smc_miibus)); } static int smc_mii_mediaioctl(struct smc_softc *sc, struct ifreq *ifr, u_long command) { struct mii_data *mii; if (sc->smc_miibus == NULL) return (EINVAL); mii = device_get_softc(sc->smc_miibus); return (ifmedia_ioctl(sc->smc_ifp, ifr, &mii->mii_media, command)); } static void smc_reset(struct smc_softc *sc) { u_int ctr; SMC_ASSERT_LOCKED(sc); smc_select_bank(sc, 2); /* * Mask all interrupts. */ smc_write_1(sc, MSK, 0); /* * Tell the device to reset. */ smc_select_bank(sc, 0); smc_write_2(sc, RCR, RCR_SOFT_RST); /* * Set up the configuration register. */ smc_select_bank(sc, 1); smc_write_2(sc, CR, CR_EPH_POWER_EN); DELAY(1); /* * Turn off transmit and receive. 
*/ smc_select_bank(sc, 0); smc_write_2(sc, TCR, 0); smc_write_2(sc, RCR, 0); /* * Set up the control register. */ smc_select_bank(sc, 1); ctr = smc_read_2(sc, CTR); ctr |= CTR_LE_ENABLE | CTR_AUTO_RELEASE; smc_write_2(sc, CTR, ctr); /* * Reset the MMU. */ smc_select_bank(sc, 2); smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_MMU_RESET); } static void smc_enable(struct smc_softc *sc) { struct ifnet *ifp; SMC_ASSERT_LOCKED(sc); ifp = sc->smc_ifp; /* * Set up the receive/PHY control register. */ smc_select_bank(sc, 0); smc_write_2(sc, RPCR, RPCR_ANEG | (RPCR_LED_LINK_ANY << RPCR_LSA_SHIFT) | (RPCR_LED_ACT_ANY << RPCR_LSB_SHIFT)); /* * Set up the transmit and receive control registers. */ smc_write_2(sc, TCR, TCR_TXENA | TCR_PAD_EN); smc_write_2(sc, RCR, RCR_RXEN | RCR_STRIP_CRC); /* * Set up the interrupt mask. */ smc_select_bank(sc, 2); sc->smc_mask = EPH_INT | RX_OVRN_INT | RCV_INT | TX_INT; if ((ifp->if_capenable & IFCAP_POLLING) != 0) smc_write_1(sc, MSK, sc->smc_mask); } static void smc_stop(struct smc_softc *sc) { SMC_ASSERT_LOCKED(sc); /* * Turn off callouts. */ callout_stop(&sc->smc_watchdog); callout_stop(&sc->smc_mii_tick_ch); /* * Mask all interrupts. */ smc_select_bank(sc, 2); sc->smc_mask = 0; smc_write_1(sc, MSK, 0); #ifdef DEVICE_POLLING ether_poll_deregister(sc->smc_ifp); sc->smc_ifp->if_capenable &= ~IFCAP_POLLING; #endif /* * Disable transmit and receive. */ smc_select_bank(sc, 0); smc_write_2(sc, TCR, 0); smc_write_2(sc, RCR, 0); sc->smc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; } static void smc_watchdog(void *arg) { struct smc_softc *sc; sc = (struct smc_softc *)arg; device_printf(sc->smc_dev, "watchdog timeout\n"); taskqueue_enqueue(sc->smc_tq, &sc->smc_intr); } static void smc_init(void *context) { struct smc_softc *sc; sc = (struct smc_softc *)context; SMC_LOCK(sc); smc_init_locked(sc); SMC_UNLOCK(sc); } static void smc_init_locked(struct smc_softc *sc) { struct ifnet *ifp; SMC_ASSERT_LOCKED(sc); ifp = sc->smc_ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; smc_reset(sc); smc_enable(sc); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; smc_start_locked(ifp); if (sc->smc_mii_tick != NULL) callout_reset(&sc->smc_mii_tick_ch, hz, sc->smc_mii_tick, sc); #ifdef DEVICE_POLLING SMC_UNLOCK(sc); ether_poll_register(smc_poll, ifp); SMC_LOCK(sc); ifp->if_capenable |= IFCAP_POLLING; #endif } static int smc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct smc_softc *sc; int error; sc = ifp->if_softc; error = 0; switch (cmd) { case SIOCSIFFLAGS: if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { SMC_LOCK(sc); smc_stop(sc); SMC_UNLOCK(sc); } else { smc_init(sc); if (sc->smc_mii_mediachg != NULL) sc->smc_mii_mediachg(sc); } break; case SIOCADDMULTI: case SIOCDELMULTI: /* XXX SMC_LOCK(sc); smc_setmcast(sc); SMC_UNLOCK(sc); */ error = EINVAL; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->smc_mii_mediaioctl == NULL) { error = EINVAL; break; } sc->smc_mii_mediaioctl(sc, (struct ifreq *)data, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } Index: head/sys/dev/syscons/syscons.c =================================================================== --- head/sys/dev/syscons/syscons.c (revision 355600) +++ head/sys/dev/syscons/syscons.c (revision 355601) @@ -1,4387 +1,4387 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1992-1998 Søren Schmidt * All rights reserved. 
* * This code is derived from software contributed to The DragonFly Project * by Sascha Wildner * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_syscons.h" #include "opt_splash.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__arm__) || defined(__mips__) || defined(__powerpc__) || \ defined(__sparc64__) #include #else #include #endif #if defined(__i386__) || defined(__amd64__) #include #include #endif #include #if defined(__amd64__) || defined(__i386__) #include #include #include #endif #include #include #include #include #define COLD 0 #define WARM 1 #define DEFAULT_BLANKTIME (5 * 60) /* 5 minutes */ #define MAX_BLANKTIME (7 * 24 * 60 * 60) /* 7 days!? */ #define KEYCODE_BS 0x0e /* "<-- Backspace" key, XXX */ /* NULL-safe version of "tty_opened()" */ #define tty_opened_ns(tp) ((tp) != NULL && tty_opened(tp)) static u_char sc_kattrtab[MAXCPU]; static int sc_console_unit = -1; static int sc_saver_keyb_only = 1; static scr_stat *sc_console; static struct consdev *sc_consptr; static void *sc_kts[MAXCPU]; static struct sc_term_sw *sc_ktsw; static scr_stat main_console; static struct tty *main_devs[MAXCONS]; static char init_done = COLD; static int shutdown_in_progress = FALSE; static int suspend_in_progress = FALSE; static char sc_malloc = FALSE; static int saver_mode = CONS_NO_SAVER; /* LKM/user saver */ static int run_scrn_saver = FALSE; /* should run the saver? 
*/ static int enable_bell = TRUE; /* enable beeper */ #ifndef SC_DISABLE_REBOOT static int enable_reboot = TRUE; /* enable keyboard reboot */ #endif #ifndef SC_DISABLE_KDBKEY static int enable_kdbkey = TRUE; /* enable keyboard debug */ #endif static long scrn_blank_time = 0; /* screen saver timeout value */ #ifdef DEV_SPLASH static int scrn_blanked; /* # of blanked screen */ static int sticky_splash = FALSE; static void none_saver(sc_softc_t *sc, int blank) { } static void (*current_saver)(sc_softc_t *, int) = none_saver; #endif #ifdef SC_NO_SUSPEND_VTYSWITCH static int sc_no_suspend_vtswitch = 1; #else static int sc_no_suspend_vtswitch = 0; #endif static int sc_susp_scr; static SYSCTL_NODE(_hw, OID_AUTO, syscons, CTLFLAG_RD, 0, "syscons"); static SYSCTL_NODE(_hw_syscons, OID_AUTO, saver, CTLFLAG_RD, 0, "saver"); SYSCTL_INT(_hw_syscons_saver, OID_AUTO, keybonly, CTLFLAG_RW, &sc_saver_keyb_only, 0, "screen saver interrupted by input only"); SYSCTL_INT( _hw_syscons, OID_AUTO, bell, CTLFLAG_RW, &enable_bell, 0, "enable bell"); #ifndef SC_DISABLE_REBOOT SYSCTL_INT(_hw_syscons, OID_AUTO, kbd_reboot, CTLFLAG_RW | CTLFLAG_SECURE, &enable_reboot, 0, "enable keyboard reboot"); #endif #ifndef SC_DISABLE_KDBKEY SYSCTL_INT(_hw_syscons, OID_AUTO, kbd_debug, CTLFLAG_RW | CTLFLAG_SECURE, &enable_kdbkey, 0, "enable keyboard debug"); #endif SYSCTL_INT(_hw_syscons, OID_AUTO, sc_no_suspend_vtswitch, CTLFLAG_RWTUN, &sc_no_suspend_vtswitch, 0, "Disable VT switch before suspend."); #if !defined(SC_NO_FONT_LOADING) && defined(SC_DFLT_FONT) #include "font.h" #endif tsw_ioctl_t *sc_user_ioctl; static bios_values_t bios_value; static int enable_panic_key; SYSCTL_INT(_machdep, OID_AUTO, enable_panic_key, CTLFLAG_RW, &enable_panic_key, 0, "Enable panic via keypress specified in kbdmap(5)"); #define SC_CONSOLECTL 255 #define VTY_WCHAN(sc, vty) (&SC_DEV(sc, vty)) /* prototypes */ static int sc_allocate_keyboard(sc_softc_t *sc, int unit); static int scvidprobe(int unit, int flags, int cons); static int sckbdprobe(int unit, int flags, int cons); static void scmeminit(void *arg); static int scdevtounit(struct tty *tp); static kbd_callback_func_t sckbdevent; static void scinit(int unit, int flags); static scr_stat *sc_get_stat(struct tty *tp); static void scterm(int unit, int flags); static void scshutdown(void *, int); static void scsuspend(void *); static void scresume(void *); static u_int scgetc(sc_softc_t *sc, u_int flags, struct sc_cnstate *sp); static void sc_puts(scr_stat *scp, u_char *buf, int len); #define SCGETC_CN 1 #define SCGETC_NONBLOCK 2 static void sccnupdate(scr_stat *scp); static scr_stat *alloc_scp(sc_softc_t *sc, int vty); static void init_scp(sc_softc_t *sc, int vty, scr_stat *scp); -static timeout_t scrn_timer; +static callout_func_t scrn_timer; static int and_region(int *s1, int *e1, int s2, int e2); static void scrn_update(scr_stat *scp, int show_cursor); #ifdef DEV_SPLASH static int scsplash_callback(int event, void *arg); static void scsplash_saver(sc_softc_t *sc, int show); static int add_scrn_saver(void (*this_saver)(sc_softc_t *, int)); static int remove_scrn_saver(void (*this_saver)(sc_softc_t *, int)); static int set_scrn_saver_mode( scr_stat *scp, int mode, u_char *pal, int border); static int restore_scrn_saver_mode(scr_stat *scp, int changemode); static void stop_scrn_saver(sc_softc_t *sc, void (*saver)(sc_softc_t *, int)); static int wait_scrn_saver_stop(sc_softc_t *sc); #define scsplash_stick(stick) (sticky_splash = (stick)) #else /* !DEV_SPLASH */ #define scsplash_stick(stick) #endif 
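/*
 * Editor's illustrative aside, not part of the original source: the two
 * hunks in this file replace timeout_t with callout_func_t in the forward
 * declarations of scrn_timer() and blink_screen().  Both typedefs name a
 * function taking a single void * argument and returning void, so only
 * the declared type changes, not the handlers themselves.  The stand-in
 * typedefs below mirror that shape; the kernel's own definitions live in
 * the system headers.
 */
#include <stdio.h>

typedef void timeout_t(void *);		/* legacy spelling */
typedef void callout_func_t(void *);	/* preferred spelling for callouts */

/* Declaring a handler through either typedef yields the same prototype. */
static callout_func_t example_timer;

static void
example_timer(void *arg)
{
	printf("tick: %s\n", (char *)arg);
}

int
main(void)
{
	static char msg[] = "hello from a callout handler";

	example_timer(msg);
	return (0);
}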
/* DEV_SPLASH */ static int do_switch_scr(sc_softc_t *sc, int s); static int vt_proc_alive(scr_stat *scp); static int signal_vt_rel(scr_stat *scp); static int signal_vt_acq(scr_stat *scp); static int finish_vt_rel(scr_stat *scp, int release, int *s); static int finish_vt_acq(scr_stat *scp); static void exchange_scr(sc_softc_t *sc); static void update_cursor_image(scr_stat *scp); static void change_cursor_shape(scr_stat *scp, int flags, int base, int height); static void update_font(scr_stat *); static int save_kbd_state(scr_stat *scp); static int update_kbd_state(scr_stat *scp, int state, int mask); static int update_kbd_leds(scr_stat *scp, int which); static int sc_kattr(void); -static timeout_t blink_screen; +static callout_func_t blink_screen; static struct tty *sc_alloc_tty(int, int); static cn_probe_t sc_cnprobe; static cn_init_t sc_cninit; static cn_term_t sc_cnterm; static cn_getc_t sc_cngetc; static cn_putc_t sc_cnputc; static cn_grab_t sc_cngrab; static cn_ungrab_t sc_cnungrab; CONSOLE_DRIVER(sc); static tsw_open_t sctty_open; static tsw_close_t sctty_close; static tsw_outwakeup_t sctty_outwakeup; static tsw_ioctl_t sctty_ioctl; static tsw_mmap_t sctty_mmap; static struct ttydevsw sc_ttydevsw = { .tsw_open = sctty_open, .tsw_close = sctty_close, .tsw_outwakeup = sctty_outwakeup, .tsw_ioctl = sctty_ioctl, .tsw_mmap = sctty_mmap, }; static d_ioctl_t consolectl_ioctl; static d_close_t consolectl_close; static struct cdevsw consolectl_devsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT | D_TRACKCLOSE, .d_ioctl = consolectl_ioctl, .d_close = consolectl_close, .d_name = "consolectl", }; /* ec -- emergency console. */ static u_int ec_scroffset; static void ec_putc(int c) { uintptr_t fb; u_short *scrptr; u_int ind; int attr, column, mysize, width, xsize, yborder, ysize; if (c < 0 || c > 0xff || c == '\a') return; if (sc_console == NULL) { #if !defined(__amd64__) && !defined(__i386__) return; #else /* * This is enough for ec_putc() to work very early on x86 * if the kernel starts in normal color text mode. */ #ifdef __amd64__ fb = KERNBASE + 0xb8000; #else /* __i386__ */ fb = pmap_get_map_low() + 0xb8000; #endif xsize = 80; ysize = 25; #endif } else { if (!ISTEXTSC(&main_console)) return; fb = main_console.sc->adp->va_window; xsize = main_console.xsize; ysize = main_console.ysize; } yborder = ysize / 5; scrptr = (u_short *)(void *)fb + xsize * yborder; mysize = xsize * (ysize - 2 * yborder); do { ind = ec_scroffset; column = ind % xsize; width = (c == '\b' ? -1 : c == '\t' ? (column + 8) & ~7 : c == '\r' ? -column : c == '\n' ? xsize - column : 1); if (width == 0 || (width < 0 && ind < -width)) return; } while (atomic_cmpset_rel_int(&ec_scroffset, ind, ind + width) == 0); if (c == '\b' || c == '\r') return; if (c == '\n') ind += xsize; /* XXX clearing from new pos is not atomic */ attr = sc_kattr(); if (c == '\t' || c == '\n') c = ' '; do scrptr[ind++ % mysize] = (attr << 8) | c; while (--width != 0); } int sc_probe_unit(int unit, int flags) { if (!vty_enabled(VTY_SC)) return ENXIO; if (!scvidprobe(unit, flags, FALSE)) { if (bootverbose) printf("%s%d: no video adapter found.\n", SC_DRIVER_NAME, unit); return ENXIO; } /* syscons will be attached even when there is no keyboard */ sckbdprobe(unit, flags, FALSE); return 0; } /* probe video adapters, return TRUE if found */ static int scvidprobe(int unit, int flags, int cons) { /* * Access the video adapter driver through the back door! * Video adapter drivers need to be configured before syscons. 
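/*
 * Editor's illustrative aside, not part of the original source: ec_putc()
 * above reserves screen cells without a lock by retrying a compare-and-swap
 * on ec_scroffset.  In the real routine the width depends on the current
 * column (tab, CR, NL), which is why a CAS loop is used instead of a plain
 * fetch-add; the sketch below fixes the width per call for brevity.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int scroffset;	/* shared write offset */

/*
 * Reserve `width` consecutive cells: retry the CAS until no other writer
 * raced us, then return the starting offset we claimed.
 */
static unsigned int
reserve_cells(unsigned int width)
{
	unsigned int ind;

	do {
		ind = atomic_load(&scroffset);
	} while (!atomic_compare_exchange_weak(&scroffset, &ind, ind + width));
	return (ind);
}

int
main(void)
{
	printf("first cell: %u\n", reserve_cells(1));
	printf("next cell:  %u\n", reserve_cells(8));	/* e.g. a tab */
	return (0);
}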
* However, when syscons is being probed as the low-level console, * they have not been initialized yet. We force them to initialize * themselves here. XXX */ vid_configure(cons ? VIO_PROBE_ONLY : 0); return (vid_find_adapter("*", unit) >= 0); } /* probe the keyboard, return TRUE if found */ static int sckbdprobe(int unit, int flags, int cons) { /* access the keyboard driver through the backdoor! */ kbd_configure(cons ? KB_CONF_PROBE_ONLY : 0); return (kbd_find_keyboard("*", unit) >= 0); } static char * adapter_name(video_adapter_t *adp) { static struct { int type; char *name[2]; } names[] = { { KD_MONO, { "MDA", "MDA" } }, { KD_HERCULES, { "Hercules", "Hercules" } }, { KD_CGA, { "CGA", "CGA" } }, { KD_EGA, { "EGA", "EGA (mono)" } }, { KD_VGA, { "VGA", "VGA (mono)" } }, { KD_TGA, { "TGA", "TGA" } }, { -1, { "Unknown", "Unknown" } }, }; int i; for (i = 0; names[i].type != -1; ++i) if (names[i].type == adp->va_type) break; return names[i].name[(adp->va_flags & V_ADP_COLOR) ? 0 : 1]; } static void sctty_outwakeup(struct tty *tp) { size_t len; u_char buf[PCBURST]; scr_stat *scp = sc_get_stat(tp); if (scp->status & SLKED || (scp == scp->sc->cur_scp && scp->sc->blink_in_progress)) return; for (;;) { len = ttydisc_getc(tp, buf, sizeof buf); if (len == 0) break; SC_VIDEO_LOCK(scp->sc); sc_puts(scp, buf, len); SC_VIDEO_UNLOCK(scp->sc); } } static struct tty * sc_alloc_tty(int index, int devnum) { struct sc_ttysoftc *stc; struct tty *tp; /* Allocate TTY object and softc to store unit number. */ stc = malloc(sizeof(struct sc_ttysoftc), M_DEVBUF, M_WAITOK); stc->st_index = index; stc->st_stat = NULL; tp = tty_alloc_mutex(&sc_ttydevsw, stc, &Giant); /* Create device node. */ tty_makedev(tp, NULL, "v%r", devnum); return (tp); } #ifdef SC_PIXEL_MODE static void sc_set_vesa_mode(scr_stat *scp, sc_softc_t *sc, int unit) { video_info_t info; u_char *font; int depth; int fontsize; int i; int vmode; vmode = 0; (void)resource_int_value("sc", unit, "vesa_mode", &vmode); if (vmode < M_VESA_BASE || vmode > M_VESA_MODE_MAX || vidd_get_info(sc->adp, vmode, &info) != 0 || !sc_support_pixel_mode(&info)) vmode = 0; /* * If the mode is unset or unsupported, search for an available * 800x600 graphics mode with the highest color depth. 
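/*
 * Editor's illustrative aside, not part of the original source: the loop
 * that follows this comment in sc_set_vesa_mode() implements the search
 * just described, keeping the deepest usable 800x600 mode.  A standalone
 * rendering of the same idea is below; the mode-descriptor fields and
 * values are hypothetical, not the video_info_t layout.
 */
#include <stddef.h>
#include <stdio.h>

struct mode_info {
	int num;	/* mode number */
	int width, height;
	int depth;	/* bits per pixel */
	int usable;	/* nonzero if the adapter can display it */
};

/* Return the mode number of the deepest usable 800x600 mode, or 0. */
static int
pick_800x600(const struct mode_info *modes, size_t n)
{
	int best = 0, depth = 0;

	for (size_t i = 0; i < n; i++) {
		if (modes[i].usable && modes[i].width == 800 &&
		    modes[i].height == 600 && modes[i].depth > depth) {
			best = modes[i].num;
			depth = modes[i].depth;
		}
	}
	return (best);
}

int
main(void)
{
	const struct mode_info modes[] = {
		{ 0x101, 640, 480, 8, 1 },
		{ 0x103, 800, 600, 8, 1 },
		{ 0x115, 800, 600, 24, 1 },
		{ 0x118, 1024, 768, 24, 1 },
	};

	printf("selected mode: 0x%x\n",
	    pick_800x600(modes, sizeof(modes) / sizeof(modes[0])));
	return (0);
}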
*/ if (vmode == 0) { for (depth = 0, i = M_VESA_BASE; i <= M_VESA_MODE_MAX; i++) if (vidd_get_info(sc->adp, i, &info) == 0 && info.vi_width == 800 && info.vi_height == 600 && sc_support_pixel_mode(&info) && info.vi_depth > depth) { vmode = i; depth = info.vi_depth; } if (vmode == 0) return; vidd_get_info(sc->adp, vmode, &info); } #if !defined(SC_NO_FONT_LOADING) && defined(SC_DFLT_FONT) fontsize = info.vi_cheight; #else fontsize = scp->font_size; #endif if (fontsize < 14) fontsize = 8; else if (fontsize >= 16) fontsize = 16; else fontsize = 14; #ifndef SC_NO_FONT_LOADING switch (fontsize) { case 8: if ((sc->fonts_loaded & FONT_8) == 0) return; font = sc->font_8; break; case 14: if ((sc->fonts_loaded & FONT_14) == 0) return; font = sc->font_14; break; case 16: if ((sc->fonts_loaded & FONT_16) == 0) return; font = sc->font_16; break; } #else font = NULL; #endif #ifdef DEV_SPLASH if ((sc->flags & SC_SPLASH_SCRN) != 0) splash_term(sc->adp); #endif #ifndef SC_NO_HISTORY if (scp->history != NULL) { sc_vtb_append(&scp->vtb, 0, scp->history, scp->ypos * scp->xsize + scp->xpos); scp->history_pos = sc_vtb_tail(scp->history); } #endif vidd_set_mode(sc->adp, vmode); scp->status |= (UNKNOWN_MODE | PIXEL_MODE | MOUSE_HIDDEN); scp->status &= ~(GRAPHICS_MODE | MOUSE_VISIBLE); scp->xpixel = info.vi_width; scp->ypixel = info.vi_height; scp->xsize = scp->xpixel / 8; scp->ysize = scp->ypixel / fontsize; scp->xpos = 0; scp->ypos = scp->ysize - 1; scp->xoff = scp->yoff = 0; scp->font = font; scp->font_size = fontsize; scp->font_width = 8; scp->start = scp->xsize * scp->ysize - 1; scp->end = 0; scp->cursor_pos = scp->cursor_oldpos = scp->xsize * scp->xsize; scp->mode = sc->initial_mode = vmode; #ifndef __sparc64__ sc_vtb_init(&scp->scr, VTB_FRAMEBUFFER, scp->xsize, scp->ysize, (void *)sc->adp->va_window, FALSE); #endif sc_alloc_scr_buffer(scp, FALSE, FALSE); sc_init_emulator(scp, NULL); #ifndef SC_NO_CUTPASTE sc_alloc_cut_buffer(scp, FALSE); #endif #ifndef SC_NO_HISTORY sc_alloc_history_buffer(scp, 0, 0, FALSE); #endif sc_set_border(scp, scp->border); sc_set_cursor_image(scp); scp->status &= ~UNKNOWN_MODE; #ifdef DEV_SPLASH if ((sc->flags & SC_SPLASH_SCRN) != 0) splash_init(sc->adp, scsplash_callback, sc); #endif } #endif int sc_attach_unit(int unit, int flags) { sc_softc_t *sc; scr_stat *scp; struct cdev *dev; void *oldts, *ts; int i, vc; if (!vty_enabled(VTY_SC)) return ENXIO; flags &= ~SC_KERNEL_CONSOLE; if (sc_console_unit == unit) { /* * If this unit is being used as the system console, we need to * adjust some variables and buffers before and after scinit(). 
*/ /* assert(sc_console != NULL) */ flags |= SC_KERNEL_CONSOLE; scmeminit(NULL); scinit(unit, flags); if (sc_console->tsw->te_size > 0) { sc_ktsw = sc_console->tsw; /* assert(sc_console->ts != NULL); */ oldts = sc_console->ts; for (i = 0; i <= mp_maxid; i++) { ts = malloc(sc_console->tsw->te_size, M_DEVBUF, M_WAITOK | M_ZERO); (*sc_console->tsw->te_init)( sc_console, &ts, SC_TE_COLD_INIT); sc_console->ts = ts; (*sc_console->tsw->te_default_attr)(sc_console, sc_kattrtab[i], SC_KERNEL_CONS_REV_ATTR); sc_kts[i] = ts; } sc_console->ts = oldts; (*sc_console->tsw->te_default_attr)( sc_console, SC_NORM_ATTR, SC_NORM_REV_ATTR); } } else { scinit(unit, flags); } sc = sc_get_softc(unit, flags & SC_KERNEL_CONSOLE); sc->config = flags; callout_init(&sc->ctimeout, 0); callout_init(&sc->cblink, 0); scp = sc_get_stat(sc->dev[0]); if (sc_console == NULL) /* sc_console_unit < 0 */ sc_console = scp; #ifdef SC_PIXEL_MODE if ((sc->config & SC_VESAMODE) != 0) sc_set_vesa_mode(scp, sc, unit); #endif /* SC_PIXEL_MODE */ /* initialize cursor */ if (!ISGRAPHSC(scp)) update_cursor_image(scp); /* get screen update going */ scrn_timer(sc); /* set up the keyboard */ (void)kbdd_ioctl(sc->kbd, KDSKBMODE, (caddr_t)&scp->kbd_mode); update_kbd_state(scp, scp->status, LOCK_MASK); printf("%s%d: %s <%d virtual consoles, flags=0x%x>\n", SC_DRIVER_NAME, unit, adapter_name(sc->adp), sc->vtys, sc->config); if (bootverbose) { printf("%s%d:", SC_DRIVER_NAME, unit); if (sc->adapter >= 0) printf(" fb%d", sc->adapter); if (sc->keyboard >= 0) printf(", kbd%d", sc->keyboard); if (scp->tsw) printf(", terminal emulator: %s (%s)", scp->tsw->te_name, scp->tsw->te_desc); printf("\n"); } /* Register suspend/resume/shutdown callbacks for the kernel console. */ if (sc_console_unit == unit) { EVENTHANDLER_REGISTER( power_suspend_early, scsuspend, NULL, EVENTHANDLER_PRI_ANY); EVENTHANDLER_REGISTER( power_resume, scresume, NULL, EVENTHANDLER_PRI_ANY); EVENTHANDLER_REGISTER( shutdown_pre_sync, scshutdown, NULL, SHUTDOWN_PRI_DEFAULT); } for (vc = 0; vc < sc->vtys; vc++) { if (sc->dev[vc] == NULL) { sc->dev[vc] = sc_alloc_tty(vc, vc + unit * MAXCONS); if (vc == 0 && sc->dev == main_devs) SC_STAT(sc->dev[0]) = &main_console; } /* * The first vty already has struct tty and scr_stat initialized * in scinit(). The other vtys will have these structs when * first opened. */ } dev = make_dev( &consolectl_devsw, 0, UID_ROOT, GID_WHEEL, 0600, "consolectl"); dev->si_drv1 = sc->dev[0]; return 0; } static void scmeminit(void *arg) { if (!vty_enabled(VTY_SC)) return; if (sc_malloc) return; sc_malloc = TRUE; /* * As soon as malloc() becomes functional, we had better allocate * various buffers for the kernel console. */ if (sc_console_unit < 0) /* sc_console == NULL */ return; /* copy the temporary buffer to the final buffer */ sc_alloc_scr_buffer(sc_console, FALSE, FALSE); #ifndef SC_NO_CUTPASTE sc_alloc_cut_buffer(sc_console, FALSE); #endif #ifndef SC_NO_HISTORY /* initialize history buffer & pointers */ sc_alloc_history_buffer(sc_console, 0, 0, FALSE); #endif } /* XXX */ SYSINIT(sc_mem, SI_SUB_KMEM, SI_ORDER_ANY, scmeminit, NULL); static int scdevtounit(struct tty *tp) { int vty = SC_VTY(tp); if (vty == SC_CONSOLECTL) return ((sc_console != NULL) ? 
sc_console->sc->unit : -1); else if ((vty < 0) || (vty >= MAXCONS * sc_max_unit())) return -1; else return vty / MAXCONS; } static int sctty_open(struct tty *tp) { int unit = scdevtounit(tp); sc_softc_t *sc; scr_stat *scp; #ifndef __sparc64__ keyarg_t key; #endif DPRINTF(5, ("scopen: dev:%s, unit:%d, vty:%d\n", devtoname(tp->t_dev), unit, SC_VTY(tp))); sc = sc_get_softc( unit, (sc_console_unit == unit) ? SC_KERNEL_CONSOLE : 0); if (sc == NULL) return ENXIO; if (!tty_opened(tp)) { /* Use the current setting of the <-- key as default VERASE. */ /* If the Delete key is preferable, an stty is necessary */ #ifndef __sparc64__ if (sc->kbd != NULL) { key.keynum = KEYCODE_BS; (void)kbdd_ioctl(sc->kbd, GIO_KEYMAPENT, (caddr_t)&key); tp->t_termios.c_cc[VERASE] = key.key.map[0]; } #endif } scp = sc_get_stat(tp); if (scp == NULL) { scp = SC_STAT(tp) = alloc_scp(sc, SC_VTY(tp)); if (ISGRAPHSC(scp)) sc_set_pixel_mode(scp, NULL, 0, 0, 16, 8); } if (!tp->t_winsize.ws_col && !tp->t_winsize.ws_row) { tp->t_winsize.ws_col = scp->xsize; tp->t_winsize.ws_row = scp->ysize; } return (0); } static void sctty_close(struct tty *tp) { scr_stat *scp; int s; if (SC_VTY(tp) != SC_CONSOLECTL) { scp = sc_get_stat(tp); /* were we in the middle of the VT switching process? */ DPRINTF(5, ("sc%d: scclose(), ", scp->sc->unit)); s = spltty(); if ((scp == scp->sc->cur_scp) && (scp->sc->unit == sc_console_unit)) cnavailable(sc_consptr, TRUE); if (finish_vt_rel(scp, TRUE, &s) == 0) /* force release */ DPRINTF(5, ("reset WAIT_REL, ")); if (finish_vt_acq(scp) == 0) /* force acknowledge */ DPRINTF(5, ("reset WAIT_ACQ, ")); #ifdef not_yet_done if (scp == &main_console) { scp->pid = 0; scp->proc = NULL; scp->smode.mode = VT_AUTO; } else { sc_vtb_destroy(&scp->vtb); #ifndef __sparc64__ sc_vtb_destroy(&scp->scr); #endif sc_free_history_buffer(scp, scp->ysize); SC_STAT(tp) = NULL; free(scp, M_DEVBUF); } #else scp->pid = 0; scp->proc = NULL; scp->smode.mode = VT_AUTO; #endif scp->kbd_mode = K_XLATE; if (scp == scp->sc->cur_scp) (void)kbdd_ioctl( scp->sc->kbd, KDSKBMODE, (caddr_t)&scp->kbd_mode); DPRINTF(5, ("done.\n")); } } #if 0 /* XXX mpsafetty: fix screensaver. What about outwakeup? */ static int scread(struct cdev *dev, struct uio *uio, int flag) { if (!sc_saver_keyb_only) sc_touch_scrn_saver(); return ttyread(dev, uio, flag); } #endif static int sckbdevent(keyboard_t *thiskbd, int event, void *arg) { sc_softc_t *sc; struct tty *cur_tty; int c, error = 0; size_t len; const u_char *cp; sc = (sc_softc_t *)arg; /* assert(thiskbd == sc->kbd) */ mtx_lock(&Giant); switch (event) { case KBDIO_KEYINPUT: break; case KBDIO_UNLOADING: sc->kbd = NULL; sc->keyboard = -1; kbd_release(thiskbd, (void *)&sc->keyboard); goto done; default: error = EINVAL; goto done; } /* * Loop while there is still input to get from the keyboard. * I don't think this is nessesary, and it doesn't fix * the Xaccel-2.1 keyboard hang, but it can't hurt. 
XXX */ while ((c = scgetc(sc, SCGETC_NONBLOCK, NULL)) != NOKEY) { cur_tty = SC_DEV(sc, sc->cur_scp->index); if (!tty_opened_ns(cur_tty)) continue; if ((*sc->cur_scp->tsw->te_input)(sc->cur_scp, c, cur_tty)) continue; switch (KEYFLAGS(c)) { case 0x0000: /* normal key */ ttydisc_rint(cur_tty, KEYCHAR(c), 0); break; case FKEY: /* function key, return string */ cp = (*sc->cur_scp->tsw->te_fkeystr)(sc->cur_scp, c); if (cp != NULL) { ttydisc_rint_simple(cur_tty, cp, strlen(cp)); break; } cp = kbdd_get_fkeystr(thiskbd, KEYCHAR(c), &len); if (cp != NULL) ttydisc_rint_simple(cur_tty, cp, len); break; case MKEY: /* meta is active, prepend ESC */ ttydisc_rint(cur_tty, 0x1b, 0); ttydisc_rint(cur_tty, KEYCHAR(c), 0); break; case BKEY: /* backtab fixed sequence (esc [ Z) */ ttydisc_rint_simple(cur_tty, "\x1B[Z", 3); break; } ttydisc_rint_done(cur_tty); } sc->cur_scp->status |= MOUSE_HIDDEN; done: mtx_unlock(&Giant); return (error); } static int sctty_ioctl(struct tty *tp, u_long cmd, caddr_t data, struct thread *td) { int error; int i; struct cursor_attr *cap; sc_softc_t *sc; scr_stat *scp; int s; #if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD4) || defined(COMPAT_43) int ival; #endif /* If there is a user_ioctl function call that first */ if (sc_user_ioctl) { error = (*sc_user_ioctl)(tp, cmd, data, td); if (error != ENOIOCTL) return error; } error = sc_vid_ioctl(tp, cmd, data, td); if (error != ENOIOCTL) return error; #ifndef SC_NO_HISTORY error = sc_hist_ioctl(tp, cmd, data, td); if (error != ENOIOCTL) return error; #endif #ifndef SC_NO_SYSMOUSE error = sc_mouse_ioctl(tp, cmd, data, td); if (error != ENOIOCTL) return error; #endif scp = sc_get_stat(tp); /* assert(scp != NULL) */ /* scp is sc_console, if SC_VTY(dev) == SC_CONSOLECTL. */ sc = scp->sc; if (scp->tsw) { error = (*scp->tsw->te_ioctl)(scp, tp, cmd, data, td); if (error != ENOIOCTL) return error; } switch (cmd) { /* process console hardware related ioctl's */ case GIO_ATTR: /* get current attributes */ /* this ioctl is not processed here, but in the terminal * emulator */ return ENOTTY; case GIO_COLOR: /* is this a color console ? */ *(int *)data = (sc->adp->va_flags & V_ADP_COLOR) ? 
1 : 0; return 0; case CONS_BLANKTIME: /* set screen saver timeout (0 = no saver) */ if (*(int *)data < 0 || *(int *)data > MAX_BLANKTIME) return EINVAL; s = spltty(); scrn_blank_time = *(int *)data; run_scrn_saver = (scrn_blank_time != 0); splx(s); return 0; case CONS_CURSORTYPE: /* set cursor type (old interface + HIDDEN) */ s = spltty(); *(int *)data &= CONS_CURSOR_ATTRS; sc_change_cursor_shape(scp, *(int *)data, -1, -1); splx(s); return 0; case CONS_GETCURSORSHAPE: /* get cursor shape (new interface) */ switch (((int *)data)[0] & (CONS_DEFAULT_CURSOR | CONS_LOCAL_CURSOR)) { case 0: cap = &sc->curs_attr; break; case CONS_LOCAL_CURSOR: cap = &scp->base_curs_attr; break; case CONS_DEFAULT_CURSOR: cap = &sc->dflt_curs_attr; break; case CONS_DEFAULT_CURSOR | CONS_LOCAL_CURSOR: cap = &scp->dflt_curs_attr; break; } if (((int *)data)[0] & CONS_CHARCURSOR_COLORS) { ((int *)data)[1] = cap->bg[0]; ((int *)data)[2] = cap->bg[1]; } else if (((int *)data)[0] & CONS_MOUSECURSOR_COLORS) { ((int *)data)[1] = cap->mouse_ba; ((int *)data)[2] = cap->mouse_ia; } else { ((int *)data)[1] = cap->base; ((int *)data)[2] = cap->height; } ((int *)data)[0] = cap->flags; return 0; case CONS_SETCURSORSHAPE: /* set cursor shape (new interface) */ s = spltty(); sc_change_cursor_shape( scp, ((int *)data)[0], ((int *)data)[1], ((int *)data)[2]); splx(s); return 0; case CONS_BELLTYPE: /* set bell type sound/visual */ if ((*(int *)data) & CONS_VISUAL_BELL) sc->flags |= SC_VISUAL_BELL; else sc->flags &= ~SC_VISUAL_BELL; if ((*(int *)data) & CONS_QUIET_BELL) sc->flags |= SC_QUIET_BELL; else sc->flags &= ~SC_QUIET_BELL; return 0; case CONS_GETINFO: /* get current (virtual) console info */ { vid_info_t *ptr = (vid_info_t *)data; if (ptr->size == sizeof(struct vid_info)) { ptr->m_num = sc->cur_scp->index; ptr->font_size = scp->font_size; ptr->mv_col = scp->xpos; ptr->mv_row = scp->ypos; ptr->mv_csz = scp->xsize; ptr->mv_rsz = scp->ysize; ptr->mv_hsz = (scp->history != NULL) ? scp->history->vtb_rows : 0; /* * The following fields are filled by the terminal * emulator. XXX * * ptr->mv_norm.fore * ptr->mv_norm.back * ptr->mv_rev.fore * ptr->mv_rev.back */ ptr->mv_grfc.fore = 0; /* not supported */ ptr->mv_grfc.back = 0; /* not supported */ ptr->mv_ovscan = scp->border; if (scp == sc->cur_scp) save_kbd_state(scp); ptr->mk_keylock = scp->status & LOCK_MASK; return 0; } return EINVAL; } case CONS_GETVERS: /* get version number */ *(int *)data = 0x200; /* version 2.0 */ return 0; case CONS_IDLE: /* see if the screen has been idle */ /* * When the screen is in the GRAPHICS_MODE or UNKNOWN_MODE, * the user process may have been writing something on the * screen and syscons is not aware of it. Declare the screen * is NOT idle if it is in one of these modes. But there is * an exception to it; if a screen saver is running in the * graphics mode in the current screen, we should say that the * screen has been idle. */ *(int *)data = (sc->flags & SC_SCRN_IDLE) && (!ISGRAPHSC(sc->cur_scp) || (sc->cur_scp->status & SAVER_RUNNING)); return 0; case CONS_SAVERMODE: /* set saver mode */ switch (*(int *)data) { case CONS_NO_SAVER: case CONS_USR_SAVER: /* if a LKM screen saver is running, stop it first. 
*/ scsplash_stick(FALSE); saver_mode = *(int *)data; s = spltty(); #ifdef DEV_SPLASH if ((error = wait_scrn_saver_stop(NULL))) { splx(s); return error; } #endif run_scrn_saver = TRUE; if (saver_mode == CONS_USR_SAVER) scp->status |= SAVER_RUNNING; else scp->status &= ~SAVER_RUNNING; scsplash_stick(TRUE); splx(s); break; case CONS_LKM_SAVER: s = spltty(); if ((saver_mode == CONS_USR_SAVER) && (scp->status & SAVER_RUNNING)) scp->status &= ~SAVER_RUNNING; saver_mode = *(int *)data; splx(s); break; default: return EINVAL; } return 0; case CONS_SAVERSTART: /* immediately start/stop the screen saver */ /* * Note that this ioctl does not guarantee the screen saver * actually starts or stops. It merely attempts to do so... */ s = spltty(); run_scrn_saver = (*(int *)data != 0); if (run_scrn_saver) sc->scrn_time_stamp -= scrn_blank_time; splx(s); return 0; case CONS_SCRSHOT: /* get a screen shot */ { int retval, hist_rsz; size_t lsize, csize; vm_offset_t frbp, hstp; unsigned lnum; scrshot_t *ptr = (scrshot_t *)data; void *outp = ptr->buf; if (ptr->x < 0 || ptr->y < 0 || ptr->xsize < 0 || ptr->ysize < 0) return EINVAL; s = spltty(); if (ISGRAPHSC(scp)) { splx(s); return EOPNOTSUPP; } hist_rsz = (scp->history != NULL) ? scp->history->vtb_rows : 0; if (((u_int)ptr->x + ptr->xsize) > scp->xsize || ((u_int)ptr->y + ptr->ysize) > (scp->ysize + hist_rsz)) { splx(s); return EINVAL; } lsize = scp->xsize * sizeof(u_int16_t); csize = ptr->xsize * sizeof(u_int16_t); /* Pointer to the last line of framebuffer */ frbp = scp->vtb.vtb_buffer + scp->ysize * lsize + ptr->x * sizeof(u_int16_t); /* Pointer to the last line of target buffer */ outp = (char *)outp + ptr->ysize * csize; /* Pointer to the last line of history buffer */ if (scp->history != NULL) hstp = scp->history->vtb_buffer + sc_vtb_tail(scp->history) * sizeof(u_int16_t) + ptr->x * sizeof(u_int16_t); else hstp = 0; retval = 0; for (lnum = 0; lnum < (ptr->y + ptr->ysize); lnum++) { if (lnum < scp->ysize) { frbp -= lsize; } else { hstp -= lsize; if (hstp < scp->history->vtb_buffer) hstp += scp->history->vtb_rows * lsize; frbp = hstp; } if (lnum < ptr->y) continue; outp = (char *)outp - csize; retval = copyout((void *)frbp, outp, csize); if (retval != 0) break; } splx(s); return retval; } case VT_SETMODE: /* set screen switcher mode */ { struct vt_mode *mode; struct proc *p1; mode = (struct vt_mode *)data; DPRINTF(5, ("%s%d: VT_SETMODE ", SC_DRIVER_NAME, sc->unit)); if (scp->smode.mode == VT_PROCESS) { p1 = pfind(scp->pid); if (scp->proc == p1 && scp->proc != td->td_proc) { if (p1) PROC_UNLOCK(p1); DPRINTF(5, ("error EPERM\n")); return EPERM; } if (p1) PROC_UNLOCK(p1); } s = spltty(); if (mode->mode == VT_AUTO) { scp->smode.mode = VT_AUTO; scp->proc = NULL; scp->pid = 0; DPRINTF(5, ("VT_AUTO, ")); if ((scp == sc->cur_scp) && (sc->unit == sc_console_unit)) cnavailable(sc_consptr, TRUE); /* were we in the middle of the vty switching process? 
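/*
 * Illustrative sketch, not from the original source: the CONS_SCRSHOT
 * handler above first rejects requests whose rectangle is negative or
 * reaches past the visible screen plus the history buffer.  A standalone
 * version of that bounds check follows; the request structure here is
 * hypothetical, not scrshot_t.
 */
#include <stdbool.h>
#include <stdio.h>

struct shot_req {
	int x, y;		/* top-left corner, in character cells */
	int xsize, ysize;	/* requested width and height */
};

/* Accept only rectangles inside the columns and rows (screen + history). */
static bool
shot_req_valid(const struct shot_req *r, int cols, int rows, int hist_rows)
{
	if (r->x < 0 || r->y < 0 || r->xsize < 0 || r->ysize < 0)
		return (false);
	if (r->x + r->xsize > cols)
		return (false);
	if (r->y + r->ysize > rows + hist_rows)
		return (false);
	return (true);
}

int
main(void)
{
	struct shot_req req = { 0, 0, 80, 200 };

	/* An 80x25 screen with 500 history rows easily covers 200 rows. */
	printf("valid: %d\n", shot_req_valid(&req, 80, 25, 500));
	return (0);
}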
*/ if (finish_vt_rel(scp, TRUE, &s) == 0) DPRINTF(5, ("reset WAIT_REL, ")); if (finish_vt_acq(scp) == 0) DPRINTF(5, ("reset WAIT_ACQ, ")); } else { if (!ISSIGVALID(mode->relsig) || !ISSIGVALID(mode->acqsig) || !ISSIGVALID(mode->frsig)) { splx(s); DPRINTF(5, ("error EINVAL\n")); return EINVAL; } DPRINTF(5, ("VT_PROCESS %d, ", td->td_proc->p_pid)); bcopy(data, &scp->smode, sizeof(struct vt_mode)); scp->proc = td->td_proc; scp->pid = scp->proc->p_pid; if ((scp == sc->cur_scp) && (sc->unit == sc_console_unit)) cnavailable(sc_consptr, FALSE); } splx(s); DPRINTF(5, ("\n")); return 0; } case VT_GETMODE: /* get screen switcher mode */ bcopy(&scp->smode, data, sizeof(struct vt_mode)); return 0; #if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD4) || defined(COMPAT_43) case _IO('v', 4): ival = IOCPARM_IVAL(data); data = (caddr_t)&ival; /* FALLTHROUGH */ #endif case VT_RELDISP: /* screen switcher ioctl */ s = spltty(); /* * This must be the current vty which is in the VT_PROCESS * switching mode... */ if ((scp != sc->cur_scp) || (scp->smode.mode != VT_PROCESS)) { splx(s); return EINVAL; } /* ...and this process is controlling it. */ if (scp->proc != td->td_proc) { splx(s); return EPERM; } error = EINVAL; switch (*(int *)data) { case VT_FALSE: /* user refuses to release screen, abort */ if ((error = finish_vt_rel(scp, FALSE, &s)) == 0) DPRINTF(5, ("%s%d: VT_FALSE\n", SC_DRIVER_NAME, sc->unit)); break; case VT_TRUE: /* user has released screen, go on */ if ((error = finish_vt_rel(scp, TRUE, &s)) == 0) DPRINTF(5, ("%s%d: VT_TRUE\n", SC_DRIVER_NAME, sc->unit)); break; case VT_ACKACQ: /* acquire acknowledged, switch completed */ if ((error = finish_vt_acq(scp)) == 0) DPRINTF(5, ("%s%d: VT_ACKACQ\n", SC_DRIVER_NAME, sc->unit)); break; default: break; } splx(s); return error; case VT_OPENQRY: /* return free virtual console */ for (i = sc->first_vty; i < sc->first_vty + sc->vtys; i++) { tp = SC_DEV(sc, i); if (!tty_opened_ns(tp)) { *(int *)data = i + 1; return 0; } } return EINVAL; #if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD4) || defined(COMPAT_43) case _IO('v', 5): ival = IOCPARM_IVAL(data); data = (caddr_t)&ival; /* FALLTHROUGH */ #endif case VT_ACTIVATE: /* switch to screen *data */ i = (*(int *)data == 0) ? scp->index : (*(int *)data - 1); s = spltty(); error = sc_clean_up(sc->cur_scp); splx(s); if (error) return error; error = sc_switch_scr(sc, i); return (error); #if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD4) || defined(COMPAT_43) case _IO('v', 6): ival = IOCPARM_IVAL(data); data = (caddr_t)&ival; /* FALLTHROUGH */ #endif case VT_WAITACTIVE: /* wait for switch to occur */ i = (*(int *)data == 0) ? 
scp->index : (*(int *)data - 1); if ((i < sc->first_vty) || (i >= sc->first_vty + sc->vtys)) return EINVAL; if (i == sc->cur_scp->index) return 0; error = tsleep(VTY_WCHAN(sc, i), (PZERO + 1) | PCATCH, "waitvt", 0); return error; case VT_GETACTIVE: /* get active vty # */ *(int *)data = sc->cur_scp->index + 1; return 0; case VT_GETINDEX: /* get this vty # */ *(int *)data = scp->index + 1; return 0; case VT_LOCKSWITCH: /* prevent vty switching */ if ((*(int *)data) & 0x01) sc->flags |= SC_SCRN_VTYLOCK; else sc->flags &= ~SC_SCRN_VTYLOCK; return 0; case KDENABIO: /* allow io operations */ error = priv_check(td, PRIV_IO); if (error != 0) return error; error = securelevel_gt(td->td_ucred, 0); if (error != 0) return error; #ifdef __i386__ td->td_frame->tf_eflags |= PSL_IOPL; #elif defined(__amd64__) td->td_frame->tf_rflags |= PSL_IOPL; #endif return 0; case KDDISABIO: /* disallow io operations (default) */ #ifdef __i386__ td->td_frame->tf_eflags &= ~PSL_IOPL; #elif defined(__amd64__) td->td_frame->tf_rflags &= ~PSL_IOPL; #endif return 0; #if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD4) || defined(COMPAT_43) case _IO('K', 20): ival = IOCPARM_IVAL(data); data = (caddr_t)&ival; /* FALLTHROUGH */ #endif case KDSKBSTATE: /* set keyboard state (locks) */ if (*(int *)data & ~LOCK_MASK) return EINVAL; scp->status &= ~LOCK_MASK; scp->status |= *(int *)data; if (scp == sc->cur_scp) update_kbd_state(scp, scp->status, LOCK_MASK); return 0; case KDGKBSTATE: /* get keyboard state (locks) */ if (scp == sc->cur_scp) save_kbd_state(scp); *(int *)data = scp->status & LOCK_MASK; return 0; case KDGETREPEAT: /* get keyboard repeat & delay rates */ case KDSETREPEAT: /* set keyboard repeat & delay rates (new) */ error = kbdd_ioctl(sc->kbd, cmd, data); if (error == ENOIOCTL) error = ENODEV; return error; #if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD4) || defined(COMPAT_43) case _IO('K', 67): ival = IOCPARM_IVAL(data); data = (caddr_t)&ival; /* FALLTHROUGH */ #endif case KDSETRAD: /* set keyboard repeat & delay rates (old) */ if (*(int *)data & ~0x7f) return EINVAL; error = kbdd_ioctl(sc->kbd, KDSETRAD, data); if (error == ENOIOCTL) error = ENODEV; return error; #if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD4) || defined(COMPAT_43) case _IO('K', 7): ival = IOCPARM_IVAL(data); data = (caddr_t)&ival; /* FALLTHROUGH */ #endif case KDSKBMODE: /* set keyboard mode */ switch (*(int *)data) { case K_XLATE: /* switch to XLT ascii mode */ case K_RAW: /* switch to RAW scancode mode */ case K_CODE: /* switch to CODE mode */ scp->kbd_mode = *(int *)data; if (scp == sc->cur_scp) (void)kbdd_ioctl(sc->kbd, KDSKBMODE, data); return 0; default: return EINVAL; } /* NOT REACHED */ case KDGKBMODE: /* get keyboard mode */ *(int *)data = scp->kbd_mode; return 0; case KDGKBINFO: error = kbdd_ioctl(sc->kbd, cmd, data); if (error == ENOIOCTL) error = ENODEV; return error; #if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD4) || defined(COMPAT_43) case _IO('K', 8): ival = IOCPARM_IVAL(data); data = (caddr_t)&ival; /* FALLTHROUGH */ #endif case KDMKTONE: /* sound the bell */ if (*(int *)data) sc_bell(scp, (*(int *)data) & 0xffff, (((*(int *)data) >> 16) & 0xffff) * hz / 1000); else sc_bell(scp, scp->bell_pitch, scp->bell_duration); return 0; #if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD4) || defined(COMPAT_43) case _IO('K', 63): ival = IOCPARM_IVAL(data); data = 
(caddr_t)&ival; /* FALLTHROUGH */ #endif case KIOCSOUND: /* make tone (*data) hz */ if (scp == sc->cur_scp) { if (*(int *)data) return sc_tone(*(int *)data); else return sc_tone(0); } return 0; case KDGKBTYPE: /* get keyboard type */ error = kbdd_ioctl(sc->kbd, cmd, data); if (error == ENOIOCTL) { /* always return something? XXX */ *(int *)data = 0; } return 0; #if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD4) || defined(COMPAT_43) case _IO('K', 66): ival = IOCPARM_IVAL(data); data = (caddr_t)&ival; /* FALLTHROUGH */ #endif case KDSETLED: /* set keyboard LED status */ if (*(int *)data & ~LED_MASK) /* FIXME: LOCK_MASK? */ return EINVAL; scp->status &= ~LED_MASK; scp->status |= *(int *)data; if (scp == sc->cur_scp) update_kbd_leds(scp, scp->status); return 0; case KDGETLED: /* get keyboard LED status */ if (scp == sc->cur_scp) save_kbd_state(scp); *(int *)data = scp->status & LED_MASK; return 0; case KBADDKBD: /* add/remove keyboard to/from mux */ case KBRELKBD: error = kbdd_ioctl(sc->kbd, cmd, data); if (error == ENOIOCTL) error = ENODEV; return error; #if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD4) || defined(COMPAT_43) case _IO('c', 110): ival = IOCPARM_IVAL(data); data = (caddr_t)&ival; /* FALLTHROUGH */ #endif case CONS_SETKBD: /* set the new keyboard */ { keyboard_t *newkbd; s = spltty(); newkbd = kbd_get_keyboard(*(int *)data); if (newkbd == NULL) { splx(s); return EINVAL; } error = 0; if (sc->kbd != newkbd) { i = kbd_allocate(newkbd->kb_name, newkbd->kb_unit, (void *)&sc->keyboard, sckbdevent, sc); /* i == newkbd->kb_index */ if (i >= 0) { if (sc->kbd != NULL) { save_kbd_state(sc->cur_scp); kbd_release( sc->kbd, (void *)&sc->keyboard); } sc->kbd = kbd_get_keyboard(i); /* sc->kbd == newkbd */ sc->keyboard = i; (void)kbdd_ioctl(sc->kbd, KDSKBMODE, (caddr_t)&sc->cur_scp->kbd_mode); update_kbd_state(sc->cur_scp, sc->cur_scp->status, LOCK_MASK); } else { error = EPERM; /* XXX */ } } splx(s); return error; } case CONS_RELKBD: /* release the current keyboard */ s = spltty(); error = 0; if (sc->kbd != NULL) { save_kbd_state(sc->cur_scp); error = kbd_release(sc->kbd, (void *)&sc->keyboard); if (error == 0) { sc->kbd = NULL; sc->keyboard = -1; } } splx(s); return error; case CONS_GETTERM: /* get the current terminal emulator info */ { sc_term_sw_t *sw; if (((term_info_t *)data)->ti_index == 0) { sw = scp->tsw; } else { sw = sc_term_match_by_number( ((term_info_t *)data)->ti_index); } if (sw != NULL) { strncpy(((term_info_t *)data)->ti_name, sw->te_name, sizeof(((term_info_t *)data)->ti_name)); strncpy(((term_info_t *)data)->ti_desc, sw->te_desc, sizeof(((term_info_t *)data)->ti_desc)); ((term_info_t *)data)->ti_flags = 0; return 0; } else { ((term_info_t *)data)->ti_name[0] = '\0'; ((term_info_t *)data)->ti_desc[0] = '\0'; ((term_info_t *)data)->ti_flags = 0; return EINVAL; } } case CONS_SETTERM: /* set the current terminal emulator */ s = spltty(); error = sc_init_emulator(scp, ((term_info_t *)data)->ti_name); /* FIXME: what if scp == sc_console! 
XXX */ splx(s); return error; case GIO_SCRNMAP: /* get output translation table */ bcopy(&sc->scr_map, data, sizeof(sc->scr_map)); return 0; case PIO_SCRNMAP: /* set output translation table */ bcopy(data, &sc->scr_map, sizeof(sc->scr_map)); for (i = 0; i < sizeof(sc->scr_map); i++) { sc->scr_rmap[sc->scr_map[i]] = i; } return 0; case GIO_KEYMAP: /* get keyboard translation table */ case PIO_KEYMAP: /* set keyboard translation table */ case OGIO_KEYMAP: /* get keyboard translation table (compat) */ case OPIO_KEYMAP: /* set keyboard translation table (compat) */ case GIO_DEADKEYMAP: /* get accent key translation table */ case PIO_DEADKEYMAP: /* set accent key translation table */ case GETFKEY: /* get function key string */ case SETFKEY: /* set function key string */ error = kbdd_ioctl(sc->kbd, cmd, data); if (error == ENOIOCTL) error = ENODEV; return error; #ifndef SC_NO_FONT_LOADING case PIO_FONT8x8: /* set 8x8 dot font */ if (!ISFONTAVAIL(sc->adp->va_flags)) return ENXIO; bcopy(data, sc->font_8, 8 * 256); sc->fonts_loaded |= FONT_8; /* * FONT KLUDGE * Always use the font page #0. XXX * Don't load if the current font size is not 8x8. */ if (ISTEXTSC(sc->cur_scp) && (sc->cur_scp->font_size < 14)) sc_load_font(sc->cur_scp, 0, 8, 8, sc->font_8, 0, 256); return 0; case GIO_FONT8x8: /* get 8x8 dot font */ if (!ISFONTAVAIL(sc->adp->va_flags)) return ENXIO; if (sc->fonts_loaded & FONT_8) { bcopy(sc->font_8, data, 8 * 256); return 0; } else return ENXIO; case PIO_FONT8x14: /* set 8x14 dot font */ if (!ISFONTAVAIL(sc->adp->va_flags)) return ENXIO; bcopy(data, sc->font_14, 14 * 256); sc->fonts_loaded |= FONT_14; /* * FONT KLUDGE * Always use the font page #0. XXX * Don't load if the current font size is not 8x14. */ if (ISTEXTSC(sc->cur_scp) && (sc->cur_scp->font_size >= 14) && (sc->cur_scp->font_size < 16)) sc_load_font( sc->cur_scp, 0, 14, 8, sc->font_14, 0, 256); return 0; case GIO_FONT8x14: /* get 8x14 dot font */ if (!ISFONTAVAIL(sc->adp->va_flags)) return ENXIO; if (sc->fonts_loaded & FONT_14) { bcopy(sc->font_14, data, 14 * 256); return 0; } else return ENXIO; case PIO_FONT8x16: /* set 8x16 dot font */ if (!ISFONTAVAIL(sc->adp->va_flags)) return ENXIO; bcopy(data, sc->font_16, 16 * 256); sc->fonts_loaded |= FONT_16; /* * FONT KLUDGE * Always use the font page #0. XXX * Don't load if the current font size is not 8x16. */ if (ISTEXTSC(sc->cur_scp) && (sc->cur_scp->font_size >= 16)) sc_load_font( sc->cur_scp, 0, 16, 8, sc->font_16, 0, 256); return 0; case GIO_FONT8x16: /* get 8x16 dot font */ if (!ISFONTAVAIL(sc->adp->va_flags)) return ENXIO; if (sc->fonts_loaded & FONT_16) { bcopy(sc->font_16, data, 16 * 256); return 0; } else return ENXIO; #endif /* SC_NO_FONT_LOADING */ default: break; } return (ENOIOCTL); } static int consolectl_ioctl( struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { return sctty_ioctl(dev->si_drv1, cmd, data, td); } static int consolectl_close(struct cdev *dev, int flags, int mode, struct thread *td) { #ifndef SC_NO_SYSMOUSE mouse_info_t info; memset(&info, 0, sizeof(info)); info.operation = MOUSE_ACTION; /* * Make sure all buttons are released when moused and other * console daemons exit, so that no buttons are left pressed. 
*/ (void)sctty_ioctl(dev->si_drv1, CONS_MOUSECTL, (caddr_t)&info, td); #endif return (0); } static void sc_cnprobe(struct consdev *cp) { int unit; int flags; if (!vty_enabled(VTY_SC)) { cp->cn_pri = CN_DEAD; return; } cp->cn_pri = sc_get_cons_priority(&unit, &flags); /* a video card is always required */ if (!scvidprobe(unit, flags, TRUE)) cp->cn_pri = CN_DEAD; /* syscons will become console even when there is no keyboard */ sckbdprobe(unit, flags, TRUE); if (cp->cn_pri == CN_DEAD) return; /* initialize required fields */ strcpy(cp->cn_name, "ttyv0"); } static void sc_cninit(struct consdev *cp) { int unit; int flags; sc_get_cons_priority(&unit, &flags); scinit(unit, flags | SC_KERNEL_CONSOLE); sc_console_unit = unit; sc_console = sc_get_stat(sc_get_softc(unit, SC_KERNEL_CONSOLE)->dev[0]); sc_consptr = cp; } static void sc_cnterm(struct consdev *cp) { void *ts; int i; /* we are not the kernel console any more, release everything */ if (sc_console_unit < 0) return; /* shouldn't happen */ #if 0 /* XXX */ sc_clear_screen(sc_console); sccnupdate(sc_console); #endif if (sc_ktsw != NULL) { for (i = 0; i <= mp_maxid; i++) { ts = sc_kts[i]; sc_kts[i] = NULL; (*sc_ktsw->te_term)(sc_console, &ts); free(ts, M_DEVBUF); } sc_ktsw = NULL; } scterm(sc_console_unit, SC_KERNEL_CONSOLE); sc_console_unit = -1; sc_console = NULL; } static void sccnclose(sc_softc_t *sc, struct sc_cnstate *sp); static int sc_cngetc_locked(struct sc_cnstate *sp); static void sccnkbdlock(sc_softc_t *sc, struct sc_cnstate *sp); static void sccnkbdunlock(sc_softc_t *sc, struct sc_cnstate *sp); static void sccnopen(sc_softc_t *sc, struct sc_cnstate *sp, int flags); static void sccnscrlock(sc_softc_t *sc, struct sc_cnstate *sp); static void sccnscrunlock(sc_softc_t *sc, struct sc_cnstate *sp); static void sccnkbdlock(sc_softc_t *sc, struct sc_cnstate *sp) { /* * Locking method: hope for the best. * The keyboard is supposed to be Giant locked. We can't handle that * in general. The kdb_active case here is not safe, and we will * proceed without the lock in all cases. */ sp->kbd_locked = !kdb_active && mtx_trylock(&Giant); } static void sccnkbdunlock(sc_softc_t *sc, struct sc_cnstate *sp) { if (sp->kbd_locked) mtx_unlock(&Giant); sp->kbd_locked = FALSE; } static void sccnscrlock(sc_softc_t *sc, struct sc_cnstate *sp) { int retries; /** * Locking method: * - if kdb_active and video_mtx is not owned by anyone, then lock * by kdb remaining active * - if !kdb_active, try to acquire video_mtx without blocking or * recursing; if we get it then it works normally. * Note that video_mtx is especially unusable if we already own it, * since then it is protecting something and syscons is not reentrant * enough to ignore the protection even in the kdb_active case. */ if (kdb_active) { sp->kdb_locked = sc->video_mtx.mtx_lock == MTX_UNOWNED || SCHEDULER_STOPPED(); sp->mtx_locked = FALSE; } else { sp->kdb_locked = FALSE; for (retries = 0; retries < 1000; retries++) { sp->mtx_locked = mtx_trylock_spin_flags( &sc->video_mtx, MTX_QUIET) != 0; if (SCHEDULER_STOPPED()) { sp->kdb_locked = TRUE; sp->mtx_locked = FALSE; break; } if (sp->mtx_locked) break; DELAY(1); } } } static void sccnscrunlock(sc_softc_t *sc, struct sc_cnstate *sp) { if (sp->mtx_locked) mtx_unlock_spin(&sc->video_mtx); sp->mtx_locked = sp->kdb_locked = FALSE; } static void sccnopen(sc_softc_t *sc, struct sc_cnstate *sp, int flags) { int kbd_mode; /* assert(sc_console_unit >= 0) */ sp->kbd_opened = FALSE; sp->scr_opened = FALSE; sp->kbd_locked = FALSE; /* Opening the keyboard is optional. 
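/*
 * Editor's illustrative aside, not part of the original source:
 * sccnscrlock() above takes the video lock on a best-effort basis,
 * retrying a trylock a bounded number of times so console output can
 * never block forever.  A userspace sketch of that shape follows, using
 * pthreads; the kdb_active special cases have no analogue here.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t video_lock = PTHREAD_MUTEX_INITIALIZER;

/* Try for up to `retries` attempts, pausing briefly between attempts. */
static bool
try_lock_bounded(pthread_mutex_t *m, int retries)
{
	for (int i = 0; i < retries; i++) {
		if (pthread_mutex_trylock(m) == 0)
			return (true);
		usleep(1);	/* like DELAY(1) between attempts */
	}
	return (false);
}

int
main(void)
{
	if (try_lock_bounded(&video_lock, 1000)) {
		printf("screen locked; safe to draw\n");
		pthread_mutex_unlock(&video_lock);
	} else {
		printf("could not lock; defer the output\n");
	}
	return (0);
}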
*/ if (!(flags & 1) || sc->kbd == NULL) goto over_keyboard; sccnkbdlock(sc, sp); /* * Make sure the keyboard is accessible even when the kbd device * driver is disabled. */ kbdd_enable(sc->kbd); /* Switch the keyboard to console mode (K_XLATE, polled) on all scp's. */ kbd_mode = K_XLATE; (void)kbdd_ioctl(sc->kbd, KDSKBMODE, (caddr_t)&kbd_mode); sc->kbd_open_level++; kbdd_poll(sc->kbd, TRUE); sp->kbd_opened = TRUE; over_keyboard:; /* The screen is opened iff locking it succeeds. */ sccnscrlock(sc, sp); if (!sp->kdb_locked && !sp->mtx_locked) return; sp->scr_opened = TRUE; /* The screen switch is optional. */ if (!(flags & 2)) return; /* try to switch to the kernel console screen */ if (!cold && sc->cur_scp->index != sc_console->index && sc->cur_scp->smode.mode == VT_AUTO && sc_console->smode.mode == VT_AUTO) sc_switch_scr(sc, sc_console->index); } static void sccnclose(sc_softc_t *sc, struct sc_cnstate *sp) { sp->scr_opened = FALSE; sccnscrunlock(sc, sp); if (!sp->kbd_opened) return; /* Restore keyboard mode (for the current, possibly-changed scp). */ kbdd_poll(sc->kbd, FALSE); if (--sc->kbd_open_level == 0) (void)kbdd_ioctl( sc->kbd, KDSKBMODE, (caddr_t)&sc->cur_scp->kbd_mode); kbdd_disable(sc->kbd); sp->kbd_opened = FALSE; sccnkbdunlock(sc, sp); } /* * Grabbing switches the screen and keyboard focus to sc_console and the * keyboard mode to (K_XLATE, polled). Only switching to polled mode is * essential (for preventing the interrupt handler from eating input * between polls). Focus is part of the UI, and the other switches are * work just was well when they are done on every entry and exit. * * Screen switches while grabbed are supported, and to maintain focus for * this ungrabbing and closing only restore the polling state and then * the keyboard mode if on the original screen. */ static void sc_cngrab(struct consdev *cp) { sc_softc_t *sc; int lev; sc = sc_console->sc; lev = atomic_fetchadd_int(&sc->grab_level, 1); if (lev >= 0 && lev < 2) { sccnopen(sc, &sc->grab_state[lev], 1 | 2); sccnscrunlock(sc, &sc->grab_state[lev]); sccnkbdunlock(sc, &sc->grab_state[lev]); } } static void sc_cnungrab(struct consdev *cp) { sc_softc_t *sc; int lev; sc = sc_console->sc; lev = atomic_load_acq_int(&sc->grab_level) - 1; if (lev >= 0 && lev < 2) { sccnkbdlock(sc, &sc->grab_state[lev]); sccnscrlock(sc, &sc->grab_state[lev]); sccnclose(sc, &sc->grab_state[lev]); } atomic_add_int(&sc->grab_level, -1); } static char sc_cnputc_log[0x1000]; static u_int sc_cnputc_loghead; static u_int sc_cnputc_logtail; static void sc_cnputc(struct consdev *cd, int c) { struct sc_cnstate st; u_char buf[1]; scr_stat *scp = sc_console; void *oldts, *ts; struct sc_term_sw *oldtsw; #ifndef SC_NO_HISTORY #if 0 struct tty *tp; #endif #endif /* !SC_NO_HISTORY */ u_int head; int s; /* assert(sc_console != NULL) */ sccnopen(scp->sc, &st, 0); /* * Log the output. * * In the unlocked case, the logging is intentionally only * perfectly atomic for the indexes. */ head = atomic_fetchadd_int(&sc_cnputc_loghead, 1); sc_cnputc_log[head % sizeof(sc_cnputc_log)] = c; /* * If we couldn't open, do special reentrant output and return to defer * normal output. 
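/*
 * Illustrative sketch, not from the original source: sc_cngrab() and
 * sc_cnungrab() above keep a nesting count, and only the first two levels
 * actually save and restore console state; deeper grabs are no-ops.  A
 * self-contained model of that counting follows.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MAXSAVED 2	/* like the two-entry grab_state[] array */

static _Atomic int grab_level;

/* Grab: fetch-and-add returns the previous level; only 0 and 1 do work. */
static void
grab(void)
{
	int lev = atomic_fetch_add(&grab_level, 1);

	if (lev >= 0 && lev < MAXSAVED)
		printf("level %d: open console state\n", lev);
	else
		printf("level %d: nested grab, nothing to do\n", lev);
}

/* Ungrab: mirror image, restoring state for the matching level. */
static void
ungrab(void)
{
	int lev = atomic_load(&grab_level) - 1;

	if (lev >= 0 && lev < MAXSAVED)
		printf("level %d: close console state\n", lev);
	atomic_fetch_add(&grab_level, -1);
}

int
main(void)
{
	grab(); grab(); grab();		/* the third grab is a no-op */
	ungrab(); ungrab(); ungrab();
	return (0);
}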
*/ if (!st.scr_opened) { ec_putc(c); return; } #ifndef SC_NO_HISTORY if (scp == scp->sc->cur_scp && scp->status & SLKED) { scp->status &= ~SLKED; update_kbd_state(scp, scp->status, SLKED); if (scp->status & BUFFER_SAVED) { if (!sc_hist_restore(scp)) sc_remove_cutmarking(scp); scp->status &= ~BUFFER_SAVED; scp->status |= CURSOR_ENABLED; sc_draw_cursor_image(scp); } #if 0 /* * XXX: Now that TTY's have their own locks, we cannot process * any data after disabling scroll lock. cnputs already holds a * spinlock. */ tp = SC_DEV(scp->sc, scp->index); /* XXX "tp" can be NULL */ tty_lock(tp); if (tty_opened(tp)) sctty_outwakeup(tp); tty_unlock(tp); #endif } #endif /* !SC_NO_HISTORY */ /* Play any output still in the log (our char may already be done). */ while (sc_cnputc_logtail != atomic_load_acq_int(&sc_cnputc_loghead)) { buf[0] = sc_cnputc_log[sc_cnputc_logtail++ % sizeof(sc_cnputc_log)]; if (atomic_load_acq_int(&sc_cnputc_loghead) - sc_cnputc_logtail >= sizeof(sc_cnputc_log)) continue; /* Console output has a per-CPU "input" state. Switch for it. */ ts = sc_kts[curcpu]; if (ts != NULL) { oldtsw = scp->tsw; oldts = scp->ts; scp->tsw = sc_ktsw; scp->ts = ts; (*scp->tsw->te_sync)(scp); } else { /* Only 1 tsw early. Switch only its attr. */ (*scp->tsw->te_default_attr)( scp, sc_kattrtab[curcpu], SC_KERNEL_CONS_REV_ATTR); } sc_puts(scp, buf, 1); if (ts != NULL) { scp->tsw = oldtsw; scp->ts = oldts; (*scp->tsw->te_sync)(scp); } else { (*scp->tsw->te_default_attr)( scp, SC_KERNEL_CONS_ATTR, SC_KERNEL_CONS_REV_ATTR); } } s = spltty(); /* block sckbdevent and scrn_timer */ sccnupdate(scp); splx(s); sccnclose(scp->sc, &st); } static int sc_cngetc(struct consdev *cd) { struct sc_cnstate st; int c, s; /* assert(sc_console != NULL) */ sccnopen(sc_console->sc, &st, 1); s = spltty(); /* block sckbdevent and scrn_timer while we poll */ if (!st.kbd_opened) { splx(s); sccnclose(sc_console->sc, &st); return -1; /* means no keyboard since we fudged the locking */ } c = sc_cngetc_locked(&st); splx(s); sccnclose(sc_console->sc, &st); return c; } static int sc_cngetc_locked(struct sc_cnstate *sp) { static struct fkeytab fkey; static int fkeycp; scr_stat *scp; const u_char *p; int c; /* * Stop the screen saver and update the screen if necessary. * What if we have been running in the screen saver code... XXX */ if (sp->scr_opened) sc_touch_scrn_saver(); scp = sc_console->sc->cur_scp; /* XXX */ if (sp->scr_opened) sccnupdate(scp); if (fkeycp < fkey.len) return fkey.str[fkeycp++]; c = scgetc(scp->sc, SCGETC_CN | SCGETC_NONBLOCK, sp); switch (KEYFLAGS(c)) { case 0: /* normal char */ return KEYCHAR(c); case FKEY: /* function key */ p = (*scp->tsw->te_fkeystr)(scp, c); if (p != NULL) { fkey.len = strlen(p); bcopy(p, fkey.str, fkey.len); fkeycp = 1; return fkey.str[0]; } p = kbdd_get_fkeystr( scp->sc->kbd, KEYCHAR(c), (size_t *)&fkeycp); fkey.len = fkeycp; if ((p != NULL) && (fkey.len > 0)) { bcopy(p, fkey.str, fkey.len); fkeycp = 1; return fkey.str[0]; } return c; /* XXX */ case NOKEY: case ERRKEY: default: return -1; } /* NOT REACHED */ } static void sccnupdate(scr_stat *scp) { /* this is a cut-down version of scrn_timer()... 
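/*
 * Editor's illustrative aside, not part of the original source:
 * sc_cnputc() above appends every character to a small circular log with
 * a fetch-and-add on the head index, and the drain loop replays from the
 * tail, skipping any slot that has already been overwritten.  A compact
 * standalone model follows; the 16-byte log is deliberately smaller than
 * the message so the overwrite-and-skip path is exercised.
 */
#include <stdatomic.h>
#include <stdio.h>

#define LOGSZ 16	/* ring size; the real log is much larger */

static char logbuf[LOGSZ];
static _Atomic unsigned int loghead;
static unsigned int logtail;

/* Producer: claim a slot with fetch-and-add and store the character. */
static void
log_putc(char c)
{
	unsigned int head = atomic_fetch_add(&loghead, 1);

	logbuf[head % LOGSZ] = c;
}

/* Consumer: replay everything still live, dropping overwritten slots. */
static void
log_drain(void)
{
	while (logtail != atomic_load(&loghead)) {
		char c = logbuf[logtail++ % LOGSZ];

		if (atomic_load(&loghead) - logtail >= LOGSZ)
			continue;	/* slot already reused; skip it */
		putchar(c);
	}
}

int
main(void)
{
	const char *msg = "deferred console output\n";

	for (const char *p = msg; *p != '\0'; p++)
		log_putc(*p);
	log_drain();	/* prints only the tail that survived in the ring */
	return (0);
}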
*/ if (suspend_in_progress || scp->sc->font_loading_in_progress) return; if (kdb_active || panicstr || shutdown_in_progress) { sc_touch_scrn_saver(); } else if (scp != scp->sc->cur_scp) { return; } if (!run_scrn_saver) scp->sc->flags &= ~SC_SCRN_IDLE; #ifdef DEV_SPLASH if ((saver_mode != CONS_LKM_SAVER) || !(scp->sc->flags & SC_SCRN_IDLE)) if (scp->sc->flags & SC_SCRN_BLANKED) stop_scrn_saver(scp->sc, current_saver); #endif if (scp != scp->sc->cur_scp || scp->sc->blink_in_progress || scp->sc->switch_in_progress) return; /* * FIXME: unlike scrn_timer(), we call scrn_update() from here even * when write_in_progress is non-zero. XXX */ if (!ISGRAPHSC(scp) && !(scp->sc->flags & SC_SCRN_BLANKED)) scrn_update(scp, TRUE); } static void scrn_timer(void *arg) { static time_t kbd_time_stamp = 0; sc_softc_t *sc; scr_stat *scp; int again, rate; again = (arg != NULL); if (arg != NULL) sc = (sc_softc_t *)arg; else if (sc_console != NULL) sc = sc_console->sc; else return; /* find the vty to update */ scp = sc->cur_scp; /* don't do anything when we are performing some I/O operations */ if (suspend_in_progress || sc->font_loading_in_progress) goto done; if ((sc->kbd == NULL) && (sc->config & SC_AUTODETECT_KBD)) { /* try to allocate a keyboard automatically */ if (kbd_time_stamp != time_uptime) { kbd_time_stamp = time_uptime; sc->keyboard = sc_allocate_keyboard(sc, -1); if (sc->keyboard >= 0) { sc->kbd = kbd_get_keyboard(sc->keyboard); (void)kbdd_ioctl(sc->kbd, KDSKBMODE, (caddr_t)&sc->cur_scp->kbd_mode); update_kbd_state(sc->cur_scp, sc->cur_scp->status, LOCK_MASK); } } } /* should we stop the screen saver? */ if (kdb_active || panicstr || shutdown_in_progress) sc_touch_scrn_saver(); if (run_scrn_saver) { if (time_uptime > sc->scrn_time_stamp + scrn_blank_time) sc->flags |= SC_SCRN_IDLE; else sc->flags &= ~SC_SCRN_IDLE; } else { sc->scrn_time_stamp = time_uptime; sc->flags &= ~SC_SCRN_IDLE; if (scrn_blank_time > 0) run_scrn_saver = TRUE; } #ifdef DEV_SPLASH if ((saver_mode != CONS_LKM_SAVER) || !(sc->flags & SC_SCRN_IDLE)) if (sc->flags & SC_SCRN_BLANKED) stop_scrn_saver(sc, current_saver); #endif /* should we just return ? */ if (sc->blink_in_progress || sc->switch_in_progress || sc->write_in_progress) goto done; /* Update the screen */ scp = sc->cur_scp; /* cur_scp may have changed... */ if (!ISGRAPHSC(scp) && !(sc->flags & SC_SCRN_BLANKED)) scrn_update(scp, TRUE); #ifdef DEV_SPLASH /* should we activate the screen saver? */ if ((saver_mode == CONS_LKM_SAVER) && (sc->flags & SC_SCRN_IDLE)) if (!ISGRAPHSC(scp) || (sc->flags & SC_SCRN_BLANKED)) (*current_saver)(sc, TRUE); #endif done: if (again) { /* * Use reduced "refresh" rate if we are in graphics and that is * not a graphical screen saver. In such case we just have * nothing to do. 
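/*
 * Illustrative sketch, not from the original source: scrn_timer() above
 * marks the screen idle once time_uptime has advanced past the last
 * activity stamp plus the configured blank time, and the branch that
 * follows then drops the refresh rate.  A minimal standalone version of
 * the idle test follows, using wall-clock time instead of the kernel's
 * time_uptime.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static time_t last_activity;	/* like sc->scrn_time_stamp */
static long blank_time = 300;	/* like scrn_blank_time, in seconds */

/* Call on any user input: the screen is busy again. */
static void
touch_screen(void)
{
	last_activity = time(NULL);
}

/* Periodic check: idle only if a timeout is set and it has elapsed. */
static bool
screen_idle(void)
{
	return (blank_time > 0 && time(NULL) > last_activity + blank_time);
}

int
main(void)
{
	touch_screen();
	printf("idle right after activity? %s\n", screen_idle() ? "yes" : "no");
	return (0);
}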
*/ if (ISGRAPHSC(scp) && !(sc->flags & SC_SCRN_BLANKED)) rate = 2; else rate = 30; callout_reset_sbt( &sc->ctimeout, SBT_1S / rate, 0, scrn_timer, sc, C_PREL(1)); } } static int and_region(int *s1, int *e1, int s2, int e2) { if (*e1 < s2 || e2 < *s1) return FALSE; *s1 = imax(*s1, s2); *e1 = imin(*e1, e2); return TRUE; } static void scrn_update(scr_stat *scp, int show_cursor) { int start; int end; int s; int e; /* assert(scp == scp->sc->cur_scp) */ SC_VIDEO_LOCK(scp->sc); #ifndef SC_NO_CUTPASTE /* remove the previous mouse pointer image if necessary */ if (scp->status & MOUSE_VISIBLE) { s = scp->mouse_pos; e = scp->mouse_pos + scp->xsize + 1; if ((scp->status & (MOUSE_MOVED | MOUSE_HIDDEN)) || and_region(&s, &e, scp->start, scp->end) || ((scp->status & CURSOR_ENABLED) && (scp->cursor_pos != scp->cursor_oldpos) && (and_region(&s, &e, scp->cursor_pos, scp->cursor_pos) || and_region(&s, &e, scp->cursor_oldpos, scp->cursor_oldpos)))) { sc_remove_mouse_image(scp); if (scp->end >= scp->xsize * scp->ysize) scp->end = scp->xsize * scp->ysize - 1; } } #endif /* !SC_NO_CUTPASTE */ #if 1 /* debug: XXX */ if (scp->end >= scp->xsize * scp->ysize) { printf("scrn_update(): scp->end %d > size_of_screen!!\n", scp->end); scp->end = scp->xsize * scp->ysize - 1; } if (scp->start < 0) { printf("scrn_update(): scp->start %d < 0\n", scp->start); scp->start = 0; } #endif /* update screen image */ if (scp->start <= scp->end) { if (scp->mouse_cut_end >= 0) { /* there is a marked region for cut & paste */ if (scp->mouse_cut_start <= scp->mouse_cut_end) { start = scp->mouse_cut_start; end = scp->mouse_cut_end; } else { start = scp->mouse_cut_end; end = scp->mouse_cut_start - 1; } s = start; e = end; /* does the cut-mark region overlap with the update * region? */ if (and_region(&s, &e, scp->start, scp->end)) { (*scp->rndr->draw)(scp, s, e - s + 1, TRUE); s = 0; e = start - 1; if (and_region(&s, &e, scp->start, scp->end)) (*scp->rndr->draw)( scp, s, e - s + 1, FALSE); s = end + 1; e = scp->xsize * scp->ysize - 1; if (and_region(&s, &e, scp->start, scp->end)) (*scp->rndr->draw)( scp, s, e - s + 1, FALSE); } else { (*scp->rndr->draw)(scp, scp->start, scp->end - scp->start + 1, FALSE); } } else { (*scp->rndr->draw)( scp, scp->start, scp->end - scp->start + 1, FALSE); } } /* we are not to show the cursor and the mouse pointer... */ if (!show_cursor) { scp->end = 0; scp->start = scp->xsize * scp->ysize - 1; SC_VIDEO_UNLOCK(scp->sc); return; } /* update cursor image */ if (scp->status & CURSOR_ENABLED) { s = scp->start; e = scp->end; /* did cursor move since last time ? */ if (scp->cursor_pos != scp->cursor_oldpos) { /* do we need to remove old cursor image ? 
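 * and_region() above narrows (*s1, *e1) to its overlap with (s2, e2), so
 * a single-cell probe like and_region(&s, &e, pos, pos) returns TRUE
 * exactly when pos lies inside the pending update range s..e.  A FALSE
 * result for the old position therefore means the update will not repaint
 * that cell, and the stale cursor image has to be removed explicitly.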
*/ if (!and_region( &s, &e, scp->cursor_oldpos, scp->cursor_oldpos)) sc_remove_cursor_image(scp); sc_draw_cursor_image(scp); } else { if (and_region( &s, &e, scp->cursor_pos, scp->cursor_pos)) /* cursor didn't move, but has been overwritten */ sc_draw_cursor_image(scp); else if (scp->curs_attr.flags & CONS_BLINK_CURSOR) /* if it's a blinking cursor, update it */ (*scp->rndr->blink_cursor)(scp, scp->cursor_pos, sc_inside_cutmark(scp, scp->cursor_pos)); } } #ifndef SC_NO_CUTPASTE /* update "pseudo" mouse pointer image */ if (scp->sc->flags & SC_MOUSE_ENABLED) { if (!(scp->status & (MOUSE_VISIBLE | MOUSE_HIDDEN))) { scp->status &= ~MOUSE_MOVED; sc_draw_mouse_image(scp); } } #endif /* SC_NO_CUTPASTE */ scp->end = 0; scp->start = scp->xsize * scp->ysize - 1; SC_VIDEO_UNLOCK(scp->sc); } #ifdef DEV_SPLASH static int scsplash_callback(int event, void *arg) { sc_softc_t *sc; int error; sc = (sc_softc_t *)arg; switch (event) { case SPLASH_INIT: if (add_scrn_saver(scsplash_saver) == 0) { sc->flags &= ~SC_SAVER_FAILED; run_scrn_saver = TRUE; if (cold && !(boothowto & RB_VERBOSE)) { scsplash_stick(TRUE); (*current_saver)(sc, TRUE); } } return 0; case SPLASH_TERM: if (current_saver == scsplash_saver) { scsplash_stick(FALSE); error = remove_scrn_saver(scsplash_saver); if (error) return error; } return 0; default: return EINVAL; } } static void scsplash_saver(sc_softc_t *sc, int show) { static int busy = FALSE; scr_stat *scp; if (busy) return; busy = TRUE; scp = sc->cur_scp; if (show) { if (!(sc->flags & SC_SAVER_FAILED)) { if (!(sc->flags & SC_SCRN_BLANKED)) set_scrn_saver_mode(scp, -1, NULL, 0); switch (splash(sc->adp, TRUE)) { case 0: /* succeeded */ break; case EAGAIN: /* try later */ restore_scrn_saver_mode(scp, FALSE); sc_touch_scrn_saver(); /* XXX */ break; default: sc->flags |= SC_SAVER_FAILED; scsplash_stick(FALSE); restore_scrn_saver_mode(scp, TRUE); printf( "scsplash_saver(): failed to put up the image\n"); break; } } } else if (!sticky_splash) { if ((sc->flags & SC_SCRN_BLANKED) && (splash(sc->adp, FALSE) == 0)) restore_scrn_saver_mode(scp, TRUE); } busy = FALSE; } static int add_scrn_saver(void (*this_saver)(sc_softc_t *, int)) { #if 0 int error; if (current_saver != none_saver) { error = remove_scrn_saver(current_saver); if (error) return error; } #endif if (current_saver != none_saver) return EBUSY; run_scrn_saver = FALSE; saver_mode = CONS_LKM_SAVER; current_saver = this_saver; return 0; } static int remove_scrn_saver(void (*this_saver)(sc_softc_t *, int)) { if (current_saver != this_saver) return EINVAL; #if 0 /* * In order to prevent `current_saver' from being called by * the timeout routine `scrn_timer()' while we manipulate * the saver list, we shall set `current_saver' to `none_saver' * before stopping the current saver, rather than blocking by `splXX()'. 
*/ current_saver = none_saver; if (scrn_blanked) stop_scrn_saver(this_saver); #endif /* unblank all blanked screens */ wait_scrn_saver_stop(NULL); if (scrn_blanked) return EBUSY; current_saver = none_saver; return 0; } static int set_scrn_saver_mode(scr_stat *scp, int mode, u_char *pal, int border) { int s; /* assert(scp == scp->sc->cur_scp) */ s = spltty(); if (!ISGRAPHSC(scp)) sc_remove_cursor_image(scp); scp->splash_save_mode = scp->mode; scp->splash_save_status = scp->status & (GRAPHICS_MODE | PIXEL_MODE); scp->status &= ~(GRAPHICS_MODE | PIXEL_MODE); scp->status |= (UNKNOWN_MODE | SAVER_RUNNING); scp->sc->flags |= SC_SCRN_BLANKED; ++scrn_blanked; splx(s); if (mode < 0) return 0; scp->mode = mode; if (set_mode(scp) == 0) { if (scp->sc->adp->va_info.vi_flags & V_INFO_GRAPHICS) scp->status |= GRAPHICS_MODE; #ifndef SC_NO_PALETTE_LOADING if (pal != NULL) vidd_load_palette(scp->sc->adp, pal); #endif sc_set_border(scp, border); return 0; } else { s = spltty(); scp->mode = scp->splash_save_mode; scp->status &= ~(UNKNOWN_MODE | SAVER_RUNNING); scp->status |= scp->splash_save_status; splx(s); return 1; } } static int restore_scrn_saver_mode(scr_stat *scp, int changemode) { int mode; int status; int s; /* assert(scp == scp->sc->cur_scp) */ s = spltty(); mode = scp->mode; status = scp->status; scp->mode = scp->splash_save_mode; scp->status &= ~(UNKNOWN_MODE | SAVER_RUNNING); scp->status |= scp->splash_save_status; scp->sc->flags &= ~SC_SCRN_BLANKED; if (!changemode) { if (!ISGRAPHSC(scp)) sc_draw_cursor_image(scp); --scrn_blanked; splx(s); return 0; } if (set_mode(scp) == 0) { #ifndef SC_NO_PALETTE_LOADING #ifdef SC_PIXEL_MODE if (scp->sc->adp->va_info.vi_mem_model == V_INFO_MM_DIRECT) vidd_load_palette(scp->sc->adp, scp->sc->palette2); else #endif vidd_load_palette(scp->sc->adp, scp->sc->palette); #endif --scrn_blanked; splx(s); return 0; } else { scp->mode = mode; scp->status = status; splx(s); return 1; } } static void stop_scrn_saver(sc_softc_t *sc, void (*saver)(sc_softc_t *, int)) { (*saver)(sc, FALSE); run_scrn_saver = FALSE; /* the screen saver may have chosen not to stop after all... */ if (sc->flags & SC_SCRN_BLANKED) return; mark_all(sc->cur_scp); if (sc->delayed_next_scr) sc_switch_scr(sc, sc->delayed_next_scr - 1); if (!kdb_active) wakeup(&scrn_blanked); } static int wait_scrn_saver_stop(sc_softc_t *sc) { int error = 0; while (scrn_blanked > 0) { run_scrn_saver = FALSE; if (sc && !(sc->flags & SC_SCRN_BLANKED)) { error = 0; break; } error = tsleep(&scrn_blanked, PZERO | PCATCH, "scrsav", 0); if ((error != 0) && (error != ERESTART)) break; } run_scrn_saver = FALSE; return error; } #endif /* DEV_SPLASH */ void sc_touch_scrn_saver(void) { scsplash_stick(FALSE); run_scrn_saver = FALSE; } int sc_switch_scr(sc_softc_t *sc, u_int next_scr) { scr_stat *cur_scp; struct tty *tp; struct proc *p; int s; DPRINTF(5, ("sc0: sc_switch_scr() %d ", next_scr + 1)); if (sc->cur_scp == NULL) return (0); /* prevent switch if previously requested */ if (sc->flags & SC_SCRN_VTYLOCK) { sc_bell(sc->cur_scp, sc->cur_scp->bell_pitch, sc->cur_scp->bell_duration); return EPERM; } /* delay switch if the screen is blanked or being updated */ if ((sc->flags & SC_SCRN_BLANKED) || sc->write_in_progress || sc->blink_in_progress) { sc->delayed_next_scr = next_scr + 1; sc_touch_scrn_saver(); DPRINTF(5, ("switch delayed\n")); return 0; } sc->delayed_next_scr = 0; s = spltty(); cur_scp = sc->cur_scp; /* we are in the middle of the vty switching process... 
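 * A rough sketch of the handshake this block recovers from, assuming the
 * standard <sys/consio.h> VT_PROCESS interface (the ioctls shown are the
 * userland side and are not part of this file):
 *
 *	struct vt_mode m = { .mode = VT_PROCESS,
 *	    .relsig = SIGUSR1, .acqsig = SIGUSR2 };
 *	ioctl(fd, VT_SETMODE, &m);
 *	on relsig:  ioctl(fd, VT_RELDISP, VT_TRUE);	releases the vty
 *	on acqsig:  ioctl(fd, VT_RELDISP, VT_ACKACQ);	acknowledges it
 *
 * SWITCH_WAIT_REL and SWITCH_WAIT_ACQ record which of those answers is
 * still outstanding; the code below copes with a controlling process that
 * died or stopped answering while one of them was pending.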
*/ if (sc->switch_in_progress && (cur_scp->smode.mode == VT_PROCESS) && cur_scp->proc) { p = pfind(cur_scp->pid); if (cur_scp->proc != p) { if (p) PROC_UNLOCK(p); /* * The controlling process has died!!. Do some clean * up. NOTE:`cur_scp->proc' and `cur_scp->smode.mode' * are not reset here yet; they will be cleared later. */ DPRINTF(5, ("cur_scp controlling process %d died, ", cur_scp->pid)); if (cur_scp->status & SWITCH_WAIT_REL) { /* * Force the previous switch to finish, but * return now with error. */ DPRINTF(5, ("reset WAIT_REL, ")); finish_vt_rel(cur_scp, TRUE, &s); splx(s); DPRINTF(5, ("finishing previous switch\n")); return EINVAL; } else if (cur_scp->status & SWITCH_WAIT_ACQ) { /* let's assume screen switch has been * completed. */ DPRINTF(5, ("reset WAIT_ACQ, ")); finish_vt_acq(cur_scp); } else { /* * We are in between screen release and * acquisition, and reached here via scgetc() or * scrn_timer() which has interrupted * exchange_scr(). Don't do anything stupid. */ DPRINTF(5, ("waiting nothing, ")); } } else { if (p) PROC_UNLOCK(p); /* * The controlling process is alive, but not * responding... It is either buggy or it may be just * taking time. The following code is a gross kludge to * cope with this problem for which there is no clean * solution. XXX */ if (cur_scp->status & SWITCH_WAIT_REL) { switch (sc->switch_in_progress++) { case 1: break; case 2: DPRINTF(5, ("sending relsig again, ")); signal_vt_rel(cur_scp); break; case 3: break; case 4: default: /* * Act as if the controlling program * returned VT_FALSE. */ DPRINTF(5, ("force reset WAIT_REL, ")); finish_vt_rel(cur_scp, FALSE, &s); splx(s); DPRINTF(5, ("act as if VT_FALSE was seen\n")); return EINVAL; } } else if (cur_scp->status & SWITCH_WAIT_ACQ) { switch (sc->switch_in_progress++) { case 1: break; case 2: DPRINTF(5, ("sending acqsig again, ")); signal_vt_acq(cur_scp); break; case 3: break; case 4: default: /* clear the flag and finish the * previous switch */ DPRINTF(5, ("force reset WAIT_ACQ, ")); finish_vt_acq(cur_scp); break; } } } } /* * Return error if an invalid argument is given, or vty switch * is still in progress. */ if ((next_scr < sc->first_vty) || (next_scr >= sc->first_vty + sc->vtys) || sc->switch_in_progress) { splx(s); sc_bell(cur_scp, bios_value.bell_pitch, BELL_DURATION); DPRINTF(5, ("error 1\n")); return EINVAL; } /* * Don't allow switching away from the graphics mode vty * if the switch mode is VT_AUTO, unless the next vty is the same * as the current or the current vty has been closed (but showing). */ tp = SC_DEV(sc, cur_scp->index); if ((cur_scp->index != next_scr) && tty_opened_ns(tp) && (cur_scp->smode.mode == VT_AUTO) && ISGRAPHSC(cur_scp)) { splx(s); sc_bell(cur_scp, bios_value.bell_pitch, BELL_DURATION); DPRINTF(5, ("error, graphics mode\n")); return EINVAL; } /* * Is the wanted vty open? Don't allow switching to a closed vty. * If we are in DDB, don't switch to a vty in the VT_PROCESS mode. * Note that we always allow the user to switch to the kernel * console even if it is closed. */ if ((sc_console == NULL) || (next_scr != sc_console->index)) { tp = SC_DEV(sc, next_scr); if (!tty_opened_ns(tp)) { splx(s); sc_bell(cur_scp, bios_value.bell_pitch, BELL_DURATION); DPRINTF(5, ("error 2, requested vty isn't open!\n")); return EINVAL; } if (kdb_active && SC_STAT(tp)->smode.mode == VT_PROCESS) { splx(s); DPRINTF(5, ("error 3, requested vty is in the VT_PROCESS mode\n")); return EINVAL; } } /* this is the start of vty switching process... 
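 * The uncontested sequence from here on is: record old_scp/new_scp, check
 * that both controlling processes are still alive, signal_vt_rel() the
 * old vty (returning early if we must now wait for its release),
 * exchange_scr() to swap the hardware state, wake up anyone sleeping on
 * the new vty, then signal_vt_acq() the new vty (again returning early if
 * we must wait for its acknowledgement) before switch_in_progress is
 * cleared.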
*/ ++sc->switch_in_progress; sc->old_scp = cur_scp; sc->new_scp = sc_get_stat(SC_DEV(sc, next_scr)); if (sc->new_scp == sc->old_scp) { sc->switch_in_progress = 0; /* * XXX wakeup() locks the scheduler lock which will hang if * the lock is in an in-between state, e.g., when we stop at * a breakpoint at fork_exit. It has always been wrong to call * wakeup() when the debugger is active. In RELENG_4, wakeup() * is supposed to be locked by splhigh(), but the debugger may * be invoked at splhigh(). */ if (!kdb_active) wakeup(VTY_WCHAN(sc, next_scr)); splx(s); DPRINTF(5, ("switch done (new == old)\n")); return 0; } /* has controlling process died? */ vt_proc_alive(sc->old_scp); vt_proc_alive(sc->new_scp); /* wait for the controlling process to release the screen, if necessary */ if (signal_vt_rel(sc->old_scp)) { splx(s); return 0; } /* go set up the new vty screen */ splx(s); exchange_scr(sc); s = spltty(); /* wake up processes waiting for this vty */ if (!kdb_active) wakeup(VTY_WCHAN(sc, next_scr)); /* wait for the controlling process to acknowledge, if necessary */ if (signal_vt_acq(sc->cur_scp)) { splx(s); return 0; } sc->switch_in_progress = 0; if (sc->unit == sc_console_unit) cnavailable(sc_consptr, TRUE); splx(s); DPRINTF(5, ("switch done\n")); return 0; } static int do_switch_scr(sc_softc_t *sc, int s) { vt_proc_alive(sc->new_scp); splx(s); exchange_scr(sc); s = spltty(); /* sc->cur_scp == sc->new_scp */ wakeup(VTY_WCHAN(sc, sc->cur_scp->index)); /* wait for the controlling process to acknowledge, if necessary */ if (!signal_vt_acq(sc->cur_scp)) { sc->switch_in_progress = 0; if (sc->unit == sc_console_unit) cnavailable(sc_consptr, TRUE); } return s; } static int vt_proc_alive(scr_stat *scp) { struct proc *p; if (scp->proc) { if ((p = pfind(scp->pid)) != NULL) PROC_UNLOCK(p); if (scp->proc == p) return TRUE; scp->proc = NULL; scp->smode.mode = VT_AUTO; DPRINTF(5, ("vt controlling process %d died\n", scp->pid)); } return FALSE; } static int signal_vt_rel(scr_stat *scp) { if (scp->smode.mode != VT_PROCESS) return FALSE; scp->status |= SWITCH_WAIT_REL; PROC_LOCK(scp->proc); kern_psignal(scp->proc, scp->smode.relsig); PROC_UNLOCK(scp->proc); DPRINTF(5, ("sending relsig to %d\n", scp->pid)); return TRUE; } static int signal_vt_acq(scr_stat *scp) { if (scp->smode.mode != VT_PROCESS) return FALSE; if (scp->sc->unit == sc_console_unit) cnavailable(sc_consptr, FALSE); scp->status |= SWITCH_WAIT_ACQ; PROC_LOCK(scp->proc); kern_psignal(scp->proc, scp->smode.acqsig); PROC_UNLOCK(scp->proc); DPRINTF(5, ("sending acqsig to %d\n", scp->pid)); return TRUE; } static int finish_vt_rel(scr_stat *scp, int release, int *s) { if (scp == scp->sc->old_scp && scp->status & SWITCH_WAIT_REL) { scp->status &= ~SWITCH_WAIT_REL; if (release) *s = do_switch_scr(scp->sc, *s); else scp->sc->switch_in_progress = 0; return 0; } return EINVAL; } static int finish_vt_acq(scr_stat *scp) { if (scp == scp->sc->new_scp && scp->status & SWITCH_WAIT_ACQ) { scp->status &= ~SWITCH_WAIT_ACQ; scp->sc->switch_in_progress = 0; return 0; } return EINVAL; } static void exchange_scr(sc_softc_t *sc) { scr_stat *scp; /* save the current state of video and keyboard */ sc_move_cursor(sc->old_scp, sc->old_scp->xpos, sc->old_scp->ypos); if (!ISGRAPHSC(sc->old_scp)) sc_remove_cursor_image(sc->old_scp); if (sc->old_scp->kbd_mode == K_XLATE) save_kbd_state(sc->old_scp); /* set up the video for the new screen */ scp = sc->cur_scp = sc->new_scp; if (sc->old_scp->mode != scp->mode || ISUNKNOWNSC(sc->old_scp)) set_mode(scp); #ifndef __sparc64__ else 
sc_vtb_init(&scp->scr, VTB_FRAMEBUFFER, scp->xsize, scp->ysize, (void *)sc->adp->va_window, FALSE); #endif scp->status |= MOUSE_HIDDEN; sc_move_cursor(scp, scp->xpos, scp->ypos); if (!ISGRAPHSC(scp)) sc_set_cursor_image(scp); #ifndef SC_NO_PALETTE_LOADING if (ISGRAPHSC(sc->old_scp)) { #ifdef SC_PIXEL_MODE if (sc->adp->va_info.vi_mem_model == V_INFO_MM_DIRECT) vidd_load_palette(sc->adp, sc->palette2); else #endif vidd_load_palette(sc->adp, sc->palette); } #endif sc_set_border(scp, scp->border); /* set up the keyboard for the new screen */ if (sc->kbd_open_level == 0 && sc->old_scp->kbd_mode != scp->kbd_mode) (void)kbdd_ioctl(sc->kbd, KDSKBMODE, (caddr_t)&scp->kbd_mode); update_kbd_state(scp, scp->status, LOCK_MASK); mark_all(scp); } static void sc_puts(scr_stat *scp, u_char *buf, int len) { #ifdef DEV_SPLASH /* make screensaver happy */ if (!sticky_splash && scp == scp->sc->cur_scp && !sc_saver_keyb_only) run_scrn_saver = FALSE; #endif if (scp->tsw) (*scp->tsw->te_puts)(scp, buf, len); if (scp->sc->delayed_next_scr) sc_switch_scr(scp->sc, scp->sc->delayed_next_scr - 1); } void sc_draw_cursor_image(scr_stat *scp) { /* assert(scp == scp->sc->cur_scp); */ SC_VIDEO_LOCK(scp->sc); (*scp->rndr->draw_cursor)(scp, scp->cursor_pos, scp->curs_attr.flags & CONS_BLINK_CURSOR, TRUE, sc_inside_cutmark(scp, scp->cursor_pos)); scp->cursor_oldpos = scp->cursor_pos; SC_VIDEO_UNLOCK(scp->sc); } void sc_remove_cursor_image(scr_stat *scp) { /* assert(scp == scp->sc->cur_scp); */ SC_VIDEO_LOCK(scp->sc); (*scp->rndr->draw_cursor)(scp, scp->cursor_oldpos, scp->curs_attr.flags & CONS_BLINK_CURSOR, FALSE, sc_inside_cutmark(scp, scp->cursor_oldpos)); SC_VIDEO_UNLOCK(scp->sc); } static void update_cursor_image(scr_stat *scp) { /* assert(scp == scp->sc->cur_scp); */ sc_remove_cursor_image(scp); sc_set_cursor_image(scp); sc_draw_cursor_image(scp); } void sc_set_cursor_image(scr_stat *scp) { scp->curs_attr = scp->base_curs_attr; if (scp->curs_attr.flags & CONS_HIDDEN_CURSOR) { /* hidden cursor is internally represented as zero-height * underline */ scp->curs_attr.flags = CONS_CHAR_CURSOR; scp->curs_attr.base = scp->curs_attr.height = 0; } else if (scp->curs_attr.flags & CONS_CHAR_CURSOR) { scp->curs_attr.base = imin(scp->base_curs_attr.base, scp->font_size - 1); scp->curs_attr.height = imin(scp->base_curs_attr.height, scp->font_size - scp->curs_attr.base); } else { /* block cursor */ scp->curs_attr.base = 0; scp->curs_attr.height = scp->font_size; } /* assert(scp == scp->sc->cur_scp); */ SC_VIDEO_LOCK(scp->sc); (*scp->rndr->set_cursor)(scp, scp->curs_attr.base, scp->curs_attr.height, scp->curs_attr.flags & CONS_BLINK_CURSOR); SC_VIDEO_UNLOCK(scp->sc); } static void sc_adjust_ca(struct cursor_attr *cap, int flags, int base, int height) { if (flags & CONS_CHARCURSOR_COLORS) { cap->bg[0] = base & 0xff; cap->bg[1] = height & 0xff; } else if (flags & CONS_MOUSECURSOR_COLORS) { cap->mouse_ba = base & 0xff; cap->mouse_ia = height & 0xff; } else { if (base >= 0) cap->base = base; if (height >= 0) cap->height = height; if (!(flags & CONS_SHAPEONLY_CURSOR)) cap->flags = flags & CONS_CURSOR_ATTRS; } } static void change_cursor_shape(scr_stat *scp, int flags, int base, int height) { if ((scp == scp->sc->cur_scp) && !ISGRAPHSC(scp)) sc_remove_cursor_image(scp); if (flags & CONS_RESET_CURSOR) scp->base_curs_attr = scp->dflt_curs_attr; else if (flags & CONS_DEFAULT_CURSOR) { sc_adjust_ca(&scp->dflt_curs_attr, flags, base, height); scp->base_curs_attr = scp->dflt_curs_attr; } else sc_adjust_ca(&scp->base_curs_attr, flags, base, height); 
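/*
 * Redraw the cursor only if this vty is the one on screen and it is in
 * text mode; other vtys pick the new shape up from base_curs_attr when
 * they are switched to.  As an illustrative call (not one made in this
 * file), a two-scan-line underline cursor could be requested with
 * change_cursor_shape(scp, CONS_CHAR_CURSOR, scp->font_size - 2, 2).
 */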
if ((scp == scp->sc->cur_scp) && !ISGRAPHSC(scp)) { sc_set_cursor_image(scp); sc_draw_cursor_image(scp); } } void sc_change_cursor_shape(scr_stat *scp, int flags, int base, int height) { sc_softc_t *sc; struct tty *tp; int s; int i; if (flags == -1) flags = CONS_SHAPEONLY_CURSOR; s = spltty(); if (flags & CONS_LOCAL_CURSOR) { /* local (per vty) change */ change_cursor_shape(scp, flags, base, height); splx(s); return; } /* global change */ sc = scp->sc; if (flags & CONS_RESET_CURSOR) sc->curs_attr = sc->dflt_curs_attr; else if (flags & CONS_DEFAULT_CURSOR) { sc_adjust_ca(&sc->dflt_curs_attr, flags, base, height); sc->curs_attr = sc->dflt_curs_attr; } else sc_adjust_ca(&sc->curs_attr, flags, base, height); for (i = sc->first_vty; i < sc->first_vty + sc->vtys; ++i) { if ((tp = SC_DEV(sc, i)) == NULL) continue; if ((scp = sc_get_stat(tp)) == NULL) continue; scp->dflt_curs_attr = sc->curs_attr; change_cursor_shape(scp, CONS_RESET_CURSOR, -1, -1); } splx(s); } static void scinit(int unit, int flags) { /* * When syscons is being initialized as the kernel console, malloc() * is not yet functional, because various kernel structures has not been * fully initialized yet. Therefore, we need to declare the following * static buffers for the console. This is less than ideal, * but is necessry evil for the time being. XXX */ static u_short sc_buffer[ROW * COL]; /* XXX */ #ifndef SC_NO_FONT_LOADING static u_char font_8[256 * 8]; static u_char font_14[256 * 14]; static u_char font_16[256 * 16]; #endif #ifdef SC_KERNEL_CONS_ATTRS static const u_char dflt_kattrtab[] = SC_KERNEL_CONS_ATTRS; #elif SC_KERNEL_CONS_ATTR == FG_WHITE static const u_char dflt_kattrtab[] = { FG_WHITE, FG_YELLOW, FG_LIGHTMAGENTA, FG_LIGHTRED, FG_LIGHTCYAN, FG_LIGHTGREEN, FG_LIGHTBLUE, FG_GREEN, 0, }; #else static const u_char dflt_kattrtab[] = { FG_WHITE, 0, }; #endif sc_softc_t *sc; scr_stat *scp; video_adapter_t *adp; int col; int row; int i; /* one time initialization */ if (init_done == COLD) { sc_get_bios_values(&bios_value); for (i = 0; i < nitems(sc_kattrtab); i++) sc_kattrtab[i] = dflt_kattrtab[i % (nitems(dflt_kattrtab) - 1)]; } init_done = WARM; /* * Allocate resources. Even if we are being called for the second * time, we must allocate them again, because they might have * disappeared... 
*/ sc = sc_get_softc(unit, flags & SC_KERNEL_CONSOLE); if ((sc->flags & SC_INIT_DONE) == 0) SC_VIDEO_LOCKINIT(sc); adp = NULL; if (sc->adapter >= 0) { vid_release(sc->adp, (void *)&sc->adapter); adp = sc->adp; sc->adp = NULL; } if (sc->keyboard >= 0) { DPRINTF(5, ("sc%d: releasing kbd%d\n", unit, sc->keyboard)); i = kbd_release(sc->kbd, (void *)&sc->keyboard); DPRINTF(5, ("sc%d: kbd_release returned %d\n", unit, i)); if (sc->kbd != NULL) { DPRINTF(5, ("sc%d: kbd != NULL!, index:%d, unit:%d, flags:0x%x\n", unit, sc->kbd->kb_index, sc->kbd->kb_unit, sc->kbd->kb_flags)); } sc->kbd = NULL; } sc->adapter = vid_allocate("*", unit, (void *)&sc->adapter); sc->adp = vid_get_adapter(sc->adapter); /* assert((sc->adapter >= 0) && (sc->adp != NULL)) */ sc->keyboard = sc_allocate_keyboard(sc, unit); DPRINTF(1, ("sc%d: keyboard %d\n", unit, sc->keyboard)); sc->kbd = kbd_get_keyboard(sc->keyboard); if (sc->kbd != NULL) { DPRINTF(1, ("sc%d: kbd index:%d, unit:%d, flags:0x%x\n", unit, sc->kbd->kb_index, sc->kbd->kb_unit, sc->kbd->kb_flags)); } if (!(sc->flags & SC_INIT_DONE) || (adp != sc->adp)) { sc->initial_mode = sc->adp->va_initial_mode; #ifndef SC_NO_FONT_LOADING if (flags & SC_KERNEL_CONSOLE) { sc->font_8 = font_8; sc->font_14 = font_14; sc->font_16 = font_16; } else if (sc->font_8 == NULL) { /* assert(sc_malloc) */ sc->font_8 = malloc(sizeof(font_8), M_DEVBUF, M_WAITOK); sc->font_14 = malloc(sizeof(font_14), M_DEVBUF, M_WAITOK); sc->font_16 = malloc(sizeof(font_16), M_DEVBUF, M_WAITOK); } #endif /* extract the hardware cursor location and hide the cursor for * now */ vidd_read_hw_cursor(sc->adp, &col, &row); vidd_set_hw_cursor(sc->adp, -1, -1); /* set up the first console */ sc->first_vty = unit * MAXCONS; sc->vtys = MAXCONS; /* XXX: should be configurable */ if (flags & SC_KERNEL_CONSOLE) { /* * Set up devs structure but don't use it yet, calling * make_dev() might panic kernel. Wait for * sc_attach_unit() to actually create the devices. */ sc->dev = main_devs; scp = &main_console; init_scp(sc, sc->first_vty, scp); sc_vtb_init(&scp->vtb, VTB_MEMORY, scp->xsize, scp->ysize, (void *)sc_buffer, FALSE); if (sc_init_emulator(scp, SC_DFLT_TERM)) sc_init_emulator(scp, "*"); (*scp->tsw->te_default_attr)( scp, SC_KERNEL_CONS_ATTR, SC_KERNEL_CONS_REV_ATTR); } else { /* assert(sc_malloc) */ sc->dev = malloc(sizeof(struct tty *) * sc->vtys, M_DEVBUF, M_WAITOK | M_ZERO); sc->dev[0] = sc_alloc_tty(0, unit * MAXCONS); scp = alloc_scp(sc, sc->first_vty); SC_STAT(sc->dev[0]) = scp; } sc->cur_scp = scp; #ifndef __sparc64__ /* copy screen to temporary buffer */ sc_vtb_init(&scp->scr, VTB_FRAMEBUFFER, scp->xsize, scp->ysize, (void *)scp->sc->adp->va_window, FALSE); if (ISTEXTSC(scp)) sc_vtb_copy(&scp->scr, 0, &scp->vtb, 0, scp->xsize * scp->ysize); #endif /* Sync h/w cursor position to s/w (sc and teken). 
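 * vidd_read_hw_cursor() above told us where the firmware or loader left
 * the cursor; after clamping to the screen size that position is mirrored
 * into xpos/ypos and the linear index cursor_pos = row * xsize + col, and
 * te_sync() pushes the same position into the terminal emulator.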
*/ if (col >= scp->xsize) col = 0; if (row >= scp->ysize) row = scp->ysize - 1; scp->xpos = col; scp->ypos = row; scp->cursor_pos = scp->cursor_oldpos = row * scp->xsize + col; (*scp->tsw->te_sync)(scp); sc->dflt_curs_attr.base = 0; sc->dflt_curs_attr.height = howmany(scp->font_size, 8); sc->dflt_curs_attr.flags = 0; sc->dflt_curs_attr.bg[0] = FG_RED; sc->dflt_curs_attr.bg[1] = FG_LIGHTGREY; sc->dflt_curs_attr.bg[2] = FG_BLUE; sc->dflt_curs_attr.mouse_ba = FG_WHITE; sc->dflt_curs_attr.mouse_ia = FG_RED; sc->curs_attr = sc->dflt_curs_attr; scp->base_curs_attr = scp->dflt_curs_attr = sc->curs_attr; scp->curs_attr = scp->base_curs_attr; #ifndef SC_NO_SYSMOUSE sc_mouse_move(scp, scp->xpixel / 2, scp->ypixel / 2); #endif if (!ISGRAPHSC(scp)) { sc_set_cursor_image(scp); sc_draw_cursor_image(scp); } /* save font and palette */ #ifndef SC_NO_FONT_LOADING sc->fonts_loaded = 0; if (ISFONTAVAIL(sc->adp->va_flags)) { #ifdef SC_DFLT_FONT bcopy(dflt_font_8, sc->font_8, sizeof(dflt_font_8)); bcopy(dflt_font_14, sc->font_14, sizeof(dflt_font_14)); bcopy(dflt_font_16, sc->font_16, sizeof(dflt_font_16)); sc->fonts_loaded = FONT_16 | FONT_14 | FONT_8; if (scp->font_size < 14) { sc_load_font(scp, 0, 8, 8, sc->font_8, 0, 256); } else if (scp->font_size >= 16) { sc_load_font( scp, 0, 16, 8, sc->font_16, 0, 256); } else { sc_load_font( scp, 0, 14, 8, sc->font_14, 0, 256); } #else /* !SC_DFLT_FONT */ if (scp->font_size < 14) { sc_save_font(scp, 0, 8, 8, sc->font_8, 0, 256); sc->fonts_loaded = FONT_8; } else if (scp->font_size >= 16) { sc_save_font( scp, 0, 16, 8, sc->font_16, 0, 256); sc->fonts_loaded = FONT_16; } else { sc_save_font( scp, 0, 14, 8, sc->font_14, 0, 256); sc->fonts_loaded = FONT_14; } #endif /* SC_DFLT_FONT */ /* FONT KLUDGE: always use the font page #0. XXX */ sc_show_font(scp, 0); } #endif /* !SC_NO_FONT_LOADING */ #ifndef SC_NO_PALETTE_LOADING vidd_save_palette(sc->adp, sc->palette); #ifdef SC_PIXEL_MODE for (i = 0; i < sizeof(sc->palette2); i++) sc->palette2[i] = i / 3; #endif #endif #ifdef DEV_SPLASH if (!(sc->flags & SC_SPLASH_SCRN)) { /* we are ready to put up the splash image! 
*/ splash_init(sc->adp, scsplash_callback, sc); sc->flags |= SC_SPLASH_SCRN; } #endif } /* the rest is not necessary, if we have done it once */ if (sc->flags & SC_INIT_DONE) return; /* initialize mapscrn arrays to a one to one map */ for (i = 0; i < sizeof(sc->scr_map); i++) sc->scr_map[i] = sc->scr_rmap[i] = i; sc->flags |= SC_INIT_DONE; } static void scterm(int unit, int flags) { sc_softc_t *sc; scr_stat *scp; sc = sc_get_softc(unit, flags & SC_KERNEL_CONSOLE); if (sc == NULL) return; /* shouldn't happen */ #ifdef DEV_SPLASH /* this console is no longer available for the splash screen */ if (sc->flags & SC_SPLASH_SCRN) { splash_term(sc->adp); sc->flags &= ~SC_SPLASH_SCRN; } #endif #if 0 /* XXX */ /* move the hardware cursor to the upper-left corner */ vidd_set_hw_cursor(sc->adp, 0, 0); #endif /* release the keyboard and the video card */ if (sc->keyboard >= 0) kbd_release(sc->kbd, &sc->keyboard); if (sc->adapter >= 0) vid_release(sc->adp, &sc->adapter); /* stop the terminal emulator, if any */ scp = sc_get_stat(sc->dev[0]); if (scp->tsw) (*scp->tsw->te_term)(scp, &scp->ts); mtx_destroy(&sc->video_mtx); /* clear the structure */ if (!(flags & SC_KERNEL_CONSOLE)) { free(scp->ts, M_DEVBUF); /* XXX: We need delete_dev() for this */ free(sc->dev, M_DEVBUF); #if 0 /* XXX: We need a ttyunregister for this */ free(sc->tty, M_DEVBUF); #endif #ifndef SC_NO_FONT_LOADING free(sc->font_8, M_DEVBUF); free(sc->font_14, M_DEVBUF); free(sc->font_16, M_DEVBUF); #endif /* XXX vtb, history */ } bzero(sc, sizeof(*sc)); sc->keyboard = -1; sc->adapter = -1; } static void scshutdown(__unused void *arg, __unused int howto) { KASSERT(sc_console != NULL, ("sc_console != NULL")); KASSERT(sc_console->sc != NULL, ("sc_console->sc != NULL")); KASSERT(sc_console->sc->cur_scp != NULL, ("sc_console->sc->cur_scp != NULL")); sc_touch_scrn_saver(); if (!cold && sc_console->sc->cur_scp->index != sc_console->index && sc_console->sc->cur_scp->smode.mode == VT_AUTO && sc_console->smode.mode == VT_AUTO) sc_switch_scr(sc_console->sc, sc_console->index); shutdown_in_progress = TRUE; } static void scsuspend(__unused void *arg) { int retry; KASSERT(sc_console != NULL, ("sc_console != NULL")); KASSERT(sc_console->sc != NULL, ("sc_console->sc != NULL")); KASSERT(sc_console->sc->cur_scp != NULL, ("sc_console->sc->cur_scp != NULL")); sc_susp_scr = sc_console->sc->cur_scp->index; if (sc_no_suspend_vtswitch || sc_susp_scr == sc_console->index) { sc_touch_scrn_saver(); sc_susp_scr = -1; return; } for (retry = 0; retry < 10; retry++) { sc_switch_scr(sc_console->sc, sc_console->index); if (!sc_console->sc->switch_in_progress) break; pause("scsuspend", hz); } suspend_in_progress = TRUE; } static void scresume(__unused void *arg) { KASSERT(sc_console != NULL, ("sc_console != NULL")); KASSERT(sc_console->sc != NULL, ("sc_console->sc != NULL")); KASSERT(sc_console->sc->cur_scp != NULL, ("sc_console->sc->cur_scp != NULL")); suspend_in_progress = FALSE; if (sc_susp_scr < 0) { update_font(sc_console->sc->cur_scp); return; } sc_switch_scr(sc_console->sc, sc_susp_scr); } int sc_clean_up(scr_stat *scp) { #ifdef DEV_SPLASH int error; #endif if (scp->sc->flags & SC_SCRN_BLANKED) { sc_touch_scrn_saver(); #ifdef DEV_SPLASH if ((error = wait_scrn_saver_stop(scp->sc))) return error; #endif } scp->status |= MOUSE_HIDDEN; sc_remove_mouse_image(scp); sc_remove_cutmarking(scp); return 0; } void sc_alloc_scr_buffer(scr_stat *scp, int wait, int discard) { sc_vtb_t new; sc_vtb_t old; old = scp->vtb; sc_vtb_init(&new, VTB_MEMORY, scp->xsize, scp->ysize, NULL, 
wait); if (!discard && (old.vtb_flags & VTB_VALID)) { /* retain the current cursor position and buffer contants */ scp->cursor_oldpos = scp->cursor_pos; /* * This works only if the old buffer has the same size as or * larger than the new one. XXX */ sc_vtb_copy(&old, 0, &new, 0, scp->xsize * scp->ysize); scp->vtb = new; } else { scp->vtb = new; sc_vtb_destroy(&old); } #ifndef SC_NO_SYSMOUSE /* move the mouse cursor at the center of the screen */ sc_mouse_move(scp, scp->xpixel / 2, scp->ypixel / 2); #endif } static scr_stat * alloc_scp(sc_softc_t *sc, int vty) { scr_stat *scp; /* assert(sc_malloc) */ scp = (scr_stat *)malloc(sizeof(scr_stat), M_DEVBUF, M_WAITOK); init_scp(sc, vty, scp); sc_alloc_scr_buffer(scp, TRUE, TRUE); if (sc_init_emulator(scp, SC_DFLT_TERM)) sc_init_emulator(scp, "*"); #ifndef SC_NO_CUTPASTE sc_alloc_cut_buffer(scp, TRUE); #endif #ifndef SC_NO_HISTORY sc_alloc_history_buffer(scp, 0, 0, TRUE); #endif return scp; } static void init_scp(sc_softc_t *sc, int vty, scr_stat *scp) { video_info_t info; bzero(scp, sizeof(*scp)); scp->index = vty; scp->sc = sc; scp->status = 0; scp->mode = sc->initial_mode; vidd_get_info(sc->adp, scp->mode, &info); if (info.vi_flags & V_INFO_GRAPHICS) { scp->status |= GRAPHICS_MODE; scp->xpixel = info.vi_width; scp->ypixel = info.vi_height; scp->xsize = info.vi_width / info.vi_cwidth; scp->ysize = info.vi_height / info.vi_cheight; scp->font_size = 0; scp->font = NULL; } else { scp->xsize = info.vi_width; scp->ysize = info.vi_height; scp->xpixel = scp->xsize * info.vi_cwidth; scp->ypixel = scp->ysize * info.vi_cheight; } scp->font_size = info.vi_cheight; scp->font_width = info.vi_cwidth; #ifndef SC_NO_FONT_LOADING if (info.vi_cheight < 14) scp->font = sc->font_8; else if (info.vi_cheight >= 16) scp->font = sc->font_16; else scp->font = sc->font_14; #else scp->font = NULL; #endif sc_vtb_init(&scp->vtb, VTB_MEMORY, 0, 0, NULL, FALSE); #ifndef __sparc64__ sc_vtb_init(&scp->scr, VTB_FRAMEBUFFER, 0, 0, NULL, FALSE); #endif scp->xoff = scp->yoff = 0; scp->xpos = scp->ypos = 0; scp->start = scp->xsize * scp->ysize - 1; scp->end = 0; scp->tsw = NULL; scp->ts = NULL; scp->rndr = NULL; scp->border = (SC_NORM_ATTR >> 4) & 0x0f; scp->base_curs_attr = scp->dflt_curs_attr = sc->curs_attr; scp->mouse_cut_start = scp->xsize * scp->ysize; scp->mouse_cut_end = -1; scp->mouse_signal = 0; scp->mouse_pid = 0; scp->mouse_proc = NULL; scp->kbd_mode = K_XLATE; scp->bell_pitch = bios_value.bell_pitch; scp->bell_duration = BELL_DURATION; scp->status |= (bios_value.shift_state & NLKED); scp->status |= CURSOR_ENABLED | MOUSE_HIDDEN; scp->pid = 0; scp->proc = NULL; scp->smode.mode = VT_AUTO; scp->history = NULL; scp->history_pos = 0; scp->history_size = 0; } int sc_init_emulator(scr_stat *scp, char *name) { sc_term_sw_t *sw; sc_rndr_sw_t *rndr; void *p; int error; if (name == NULL) /* if no name is given, use the current emulator */ sw = scp->tsw; else /* ...otherwise find the named emulator */ sw = sc_term_match(name); if (sw == NULL) return EINVAL; rndr = NULL; if (strcmp(sw->te_renderer, "*") != 0) { rndr = sc_render_match(scp, sw->te_renderer, scp->status & (GRAPHICS_MODE | PIXEL_MODE)); } if (rndr == NULL) { rndr = sc_render_match(scp, scp->sc->adp->va_name, scp->status & (GRAPHICS_MODE | PIXEL_MODE)); if (rndr == NULL) return ENODEV; } if (sw == scp->tsw) { error = (*sw->te_init)(scp, &scp->ts, SC_TE_WARM_INIT); scp->rndr = rndr; scp->rndr->init(scp); sc_clear_screen(scp); /* assert(error == 0); */ return error; } if (sc_malloc && (sw->te_size > 0)) p = 
malloc(sw->te_size, M_DEVBUF, M_NOWAIT); else p = NULL; error = (*sw->te_init)(scp, &p, SC_TE_COLD_INIT); if (error) return error; if (scp->tsw) (*scp->tsw->te_term)(scp, &scp->ts); if (scp->ts != NULL) free(scp->ts, M_DEVBUF); scp->tsw = sw; scp->ts = p; scp->rndr = rndr; scp->rndr->init(scp); (*sw->te_default_attr)(scp, SC_NORM_ATTR, SC_NORM_REV_ATTR); sc_clear_screen(scp); return 0; } /* * scgetc(flags) - get character from keyboard. * If flags & SCGETC_CN, then avoid harmful side effects. * If flags & SCGETC_NONBLOCK, then wait until a key is pressed, else * return NOKEY if there is nothing there. */ static u_int scgetc(sc_softc_t *sc, u_int flags, struct sc_cnstate *sp) { scr_stat *scp; #ifndef SC_NO_HISTORY struct tty *tp; #endif u_int c; int this_scr; int f; int i; if (sc->kbd == NULL) return NOKEY; next_code: #if 1 /* I don't like this, but... XXX */ if (flags & SCGETC_CN) sccnupdate(sc->cur_scp); #endif scp = sc->cur_scp; /* first see if there is something in the keyboard port */ for (;;) { if (flags & SCGETC_CN) sccnscrunlock(sc, sp); c = kbdd_read_char(sc->kbd, !(flags & SCGETC_NONBLOCK)); if (flags & SCGETC_CN) sccnscrlock(sc, sp); if (c == ERRKEY) { if (!(flags & SCGETC_CN)) sc_bell( scp, bios_value.bell_pitch, BELL_DURATION); } else if (c == NOKEY) return c; else break; } /* make screensaver happy */ if (!(c & RELKEY)) sc_touch_scrn_saver(); if (!(flags & SCGETC_CN)) random_harvest_queue(&c, sizeof(c), RANDOM_KEYBOARD); if (sc->kbd_open_level == 0 && scp->kbd_mode != K_XLATE) return KEYCHAR(c); /* if scroll-lock pressed allow history browsing */ if (!ISGRAPHSC(scp) && scp->history && scp->status & SLKED) { scp->status &= ~CURSOR_ENABLED; sc_remove_cursor_image(scp); #ifndef SC_NO_HISTORY if (!(scp->status & BUFFER_SAVED)) { scp->status |= BUFFER_SAVED; sc_hist_save(scp); } switch (c) { /* FIXME: key codes */ case SPCLKEY | FKEY | F(49): /* home key */ sc_remove_cutmarking(scp); sc_hist_home(scp); goto next_code; case SPCLKEY | FKEY | F(57): /* end key */ sc_remove_cutmarking(scp); sc_hist_end(scp); goto next_code; case SPCLKEY | FKEY | F(50): /* up arrow key */ sc_remove_cutmarking(scp); if (sc_hist_up_line(scp)) if (!(flags & SCGETC_CN)) sc_bell(scp, bios_value.bell_pitch, BELL_DURATION); goto next_code; case SPCLKEY | FKEY | F(58): /* down arrow key */ sc_remove_cutmarking(scp); if (sc_hist_down_line(scp)) if (!(flags & SCGETC_CN)) sc_bell(scp, bios_value.bell_pitch, BELL_DURATION); goto next_code; case SPCLKEY | FKEY | F(51): /* page up key */ sc_remove_cutmarking(scp); for (i = 0; i < scp->ysize; i++) if (sc_hist_up_line(scp)) { if (!(flags & SCGETC_CN)) sc_bell(scp, bios_value.bell_pitch, BELL_DURATION); break; } goto next_code; case SPCLKEY | FKEY | F(59): /* page down key */ sc_remove_cutmarking(scp); for (i = 0; i < scp->ysize; i++) if (sc_hist_down_line(scp)) { if (!(flags & SCGETC_CN)) sc_bell(scp, bios_value.bell_pitch, BELL_DURATION); break; } goto next_code; } #endif /* SC_NO_HISTORY */ } /* * Process and consume special keys here. Return a plain char code * or a char code with the META flag or a function key code. 
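 * The keyboard layer packs everything into a single u_int: KEYCHAR(c) is
 * the character part, while flag bits such as RELKEY (release event),
 * SPCLKEY (special action) and FKEY (function key) describe the rest.
 * The history keys handled above are fully decorated examples, e.g.
 * SPCLKEY | FKEY | F(49) for Home; a code without SPCLKEY falls through
 * to the caller as an ordinary, possibly META-flagged, character (unless
 * the screen is blanked).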
*/ if (c & RELKEY) { /* key released */ /* goto next_code */ } else { /* key pressed */ if (c & SPCLKEY) { c &= ~SPCLKEY; switch (KEYCHAR(c)) { /* LOCKING KEYS */ case NLK: case CLK: case ALK: break; case SLK: (void)kbdd_ioctl( sc->kbd, KDGKBSTATE, (caddr_t)&f); if (f & SLKED) { scp->status |= SLKED; } else { if (scp->status & SLKED) { scp->status &= ~SLKED; #ifndef SC_NO_HISTORY if (scp->status & BUFFER_SAVED) { if (!sc_hist_restore( scp)) sc_remove_cutmarking( scp); scp->status &= ~BUFFER_SAVED; scp->status |= CURSOR_ENABLED; sc_draw_cursor_image( scp); } /* Only safe in Giant-locked * context. */ tp = SC_DEV(sc, scp->index); if (!(flags & SCGETC_CN) && tty_opened_ns(tp)) sctty_outwakeup(tp); #endif } } break; case PASTE: #ifndef SC_NO_CUTPASTE sc_mouse_paste(scp); #endif break; /* NON-LOCKING KEYS */ case NOP: case LSH: case RSH: case LCTR: case RCTR: case LALT: case RALT: case ASH: case META: break; case BTAB: if (!(sc->flags & SC_SCRN_BLANKED)) return c; break; case SPSC: #ifdef DEV_SPLASH /* force activatation/deactivation of the screen * saver */ if (!(sc->flags & SC_SCRN_BLANKED)) { run_scrn_saver = TRUE; sc->scrn_time_stamp -= scrn_blank_time; } if (cold) { /* * While devices are being probed, the * screen saver need to be invoked * explicitly. XXX */ if (sc->flags & SC_SCRN_BLANKED) { scsplash_stick(FALSE); stop_scrn_saver( sc, current_saver); } else { if (!ISGRAPHSC(scp)) { scsplash_stick(TRUE); (*current_saver)( sc, TRUE); } } } #endif /* DEV_SPLASH */ break; case RBT: #ifndef SC_DISABLE_REBOOT if (enable_reboot && !(flags & SCGETC_CN)) shutdown_nice(0); #endif break; case HALT: #ifndef SC_DISABLE_REBOOT if (enable_reboot && !(flags & SCGETC_CN)) shutdown_nice(RB_HALT); #endif break; case PDWN: #ifndef SC_DISABLE_REBOOT if (enable_reboot && !(flags & SCGETC_CN)) shutdown_nice(RB_HALT | RB_POWEROFF); #endif break; case SUSP: power_pm_suspend(POWER_SLEEP_STATE_SUSPEND); break; case STBY: power_pm_suspend(POWER_SLEEP_STATE_STANDBY); break; case DBG: #ifndef SC_DISABLE_KDBKEY if (enable_kdbkey) kdb_break(); #endif break; case PNC: if (enable_panic_key) panic("Forced by the panic key"); break; case NEXT: this_scr = scp->index; for (i = (this_scr - sc->first_vty + 1) % sc->vtys; sc->first_vty + i != this_scr; i = (i + 1) % sc->vtys) { struct tty *tp = SC_DEV(sc, sc->first_vty + i); if (tty_opened_ns(tp)) { sc_switch_scr( scp->sc, sc->first_vty + i); break; } } break; case PREV: this_scr = scp->index; for (i = (this_scr - sc->first_vty + sc->vtys - 1) % sc->vtys; sc->first_vty + i != this_scr; i = (i + sc->vtys - 1) % sc->vtys) { struct tty *tp = SC_DEV(sc, sc->first_vty + i); if (tty_opened_ns(tp)) { sc_switch_scr( scp->sc, sc->first_vty + i); break; } } break; default: if (KEYCHAR(c) >= F_SCR && KEYCHAR(c) <= L_SCR) { sc_switch_scr(scp->sc, sc->first_vty + KEYCHAR(c) - F_SCR); break; } /* assert(c & FKEY) */ if (!(sc->flags & SC_SCRN_BLANKED)) return c; break; } /* goto next_code */ } else { /* regular keys (maybe MKEY is set) */ #if !defined(SC_DISABLE_KDBKEY) && defined(KDB) if (enable_kdbkey) kdb_alt_break(c, &sc->sc_altbrk); #endif if (!(sc->flags & SC_SCRN_BLANKED)) return c; } } goto next_code; } static int sctty_mmap(struct tty *tp, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr) { scr_stat *scp; scp = sc_get_stat(tp); if (scp != scp->sc->cur_scp) return -1; return vidd_mmap(scp->sc->adp, offset, paddr, nprot, memattr); } static void update_font(scr_stat *scp) { #ifndef SC_NO_FONT_LOADING /* load appropriate font */ if (!(scp->status & 
GRAPHICS_MODE)) { if (!(scp->status & PIXEL_MODE) && ISFONTAVAIL(scp->sc->adp->va_flags)) { if (scp->font_size < 14) { if (scp->sc->fonts_loaded & FONT_8) sc_load_font(scp, 0, 8, 8, scp->sc->font_8, 0, 256); } else if (scp->font_size >= 16) { if (scp->sc->fonts_loaded & FONT_16) sc_load_font(scp, 0, 16, 8, scp->sc->font_16, 0, 256); } else { if (scp->sc->fonts_loaded & FONT_14) sc_load_font(scp, 0, 14, 8, scp->sc->font_14, 0, 256); } /* * FONT KLUDGE: * This is an interim kludge to display correct font. * Always use the font page #0 on the video plane 2. * Somehow we cannot show the font in other font pages * on some video cards... XXX */ sc_show_font(scp, 0); } mark_all(scp); } #endif /* !SC_NO_FONT_LOADING */ } static int save_kbd_state(scr_stat *scp) { int state; int error; error = kbdd_ioctl(scp->sc->kbd, KDGKBSTATE, (caddr_t)&state); if (error == ENOIOCTL) error = ENODEV; if (error == 0) { scp->status &= ~LOCK_MASK; scp->status |= state; } return error; } static int update_kbd_state(scr_stat *scp, int new_bits, int mask) { int state; int error; if (mask != LOCK_MASK) { error = kbdd_ioctl(scp->sc->kbd, KDGKBSTATE, (caddr_t)&state); if (error == ENOIOCTL) error = ENODEV; if (error) return error; state &= ~mask; state |= new_bits & mask; } else { state = new_bits & LOCK_MASK; } error = kbdd_ioctl(scp->sc->kbd, KDSKBSTATE, (caddr_t)&state); if (error == ENOIOCTL) error = ENODEV; return error; } static int update_kbd_leds(scr_stat *scp, int which) { int error; which &= LOCK_MASK; error = kbdd_ioctl(scp->sc->kbd, KDSETLED, (caddr_t)&which); if (error == ENOIOCTL) error = ENODEV; return error; } int set_mode(scr_stat *scp) { video_info_t info; /* reject unsupported mode */ if (vidd_get_info(scp->sc->adp, scp->mode, &info)) return 1; /* if this vty is not currently showing, do nothing */ if (scp != scp->sc->cur_scp) return 0; /* setup video hardware for the given mode */ vidd_set_mode(scp->sc->adp, scp->mode); scp->rndr->init(scp); #ifndef __sparc64__ sc_vtb_init(&scp->scr, VTB_FRAMEBUFFER, scp->xsize, scp->ysize, (void *)scp->sc->adp->va_window, FALSE); #endif update_font(scp); sc_set_border(scp, scp->border); sc_set_cursor_image(scp); return 0; } void sc_set_border(scr_stat *scp, int color) { SC_VIDEO_LOCK(scp->sc); (*scp->rndr->draw_border)(scp, color); SC_VIDEO_UNLOCK(scp->sc); } #ifndef SC_NO_FONT_LOADING void sc_load_font(scr_stat *scp, int page, int size, int width, u_char *buf, int base, int count) { sc_softc_t *sc; sc = scp->sc; sc->font_loading_in_progress = TRUE; vidd_load_font(sc->adp, page, size, width, buf, base, count); sc->font_loading_in_progress = FALSE; } void sc_save_font(scr_stat *scp, int page, int size, int width, u_char *buf, int base, int count) { sc_softc_t *sc; sc = scp->sc; sc->font_loading_in_progress = TRUE; vidd_save_font(sc->adp, page, size, width, buf, base, count); sc->font_loading_in_progress = FALSE; } void sc_show_font(scr_stat *scp, int page) { vidd_show_font(scp->sc->adp, page); } #endif /* !SC_NO_FONT_LOADING */ void sc_paste(scr_stat *scp, const u_char *p, int count) { struct tty *tp; u_char *rmap; tp = SC_DEV(scp->sc, scp->sc->cur_scp->index); if (!tty_opened_ns(tp)) return; rmap = scp->sc->scr_rmap; for (; count > 0; --count) ttydisc_rint(tp, rmap[*p++], 0); ttydisc_rint_done(tp); } void sc_respond(scr_stat *scp, const u_char *p, int count, int wakeup) { struct tty *tp; tp = SC_DEV(scp->sc, scp->sc->cur_scp->index); if (!tty_opened_ns(tp)) return; ttydisc_rint_simple(tp, p, count); if (wakeup) { /* XXX: we can't always call ttydisc_rint_done() here! 
*/ ttydisc_rint_done(tp); } } void sc_bell(scr_stat *scp, int pitch, int duration) { if (cold || kdb_active || shutdown_in_progress || !enable_bell) return; if (scp != scp->sc->cur_scp && (scp->sc->flags & SC_QUIET_BELL)) return; if (scp->sc->flags & SC_VISUAL_BELL) { if (scp->sc->blink_in_progress) return; scp->sc->blink_in_progress = 3; if (scp != scp->sc->cur_scp) scp->sc->blink_in_progress += 2; blink_screen(scp->sc->cur_scp); } else if (duration != 0 && pitch != 0) { if (scp != scp->sc->cur_scp) pitch *= 2; sysbeep(1193182 / pitch, duration); } } static int sc_kattr(void) { if (sc_console == NULL) return (SC_KERNEL_CONS_ATTR); /* for very early, before pcpu */ return (sc_kattrtab[curcpu % nitems(sc_kattrtab)]); } static void blink_screen(void *arg) { scr_stat *scp = arg; struct tty *tp; if (ISGRAPHSC(scp) || (scp->sc->blink_in_progress <= 1)) { scp->sc->blink_in_progress = 0; mark_all(scp); tp = SC_DEV(scp->sc, scp->index); if (tty_opened_ns(tp)) sctty_outwakeup(tp); if (scp->sc->delayed_next_scr) sc_switch_scr(scp->sc, scp->sc->delayed_next_scr - 1); } else { (*scp->rndr->draw)(scp, 0, scp->xsize * scp->ysize, scp->sc->blink_in_progress & 1); scp->sc->blink_in_progress--; callout_reset_sbt(&scp->sc->cblink, SBT_1S / 15, 0, blink_screen, scp, C_PREL(0)); } } /* * Until sc_attach_unit() gets called no dev structures will be available * to store the per-screen current status. This is the case when the * kernel is initially booting and needs access to its console. During * this early phase of booting the console's current status is kept in * one statically defined scr_stat structure, and any pointers to the * dev structures will be NULL. */ static scr_stat * sc_get_stat(struct tty *tp) { if (tp == NULL) return (&main_console); return (SC_STAT(tp)); } /* * Allocate active keyboard. Try to allocate "kbdmux" keyboard first, and, * if found, add all non-busy keyboards to "kbdmux". Otherwise look for * any keyboard. */ static int sc_allocate_keyboard(sc_softc_t *sc, int unit) { int idx0, idx; keyboard_t *k0, *k; keyboard_info_t ki; idx0 = kbd_allocate("kbdmux", -1, (void *)&sc->keyboard, sckbdevent, sc); if (idx0 != -1) { k0 = kbd_get_keyboard(idx0); for (idx = kbd_find_keyboard2("*", -1, 0); idx != -1; idx = kbd_find_keyboard2("*", -1, idx + 1)) { k = kbd_get_keyboard(idx); if (idx == idx0 || KBD_IS_BUSY(k)) continue; bzero(&ki, sizeof(ki)); strcpy(ki.kb_name, k->kb_name); ki.kb_unit = k->kb_unit; (void)kbdd_ioctl(k0, KBADDKBD, (caddr_t)&ki); } } else idx0 = kbd_allocate( "*", unit, (void *)&sc->keyboard, sckbdevent, sc); return (idx0); } Index: head/sys/dev/uart/uart_core.c =================================================================== --- head/sys/dev/uart/uart_core.c (revision 355600) +++ head/sys/dev/uart/uart_core.c (revision 355601) @@ -1,824 +1,824 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "uart_if.h" devclass_t uart_devclass; const char uart_driver_name[] = "uart"; SLIST_HEAD(uart_devinfo_list, uart_devinfo) uart_sysdevs = SLIST_HEAD_INITIALIZER(uart_sysdevs); static MALLOC_DEFINE(M_UART, "UART", "UART driver"); #ifndef UART_POLL_FREQ #define UART_POLL_FREQ 50 #endif static int uart_poll_freq = UART_POLL_FREQ; SYSCTL_INT(_debug, OID_AUTO, uart_poll_freq, CTLFLAG_RDTUN, &uart_poll_freq, 0, "UART poll frequency"); static int uart_force_poll; SYSCTL_INT(_debug, OID_AUTO, uart_force_poll, CTLFLAG_RDTUN, &uart_force_poll, 0, "Force UART polling"); static inline int uart_pps_mode_valid(int pps_mode) { int opt; switch(pps_mode & UART_PPS_SIGNAL_MASK) { case UART_PPS_DISABLED: case UART_PPS_CTS: case UART_PPS_DCD: break; default: return (false); } opt = pps_mode & UART_PPS_OPTION_MASK; if ((opt & ~(UART_PPS_INVERT_PULSE | UART_PPS_NARROW_PULSE)) != 0) return (false); return (true); } static void uart_pps_print_mode(struct uart_softc *sc) { device_printf(sc->sc_dev, "PPS capture mode: "); switch(sc->sc_pps_mode & UART_PPS_SIGNAL_MASK) { case UART_PPS_DISABLED: printf("disabled"); break; case UART_PPS_CTS: printf("CTS"); break; case UART_PPS_DCD: printf("DCD"); break; default: printf("invalid"); break; } if (sc->sc_pps_mode & UART_PPS_INVERT_PULSE) printf("-Inverted"); if (sc->sc_pps_mode & UART_PPS_NARROW_PULSE) printf("-NarrowPulse"); printf("\n"); } static int uart_pps_mode_sysctl(SYSCTL_HANDLER_ARGS) { struct uart_softc *sc; int err, tmp; sc = arg1; tmp = sc->sc_pps_mode; err = sysctl_handle_int(oidp, &tmp, 0, req); if (err != 0 || req->newptr == NULL) return (err); if (!uart_pps_mode_valid(tmp)) return (EINVAL); sc->sc_pps_mode = tmp; return(0); } static void uart_pps_process(struct uart_softc *sc, int ser_sig) { sbintime_t now; int is_assert, pps_sig; /* Which signal is configured as PPS? Early out if none. */ switch(sc->sc_pps_mode & UART_PPS_SIGNAL_MASK) { case UART_PPS_CTS: pps_sig = SER_CTS; break; case UART_PPS_DCD: pps_sig = SER_DCD; break; default: return; } /* Early out if there is no change in the signal configured as PPS. */ if ((ser_sig & SER_DELTA(pps_sig)) == 0) return; /* * In narrow-pulse mode we need to synthesize both capture and clear * events from a single "delta occurred" indication from the uart * hardware because the pulse width is too narrow to reliably detect * both edges. However, when the pulse width is close to our interrupt * processing latency we might intermittantly catch both edges. 
To * guard against generating spurious events when that happens, we use a * separate timer to ensure at least half a second elapses before we * generate another event. */ pps_capture(&sc->sc_pps); if (sc->sc_pps_mode & UART_PPS_NARROW_PULSE) { now = getsbinuptime(); if (now > sc->sc_pps_captime + 500 * SBT_1MS) { sc->sc_pps_captime = now; pps_event(&sc->sc_pps, PPS_CAPTUREASSERT); pps_event(&sc->sc_pps, PPS_CAPTURECLEAR); } } else { is_assert = ser_sig & pps_sig; if (sc->sc_pps_mode & UART_PPS_INVERT_PULSE) is_assert = !is_assert; pps_event(&sc->sc_pps, is_assert ? PPS_CAPTUREASSERT : PPS_CAPTURECLEAR); } } static void uart_pps_init(struct uart_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; ctx = device_get_sysctl_ctx(sc->sc_dev); tree = device_get_sysctl_tree(sc->sc_dev); /* * The historical default for pps capture mode is either DCD or CTS, * depending on the UART_PPS_ON_CTS kernel option. Start with that, * then try to fetch the tunable that overrides the mode for all uart * devices, then try to fetch the sysctl-tunable that overrides the mode * for one specific device. */ #ifdef UART_PPS_ON_CTS sc->sc_pps_mode = UART_PPS_CTS; #else sc->sc_pps_mode = UART_PPS_DCD; #endif TUNABLE_INT_FETCH("hw.uart.pps_mode", &sc->sc_pps_mode); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "pps_mode", CTLTYPE_INT | CTLFLAG_RWTUN, sc, 0, uart_pps_mode_sysctl, "I", "pulse mode: 0/1/2=disabled/CTS/DCD; " "add 0x10 to invert, 0x20 for narrow pulse"); if (!uart_pps_mode_valid(sc->sc_pps_mode)) { device_printf(sc->sc_dev, "Invalid pps_mode 0x%02x configured; disabling PPS capture\n", sc->sc_pps_mode); sc->sc_pps_mode = UART_PPS_DISABLED; } else if (bootverbose) { uart_pps_print_mode(sc); } sc->sc_pps.ppscap = PPS_CAPTUREBOTH; sc->sc_pps.driver_mtx = uart_tty_getlock(sc); sc->sc_pps.driver_abi = PPS_ABI_VERSION; pps_init_abi(&sc->sc_pps); } void uart_add_sysdev(struct uart_devinfo *di) { SLIST_INSERT_HEAD(&uart_sysdevs, di, next); } const char * uart_getname(struct uart_class *uc) { return ((uc != NULL) ? uc->name : NULL); } struct uart_ops * uart_getops(struct uart_class *uc) { return ((uc != NULL) ? uc->uc_ops : NULL); } int uart_getrange(struct uart_class *uc) { return ((uc != NULL) ? uc->uc_range : 0); } u_int uart_getregshift(struct uart_class *uc) { return ((uc != NULL) ? uc->uc_rshift : 0); } u_int uart_getregiowidth(struct uart_class *uc) { return ((uc != NULL) ? uc->uc_riowidth : 0); } /* * Schedule a soft interrupt. We do this on the 0 to !0 transition * of the TTY pending interrupt status. */ void uart_sched_softih(struct uart_softc *sc, uint32_t ipend) { uint32_t new, old; do { old = sc->sc_ttypend; new = old | ipend; } while (!atomic_cmpset_32(&sc->sc_ttypend, old, new)); if ((old & SER_INT_MASK) == 0) swi_sched(sc->sc_softih, 0); } /* * A break condition has been detected. We treat the break condition as * a special case that should not happen during normal operation. When * the break condition is to be passed to higher levels in the form of * a NUL character, we really want the break to be in the right place in * the input stream. The overhead to achieve that is not in relation to * the exceptional nature of the break condition, so we permit ourselves * to be sloppy. 
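 * (On a console uart the break is first offered to the debugger via
 * kdb_break() below; only when that declines, or on non-console ports,
 * is SER_INT_BREAK queued for the soft interrupt handler of an opened
 * device.)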
*/ static __inline int uart_intr_break(void *arg) { struct uart_softc *sc = arg; #if defined(KDB) if (sc->sc_sysdev != NULL && sc->sc_sysdev->type == UART_DEV_CONSOLE) { if (kdb_break()) return (0); } #endif if (sc->sc_opened) uart_sched_softih(sc, SER_INT_BREAK); return (0); } /* * Handle a receiver overrun situation. We lost at least 1 byte in the * input stream and it's our job to contain the situation. We grab as * much of the data we can, but otherwise flush the receiver FIFO to * create some breathing room. The net effect is that we avoid the * overrun condition to happen for the next X characters, where X is * related to the FIFO size at the cost of losing data right away. * So, instead of having multiple overrun interrupts in close proximity * to each other and possibly pessimizing UART interrupt latency for * other UARTs in a multiport configuration, we create a longer segment * of missing characters by freeing up the FIFO. * Each overrun condition is marked in the input buffer by a token. The * token represents the loss of at least one, but possible more bytes in * the input stream. */ static __inline int uart_intr_overrun(void *arg) { struct uart_softc *sc = arg; if (sc->sc_opened) { UART_RECEIVE(sc); if (uart_rx_put(sc, UART_STAT_OVERRUN)) sc->sc_rxbuf[sc->sc_rxput] = UART_STAT_OVERRUN; uart_sched_softih(sc, SER_INT_RXREADY); } UART_FLUSH(sc, UART_FLUSH_RECEIVER); return (0); } /* * Received data ready. */ static __inline int uart_intr_rxready(void *arg) { struct uart_softc *sc = arg; int rxp; rxp = sc->sc_rxput; UART_RECEIVE(sc); #if defined(KDB) if (sc->sc_sysdev != NULL && sc->sc_sysdev->type == UART_DEV_CONSOLE) { while (rxp != sc->sc_rxput) { kdb_alt_break(sc->sc_rxbuf[rxp++], &sc->sc_altbrk); if (rxp == sc->sc_rxbufsz) rxp = 0; } } #endif if (sc->sc_opened) uart_sched_softih(sc, SER_INT_RXREADY); else sc->sc_rxput = sc->sc_rxget; /* Ignore received data. */ return (1); } /* * Line or modem status change (OOB signalling). * We pass the signals to the software interrupt handler for further * processing. Note that we merge the delta bits, but set the state * bits. This is to avoid losing state transitions due to having more * than 1 hardware interrupt between software interrupts. */ static __inline int uart_intr_sigchg(void *arg) { struct uart_softc *sc = arg; int new, old, sig; sig = UART_GETSIG(sc); /* * Time pulse counting support, invoked whenever the PPS parameters are * currently set to capture either edge of the signal. */ if (sc->sc_pps.ppsparam.mode & PPS_CAPTUREBOTH) { uart_pps_process(sc, sig); } /* * Keep track of signal changes, even when the device is not * opened. This allows us to inform upper layers about a * possible loss of DCD and thus the existence of a (possibly) * different connection when we have DCD back, during the time * that the device was closed. */ do { old = sc->sc_ttypend; new = old & ~SER_MASK_STATE; new |= sig & SER_INT_SIGMASK; } while (!atomic_cmpset_32(&sc->sc_ttypend, old, new)); if (sc->sc_opened) uart_sched_softih(sc, SER_INT_SIGCHG); return (1); } /* * The transmitter can accept more data. 
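 * sc_txbusy is raised by the transmit side when a chunk is handed to the
 * hardware; clearing it here and scheduling SER_INT_TXIDLE lets the soft
 * interrupt handler ask the tty layer for the next chunk to send.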
*/ static __inline int uart_intr_txidle(void *arg) { struct uart_softc *sc = arg; if (sc->sc_txbusy) { sc->sc_txbusy = 0; uart_sched_softih(sc, SER_INT_TXIDLE); } return (0); } static int uart_intr(void *arg) { struct uart_softc *sc = arg; int cnt, ipend, testintr; if (sc->sc_leaving) return (FILTER_STRAY); cnt = 0; testintr = sc->sc_testintr; while ((!testintr || cnt < 20) && (ipend = UART_IPEND(sc)) != 0) { cnt++; if (ipend & SER_INT_OVERRUN) uart_intr_overrun(sc); if (ipend & SER_INT_BREAK) uart_intr_break(sc); if (ipend & SER_INT_RXREADY) uart_intr_rxready(sc); if (ipend & SER_INT_SIGCHG) uart_intr_sigchg(sc); if (ipend & SER_INT_TXIDLE) uart_intr_txidle(sc); } if (sc->sc_polled) { callout_reset(&sc->sc_timer, hz / uart_poll_freq, - (timeout_t *)uart_intr, sc); + (callout_func_t *)uart_intr, sc); } return ((cnt == 0) ? FILTER_STRAY : ((testintr && cnt == 20) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED)); } serdev_intr_t * uart_bus_ihand(device_t dev, int ipend) { switch (ipend) { case SER_INT_BREAK: return (uart_intr_break); case SER_INT_OVERRUN: return (uart_intr_overrun); case SER_INT_RXREADY: return (uart_intr_rxready); case SER_INT_SIGCHG: return (uart_intr_sigchg); case SER_INT_TXIDLE: return (uart_intr_txidle); } return (NULL); } int uart_bus_ipend(device_t dev) { struct uart_softc *sc; sc = device_get_softc(dev); return (UART_IPEND(sc)); } int uart_bus_sysdev(device_t dev) { struct uart_softc *sc; sc = device_get_softc(dev); return ((sc->sc_sysdev != NULL) ? 1 : 0); } int uart_bus_probe(device_t dev, int regshft, int regiowidth, int rclk, int rid, int chan, int quirks) { struct uart_softc *sc; struct uart_devinfo *sysdev; int error; sc = device_get_softc(dev); /* * All uart_class references are weak. Check that the needed * class has been compiled-in. Fail if not. */ if (sc->sc_class == NULL) return (ENXIO); /* * Initialize the instance. Note that the instance (=softc) does * not necessarily match the hardware specific softc. We can't do * anything about it now, because we may not attach to the device. * Hardware drivers cannot use any of the class specific fields * while probing. */ kobj_init((kobj_t)sc, (kobj_class_t)sc->sc_class); sc->sc_dev = dev; if (device_get_desc(dev) == NULL) device_set_desc(dev, uart_getname(sc->sc_class)); /* * Allocate the register resource. We assume that all UARTs have * a single register window in either I/O port space or memory * mapped I/O space. Any UART that needs multiple windows will * consequently not be supported by this driver as-is. We try I/O * port space first because that's the common case. */ sc->sc_rrid = rid; sc->sc_rtype = SYS_RES_IOPORT; sc->sc_rres = bus_alloc_resource_any(dev, sc->sc_rtype, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) { sc->sc_rrid = rid; sc->sc_rtype = SYS_RES_MEMORY; sc->sc_rres = bus_alloc_resource_any(dev, sc->sc_rtype, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); } /* * Fill in the bus access structure and compare this device with * a possible console device and/or a debug port. We set the flags * in the softc so that the hardware dependent probe can adjust * accordingly. In general, you don't want to permanently disrupt * console I/O. */ sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); sc->sc_bas.bst = rman_get_bustag(sc->sc_rres); sc->sc_bas.chan = chan; sc->sc_bas.regshft = regshft; sc->sc_bas.regiowidth = regiowidth; sc->sc_bas.rclk = (rclk == 0) ? 
sc->sc_class->uc_rclk : rclk; sc->sc_bas.busy_detect = !!(quirks & UART_F_BUSY_DETECT); SLIST_FOREACH(sysdev, &uart_sysdevs, next) { if (chan == sysdev->bas.chan && uart_cpu_eqres(&sc->sc_bas, &sysdev->bas)) { /* XXX check if ops matches class. */ sc->sc_sysdev = sysdev; sysdev->bas.rclk = sc->sc_bas.rclk; } } error = UART_PROBE(sc); bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); return ((error) ? error : BUS_PROBE_DEFAULT); } int uart_bus_attach(device_t dev) { struct uart_softc *sc, *sc0; const char *sep; int error, filt; /* * The sc_class field defines the type of UART we're going to work * with and thus the size of the softc. Replace the generic softc * with one that matches the UART now that we're certain we handle * the device. */ sc0 = device_get_softc(dev); if (sc0->sc_class->size > device_get_driver(dev)->size) { sc = malloc(sc0->sc_class->size, M_UART, M_WAITOK|M_ZERO); bcopy(sc0, sc, sizeof(*sc)); device_set_softc(dev, sc); } else sc = sc0; /* * Now that we know the softc for this device, connect the back * pointer from the sysdev for this device, if any */ if (sc->sc_sysdev != NULL) sc->sc_sysdev->sc = sc; /* * Protect ourselves against interrupts while we're not completely * finished attaching and initializing. We don't expect interrupts * until after UART_ATTACH(), though. */ sc->sc_leaving = 1; mtx_init(&sc->sc_hwmtx_s, "uart_hwmtx", NULL, MTX_SPIN); if (sc->sc_hwmtx == NULL) sc->sc_hwmtx = &sc->sc_hwmtx_s; /* * Re-allocate. We expect that the softc contains the information * collected by uart_bus_probe() intact. */ sc->sc_rres = bus_alloc_resource_any(dev, sc->sc_rtype, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) { mtx_destroy(&sc->sc_hwmtx_s); return (ENXIO); } sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); sc->sc_bas.bst = rman_get_bustag(sc->sc_rres); /* * Ensure there is room for at least three full FIFOs of data in the * receive buffer (handles the case of low-level drivers with huge * FIFOs), and also ensure that there is no less than the historical * size of 384 bytes (handles the typical small-FIFO case). */ sc->sc_rxbufsz = MAX(384, sc->sc_rxfifosz * 3); sc->sc_rxbuf = malloc(sc->sc_rxbufsz * sizeof(*sc->sc_rxbuf), M_UART, M_WAITOK); sc->sc_txbuf = malloc(sc->sc_txfifosz * sizeof(*sc->sc_txbuf), M_UART, M_WAITOK); error = UART_ATTACH(sc); if (error) goto fail; if (sc->sc_hwiflow || sc->sc_hwoflow) { sep = ""; device_print_prettyname(dev); if (sc->sc_hwiflow) { printf("%sRTS iflow", sep); sep = ", "; } if (sc->sc_hwoflow) { printf("%sCTS oflow", sep); sep = ", "; } printf("\n"); } if (sc->sc_sysdev != NULL) { if (sc->sc_sysdev->baudrate == 0) { if (UART_IOCTL(sc, UART_IOCTL_BAUD, (intptr_t)&sc->sc_sysdev->baudrate) != 0) sc->sc_sysdev->baudrate = -1; } switch (sc->sc_sysdev->type) { case UART_DEV_CONSOLE: device_printf(dev, "console"); break; case UART_DEV_DBGPORT: device_printf(dev, "debug port"); break; case UART_DEV_KEYBOARD: device_printf(dev, "keyboard"); break; default: device_printf(dev, "unknown system device"); break; } printf(" (%d,%c,%d,%d)\n", sc->sc_sysdev->baudrate, "noems"[sc->sc_sysdev->parity], sc->sc_sysdev->databits, sc->sc_sysdev->stopbits); } sc->sc_leaving = 0; sc->sc_testintr = 1; filt = uart_intr(sc); sc->sc_testintr = 0; /* * Don't use interrupts if we couldn't clear any pending interrupt * conditions. We may have broken H/W and polling is probably the * safest thing to do. 
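/*
 * A small sketch of the "noems"[parity] trick in the console announcement
 * above: the parity code (none/odd/even/mark/space) is used directly as an
 * index into the string "noems" to pick a one-letter abbreviation.  The
 * enum values below are assumed to be 0..4 in that order, mirroring the
 * uart parity constants; the sample settings are invented.
 */
#include <stdio.h>

enum parity { PAR_NONE, PAR_ODD, PAR_EVEN, PAR_MARK, PAR_SPACE };

int
main(void)
{
	int baudrate = 115200, databits = 8, stopbits = 1;
	enum parity par = PAR_NONE;

	/* Prints "(115200,n,8,1)", the same shape uart_bus_attach() logs. */
	printf("(%d,%c,%d,%d)\n", baudrate, "noems"[par], databits, stopbits);
	return (0);
}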
*/ if (filt != FILTER_SCHEDULE_THREAD && !uart_force_poll) { sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irid, RF_ACTIVE | RF_SHAREABLE); } if (sc->sc_ires != NULL) { error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY, uart_intr, NULL, sc, &sc->sc_icookie); sc->sc_fastintr = (error == 0) ? 1 : 0; if (!sc->sc_fastintr) error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY | INTR_MPSAFE, NULL, (driver_intr_t *)uart_intr, sc, &sc->sc_icookie); if (error) { device_printf(dev, "could not activate interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); sc->sc_ires = NULL; } } if (sc->sc_ires == NULL) { /* No interrupt resource. Force polled mode. */ sc->sc_polled = 1; callout_init(&sc->sc_timer, 1); callout_reset(&sc->sc_timer, hz / uart_poll_freq, - (timeout_t *)uart_intr, sc); + (callout_func_t *)uart_intr, sc); } if (bootverbose && (sc->sc_fastintr || sc->sc_polled)) { sep = ""; device_print_prettyname(dev); if (sc->sc_fastintr) { printf("%sfast interrupt", sep); sep = ", "; } if (sc->sc_polled) { printf("%spolled mode (%dHz)", sep, uart_poll_freq); sep = ", "; } printf("\n"); } if (sc->sc_sysdev != NULL && sc->sc_sysdev->attach != NULL) { if ((error = sc->sc_sysdev->attach(sc)) != 0) goto fail; } else { if ((error = uart_tty_attach(sc)) != 0) goto fail; uart_pps_init(sc); } if (sc->sc_sysdev != NULL) sc->sc_sysdev->hwmtx = sc->sc_hwmtx; return (0); fail: free(sc->sc_txbuf, M_UART); free(sc->sc_rxbuf, M_UART); if (sc->sc_ires != NULL) { bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); } bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); mtx_destroy(&sc->sc_hwmtx_s); return (error); } int uart_bus_detach(device_t dev) { struct uart_softc *sc; sc = device_get_softc(dev); sc->sc_leaving = 1; if (sc->sc_sysdev != NULL) sc->sc_sysdev->hwmtx = NULL; UART_DETACH(sc); if (sc->sc_sysdev != NULL && sc->sc_sysdev->detach != NULL) (*sc->sc_sysdev->detach)(sc); else uart_tty_detach(sc); free(sc->sc_txbuf, M_UART); free(sc->sc_rxbuf, M_UART); if (sc->sc_ires != NULL) { bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); } bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); mtx_destroy(&sc->sc_hwmtx_s); if (sc->sc_class->size > device_get_driver(dev)->size) { device_set_softc(dev, NULL); free(sc, M_UART); } return (0); } int uart_bus_resume(device_t dev) { struct uart_softc *sc; sc = device_get_softc(dev); return (UART_ATTACH(sc)); } void uart_grab(struct uart_devinfo *di) { if (di->sc) UART_GRAB(di->sc); } void uart_ungrab(struct uart_devinfo *di) { if (di->sc) UART_UNGRAB(di->sc); } Index: head/sys/netgraph/netflow/ng_netflow.h =================================================================== --- head/sys/netgraph/netflow/ng_netflow.h (revision 355600) +++ head/sys/netgraph/netflow/ng_netflow.h (revision 355601) @@ -1,544 +1,544 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010-2011 Alexander V. Chernikov * Copyright (c) 2004-2005 Gleb Smirnoff * Copyright (c) 2001-2003 Roman V. Palagin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $SourceForge: ng_netflow.h,v 1.26 2004/09/04 15:44:55 glebius Exp $ * $FreeBSD$ */ #ifndef _NG_NETFLOW_H_ #define _NG_NETFLOW_H_ #define NG_NETFLOW_NODE_TYPE "netflow" #define NGM_NETFLOW_COOKIE 1365756954 #define NGM_NETFLOW_V9_COOKIE 1349865386 #define NG_NETFLOW_MAXIFACES USHRT_MAX /* Hook names */ #define NG_NETFLOW_HOOK_DATA "iface" #define NG_NETFLOW_HOOK_OUT "out" #define NG_NETFLOW_HOOK_EXPORT "export" #define NG_NETFLOW_HOOK_EXPORT9 "export9" /* This define effectively disable (v5) netflow export hook! */ /* #define COUNTERS_64 */ /* Netgraph commands understood by netflow node */ enum { NGM_NETFLOW_INFO = 1|NGM_READONLY|NGM_HASREPLY, /* get node info */ NGM_NETFLOW_IFINFO = 2|NGM_READONLY|NGM_HASREPLY, /* get iface info */ NGM_NETFLOW_SHOW = 3|NGM_READONLY|NGM_HASREPLY, /* show ip cache flow */ NGM_NETFLOW_SETDLT = 4, /* set data-link type */ NGM_NETFLOW_SETIFINDEX = 5, /* set interface index */ NGM_NETFLOW_SETTIMEOUTS = 6, /* set active/inactive flow timeouts */ NGM_NETFLOW_SETCONFIG = 7, /* set flow generation options */ NGM_NETFLOW_SETTEMPLATE = 8, /* set v9 flow template periodic */ NGM_NETFLOW_SETMTU = 9, /* set outgoing interface MTU */ NGM_NETFLOW_V9INFO = 10|NGM_READONLY|NGM_HASREPLY, /* get v9 info */ }; /* This structure is returned by the NGM_NETFLOW_INFO message */ struct ng_netflow_info { uint64_t nfinfo_bytes; /* accounted IPv4 bytes */ uint64_t nfinfo_packets; /* accounted IPv4 packets */ uint64_t nfinfo_bytes6; /* accounted IPv6 bytes */ uint64_t nfinfo_packets6; /* accounted IPv6 packets */ uint64_t nfinfo_sbytes; /* skipped IPv4 bytes */ uint64_t nfinfo_spackets; /* skipped IPv4 packets */ uint64_t nfinfo_sbytes6; /* skipped IPv6 bytes */ uint64_t nfinfo_spackets6; /* skipped IPv6 packets */ uint64_t nfinfo_act_exp; /* active expiries */ uint64_t nfinfo_inact_exp; /* inactive expiries */ uint32_t nfinfo_used; /* used cache records */ uint32_t nfinfo_used6; /* used IPv6 cache records */ uint32_t nfinfo_alloc_failed; /* failed allocations */ uint32_t nfinfo_export_failed; /* failed exports */ uint32_t nfinfo_export9_failed; /* failed exports */ uint32_t nfinfo_realloc_mbuf; /* reallocated mbufs */ uint32_t nfinfo_alloc_fibs; /* fibs allocated */ uint32_t nfinfo_inact_t; /* flow inactive timeout */ uint32_t nfinfo_act_t; /* flow active timeout */ }; /* Parse the info structure */ #define NG_NETFLOW_INFO_TYPE { \ { "IPv4 bytes", &ng_parse_uint64_type },\ { "IPv4 packets", &ng_parse_uint64_type },\ { "IPv6 bytes", &ng_parse_uint64_type },\ { "IPv6 packets", &ng_parse_uint64_type },\ { "IPv4 skipped bytes", 
&ng_parse_uint64_type },\ { "IPv4 skipped packets", &ng_parse_uint64_type },\ { "IPv6 skipped bytes", &ng_parse_uint64_type },\ { "IPv6 skipped packets", &ng_parse_uint64_type },\ { "Active expiries", &ng_parse_uint64_type },\ { "Inactive expiries", &ng_parse_uint64_type },\ { "IPv4 records used", &ng_parse_uint32_type },\ { "IPv6 records used", &ng_parse_uint32_type },\ { "Failed allocations", &ng_parse_uint32_type },\ { "V5 failed exports", &ng_parse_uint32_type },\ { "V9 failed exports", &ng_parse_uint32_type },\ { "mbuf reallocations", &ng_parse_uint32_type },\ { "fibs allocated", &ng_parse_uint32_type },\ { "Inactive timeout", &ng_parse_uint32_type },\ { "Active timeout", &ng_parse_uint32_type },\ { NULL } \ } /* This structure is returned by the NGM_NETFLOW_IFINFO message */ struct ng_netflow_ifinfo { uint32_t ifinfo_packets; /* number of packets for this iface */ uint8_t ifinfo_dlt; /* Data Link Type, DLT_XXX */ #define MAXDLTNAMELEN 20 uint16_t ifinfo_index; /* connected iface index */ uint32_t conf; }; /* This structure is passed to NGM_NETFLOW_SETDLT message */ struct ng_netflow_setdlt { uint16_t iface; /* which iface dlt change */ uint8_t dlt; /* DLT_XXX from bpf.h */ }; /* This structure is passed to NGM_NETFLOW_SETIFINDEX */ struct ng_netflow_setifindex { uint16_t iface; /* which iface index change */ uint16_t index; /* new index */ }; /* This structure is passed to NGM_NETFLOW_SETTIMEOUTS */ struct ng_netflow_settimeouts { uint32_t inactive_timeout; /* flow inactive timeout */ uint32_t active_timeout; /* flow active timeout */ }; #define NG_NETFLOW_CONF_INGRESS 0x01 /* Account on ingress */ #define NG_NETFLOW_CONF_EGRESS 0x02 /* Account on egress */ #define NG_NETFLOW_CONF_ONCE 0x04 /* Add tag to account only once */ #define NG_NETFLOW_CONF_THISONCE 0x08 /* Account once in current node */ #define NG_NETFLOW_CONF_NOSRCLOOKUP 0x10 /* No radix lookup on src */ #define NG_NETFLOW_CONF_NODSTLOOKUP 0x20 /* No radix lookup on dst */ #define NG_NETFLOW_IS_FRAG 0x01 #define NG_NETFLOW_FLOW_FLAGS (NG_NETFLOW_CONF_NOSRCLOOKUP|\ NG_NETFLOW_CONF_NODSTLOOKUP) /* This structure is passed to NGM_NETFLOW_SETCONFIG */ struct ng_netflow_setconfig { uint16_t iface; /* which iface config change */ uint32_t conf; /* new config */ }; /* This structure is passed to NGM_NETFLOW_SETTEMPLATE */ struct ng_netflow_settemplate { uint16_t time; /* max time between announce */ uint16_t packets; /* max packets between announce */ }; /* This structure is passed to NGM_NETFLOW_SETMTU */ struct ng_netflow_setmtu { uint16_t mtu; /* MTU for packet */ }; /* This structure is used in NGM_NETFLOW_SHOW request/response */ struct ngnf_show_header { u_char version; /* IPv4 or IPv6 */ uint32_t hash_id; /* current hash index */ uint32_t list_id; /* current record number in hash */ uint32_t nentries; /* number of records in response */ }; /* This structure is used in NGM_NETFLOW_V9INFO message */ struct ng_netflow_v9info { uint16_t templ_packets; /* v9 template packets */ uint16_t templ_time; /* v9 template time */ uint16_t mtu; /* v9 MTU */ }; /* XXXGL * Somewhere flow_rec6 is casted to flow_rec, and flow6_entry_data is * casted to flow_entry_data. After casting, fle->r.fib is accessed. * So beginning of these structs up to fib should be kept common. 
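/*
 * A minimal compile-time backing for the XXXGL note above: the cast from
 * flow6_rec to flow_rec is only safe while both structures start with the
 * same prefix up to and including "fib".  The xflow_rec and xflow6_rec
 * types below are simplified stand-ins for the real structures that
 * follow; _Static_assert and offsetof are standard C11.
 */
#include <sys/types.h>
#include <stddef.h>
#include <stdint.h>
#include <netinet/in.h>

struct xflow_rec {
	uint16_t	flow_type;
	uint16_t	fib;
	struct in_addr	r_src;
	struct in_addr	r_dst;
};

struct xflow6_rec {
	uint16_t	flow_type;
	uint16_t	fib;
	union {
		struct in_addr	r_src;
		struct in6_addr	r_src6;
	} src;
	union {
		struct in_addr	r_dst;
		struct in6_addr	r_dst6;
	} dst;
};

/* If someone reorders a prefix field, the build breaks here instead of
 * the flow cache being corrupted at run time. */
_Static_assert(offsetof(struct xflow_rec, flow_type) ==
    offsetof(struct xflow6_rec, flow_type), "flow_type offset differs");
_Static_assert(offsetof(struct xflow_rec, fib) ==
    offsetof(struct xflow6_rec, fib), "fib offset differs");

int
main(void)
{
	return (0);
}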
*/ /* This is unique data, which identifies flow */ struct flow_rec { uint16_t flow_type; uint16_t fib; struct in_addr r_src; struct in_addr r_dst; union { struct { uint16_t s_port; /* source TCP/UDP port */ uint16_t d_port; /* destination TCP/UDP port */ } dir; uint32_t both; } ports; union { struct { u_char prot; /* IP protocol */ u_char tos; /* IP TOS */ uint16_t i_ifx; /* input interface index */ } i; uint32_t all; } misc; }; /* This is unique data, which identifies flow */ struct flow6_rec { uint16_t flow_type; uint16_t fib; union { struct in_addr r_src; struct in6_addr r_src6; } src; union { struct in_addr r_dst; struct in6_addr r_dst6; } dst; union { struct { uint16_t s_port; /* source TCP/UDP port */ uint16_t d_port; /* destination TCP/UDP port */ } dir; uint32_t both; } ports; union { struct { u_char prot; /* IP protocol */ u_char tos; /* IP TOS */ uint16_t i_ifx; /* input interface index */ } i; uint32_t all; } misc; }; #define r_ip_p misc.i.prot #define r_tos misc.i.tos #define r_i_ifx misc.i.i_ifx #define r_misc misc.all #define r_ports ports.both #define r_sport ports.dir.s_port #define r_dport ports.dir.d_port /* A flow entry which accumulates statistics */ struct flow_entry_data { uint16_t version; /* Protocol version */ struct flow_rec r; struct in_addr next_hop; uint16_t fle_o_ifx; /* output interface index */ #define fle_i_ifx r.misc.i.i_ifx uint8_t dst_mask; /* destination route mask bits */ uint8_t src_mask; /* source route mask bits */ u_long packets; u_long bytes; long first; /* uptime on first packet */ long last; /* uptime on last packet */ u_char tcp_flags; /* cumulative OR */ }; struct flow6_entry_data { uint16_t version; /* Protocol version */ struct flow6_rec r; union { struct in_addr next_hop; struct in6_addr next_hop6; } n; uint16_t fle_o_ifx; /* output interface index */ #define fle_i_ifx r.misc.i.i_ifx uint8_t dst_mask; /* destination route mask bits */ uint8_t src_mask; /* source route mask bits */ u_long packets; u_long bytes; long first; /* uptime on first packet */ long last; /* uptime on last packet */ u_char tcp_flags; /* cumulative OR */ }; /* * How many flow records we will transfer at once * without overflowing socket receive buffer */ #define NREC_AT_ONCE 1000 #define NREC6_AT_ONCE (NREC_AT_ONCE * sizeof(struct flow_entry_data) / \ sizeof(struct flow6_entry_data)) #define NGRESP_SIZE (sizeof(struct ngnf_show_header) + (NREC_AT_ONCE * \ sizeof(struct flow_entry_data))) #define SORCVBUF_SIZE (NGRESP_SIZE + 2 * sizeof(struct ng_mesg)) /* Everything below is for kernel */ #ifdef _KERNEL struct flow_entry { TAILQ_ENTRY(flow_entry) fle_hash; /* entries in hash slot */ struct flow_entry_data f; }; struct flow6_entry { TAILQ_ENTRY(flow_entry) fle_hash; /* entries in hash slot */ struct flow6_entry_data f; }; /* Parsing declarations */ /* Parse the ifinfo structure */ #define NG_NETFLOW_IFINFO_TYPE { \ { "packets", &ng_parse_uint32_type },\ { "data link type", &ng_parse_uint8_type }, \ { "index", &ng_parse_uint16_type },\ { "conf", &ng_parse_uint32_type },\ { NULL } \ } /* Parse the setdlt structure */ #define NG_NETFLOW_SETDLT_TYPE { \ { "iface", &ng_parse_uint16_type }, \ { "dlt", &ng_parse_uint8_type }, \ { NULL } \ } /* Parse the setifindex structure */ #define NG_NETFLOW_SETIFINDEX_TYPE { \ { "iface", &ng_parse_uint16_type }, \ { "index", &ng_parse_uint16_type }, \ { NULL } \ } /* Parse the settimeouts structure */ #define NG_NETFLOW_SETTIMEOUTS_TYPE { \ { "inactive", &ng_parse_uint32_type }, \ { "active", &ng_parse_uint32_type }, \ { NULL } \ } /* 
Parse the setifindex structure */ #define NG_NETFLOW_SETCONFIG_TYPE { \ { "iface", &ng_parse_uint16_type }, \ { "conf", &ng_parse_uint32_type }, \ { NULL } \ } /* Parse the settemplate structure */ #define NG_NETFLOW_SETTEMPLATE_TYPE { \ { "time", &ng_parse_uint16_type }, \ { "packets", &ng_parse_uint16_type }, \ { NULL } \ } /* Parse the setmtu structure */ #define NG_NETFLOW_SETMTU_TYPE { \ { "mtu", &ng_parse_uint16_type }, \ { NULL } \ } /* Parse the v9info structure */ #define NG_NETFLOW_V9INFO_TYPE { \ { "v9 template packets", &ng_parse_uint16_type },\ { "v9 template time", &ng_parse_uint16_type },\ { "v9 MTU", &ng_parse_uint16_type },\ { NULL } \ } /* Private hook data */ struct ng_netflow_iface { hook_p hook; /* NULL when disconnected */ hook_p out; /* NULL when no bypass hook */ struct ng_netflow_ifinfo info; }; typedef struct ng_netflow_iface *iface_p; typedef struct ng_netflow_ifinfo *ifinfo_p; struct netflow_export_item { item_p item; item_p item9; struct netflow_v9_packet_opt *item9_opt; }; /* Structure contatining fib-specific data */ struct fib_export { uint32_t fib; /* kernel fib id */ /* Various data used for export */ struct netflow_export_item exp; struct mtx export_mtx; /* exp.item mutex */ struct mtx export9_mtx; /* exp.item9 mutex */ uint32_t flow_seq; /* current V5 flow sequence */ uint32_t flow9_seq; /* current V9 flow sequence */ uint32_t domain_id; /* Observartion domain id */ /* Netflow V9 counters */ uint32_t templ_last_ts; /* unixtime of last template announce */ uint32_t templ_last_pkt; /* packet count on last announce */ uint32_t sent_packets; /* packets sent by exporter; */ /* Current packet specific options */ struct netflow_v9_packet_opt *export9_opt; }; typedef struct fib_export *fib_export_p; /* Structure describing our flow engine */ struct netflow { node_p node; /* link to the node itself */ hook_p export; /* export data goes there */ hook_p export9; /* Netflow V9 export data goes there */ struct callout exp_callout; /* expiry periodic job */ /* * Flow entries are allocated in uma(9) zone zone. They are * indexed by hash hash. Each hash element consist of tailqueue * head and mutex to protect this element. */ #define CACHESIZE (65536*16) #define CACHELOWAT (CACHESIZE * 3/4) #define CACHEHIGHWAT (CACHESIZE * 9/10) uma_zone_t zone; struct flow_hash_entry *hash; /* * NetFlow data export * * export_item is a data item, it has an mbuf with cluster * attached to it. A thread detaches export_item from priv * and works with it. If the export is full it is sent, and * a new one is allocated. Before exiting thread re-attaches * its current item back to priv. If there is item already, * current incomplete datagram is sent. * export_mtx is used for attaching/detaching. */ /* IPv6 support */ #ifdef INET6 uma_zone_t zone6; struct flow_hash_entry *hash6; #endif /* Statistics. 
*/ counter_u64_t nfinfo_bytes; /* accounted IPv4 bytes */ counter_u64_t nfinfo_packets; /* accounted IPv4 packets */ counter_u64_t nfinfo_bytes6; /* accounted IPv6 bytes */ counter_u64_t nfinfo_packets6; /* accounted IPv6 packets */ counter_u64_t nfinfo_sbytes; /* skipped IPv4 bytes */ counter_u64_t nfinfo_spackets; /* skipped IPv4 packets */ counter_u64_t nfinfo_sbytes6; /* skipped IPv6 bytes */ counter_u64_t nfinfo_spackets6; /* skipped IPv6 packets */ counter_u64_t nfinfo_act_exp; /* active expiries */ counter_u64_t nfinfo_inact_exp; /* inactive expiries */ uint32_t nfinfo_alloc_failed; /* failed allocations */ uint32_t nfinfo_export_failed; /* failed exports */ uint32_t nfinfo_export9_failed; /* failed exports */ uint32_t nfinfo_realloc_mbuf; /* reallocated mbufs */ uint32_t nfinfo_alloc_fibs; /* fibs allocated */ uint32_t nfinfo_inact_t; /* flow inactive timeout */ uint32_t nfinfo_act_t; /* flow active timeout */ /* Multiple FIB support */ fib_export_p *fib_data; /* vector to per-fib data */ uint16_t maxfibs; /* number of allocated fibs */ /* Netflow v9 configuration options */ /* * RFC 3954 clause 7.3 * "Both options MUST be configurable by the user on the Exporter." */ uint16_t templ_time; /* time between sending templates */ uint16_t templ_packets; /* packets between sending templates */ #define NETFLOW_V9_MAX_FLOWSETS 2 u_char flowsets_count; /* current flowsets used */ /* Count of records in each flowset */ u_char flowset_records[NETFLOW_V9_MAX_FLOWSETS - 1]; uint16_t mtu; /* export interface MTU */ /* Pointers to pre-compiled flowsets */ struct netflow_v9_flowset_header *v9_flowsets[NETFLOW_V9_MAX_FLOWSETS - 1]; struct ng_netflow_iface ifaces[NG_NETFLOW_MAXIFACES]; }; typedef struct netflow *priv_p; /* Header of a small list in hash cell */ struct flow_hash_entry { struct mtx mtx; TAILQ_HEAD(fhead, flow_entry) head; }; #define ERROUT(x) { error = (x); goto done; } #define MTAG_NETFLOW 1221656444 #define MTAG_NETFLOW_CALLED 0 #define m_pktlen(m) ((m)->m_pkthdr.len) #define IP6VERSION 6 #define priv_to_fib(priv, fib) (priv)->fib_data[(fib)] /* * Cisco uses milliseconds for uptime. Bad idea, since it overflows * every 48+ days. But we will do same to keep compatibility. This macro * does overflowable multiplication to 1000. 
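/*
 * A quick userland check of the MILLIUPTIME() macro defined just below:
 * the shift ladder 512 + 256 + 128 + 64 + 32 + 8 sums to exactly 1000, so
 * it is a multiply by 1000 done with shifts and adds.  The local copy and
 * the sample value are only for illustration.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define XMILLIUPTIME(t)	(((t) << 9) + ((t) << 8) + ((t) << 7) + \
			 ((t) << 6) + ((t) << 5) + ((t) << 3))

int
main(void)
{
	uint32_t t = 12345;	/* sample uptime value */

	assert(XMILLIUPTIME(t) == t * 1000);
	/* The "overflowable" caveat: a 32-bit millisecond counter wraps
	 * after about 49.7 days, the 48+ days mentioned above. */
	printf("%u -> %u\n", t, XMILLIUPTIME(t));
	return (0);
}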
*/ #define MILLIUPTIME(t) (((t) << 9) + /* 512 */ \ ((t) << 8) + /* 256 */ \ ((t) << 7) + /* 128 */ \ ((t) << 6) + /* 64 */ \ ((t) << 5) + /* 32 */ \ ((t) << 3)) /* 8 */ /* Prototypes for netflow.c */ void ng_netflow_cache_init(priv_p); void ng_netflow_cache_flush(priv_p); int ng_netflow_fib_init(priv_p priv, int fib); void ng_netflow_copyinfo(priv_p, struct ng_netflow_info *); void ng_netflow_copyv9info(priv_p, struct ng_netflow_v9info *); -timeout_t ng_netflow_expire; +callout_func_t ng_netflow_expire; int ng_netflow_flow_add(priv_p, fib_export_p, struct ip *, caddr_t, uint8_t, uint8_t, unsigned int); int ng_netflow_flow6_add(priv_p, fib_export_p, struct ip6_hdr *, caddr_t, uint8_t, uint8_t, unsigned int); int ng_netflow_flow_show(priv_p, struct ngnf_show_header *req, struct ngnf_show_header *resp); void ng_netflow_v9_cache_init(priv_p); void ng_netflow_v9_cache_flush(priv_p); item_p get_export9_dgram(priv_p, fib_export_p, struct netflow_v9_packet_opt **); void return_export9_dgram(priv_p, fib_export_p, item_p, struct netflow_v9_packet_opt *, int); int export9_add(item_p, struct netflow_v9_packet_opt *, struct flow_entry *); int export9_send(priv_p, fib_export_p, item_p, struct netflow_v9_packet_opt *, int); #endif /* _KERNEL */ #endif /* _NG_NETFLOW_H_ */ Index: head/sys/netinet/tcp_timer.c =================================================================== --- head/sys/netinet/tcp_timer.c (revision 355600) +++ head/sys/netinet/tcp_timer.c (revision 355601) @@ -1,1066 +1,1066 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)tcp_timer.c 8.2 (Berkeley) 5/24/95 */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_tcpdebug.h" #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #endif #include #include #include #include #include #include #include #include #ifdef INET6 #include #endif #include #ifdef TCPDEBUG #include #endif int tcp_persmin; SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmin, CTLTYPE_INT|CTLFLAG_RW, &tcp_persmin, 0, sysctl_msec_to_ticks, "I", "minimum persistence interval"); int tcp_persmax; SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmax, CTLTYPE_INT|CTLFLAG_RW, &tcp_persmax, 0, sysctl_msec_to_ticks, "I", "maximum persistence interval"); int tcp_keepinit; SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT|CTLFLAG_RW, &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "time to establish connection"); int tcp_keepidle; SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT|CTLFLAG_RW, &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "time before keepalive probes begin"); int tcp_keepintvl; SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT|CTLFLAG_RW, &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "time between keepalive probes"); int tcp_delacktime; SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime, CTLTYPE_INT|CTLFLAG_RW, &tcp_delacktime, 0, sysctl_msec_to_ticks, "I", "Time before a delayed ACK is sent"); int tcp_msl; SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT|CTLFLAG_RW, &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime"); int tcp_rexmit_initial; SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_initial, CTLTYPE_INT|CTLFLAG_RW, &tcp_rexmit_initial, 0, sysctl_msec_to_ticks, "I", "Initial Retransmission Timeout"); int tcp_rexmit_min; SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_min, CTLTYPE_INT|CTLFLAG_RW, &tcp_rexmit_min, 0, sysctl_msec_to_ticks, "I", "Minimum Retransmission Timeout"); int tcp_rexmit_slop; SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_slop, CTLTYPE_INT|CTLFLAG_RW, &tcp_rexmit_slop, 0, sysctl_msec_to_ticks, "I", "Retransmission Timer Slop"); int tcp_always_keepalive = 1; SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW, &tcp_always_keepalive , 0, "Assume SO_KEEPALIVE on all TCP connections"); int tcp_fast_finwait2_recycle = 0; SYSCTL_INT(_net_inet_tcp, OID_AUTO, fast_finwait2_recycle, CTLFLAG_RW, &tcp_fast_finwait2_recycle, 0, "Recycle closed FIN_WAIT_2 connections faster"); int tcp_finwait2_timeout; SYSCTL_PROC(_net_inet_tcp, OID_AUTO, finwait2_timeout, CTLTYPE_INT|CTLFLAG_RW, &tcp_finwait2_timeout, 0, sysctl_msec_to_ticks, "I", "FIN-WAIT2 timeout"); int tcp_keepcnt = TCPTV_KEEPCNT; SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt, CTLFLAG_RW, &tcp_keepcnt, 0, "Number of keepalive probes to send"); /* max idle probes */ int tcp_maxpersistidle; int tcp_rexmit_drop_options = 0; SYSCTL_INT(_net_inet_tcp, OID_AUTO, rexmit_drop_options, CTLFLAG_RW, &tcp_rexmit_drop_options, 0, "Drop TCP options from 3rd and later retransmitted SYN"); VNET_DEFINE(int, tcp_pmtud_blackhole_detect); SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection, CTLFLAG_RW|CTLFLAG_VNET, &VNET_NAME(tcp_pmtud_blackhole_detect), 0, "Path MTU Discovery Black Hole Detection Enabled"); #ifdef INET VNET_DEFINE(int, tcp_pmtud_blackhole_mss) = 1200; SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss, CTLFLAG_RW|CTLFLAG_VNET, 
&VNET_NAME(tcp_pmtud_blackhole_mss), 0, "Path MTU Discovery Black Hole Detection lowered MSS"); #endif #ifdef INET6 VNET_DEFINE(int, tcp_v6pmtud_blackhole_mss) = 1220; SYSCTL_INT(_net_inet_tcp, OID_AUTO, v6pmtud_blackhole_mss, CTLFLAG_RW|CTLFLAG_VNET, &VNET_NAME(tcp_v6pmtud_blackhole_mss), 0, "Path MTU Discovery IPv6 Black Hole Detection lowered MSS"); #endif #ifdef RSS static int per_cpu_timers = 1; #else static int per_cpu_timers = 0; #endif SYSCTL_INT(_net_inet_tcp, OID_AUTO, per_cpu_timers, CTLFLAG_RW, &per_cpu_timers , 0, "run tcp timers on all cpus"); /* * Map the given inp to a CPU id. * * This queries RSS if it's compiled in, else it defaults to the current * CPU ID. */ inline int inp_to_cpuid(struct inpcb *inp) { u_int cpuid; #ifdef RSS if (per_cpu_timers) { cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype); if (cpuid == NETISR_CPUID_NONE) return (curcpu); /* XXX */ else return (cpuid); } #else /* Legacy, pre-RSS behaviour */ if (per_cpu_timers) { /* * We don't have a flowid -> cpuid mapping, so cheat and * just map unknown cpuids to curcpu. Not the best, but * apparently better than defaulting to swi 0. */ cpuid = inp->inp_flowid % (mp_maxid + 1); if (! CPU_ABSENT(cpuid)) return (cpuid); return (curcpu); } #endif /* Default for RSS and non-RSS - cpuid 0 */ else { return (0); } } /* * Tcp protocol timeout routine called every 500 ms. * Updates timestamps used for TCP * causes finite state machine actions if timers expire. */ void tcp_slowtimo(void) { VNET_ITERATOR_DECL(vnet_iter); VNET_LIST_RLOCK_NOSLEEP(); VNET_FOREACH(vnet_iter) { CURVNET_SET(vnet_iter); (void) tcp_tw_2msl_scan(0); CURVNET_RESTORE(); } VNET_LIST_RUNLOCK_NOSLEEP(); } int tcp_backoff[TCP_MAXRXTSHIFT + 1] = { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512 }; int tcp_totbackoff = 2559; /* sum of tcp_backoff[] */ /* * TCP timer processing. */ void tcp_timer_delack(void *xtp) { struct tcpcb *tp = xtp; struct inpcb *inp; CURVNET_SET(tp->t_vnet); inp = tp->t_inpcb; KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); INP_WLOCK(inp); if (callout_pending(&tp->t_timers->tt_delack) || !callout_active(&tp->t_timers->tt_delack)) { INP_WUNLOCK(inp); CURVNET_RESTORE(); return; } callout_deactivate(&tp->t_timers->tt_delack); if ((inp->inp_flags & INP_DROPPED) != 0) { INP_WUNLOCK(inp); CURVNET_RESTORE(); return; } tp->t_flags |= TF_ACKNOW; TCPSTAT_INC(tcps_delack); (void) tp->t_fb->tfb_tcp_output(tp); INP_WUNLOCK(inp); CURVNET_RESTORE(); } void tcp_inpinfo_lock_del(struct inpcb *inp, struct tcpcb *tp) { if (inp && tp != NULL) INP_WUNLOCK(inp); } void tcp_timer_2msl(void *xtp) { struct tcpcb *tp = xtp; struct inpcb *inp; struct epoch_tracker et; CURVNET_SET(tp->t_vnet); #ifdef TCPDEBUG int ostate; ostate = tp->t_state; #endif inp = tp->t_inpcb; KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); INP_WLOCK(inp); tcp_free_sackholes(tp); if (callout_pending(&tp->t_timers->tt_2msl) || !callout_active(&tp->t_timers->tt_2msl)) { INP_WUNLOCK(tp->t_inpcb); CURVNET_RESTORE(); return; } callout_deactivate(&tp->t_timers->tt_2msl); if ((inp->inp_flags & INP_DROPPED) != 0) { INP_WUNLOCK(inp); CURVNET_RESTORE(); return; } KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0, ("%s: tp %p tcpcb can't be stopped here", __func__, tp)); /* * 2 MSL timeout in shutdown went off. If we're closed but * still waiting for peer to close and connection has been idle * too long delete connection control block. Otherwise, check * again in a bit. 
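/*
 * A userland sketch of the legacy (non-RSS) mapping in inp_to_cpuid()
 * above: the flow id is hashed onto the CPU id space with a modulo and we
 * fall back to the current CPU when that id is absent.  NCPUS and
 * cpu_absent() are invented stand-ins for mp_maxid + 1 and CPU_ABSENT().
 */
#include <stdbool.h>
#include <stdio.h>

#define NCPUS	8	/* stand-in for mp_maxid + 1 */

static bool
cpu_absent(unsigned int cpu)
{
	(void)cpu;
	return (false);	/* pretend every CPU id is populated */
}

static unsigned int
flowid_to_cpu(unsigned int flowid, unsigned int curcpu)
{
	unsigned int cpu;

	cpu = flowid % NCPUS;
	if (!cpu_absent(cpu))
		return (cpu);
	return (curcpu);	/* unknown or absent id: use the current CPU */
}

int
main(void)
{
	printf("flow 0xdeadbeef -> cpu %u\n", flowid_to_cpu(0xdeadbeefu, 0));
	return (0);
}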
* * If in TIME_WAIT state just ignore as this timeout is handled in * tcp_tw_2msl_scan(). * * If fastrecycle of FIN_WAIT_2, in FIN_WAIT_2 and receiver has closed, * there's no point in hanging onto FIN_WAIT_2 socket. Just close it. * Ignore fact that there were recent incoming segments. */ if ((inp->inp_flags & INP_TIMEWAIT) != 0) { INP_WUNLOCK(inp); CURVNET_RESTORE(); return; } if (tcp_fast_finwait2_recycle && tp->t_state == TCPS_FIN_WAIT_2 && tp->t_inpcb && tp->t_inpcb->inp_socket && (tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) { TCPSTAT_INC(tcps_finwait2_drops); if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { tcp_inpinfo_lock_del(inp, tp); goto out; } NET_EPOCH_ENTER(et); tp = tcp_close(tp); NET_EPOCH_EXIT(et); tcp_inpinfo_lock_del(inp, tp); goto out; } else { if (ticks - tp->t_rcvtime <= TP_MAXIDLE(tp)) { callout_reset(&tp->t_timers->tt_2msl, TP_KEEPINTVL(tp), tcp_timer_2msl, tp); } else { if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { tcp_inpinfo_lock_del(inp, tp); goto out; } NET_EPOCH_ENTER(et); tp = tcp_close(tp); NET_EPOCH_EXIT(et); tcp_inpinfo_lock_del(inp, tp); goto out; } } #ifdef TCPDEBUG if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0, PRU_SLOWTIMO); #endif TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO); if (tp != NULL) INP_WUNLOCK(inp); out: CURVNET_RESTORE(); } void tcp_timer_keep(void *xtp) { struct tcpcb *tp = xtp; struct tcptemp *t_template; struct inpcb *inp; struct epoch_tracker et; CURVNET_SET(tp->t_vnet); #ifdef TCPDEBUG int ostate; ostate = tp->t_state; #endif inp = tp->t_inpcb; KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); INP_WLOCK(inp); if (callout_pending(&tp->t_timers->tt_keep) || !callout_active(&tp->t_timers->tt_keep)) { INP_WUNLOCK(inp); CURVNET_RESTORE(); return; } callout_deactivate(&tp->t_timers->tt_keep); if ((inp->inp_flags & INP_DROPPED) != 0) { INP_WUNLOCK(inp); CURVNET_RESTORE(); return; } KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0, ("%s: tp %p tcpcb can't be stopped here", __func__, tp)); /* * Because we don't regularly reset the keepalive callout in * the ESTABLISHED state, it may be that we don't actually need * to send a keepalive yet. If that occurs, schedule another * call for the next time the keepalive timer might expire. */ if (TCPS_HAVEESTABLISHED(tp->t_state)) { u_int idletime; idletime = ticks - tp->t_rcvtime; if (idletime < TP_KEEPIDLE(tp)) { callout_reset(&tp->t_timers->tt_keep, TP_KEEPIDLE(tp) - idletime, tcp_timer_keep, tp); INP_WUNLOCK(inp); CURVNET_RESTORE(); return; } } /* * Keep-alive timer went off; send something * or drop connection if idle for too long. */ TCPSTAT_INC(tcps_keeptimeo); if (tp->t_state < TCPS_ESTABLISHED) goto dropit; if ((tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && tp->t_state <= TCPS_CLOSING) { if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) goto dropit; /* * Send a packet designed to force a response * if the peer is up and reachable: * either an ACK if the connection is still alive, * or an RST if the peer has closed the connection * due to timeout or reboot. * Using sequence number tp->snd_una-1 * causes the transmitted zero-length segment * to lie outside the receive window; * by the protocol spec, this requires the * correspondent TCP to respond. 
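/*
 * A sketch of the rescheduling arithmetic in tcp_timer_keep() above.
 * Because the keepalive callout is not reset on every segment while
 * ESTABLISHED, it can fire "early"; the handler then re-arms itself for
 * the remaining idle time instead of probing.  The tick values and the
 * KEEPIDLE constant are invented for the example.
 */
#include <stdio.h>

#define KEEPIDLE	7200	/* idle threshold before probing, in ticks */

/* Returns 0 when a probe is due now, otherwise the ticks to re-arm for. */
static int
keepalive_rearm(int ticks_now, int last_rcv_ticks)
{
	int idletime = ticks_now - last_rcv_ticks;

	if (idletime < KEEPIDLE)
		return (KEEPIDLE - idletime);
	return (0);
}

int
main(void)
{
	printf("re-arm for %d ticks\n", keepalive_rearm(10000, 5000));
	printf("probe now: %d\n", keepalive_rearm(20000, 5000));
	return (0);
}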
*/ TCPSTAT_INC(tcps_keepprobe); t_template = tcpip_maketemplate(inp); if (t_template) { tcp_respond(tp, t_template->tt_ipgen, &t_template->tt_t, (struct mbuf *)NULL, tp->rcv_nxt, tp->snd_una - 1, 0); free(t_template, M_TEMP); } callout_reset(&tp->t_timers->tt_keep, TP_KEEPINTVL(tp), tcp_timer_keep, tp); } else callout_reset(&tp->t_timers->tt_keep, TP_KEEPIDLE(tp), tcp_timer_keep, tp); #ifdef TCPDEBUG if (inp->inp_socket->so_options & SO_DEBUG) tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0, PRU_SLOWTIMO); #endif TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO); INP_WUNLOCK(inp); CURVNET_RESTORE(); return; dropit: TCPSTAT_INC(tcps_keepdrops); if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { tcp_inpinfo_lock_del(inp, tp); goto out; } NET_EPOCH_ENTER(et); tp = tcp_drop(tp, ETIMEDOUT); #ifdef TCPDEBUG if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0, PRU_SLOWTIMO); #endif TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO); NET_EPOCH_EXIT(et); tcp_inpinfo_lock_del(inp, tp); out: CURVNET_RESTORE(); } void tcp_timer_persist(void *xtp) { struct tcpcb *tp = xtp; struct inpcb *inp; struct epoch_tracker et; CURVNET_SET(tp->t_vnet); #ifdef TCPDEBUG int ostate; ostate = tp->t_state; #endif inp = tp->t_inpcb; KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); INP_WLOCK(inp); if (callout_pending(&tp->t_timers->tt_persist) || !callout_active(&tp->t_timers->tt_persist)) { INP_WUNLOCK(inp); CURVNET_RESTORE(); return; } callout_deactivate(&tp->t_timers->tt_persist); if ((inp->inp_flags & INP_DROPPED) != 0) { INP_WUNLOCK(inp); CURVNET_RESTORE(); return; } KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0, ("%s: tp %p tcpcb can't be stopped here", __func__, tp)); /* * Persistence timer into zero window. * Force a byte to be output, if possible. */ TCPSTAT_INC(tcps_persisttimeo); /* * Hack: if the peer is dead/unreachable, we do not * time out if the window is closed. After a full * backoff, drop the connection if the idle time * (no responses to probes) reaches the maximum * backoff that we would use if retransmitting. */ if (tp->t_rxtshift == TCP_MAXRXTSHIFT && (ticks - tp->t_rcvtime >= tcp_maxpersistidle || ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) { TCPSTAT_INC(tcps_persistdrop); if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { tcp_inpinfo_lock_del(inp, tp); goto out; } NET_EPOCH_ENTER(et); tp = tcp_drop(tp, ETIMEDOUT); NET_EPOCH_EXIT(et); tcp_inpinfo_lock_del(inp, tp); goto out; } /* * If the user has closed the socket then drop a persisting * connection after a much reduced timeout. 
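/*
 * A sketch of the persist-timer drop test above: once the retransmit
 * shift has hit its ceiling, the connection is dropped when the quiet
 * time exceeds either the persist-idle limit or the total backed-off
 * retransmit budget.  The two limit constants below are invented
 * stand-ins for tcp_maxpersistidle and TCP_REXMTVAL(tp) * tcp_totbackoff.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAXRXTSHIFT		12
#define MAXPERSISTIDLE		6000	/* ticks, invented */
#define TOTAL_BACKOFF_BUDGET	(30 * 2559)	/* base RTO times sum of backoffs */

static bool
persist_should_drop(int rxtshift, int idle_ticks)
{
	if (rxtshift != MAXRXTSHIFT)
		return (false);
	return (idle_ticks >= MAXPERSISTIDLE ||
	    idle_ticks >= TOTAL_BACKOFF_BUDGET);
}

int
main(void)
{
	printf("drop: %d\n", persist_should_drop(MAXRXTSHIFT, 7000));
	printf("drop: %d\n", persist_should_drop(3, 7000));
	return (0);
}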
*/ if (tp->t_state > TCPS_CLOSE_WAIT && (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { TCPSTAT_INC(tcps_persistdrop); if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { tcp_inpinfo_lock_del(inp, tp); goto out; } NET_EPOCH_ENTER(et); tp = tcp_drop(tp, ETIMEDOUT); NET_EPOCH_EXIT(et); tcp_inpinfo_lock_del(inp, tp); goto out; } tcp_setpersist(tp); tp->t_flags |= TF_FORCEDATA; (void) tp->t_fb->tfb_tcp_output(tp); tp->t_flags &= ~TF_FORCEDATA; #ifdef TCPDEBUG if (tp != NULL && tp->t_inpcb->inp_socket->so_options & SO_DEBUG) tcp_trace(TA_USER, ostate, tp, NULL, NULL, PRU_SLOWTIMO); #endif TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO); INP_WUNLOCK(inp); out: CURVNET_RESTORE(); } void tcp_timer_rexmt(void * xtp) { struct tcpcb *tp = xtp; CURVNET_SET(tp->t_vnet); int rexmt; struct inpcb *inp; struct epoch_tracker et; #ifdef TCPDEBUG int ostate; ostate = tp->t_state; #endif inp = tp->t_inpcb; KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); INP_WLOCK(inp); if (callout_pending(&tp->t_timers->tt_rexmt) || !callout_active(&tp->t_timers->tt_rexmt)) { INP_WUNLOCK(inp); CURVNET_RESTORE(); return; } callout_deactivate(&tp->t_timers->tt_rexmt); if ((inp->inp_flags & INP_DROPPED) != 0) { INP_WUNLOCK(inp); CURVNET_RESTORE(); return; } KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0, ("%s: tp %p tcpcb can't be stopped here", __func__, tp)); tcp_free_sackholes(tp); TCP_LOG_EVENT(tp, NULL, NULL, NULL, TCP_LOG_RTO, 0, 0, NULL, false); if (tp->t_fb->tfb_tcp_rexmit_tmr) { /* The stack has a timer action too. */ (*tp->t_fb->tfb_tcp_rexmit_tmr)(tp); } /* * Retransmission timer went off. Message has not * been acked within retransmit interval. Back off * to a longer retransmit interval and retransmit one segment. */ if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) { tp->t_rxtshift = TCP_MAXRXTSHIFT; TCPSTAT_INC(tcps_timeoutdrop); if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { tcp_inpinfo_lock_del(inp, tp); goto out; } NET_EPOCH_ENTER(et); tp = tcp_drop(tp, ETIMEDOUT); NET_EPOCH_EXIT(et); tcp_inpinfo_lock_del(inp, tp); goto out; } if (tp->t_state == TCPS_SYN_SENT) { /* * If the SYN was retransmitted, indicate CWND to be * limited to 1 segment in cc_conn_init(). */ tp->snd_cwnd = 1; } else if (tp->t_rxtshift == 1) { /* * first retransmit; record ssthresh and cwnd so they can * be recovered if this turns out to be a "bad" retransmit. * A retransmit is considered "bad" if an ACK for this * segment is received within RTT/2 interval; the assumption * here is that the ACK was already in flight. See * "On Estimating End-to-End Network Path Properties" by * Allman and Paxson for more details. 
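/*
 * A sketch of the "bad retransmit" window set just below.  t_srtt is a
 * fixed-point value scaled by 2^TCP_RTT_SHIFT, so shifting it right by
 * TCP_RTT_SHIFT + 1 yields half the smoothed RTT in ticks; an ACK that
 * arrives before that deadline was probably already in flight when we
 * retransmitted.  TCP_RTT_SHIFT == 5 matches the stack's definition; the
 * tick numbers are invented.
 */
#include <stdio.h>

#define TCP_RTT_SHIFT	5

int
main(void)
{
	int ticks = 100000;			/* current tick counter */
	int t_srtt = 200 << TCP_RTT_SHIFT;	/* smoothed RTT of 200 ticks */
	int badrxtwin;

	badrxtwin = ticks + (t_srtt >> (TCP_RTT_SHIFT + 1));
	printf("badrxtwin = %d (now + %d ticks)\n",
	    badrxtwin, badrxtwin - ticks);	/* now + RTT/2 */
	return (0);
}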
*/ tp->snd_cwnd_prev = tp->snd_cwnd; tp->snd_ssthresh_prev = tp->snd_ssthresh; tp->snd_recover_prev = tp->snd_recover; if (IN_FASTRECOVERY(tp->t_flags)) tp->t_flags |= TF_WASFRECOVERY; else tp->t_flags &= ~TF_WASFRECOVERY; if (IN_CONGRECOVERY(tp->t_flags)) tp->t_flags |= TF_WASCRECOVERY; else tp->t_flags &= ~TF_WASCRECOVERY; if ((tp->t_flags & TF_RCVD_TSTMP) == 0) tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1)); /* In the event that we've negotiated timestamps * badrxtwin will be set to the value that we set * the retransmitted packet's to_tsval to by tcp_output */ tp->t_flags |= TF_PREVVALID; } else tp->t_flags &= ~TF_PREVVALID; TCPSTAT_INC(tcps_rexmttimeo); if ((tp->t_state == TCPS_SYN_SENT) || (tp->t_state == TCPS_SYN_RECEIVED)) rexmt = tcp_rexmit_initial * tcp_backoff[tp->t_rxtshift]; else rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift]; TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX); /* * We enter the path for PLMTUD if connection is established or, if * connection is FIN_WAIT_1 status, reason for the last is that if * amount of data we send is very small, we could send it in couple of * packets and process straight to FIN. In that case we won't catch * ESTABLISHED state. */ if (V_tcp_pmtud_blackhole_detect && (((tp->t_state == TCPS_ESTABLISHED)) || (tp->t_state == TCPS_FIN_WAIT_1))) { #ifdef INET6 int isipv6; #endif /* * Idea here is that at each stage of mtu probe (usually, 1448 * -> 1188 -> 524) should be given 2 chances to recover before * further clamping down. 'tp->t_rxtshift % 2 == 0' should * take care of that. */ if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) == (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) && (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && tp->t_rxtshift % 2 == 0)) { /* * Enter Path MTU Black-hole Detection mechanism: * - Disable Path MTU Discovery (IP "DF" bit). * - Reduce MTU to lower value than what we * negotiated with peer. */ if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { /* Record that we may have found a black hole. */ tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; /* Keep track of previous MSS. */ tp->t_pmtud_saved_maxseg = tp->t_maxseg; } /* * Reduce the MSS to blackhole value or to the default * in an attempt to retransmit. */ #ifdef INET6 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? 1 : 0; if (isipv6 && tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { /* Use the sysctl tuneable blackhole MSS. */ tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; TCPSTAT_INC(tcps_pmtud_blackhole_activated); } else if (isipv6) { /* Use the default MSS. */ tp->t_maxseg = V_tcp_v6mssdflt; /* * Disable Path MTU Discovery when we switch to * minmss. */ tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); } #endif #if defined(INET6) && defined(INET) else #endif #ifdef INET if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { /* Use the sysctl tuneable blackhole MSS. */ tp->t_maxseg = V_tcp_pmtud_blackhole_mss; TCPSTAT_INC(tcps_pmtud_blackhole_activated); } else { /* Use the default MSS. */ tp->t_maxseg = V_tcp_mssdflt; /* * Disable Path MTU Discovery when we switch to * minmss. */ tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); } #endif /* * Reset the slow-start flight size * as it may depend on the new MSS. */ if (CC_ALGO(tp)->conn_init != NULL) CC_ALGO(tp)->conn_init(tp->ccv); } else { /* * If further retransmissions are still unsuccessful * with a lowered MTU, maybe this isn't a blackhole and * we restore the previous MSS and blackhole detection * flags. 
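/*
 * A sketch of how the retransmit timeout above grows with t_rxtshift
 * using the tcp_backoff[] table and is then clamped the way
 * TCPT_RANGESET() clamps it.  The base RTO and the bounds are invented
 * tick values; the backoff table is copied from this file.
 */
#include <stdio.h>

static const int backoff[] =
    { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512 };

static int
rexmt_timeout(int base_rto, int shift, int rttmin, int rexmtmax)
{
	int rexmt = base_rto * backoff[shift];

	if (rexmt < rttmin)		/* clamp into [rttmin, rexmtmax] */
		rexmt = rttmin;
	else if (rexmt > rexmtmax)
		rexmt = rexmtmax;
	return (rexmt);
}

int
main(void)
{
	int shift;

	for (shift = 0; shift <= 12; shift++)
		printf("shift %2d -> %d ticks\n",
		    shift, rexmt_timeout(30, shift, 30, 6400));
	return (0);
}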
* The limit '6' is determined by giving each probe * stage (1448, 1188, 524) 2 chances to recover. */ if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && (tp->t_rxtshift >= 6)) { tp->t_flags2 |= TF2_PLPMTU_PMTUD; tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; tp->t_maxseg = tp->t_pmtud_saved_maxseg; TCPSTAT_INC(tcps_pmtud_blackhole_failed); /* * Reset the slow-start flight size as it * may depend on the new MSS. */ if (CC_ALGO(tp)->conn_init != NULL) CC_ALGO(tp)->conn_init(tp->ccv); } } } /* * Disable RFC1323 and SACK if we haven't got any response to * our third SYN to work-around some broken terminal servers * (most of which have hopefully been retired) that have bad VJ * header compression code which trashes TCP segments containing * unknown-to-them TCP options. */ if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && (tp->t_rxtshift == 3)) tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); /* * If we backed off this far, notify the L3 protocol that we're having * connection problems. */ if (tp->t_rxtshift > TCP_RTT_INVALIDATE) { #ifdef INET6 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) in6_losing(tp->t_inpcb); else #endif in_losing(tp->t_inpcb); } tp->snd_nxt = tp->snd_una; tp->snd_recover = tp->snd_max; /* * Force a segment to be sent. */ tp->t_flags |= TF_ACKNOW; /* * If timing a segment in this window, stop the timer. */ tp->t_rtttime = 0; cc_cong_signal(tp, NULL, CC_RTO); (void) tp->t_fb->tfb_tcp_output(tp); #ifdef TCPDEBUG if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0, PRU_SLOWTIMO); #endif TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO); INP_WUNLOCK(inp); out: CURVNET_RESTORE(); } void tcp_timer_activate(struct tcpcb *tp, uint32_t timer_type, u_int delta) { struct callout *t_callout; - timeout_t *f_callout; + callout_func_t *f_callout; struct inpcb *inp = tp->t_inpcb; int cpu = inp_to_cpuid(inp); #ifdef TCP_OFFLOAD if (tp->t_flags & TF_TOE) return; #endif if (tp->t_timers->tt_flags & TT_STOPPED) return; switch (timer_type) { case TT_DELACK: t_callout = &tp->t_timers->tt_delack; f_callout = tcp_timer_delack; break; case TT_REXMT: t_callout = &tp->t_timers->tt_rexmt; f_callout = tcp_timer_rexmt; break; case TT_PERSIST: t_callout = &tp->t_timers->tt_persist; f_callout = tcp_timer_persist; break; case TT_KEEP: t_callout = &tp->t_timers->tt_keep; f_callout = tcp_timer_keep; break; case TT_2MSL: t_callout = &tp->t_timers->tt_2msl; f_callout = tcp_timer_2msl; break; default: if (tp->t_fb->tfb_tcp_timer_activate) { tp->t_fb->tfb_tcp_timer_activate(tp, timer_type, delta); return; } panic("tp %p bad timer_type %#x", tp, timer_type); } if (delta == 0) { callout_stop(t_callout); } else { callout_reset_on(t_callout, delta, f_callout, tp, cpu); } } int tcp_timer_active(struct tcpcb *tp, uint32_t timer_type) { struct callout *t_callout; switch (timer_type) { case TT_DELACK: t_callout = &tp->t_timers->tt_delack; break; case TT_REXMT: t_callout = &tp->t_timers->tt_rexmt; break; case TT_PERSIST: t_callout = &tp->t_timers->tt_persist; break; case TT_KEEP: t_callout = &tp->t_timers->tt_keep; break; case TT_2MSL: t_callout = &tp->t_timers->tt_2msl; break; default: if (tp->t_fb->tfb_tcp_timer_active) { return(tp->t_fb->tfb_tcp_timer_active(tp, timer_type)); } panic("tp %p bad timer_type %#x", tp, timer_type); } return callout_active(t_callout); } /* * Stop the timer from running, and apply a flag * against the timer_flags that will force the * timer never to run. 
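/*
 * A userland sketch of what the timeout_t -> callout_func_t change in
 * tcp_timer_activate() above (and in the uart and ng_netflow hunks
 * earlier in this diff) amounts to: both are function typedefs of the
 * same shape, "void fn(void *)", so handlers such as tcp_timer_rexmt()
 * keep their bodies and only the typedef named in prototypes and casts
 * changes.  The local typedef and helper below are for illustration only.
 */
#include <stdio.h>

typedef void callout_func_t(void *);	/* same shape as the kernel typedef */

static void
my_handler(void *arg)
{
	printf("timer fired, arg=%p\n", arg);
}

/* A consumer can accept the handler through the typedef... */
static void
arm_timer(callout_func_t *fn, void *arg)
{
	/* ...and invoke (or, in the kernel, schedule) it later. */
	fn(arg);
}

int
main(void)
{
	arm_timer(my_handler, NULL);
	return (0);
}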
The flag is needed to assure * a race does not leave it running and cause * the timer to possibly restart itself (keep and persist * especially do this). */ int tcp_timer_suspend(struct tcpcb *tp, uint32_t timer_type) { struct callout *t_callout; uint32_t t_flags; switch (timer_type) { case TT_DELACK: t_flags = TT_DELACK_SUS; t_callout = &tp->t_timers->tt_delack; break; case TT_REXMT: t_flags = TT_REXMT_SUS; t_callout = &tp->t_timers->tt_rexmt; break; case TT_PERSIST: t_flags = TT_PERSIST_SUS; t_callout = &tp->t_timers->tt_persist; break; case TT_KEEP: t_flags = TT_KEEP_SUS; t_callout = &tp->t_timers->tt_keep; break; case TT_2MSL: t_flags = TT_2MSL_SUS; t_callout = &tp->t_timers->tt_2msl; break; default: panic("tp:%p bad timer_type 0x%x", tp, timer_type); } tp->t_timers->tt_flags |= t_flags; return (callout_stop(t_callout)); } void tcp_timers_unsuspend(struct tcpcb *tp, uint32_t timer_type) { switch (timer_type) { case TT_DELACK: if (tp->t_timers->tt_flags & TT_DELACK_SUS) { tp->t_timers->tt_flags &= ~TT_DELACK_SUS; if (tp->t_flags & TF_DELACK) { /* Delayed ack timer should be up activate a timer */ tp->t_flags &= ~TF_DELACK; tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); } } break; case TT_REXMT: if (tp->t_timers->tt_flags & TT_REXMT_SUS) { tp->t_timers->tt_flags &= ~TT_REXMT_SUS; if (SEQ_GT(tp->snd_max, tp->snd_una) && (tcp_timer_active((tp), TT_PERSIST) == 0) && tp->snd_wnd) { /* We have outstanding data activate a timer */ tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); } } break; case TT_PERSIST: if (tp->t_timers->tt_flags & TT_PERSIST_SUS) { tp->t_timers->tt_flags &= ~TT_PERSIST_SUS; if (tp->snd_wnd == 0) { /* Activate the persists timer */ tp->t_rxtshift = 0; tcp_setpersist(tp); } } break; case TT_KEEP: if (tp->t_timers->tt_flags & TT_KEEP_SUS) { tp->t_timers->tt_flags &= ~TT_KEEP_SUS; tcp_timer_activate(tp, TT_KEEP, TCPS_HAVEESTABLISHED(tp->t_state) ? TP_KEEPIDLE(tp) : TP_KEEPINIT(tp)); } break; case TT_2MSL: if (tp->t_timers->tt_flags &= TT_2MSL_SUS) { tp->t_timers->tt_flags &= ~TT_2MSL_SUS; if ((tp->t_state == TCPS_FIN_WAIT_2) && ((tp->t_inpcb->inp_socket == NULL) || (tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE))) { /* Star the 2MSL timer */ tcp_timer_activate(tp, TT_2MSL, (tcp_fast_finwait2_recycle) ? tcp_finwait2_timeout : TP_MAXIDLE(tp)); } } break; default: panic("tp:%p bad timer_type 0x%x", tp, timer_type); } } void tcp_timer_stop(struct tcpcb *tp, uint32_t timer_type) { struct callout *t_callout; tp->t_timers->tt_flags |= TT_STOPPED; switch (timer_type) { case TT_DELACK: t_callout = &tp->t_timers->tt_delack; break; case TT_REXMT: t_callout = &tp->t_timers->tt_rexmt; break; case TT_PERSIST: t_callout = &tp->t_timers->tt_persist; break; case TT_KEEP: t_callout = &tp->t_timers->tt_keep; break; case TT_2MSL: t_callout = &tp->t_timers->tt_2msl; break; default: if (tp->t_fb->tfb_tcp_timer_stop) { /* * XXXrrs we need to look at this with the * stop case below (flags). */ tp->t_fb->tfb_tcp_timer_stop(tp, timer_type); return; } panic("tp %p bad timer_type %#x", tp, timer_type); } if (callout_async_drain(t_callout, tcp_timer_discard) == 0) { /* * Can't stop the callout, defer tcpcb actual deletion * to the last one. We do this using the async drain * function and incrementing the count in */ tp->t_timers->tt_draincnt++; } }
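/*
 * A userland sketch of the deferred-teardown idea behind tt_draincnt
 * above: when a callout cannot be stopped synchronously, a counter is
 * bumped and the final free happens only when the last outstanding drain
 * callback runs.  The struct and function names here are invented
 * stand-ins, with no real callouts involved.
 */
#include <stdio.h>
#include <stdlib.h>

struct timers {
	int	draincnt;	/* timers still waiting for their drain callback */
};

static void
timer_discard(struct timers *t)
{
	/* Invoked once per timer that could not be stopped in place. */
	if (--t->draincnt == 0) {
		printf("last drain done, freeing timer block\n");
		free(t);
	}
}

int
main(void)
{
	struct timers *t = calloc(1, sizeof(*t));

	if (t == NULL)
		return (1);
	t->draincnt = 2;	/* pretend two callouts were still in flight */
	timer_discard(t);	/* first drain: block kept */
	timer_discard(t);	/* second drain: block freed */
	return (0);
}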